Example no. 1
 def __init__(self):
     super(EventProcessor, self).__init__()
     self.message_handler = MessageHandler()
     #self.gps = GPS()
     Message.init(self.message_handler.plugin.address())
     Detector.start_plugins()
     Reactor.add_plugin_events()
Example no. 2
 def run(self):
     '''Reads assignments from Assignment_* folders, student numbers from
        their Assignment_x folder, generates a zip of each students' code
        files and runs the Detector on it'''
     assignment_id_map = {}
     assignments = self.get_folder_names(self.path)
     database = DatabaseManager()
     for assignment in assignments:
         document_assignment_number = self.get_assignment_number(assignment)
         if document_assignment_number not in (6, 5):
             continue
         if document_assignment_number not in assignment_id_map:
             name = 'Assignment %d' % document_assignment_number
             assignment_id_map[document_assignment_number] = database.store_assignment(name, name, '2015-01-01')
         assignment_number = assignment_id_map[document_assignment_number]
         assignment_path = os.path.join(self.path, assignment)
         student_numbers = self.get_folder_names(assignment_path)
         for student_number in student_numbers:
             student_folder = os.path.join(assignment_path, student_number)
             output_file = StringIO()
             zip = zipfile.ZipFile(output_file, 'w')
             for file in self.get_file_names(student_folder):
                 zip.write(os.path.join(student_folder, file), file)
                 print('zipping: ' + file)
             zip.close()
             detector = Detector()
             detector.run(output_file, assignment_number, student_number)
Example no. 3
class DetectorUI(QtGui.QWidget):
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        QtCore.QObject.connect(self.ui.startButton,QtCore.SIGNAL("clicked()"), self.start)
        QtCore.QObject.connect(self.ui.pathButton,QtCore.SIGNAL("clicked()"), self.set_source_file_path)
        QtCore.QObject.connect(self.ui.patternCheckBox,QtCore.SIGNAL("clicked()"), self.patternClicked)
        self.detector = Detector()
        
    def start(self):
        if self.detector.isRunning():
            self.setWindowTitle('Stopped')
            self.detector.stop()
            self.ui.startButton.setText("Start")
        else:
            self.setWindowTitle('Running')
            self.detector.start()
            self.ui.startButton.setText("Stop")
            
    def set_source_file_path(self):
        dir = os.path.dirname(".")
        fileName = QtGui.QFileDialog.getOpenFileName(self, "Open data file", dir, "Text Files (*.txt)")
        #Todo set the txt file as the data source
        self.ui.pathEdit.setText(fileName)

    def patternClicked(self):
        if self.ui.patternCheckBox.isChecked():
            # output data pattern
            print "pattern enabled"
        else:
            # output data source
            print "pattern disabled"
Example no. 4
 def __init__(self, modelNb=3):
   """Load the tensorflow model of VGG16-GAP trained on caltech
   
   Keyword arguments:
     modelNb -- iteration of the model to consider
   """
   dataset_path = '/home/cuda/datasets/perso_db/'
   trainset_path = dataset_path+'train.pkl'
   testset_path  = dataset_path+'test.pkl'
   weight_path = '../caffe_layers_value.pickle'
   model_path = '../models/perso/model-'+str(modelNb)
   
   # load labels
   testset    = pickle.load( open(testset_path,  "rb") )
   self.label_dict = testset.keys()
   n_labels = len(self.label_dict)
   
   # Initialize some tensorflow variables
   batch_size = 1
   self.images_tf = tf.placeholder( tf.float32, [None, 224, 224, 3], name="images")
   self.labels_tf = tf.placeholder( tf.int64, [None], name='labels')
   
   detector = Detector( weight_path, n_labels )
   c1,c2,c3,c4,conv5, self.conv6, gap, self.output = detector.inference( self.images_tf )
   self.classmap = detector.get_classmap( self.labels_tf, self.conv6 )
   
   self.sess = tf.InteractiveSession()
   saver = tf.train.Saver()
   saver.restore( self.sess, model_path )
Example no. 5
def main(argv=None):  # pylint: disable=unused-argument
    assert args.ckpt > 0 or args.batch_eval
    assert args.detect or args.segment, "Either detect or segment should be True"
    if args.trunk == 'resnet50':
        net = ResNet
        depth = 50
    if args.trunk == 'resnet101':
        net = ResNet
        depth = 101
    if args.trunk == 'vgg16':
        net = VGG
        depth = 16

    net = net(config=net_config, depth=depth, training=False)

    if args.dataset == 'voc07' or args.dataset == 'voc07+12':
        loader = VOCLoader('07', 'test')
    if args.dataset == 'voc12':
        loader = VOCLoader('12', 'val', segmentation=args.segment)
    if args.dataset == 'coco':
        loader = COCOLoader(args.split)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        detector = Detector(sess, net, loader, net_config, no_gt=args.no_seg_gt)
        if args.dataset == 'coco':
            tester = COCOEval(detector, loader)
        else:
            tester = Evaluation(detector, loader, iou_thresh=args.voc_iou_thresh)
        if not args.batch_eval:
            detector.restore_from_ckpt(args.ckpt)
            tester.evaluate_network(args.ckpt)
        else:
            log.info('Evaluating %s' % args.run_name)
            ckpts_folder = CKPT_ROOT + args.run_name + '/'
            out_file = ckpts_folder + evaluation_logfile

            max_checked = get_last_eval(out_file)
            log.debug("Maximum checked ckpt is %i" % max_checked)
            with open(out_file, 'a') as f:
                start = max(args.min_ckpt, max_checked+1)
                ckpt_files = glob(ckpts_folder + '*.data*')
                folder_has_nums = np.array(list((map(filename2num, ckpt_files))), dtype='int')
                nums_available = sorted(folder_has_nums[folder_has_nums >= start])
                nums_to_eval = [nums_available[-1]]
                for n in reversed(nums_available):
                    if nums_to_eval[-1] - n >= args.step:
                        nums_to_eval.append(n)
                nums_to_eval.reverse()

                for ckpt in nums_to_eval:
                    log.info("Evaluation of ckpt %i" % ckpt)
                    tester.reset()
                    detector.restore_from_ckpt(ckpt)
                    res = tester.evaluate_network(ckpt)
                    f.write(res)
                    f.flush()
Example no. 6
 def populateUI(self):
     self.availableObjectsList.addItems(Detector.getDefaultAvailableObjects())
     for sourceMode in self.__sourceModes:
         self.sourceCBox.addItem(sourceMode)
     for displayMode in self.__displayModes:
         self.displayCBox.addItem(displayMode)
     for shapeMode in self.__shapeModes:
         self.shapeCBox.addItem(shapeMode)
     for fillMode in self.__fillModes:
         self.fillCBox.addItem(fillMode)
     for bgMode in self.__bgModes:
         self.bgCBox.addItem(bgMode)
     model = QtGui.QStandardItemModel(self)
     func = lambda node, parent: self.populateTree(node, parent)
     Detector.getDefaultObjectsTree().map(model, func)
     self.objectsTree.setModel(model)
Example no. 7
    def free_at_position(new_sudoku, y, x):
        """
        Return all available choices in the sudoku at the current position.
        """
        now_available = Solver.available[:]
        init_avail = set(now_available)
        first_step_avail = init_avail.intersection(Detector.free_in_column(new_sudoku, x))
        second_step_avail = first_step_avail.intersection(Detector.free_in_row(new_sudoku, y))
        third_step_avail = second_step_avail.intersection(Detector.free_in_squere3x3(new_sudoku, y, x))
        return list(third_step_avail)
Example no. 8
def simulate_basic_with_pods(ptypy_pars_tree=None,sim_pars=None,save=False):
    """
    Basic Simulation
    """
    p = DEFAULT.copy()
    ppt = ptypy_pars_tree
    if ppt is not None:
        p.update(ppt.get('simulation'))
    if sim_pars is not None:
        p.update(sim_pars)
        
    P = ptypy.core.Ptycho(ppt,level=1)

    # make a data source that is basically empty
    P.datasource = make_sim_datasource(P.modelm,p.pos_drift,p.pos_scale,p.pos_noise)
    
    P.modelm.new_data()
    u.parallel.barrier()
    P.print_stats()
    
    # Propagate and apply psf for simulating partial coherence (if not already done with modes)
    for name,pod in P.pods.iteritems():
        if not pod.active: continue
        pod.diff += conv(u.abs2(pod.fw(pod.exit)),p.psf)
    
    # Filter storage data similar to a detector.
    if p.detector is not None:
        Det = Detector(p.detector)
        save_dtype = Det.dtype
        for ID,Sdiff in P.diff.S.items():
            # get the mask storage too, although its content will be overridden
            Smask = P.mask.S[ID]
            dat, mask = Det.filter(Sdiff.data)
            if p.frame_size is not None:
                hplanes = u.expect2(p.frame_size)-u.expect2(dat.shape[-2:])
                dat = u.crop_pad(dat,hplanes,axes=[-2,-1]).astype(dat.dtype)
                mask = u.crop_pad(mask,hplanes,axes=[-2,-1]).astype(mask.dtype)
            Sdiff.fill(dat)
            Smask.fill(mask) 
    else:
        save_dtype = None        
    
    if save:
        P.modelm.collect_diff_mask_meta(save=save,dtype=save_dtype)
           
    u.parallel.barrier()
    return P
Example no. 9
 def __init__(self, parent=None):
     QtGui.QWidget.__init__(self, parent)
     self.ui = Ui_Form()
     self.ui.setupUi(self)
     QtCore.QObject.connect(self.ui.startButton,QtCore.SIGNAL("clicked()"), self.start)
     QtCore.QObject.connect(self.ui.pathButton,QtCore.SIGNAL("clicked()"), self.set_source_file_path)
     QtCore.QObject.connect(self.ui.patternCheckBox,QtCore.SIGNAL("clicked()"), self.patternClicked)
     self.detector = Detector()
Example no. 10
 def init_detector(self):
     assert args.detect or args.segment, "Either detect or segment should be True"
     assert args.ckpt > 0, "Specify the number of checkpoint"
     net = ResNet(config=net_config, depth=50, training=False)
     self.loader = Loader(opj(EVAL_DIR, 'demodemo'))
     self.detector = Detector(self.sess, net, self.loader, net_config, no_gt=args.no_seg_gt,
                              folder=opj(self.loader.folder, 'output'))
     self.detector.restore_from_ckpt(args.ckpt)
Example no. 11
def is_person(image):
    det = Detector(image)
    faces = len(det.face())
    print "FACE: ", det.drawColors[(det.drawn - 1) % len(det.drawColors)], faces
    uppers = len(det.upper_body())
    print "UPPR: ", det.drawColors[(det.drawn - 1) % len(det.drawColors)], uppers
    fulls = len(det.full_body())
    print "FULL: ", det.drawColors[(det.drawn - 1) % len(det.drawColors)], fulls
    peds = len(det.pedestrian())
    print "PEDS: ", det.drawColors[(det.drawn - 1) % len(det.drawColors)], peds
    det.draw()
    det.overlay()
   
    return faces + uppers + fulls + peds
Example no. 12
 def __init__(self):
     rospy.init_node('detection_node')
     rospy.loginfo("Loading model")
     self.DET_PATH = os.path.join(os.path.dirname(__file__),
                                  'efficientdet_d1_coco17_tpu-32')
     self.mydetector = Detector(self.DET_PATH)
     self.objects = []
     self.counter = 10
     self.connectPepper()
Example no. 13
    def __init__(self, proxy_map, startup_check=False):
        super(SpecificWorker, self).__init__(proxy_map)
        self.Period = 2000
        if startup_check:
            self.startup_check()
        else:
            self.timer.timeout.connect(self.compute)
            self.timer.start(self.Period)

        # Load CenterTrack, our tracking module
        opt = opts().init()
        self.detector = Detector(opt)

        # ReID feature extractor
        self.reid_extractor = FeatureExtractor(
            model_name='shufflenet',
            model_path='/home/shubh/Downloads/shufflenet-bee1b265.pth.tar',
            device='cuda')
Example no. 14
def test_sliding_window_function():

    intruder_1 = IntruderSignal(known_intruder_1)
    intruder_2 = IntruderSignal(known_intruder_2)

    radar_signal = RadarSignal(radar_sample)

    min_hamming, min_hamming_start_index = Detector.sliding_window(radar_signal, intruder_1)
    assert (min_hamming, min_hamming_start_index) == (8, 1373)

    min_hamming, min_hamming_start_index = Detector.sliding_window(radar_signal, intruder_2)
    assert (min_hamming, min_hamming_start_index) == (8, 42)
Example no. 15
    def __init__(self):
        self.running = False
        self.isPaused = False
        self.detector = Detector()
        self.frameSource = FrameSource()
        self.music = Music()
        self.window = Window()

        self.finishSounds = {
            Finishsounds.success: 'mario-victory.wav',
            Finishsounds.failure: 'siren.wav',
            Finishsounds.timeIsUp: 'buzzer.wav'
        }
        self.soundtrackFile = 'mi-full.wav'
        self.duration = 30
        self.numKeypoints = 0
        self.currentKeypoints = None
        self.previousKeypoints = None
Example no. 16
class FrameAnalyzer:
    def __init__(self, cap=None):
        self.detector = Detector()
        self.color_counts = {"red": 0, "yellow": 0, "green": 0, "orange": 0, "white": 0, "black": 0, "blue": 0}
        self.detected = 0
        self.line_offset = 6   
        
        if cap is not None: 
            self.pos_line = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) * 0.763)  # position of the counting line
        else:
            self.pos_line = 300

        print("analyzer initialized")

    def reset(self):
        self.color_counts = {"red": 0, "yellow": 0, "green": 0, "orange": 0, "white": 0, "black": 0, "blue": 0}
        self.detected = 0     

    def analyze(self, frame):
        frame_ = frame.copy()
        coordinates = self.detector.detect(frame)

        if coordinates is None:
            return 

        #print(self.color_counts)

        for c1, c2 in coordinates:
            roi = frame_[c1[1]:c2[1], c1[0]:c2[0], :]

            color_histogram_feature_extraction.color_histogram_of_test_image(roi)
            color = knn_classifier.main('training.data', 'test.data')

            y = (c1[1] + c2[1]) / 2

            if (self.pos_line - self.line_offset) < y < (self.pos_line + self.line_offset):
                self.color_counts[color] += 1
                self.detected += 1
                cv2.line(frame, (0, self.pos_line ), (frame.shape[1], self.pos_line), (230, 126, 34), 3) 

            # rendering
            cv2.rectangle(frame, c1, c2, (0,0,0), 2)
            t_size = cv2.getTextSize(color, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
            c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
            cv2.rectangle(frame, c1, c2,(0,0,0),-1)
            cv2.putText(frame, color, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1)

    def plot_results(self, path='output/result.png'):
        colors = []
        colors.append(self.color_counts["red"])
        colors.append(self.color_counts["yellow"])
        colors.append(self.color_counts["green"])
        colors.append(self.color_counts["orange"])
        colors.append(self.color_counts["white"])
        colors.append(self.color_counts["black"])
        colors.append(self.color_counts["blue"])
        bar_draw(colors, path)
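
A minimal sketch of how this analyzer could be driven frame by frame; the video path 'traffic.mp4' is a hypothetical example, and cv2 is assumed to be imported as in the class above:

cap = cv2.VideoCapture('traffic.mp4')  # hypothetical input video
analyzer = FrameAnalyzer(cap)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    analyzer.analyze(frame)    # counts vehicles crossing the line and annotates the frame
cap.release()
analyzer.plot_results()        # writes the per-color bar chart to output/result.png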
Example no. 17
def load_modules():
    modules = {}
    # load detectors from database
    from detector import Detector
    Detector.load_to_dict(modules)

    # load alarm from database
    from alarm import Alarm
    Alarm.load_to_dict(modules)

    # load rfid
    from rfid import Rfid
    Rfid.load_to_dict(modules)

    from door import Door
    Door.load_to_dict(modules)

    return modules
Example no. 18
    def __init__(self, parent=None):
        super(Window, self).__init__(parent)

        self.detector = Detector()
        self.mediaThread = MediaThread(self)
        sys.stdout = common.EmittingStream(
            textWritten=self.normalOutputWritten)
        self.debugSignal.connect(self.debugTable)
        self.currentFrame = None
        self.bgColor = QColor(255, 255, 255)
        self.bgPath = ''

        self.classifiersParameters = {}

        self.setupUI()
        self.populateUI()
        self.connectUI()
        self.initUI()
Example no. 19
 def __init__(self):
     self.corner_detection_model = Detector(
         path_to_model='./models/identity_corner/model.tflite',
         path_to_labels='./models/identity_corner/label_map.pbtxt',
         nms_threshold=0.2,
         score_threshold=0.3)
     self.text_detection_model = Detector(
         path_to_model='./models/identity_card/model.tflite',
         path_to_labels='./models/identity_card/label_map.pbtxt',
         nms_threshold=0.2,
         score_threshold=0.2)
     self.text_detection_discharge = DetectorTF2(
         path_to_model='./models/discharge_record',
         path_to_labels='./models/discharge_record/label_map.pbtxt',
         nms_threshold=0.33,
         score_threshold=0.33)
     self.text_recognition_model = TextRecognition(
         path_to_checkpoint='./models/text_recogintion/transformerocr.pth')
Example no. 20
    def __init__(self, env, video_device):
        try:
            self.API_ENDPOINT = env['ApiEndpoint']
            self.FACE_AREA_THRESHOLD = env['FaceAreaThreshold']
            self.NAME_TTL_SEC = env['NameTtlSec']
            self.FACE_SIMILARITY_THRESHOLD = env['FaceSimilarityThreshold']
            self.COGNITO_USERPOOL_ID = env['CognitoUserPoolId']
            self.COGNITO_USERPOOL_CLIENT_ID = env['CognitoUserPoolClientId']
            self.REGION = env['Region']
        except KeyError:
            print('Invalid config file')
            raise

        self.recent_name_list = []
        self.registered_name_set = set()
        self.video_capture = VideoCapture(env, video_device)
        self.detector = Detector(env)
        self.viewer = Viewer(env)
Example no. 21
    def __init__(self):
        rospy.init_node('crazyflie_tracker')
        self.counter = 0
        self.countNotFound = 0
        self.pub_tf = tf.TransformBroadcaster()
        self.bridge = CvBridge()
        self.cf_state = 'idle'
        self.cf_cmd_vel = Twist()
        path = '/home/potix2/sampling'
        fps = 20
        self.detector = Detector(CaptureCamera(path, 640, 480, fps))
        self.detector.add_capture_callback(self._capture_state)
        self.detector.add_capture_callback(self._capture_cmd_vel)
        self.detector.set_mask_range_callback(self._mask_range)

        # publish transform rate at 50Hz
        self.rate = rospy.Rate(50.0)

        self.target_u = 0
        self.target_v = 0
        self.target_d = 0

        self.last_d = 0


        #self.r = 0
        self.y = 0
        self.r = -1.5708
        #self.y = -3.1415
        self.p = 0

        # subscribe to kinect image messages
        rospy.wait_for_message("/camera/rgb/camera_info", CameraInfo)
        rospy.wait_for_message("/camera/depth/camera_info", CameraInfo)

        # Subscribe to Realsense R200 camera_info to get image frame height and width
        rospy.Subscriber('/camera/rgb/camera_info', CameraInfo, self.camera_data, queue_size=1)
        rospy.Subscriber("/camera/rgb/image_rect_color", Image, self.image_callback, queue_size=1)
        rospy.Subscriber('/camera/depth/camera_info', CameraInfo, self.depth_camera_data, queue_size=1)
        rospy.Subscriber("/camera/depth/image", Image, self.depth_callback, queue_size=1)
        rospy.Subscriber('/crazyflie/state', String, self._cf_state_callback, queue_size=1)
        rospy.Subscriber('/crazyflie/cmd_vel', Twist, self._cf_cmd_vel_callback, queue_size=1)

        self.rate.sleep()
Example no. 22
def run_demo(args):
    ie = IECore()
    detector_person = Detector(ie, path_to_model_xml=args.model_od,
                              device=args.device,
                              label_class=args.person_label)

    single_human_pose_estimator = HumanPoseEstimator(ie, path_to_model_xml=args.model_hpe,
                                                  device=args.device)
    if args.input != '':
        img = cv2.imread(args.input[0], cv2.IMREAD_COLOR)
        frames_reader, delay = (VideoReader(args.input), 1) if img is None else (ImageReader(args.input), 0)
    else:
        raise ValueError('--input has to be set')

    presenter = monitors.Presenter(args.utilization_monitors, 25)
    for frame in frames_reader:
        bboxes = detector_person.detect(frame)
        human_poses = [single_human_pose_estimator.estimate(frame, bbox) for bbox in bboxes]

        presenter.drawGraphs(frame)

        colors = [(0, 0, 255),
                  (255, 0, 0), (0, 255, 0), (255, 0, 0), (0, 255, 0),
                  (255, 0, 0), (0, 255, 0), (255, 0, 0), (0, 255, 0),
                  (255, 0, 0), (0, 255, 0), (255, 0, 0), (0, 255, 0),
                  (255, 0, 0), (0, 255, 0), (255, 0, 0), (0, 255, 0)]

        for pose, bbox in zip(human_poses, bboxes):
            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), (255, 0, 0), 2)
            for id_kpt, kpt in enumerate(pose):
                cv2.circle(frame, (int(kpt[0]), int(kpt[1])), 3, colors[id_kpt], -1)

        cv2.putText(frame, 'summary: {:.1f} FPS (estimation: {:.1f} FPS / detection: {:.1f} FPS)'.format(
            float(1 / (detector_person.infer_time + single_human_pose_estimator.infer_time * len(human_poses))),
            float(1 / single_human_pose_estimator.infer_time),
            float(1 / detector_person.infer_time)), (5, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 200))
        if args.no_show:
            continue
        cv2.imshow('Human Pose Estimation Demo', frame)
        key = cv2.waitKey(delay)
        if key == 27:
            return
        presenter.handleKey(key)
    print(presenter.reportMeans())
Example no. 23
 def __init__(self, parent=None):
     super().__init__()
     self.detector = Detector()
     # self.height self.width
     # Define the widgets
     self.menubar = None
     self.file_menu = None
     self.pic_action = None
     self.pic_label = None
     self.output_label = None
     self.prev_button = None
     self.next_button = None
     self.detect_button = None
     self.slider = None
     # Image display
     self.chosen_pics = ["image.png"]
     self.cur_index = 0
     # Initialization
     self.init_ui()
Example no. 24
def main():
    dir_path = os.path.dirname(os.path.realpath(__file__))
    lines = "*门冬胰岛素 300u×1.000 Sig.5-5-5u 三餐前 qd"
    lines = lines.lower()
    f_output = open(dir_path + r'\results\med_prescription_parser.json', 'w', errors='ignore',
                    encoding='utf-8')
    det = Detector()
    try:
        prescs_lines = det.detector(lines)
        for line in prescs_lines:
            print(line)
            preparser = PrescParser()
            lines_res = preparser.parse(line)
            f_output.write(JSONObjectEncoder().to_json(lines_res))
            f_output.write('\n')
    except exceptions.NoPrescException:
        print(lines)
    finally:
        f_output.close()
Example no. 25
 def __init__(self, cam_address, cam_user, cam_pass, detector_options = {}):
     self.detector = Detector(**detector_options)
     self.cam_address = cam_address
     self.cam_user = cam_user
     self.cam_pass = cam_pass
     self.byts = bytes()
     try:
         cv2.namedWindow(self.detector.winName, cv2.WINDOW_AUTOSIZE)
     except AttributeError, e:
         pass
Example no. 26
def main(argv=None):  # pylint: disable=unused-argument
    assert args.detect or args.segment, "Either detect or segment should be True"
    assert args.ckpt > 0, "Specify the number of checkpoint"
    net = ResNet(config=net_config, depth=50, training=False)
    loader = Loader(osp.join(EVAL_DIR, 'demodemo'))


    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        detector = Detector(sess, net, loader, net_config, no_gt=args.no_seg_gt,
                            folder=osp.join(loader.folder, 'output'))
        detector.restore_from_ckpt(args.ckpt)
        for name in loader.get_filenames():
            image = loader.load_image(name)
            h, w = image.shape[:2]
            print('Processing {}'.format(name + loader.data_format))
            detector.feed_forward(img=image, name=name, w=w, h=h, draw=True,
                                  seg_gt=None, gt_bboxes=None, gt_cats=None)
    print('Done')
Example no. 27
def test_single_net(prefix, epoch, stage, attribute='landmark_pose'):
    model_path = '%s-%s' % (prefix, epoch)
    # load pnet model
    if stage == 12:
        detector = FcnDetector(P_Net, model_path)
    elif stage == 24:
        detector = Detector(R_Net, 24, 1, model_path)
    elif stage == 48:
        if 'landmark_pose' in attribute:
            detector = Detector(O_AUX_Net, 48, 1, model_path, aux_idx=3)
            #detector = Detector(JDAP_48Net_Landmark_Pose_Mean_Shape, 48, 1, model_path, aux_idx=3)
        elif 'landmark' in attribute:
            detector = Detector(JDAP_48Net_Landmark_Mean_Shape, 48, 1, model_path, aux_idx=1)
            #detector = Detector(JDAP_48Net_Landmark, 48, 1, model_path, aux_idx=1)
        elif 'pose' in attribute:
            detector = Detector(JDAP_48Net_Pose, 48, 1, model_path, aux_idx=2)
        else:
            detector = Detector(O_Net, 48, 1, model_path)
    return detector
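
A hypothetical call, for illustration only; the prefix 'model/ONet/onet' and epoch 16 are made-up values, not paths from the original project:

# Build the 48-net detector with landmark and pose heads from epoch-16 weights.
detector = test_single_net('model/ONet/onet', epoch=16, stage=48, attribute='landmark_pose')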
Example no. 28
    def init_components(self, **kwargs):
        print('Setting up spectrometer')

        source = kwargs.get('source', 'blue')
        detector = kwargs.get('detector', 0)
        assert isinstance(source, str)
        assert isinstance(detector, int)

        self.source = Source(source)
        self.detector = Detector(detector)
Example no. 29
    def execute(self, message, draw=False):

        train = True if len(message) == 4 else False

        image = NNConnector.fileToImage(message[0])
        gtKeyPoints = NNConnector.byteToKPs(message[1])
        cvKeyPoints = NNConnector.byteToKPs(message[2])

        test_pts = Detector(image, gtKeyPoints, cvKeyPoints, train)
        return np.array2string(test_pts(draw))
Example no. 30
def recognizeCardNames(inputFolder, outputFolder, visualize):
    sym = SymspellMTGNames()
    lstm = LSTMClf()
    detector = Detector()
    allResults = []

    imgPaths = findInputs(inputFolder)
    for imgPath in imgPaths:
        _dir, imgName = path.split(imgPath)
        img = cv2.imread(str(imgPath), cv2.IMREAD_GRAYSCALE)
        imgc = cv2.imread(str(imgPath))
        img = squareResize(img)
        imgc = squareResize(imgc, grayScale=False)
        labels_boxes = detector.getNameBoxes(img, imgc)

        cards = []
        boxes = []

        for _label, box in labels_boxes:
            cropped = None
            try:
                cropped = crop(img, box)
            except:
                # Exception is thrown if the box has invalid size
                continue

            boxes.append(box)
            cards.append("")

            prediction = lstm.read_image(
                cropped.transpose())  # Transpose to column major format
            if len(prediction) > 0:
                # Add detection if there's a valid card name left after post processing
                predStr = prediction.lstrip()
                if len(predStr) > 0:
                    corrected, _distance = sym.lookup(predStr)
                    if corrected is not None:
                        cards[len(cards) - 1] = corrected

        allResults.append((imgName, imgc, cards, boxes))

    return allResults
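
A possible invocation, with the folder names purely illustrative; it assumes the same SymspellMTGNames, LSTMClf and Detector dependencies used above:

results = recognizeCardNames('input_scans/', 'output/', visualize=False)
for imgName, imgc, cards, boxes in results:
    print(imgName, [c for c in cards if c])   # recognized card names per image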
Example no. 31
def main():
    args = build_argparser().parse_args()

    ie = IECore()
    detector = Detector(ie, args.model, args.prob_threshold, args.device)

    img = cv2.imread(args.input[0], cv2.IMREAD_COLOR)
    frames_reader, delay = (VideoReader(args.input), 1) if img is None else (ImageReader(args.input), 0)

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    presenter = monitors.Presenter(args.utilization_monitors, 25)
    for frame in frames_reader:
        detections = detector.detect(frame)
        presenter.drawGraphs(frame)
        for det in detections:
            xmin, ymin, xmax, ymax = det[:4].astype(np.int)
            xmin = max(0, xmin)
            ymin = max(0, ymin)
            xmax = min(frame.shape[1], xmax)
            ymax = min(frame.shape[0], ymax)
            class_id = det[5]
            det_label = labels_map[int(class_id)] if labels_map else str(int(class_id))
            color = (min(class_id * 12.5, 255), min(class_id * 7, 255), min(class_id * 3, 255))
            cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
            cv2.putText(frame, det_label + ' ' + str(round(det[4] * 100, 1)) + ' %', (xmin, ymin - 7),
                         cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)

        cv2.putText(frame, 'summary: {:.1f} FPS'.format(
            1.0 / detector.infer_time), (5, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 200))
        if args.no_show:
            continue
        cv2.imshow('CenterNet Detection Demo', frame)
        key = cv2.waitKey(delay)
        if key == 27:
            return
        presenter.handleKey(key)
    print(presenter.reportMeans())
Example no. 32
def run():
    # Step 1: Initialization
    # Video: video_helper.py
    # Detection: detector.py
    # Result receiving: acceptor.py
    # Parameter configuration: config.py
    # Main controller: multiple_object_controller.py
    configs = Configs()
    detector = Detector(configs)
    acceptor = Acceptor(configs)
    video_helper = VideoHelper(configs)
    object_controller = MultipleObjectController(configs, video_helper)

    # Step 2: overall flow: main loop
    # A: for each object, detect on every frame, no tracking (smoothing optional)
    # B: for each object, track: a. this frame has a detection (+observation correction)
    #                            b. this frame has no detection (tracking only, pure prediction)
    cur_frame_counter = 0
    detection_loop_counter = 0
    while video_helper.not_finished(cur_frame_counter):
        # 0. get frame
        frame = video_helper.get_frame()

        # 1.1 detect on every frame
        if not configs.NUM_JUMP_FRAMES:
            detects = detector.detect(frame)
            object_controller.update(detects)
        else:
            # 1.2 detect only every few frames
            # 1.2.1 this frame has a detection
            if detection_loop_counter % configs.NUM_JUMP_FRAMES == 0:
                detection_loop_counter = 0
                detects = detector.detect(frame)
                object_controller.update(detects)  # core step
            # 1.2.2 no detection on this frame
            else:
                object_controller.update_without_detection()  # core step

        # deal with acceptor
        # ask acceptor do something
        cur_frame_counter += 1
        detection_loop_counter += 1
Example no. 33
    def __init__(self):
        super(TFDetectNode, self).__init__()

        # init the node
        rospy.init_node('tf_detect', anonymous=False)

        # Get the parameters
        (model_name, num_of_classes, label_file, camera_namespace, video_name,
            num_workers) \
        = self.get_parameters()

        # Create Detector
        self._detector = Detector(model_name, num_of_classes, label_file,
                                  num_workers)

        self._bridge = CvBridge()

        # Advertise the result of Object Detector
        self.pub_detections_data = rospy.Publisher('/detection/vision_objects',
                                                   DetectedObjectArray,
                                                   queue_size=1)
        # self.pub_detections_data = rospy.Publisher('/detections', DetectionArray, queue_size=1)

        # Advertise the result of Object Detector
        self.pub_detections_image = rospy.Publisher('/result_ripe',
                                                    Image,
                                                    queue_size=1)
        # self.pub_detections_image = rospy.Publisher( '/object_detection/detections_image', Image, queue_size=1)

        self.sub_rgb = rospy.Subscriber('/image_raw',
                                        Image,
                                        self.rgb_callback,
                                        queue_size=1,
                                        buff_size=2**24)
        # USE CAMERA TEST
        # self.sub_rgb = rospy.Subscriber('usb_cam/image_raw',Image, self.rgb_callback, queue_size=1, buff_size=2**24)

        # spin
        rospy.spin()
Example no. 34
def main():
    bulb = LED(environ.get('GPIO_LED'))
    water_gun = WaterGun(environ.get('GPIO_GUN'))

    with Detector(environ.get('PATH_TO_MODEL')) as detector:
        while detector.check():
            if detector.is_detect():
                bulb.blink(.5, .3)
                water_gun.fire(1)
            else:
                bulb.off()
Example no. 35
def main():
    seq = []
    images = glob.glob(path + '*.tif')
    for i in images:
        image = cv2.imread(i, cv2.IMREAD_GRAYSCALE)
        seq.append(image)

    preprocessor = Preprocessor(seq)
    detector = Detector(preprocessor)
    matcher = Matcher(detector)
    drawer = Drawer(matcher, preprocessor)

    masks = preprocessor.get_masks()

    print('Generating all frames and cell states...')
    drawer.load()
    print('Successfully loaded all images')

    # Save all generated images and their masks to disk
    counter = 1
    for g in drawer.get_gen_images():
        annotated = cv2.imwrite(path + f'gen/{counter}.tif', g)
        mask = cv2.imwrite(path + f'gen/{counter}_mask.tif',
                           masks[counter - 1])
        if not annotated or not mask:
            print('Failed to save')
        counter += 1
    print('Saved all images')

    # Now standby for user to issue commands for retrieval
    while True:
        string = input(
            'Input a frame and cell ID (optional) separated by a space...\n')
        if string:
            string = string.split(' ')
            frame = int(string[0])
            if len(string) > 1:
                try:
                    id = int(string[1])
                    display_image = drawer.serve(frame, id)
                except ValueError:
                    print('Not an integer')
                    display_image = drawer.serve(frame)
            else:
                display_image = drawer.serve(frame)
            # plt.imshow(display_image)
            # plt.axis('off')
            # plt.show()
            # cv2.imshow('image',display_image)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()

        else:
            break
Example no. 36
    def __init__(self):
        Detector.__init__(self)
        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.state = TrafficLight.UNKNOWN
        self.state_count = 0
        self.dbw_enabled = False

        # number of waypoints the car may overrun past the stop line while braking
        self.brake_buffer = rospy.get_param('~brake_buffer', 5)
        rospy.Subscriber('/image_color',
                         Image,
                         self.image_cb,
                         queue_size=1,
                         buff_size=5760000)
        # subscribe to dbw_enabled to check the car's position and orientation and set the correct direction
        rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)

        self.upcoming_red_light_pub.publish(-1)
        rospy.spin()
Example no. 37
def video(params=None):
    cap = cv2.VideoCapture(0)
    #cap.open("rtmp://rtmp01open.ys7.com/openlive/bd02a353615b4a12b1404f605218cb73.hd")

    K.set_learning_phase(0)
    det = Detector()

    while True:
        ret, img = cap.read()
        if not ret or len(img.shape) != 3:
            continue
        img = cv2.resize(img, (640, 480))
        (height, width) = img.shape[:-1]

        det.predict(img)

        if img is not None:
            cv2.imshow("result", img)
        if cv2.waitKey(3) == 27:
            break
Example no. 38
    def video_thread(cls):
        detectorer = Detector()
        while Camera.running:
            is_night = Camera.is_night()
            Camera.frame = cls.get_video(is_night)

            if globals.OFFLINE_MODE:
                cv2.imshow('Depth', depth)

            detectorer.update_frame(Camera.frame)

            with Camera.read_lock:
                Camera.outputFrame = Camera.frame

                if globals.OFFLINE_MODE:
                    cv2.imshow('Video', Camera.outputFrame)

        if Camera.running is False:
            log('CAMERA', 'Camera stopped.')
            return
Example no. 39
    def playbackTest():
        """
        Test code to assure tracker works with detector.
        """
        def forwardImage(tuple):
            ind, frame = tuple
            detections = detector.getDetection(ind)

            image = cv2.applyColorMap(frame, cv2.COLORMAP_OCEAN)
            image = tracker.visualize(image, ind)

            figure.displayImage((ind, image))

        def startDetector():
            detector.initMOG()
            detector.computeAll()
            tracker.trackAll(detector.detections)
            playback_manager.play()

        app = QtWidgets.QApplication(sys.argv)
        main_window = QtWidgets.QMainWindow()
        playback_manager = PlaybackManager(app, main_window)
        detector = Detector(playback_manager)
        tracker = Tracker(detector)

        playback_manager.fps = 10
        playback_manager.openTestFile()
        playback_manager.frame_available.connect(forwardImage)
        detector.mog_parameters.nof_bg_frames = 500
        detector._show_detections = True
        playback_manager.mapping_done.connect(startDetector)

        figure = TestFigure(playback_manager.togglePlay)
        main_window.setCentralWidget(figure)

        LogObject().print(detector.parameters)
        LogObject().print(detector.parameters.mog_parameters)
        LogObject().print(tracker.parameters)

        main_window.show()
        sys.exit(app.exec_())
Example no. 40
def main(args):
  '''Generate the input for the next network from the PNet or RNet output'''
  size = args.input_size
  batch_size = config.batches
  min_face_size = config.min_face
  stride = config.stride
  thresh = config.thresh
  scale_factor = config.scale_factor
  # model paths
  model_path = ['model/PNet/', 'model/RNet/', 'model/ONet']
  if size == 12:
    net = 'PNet'
    save_size = 24
  elif size == 24:
    net = 'RNet'
    save_size = 48
  # image data directory
  base_dir = 'data/WIDER_train/'
  # directory for the processed images
  data_dir = 'data/%d' % (save_size)
  neg_dir = os.path.join(data_dir, 'negative')
  pos_dir = os.path.join(data_dir, 'positive')
  part_dir = os.path.join(data_dir, 'part')
  for dir_path in [neg_dir, pos_dir, part_dir]:
    if not os.path.exists(dir_path):
      os.makedirs(dir_path)
  detectors = [None, None, None]
  PNet = FcnDetector(P_Net, model_path[0])
  detectors[0] = PNet
  if net == 'RNet':
    RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
    detectors[1] = RNet
  filename = 'data/wider_face_train_celeba.txt'
  # the function that reads the images and boxes from the file is in utils
  data = read_anno(base_dir, filename)
  mtcnn_detector = MtcnnDetector(detectors,
                                 min_face_size=min_face_size,
                                 stride=stride,
                                 threshold=thresh)
  # save_path = data_dir
  # save_file = os.path.join(save_path, 'detections.pkl')
  # if not os.path.exists(save_file):
  # wrap data in an iterator
  print('Loading data')
  test_data = TestLoader(data['images'])
  detectors, _ = mtcnn_detector.detect_face(test_data)
  print('Detection finished')

  # with open(save_file, 'wb') as f:
  #     pickle.dump(detectors, f, 1)

  print('Generating images')
  save_hard_example(save_size, data, neg_dir, pos_dir, part_dir, detectors)
Example no. 41
class Processor:
    def __init__(self, config: Dict[str, str]):
        if 'detector_model' not in config:
            print_err('Error: "detector_model" not found in config')
            raise ValueError
        self.detector = Detector(config['detector_model'])

        self.registry = None
        if 'chrome_web_driver' in config:
            self.registry = RegistryRequester(
                selenium_driver_path=config['chrome_web_driver'])
        else:
            self.registry = RegistryRequester()

    def __call__(self, image: Image, north: float, south: float, west: float,
                 east: float):
        buildings = self.detector.detect(image)
        bottom_pxl = image.size[1] - 1
        geo_to_pxl_ratio = (north - south) / image.size[1]
        result = []

        for building in buildings:
            x1, y1, x2, y2, theta = building

            # A little workaround with bottom pxl due to PIL's coordinate system starting at the top left corner.
            sx2, sy2 = rotate_point2d(x2, bottom_pxl - y2, -theta)
            square = abs(
                pxl_to_geo(sx2 - x1, geo_to_pxl_ratio) *
                (pxl_to_geo(sy2 - (bottom_pxl - y1), geo_to_pxl_ratio)))

            cx, cy = (sx2 - x1) / 2, (sy2 - (bottom_pxl - y1)) / 2
            cx, cy = rotate_point2d(cx, cy, theta)
            lat, lon = south + pxl_to_geo(
                cy, geo_to_pxl_ratio), west + pxl_to_geo(cx, geo_to_pxl_ratio)

            info = self.registry(lat, lon)

            home_in_registry = self.registry.is_home(lat, lon)

            result.append({
                'x1': x1,
                'y1': y1,
                'x2': x2,
                'y2': y2,
                'theta': theta,
                'lat': lat,
                'lon': lon,
                'building_found_in_registry': home_in_registry,
                'square': square,
                'cad_no': info['Кадастровый номер']
            })

        return result
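
A sketch of how Processor might be called, assuming a hypothetical model path, a PIL image tile, and illustrative geographic bounds:

from PIL import Image

config = {'detector_model': 'models/detector.pt'}   # hypothetical path
processor = Processor(config)
tile = Image.open('tile.png')                       # hypothetical satellite tile
buildings = processor(tile, north=55.76, south=55.75, west=37.61, east=37.62)
for b in buildings:
    print(b['lat'], b['lon'], b['square'], b['building_found_in_registry'])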
Example no. 42
 def __init__(
     self,
     model_def="/home/haitao/Project/tongji_sanitation_car/Yolo/config/yolov3-custom.cfg",
     load_path="/home/haitao/Project/tongji_sanitation_car/Yolo/parameters/yolov3_ckpt_base.pth"
 ):
     self.yolo = Detector(torch.device("cpu"),
                          model_def,
                          load_path,
                          reg_threshold=0.9,
                          cls_threshold=0.2,
                          nms_threshold=0.2,
                          image_size=416)
Example no. 43
 def __init__(self,device="cpu", mod_path=path+'/scripts/perception/det_2021-02-20_11-44-05-076696.pt'):
     self.image_pub = rospy.Publisher("/myresult", Image, queue_size=1)
     self.detect_pub = rospy.Publisher("/Sign", Sign, queue_size=1)
     #create network
     self.detector = Detector() #.to(device)
     #load weights
     self.model = utils.load_model(self.detector,mod_path,device)
     self.bridge = CvBridge()
     self.image_sub = rospy.Subscriber("/cf1/camera/image_raw", Image, self.callback, queue_size=1, buff_size=2**28)
     self.tf_buf   = tf2_ros.Buffer()
     self.tf_lstn  = tf2_ros.TransformListener(self.tf_buf)
     self.broadcaster = tf2_ros.TransformBroadcaster()
Example no. 44
  def __init__(self):
    """
    Configuration
    """

    # Camera settings
    self.FRAME_WIDTH = 341
    self.FRAME_HEIGHT = 256
    self.flip_camera = True # Mirror image
    self.camera = cv2.VideoCapture(1)

    # ...you can also use a test video for input
    #video = "/Users/matthiasendler/Code/snippets/python/tracker/final/assets/test_video/10.mov"
    #self.camera = cv2.VideoCapture(video)
    #self.skip_input(400) # Skip to an interesting part of the video

    if not self.camera.isOpened():
        print "couldn't load webcam"
        return
    #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.FRAME_WIDTH)
    #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.FRAME_HEIGHT)

    self.filters_dir = "filters/" # Filter settings in trackbar
    self.filters_file = "filters_default"

    # Load filter settings
    current_config = self.filters_dir + self.filters_file
    self.filters = Filters(current_config)

    # No actions will be triggered in test mode
    # (can be used to adjust settings at runtime)
    self.test_mode = False

    # Create a hand detector
    # In fact, this is a wrapper for many detectors
    # to increase detection confidence
    self.detector = Detector(self.filters.config)

    # Knowledge base for all detectors
    self.kb = KB()
    # Create gesture recognizer.
    # A gesture consists of a motion and a hand state.
    self.gesture = Gesture()

    # The action module executes keyboard and mouse commands
    self.action = Action()

    # Show output of detectors
    self.output = Output()

    self.run()
Example no. 45
class Cropper(object):
    """Cropper"""
    def __init__(self):
        super(Cropper, self).__init__()
        self.detector  = Detector()

    @staticmethod
    def _bounding_rect(faces):
        top,    left  =  sys.maxint,  sys.maxint
        bottom, right = -sys.maxint, -sys.maxint
        for (x, y, w, h) in faces:
            if x < left:
                left = x
            if x+w > right:
                right = x+w
            if y < top:
                top = y
            if y+h > bottom:
                bottom = y+h
        return top, left, bottom, right

    def crop(self, img, target_width, target_height):
        original_height,  original_width  = img.shape[:2]
        faces = self.detector.detect_faces(img)
        if len(faces) == 0: # no detected faces
            target_center_x = original_width / 2
            target_center_y = original_height / 2
        else:
            top, left, bottom, right  = self._bounding_rect(faces)
            target_center_x = (left + right) / 2
            target_center_y = (top + bottom) / 2
        target_left = target_center_x - target_width / 2
        target_right = target_left + target_width
        target_top = target_center_y - target_height / 2
        target_bottom = target_top + target_height
        if target_top < 0:
            delta = abs(target_top)
            target_top += delta
            target_bottom += delta
            if target_bottom > original_height:
                target_bottom = original_height
        if target_left < 0:
            delta = abs(target_left)
            target_left += delta
            target_right += delta
            if target_right > original_width:
                target_right = original_width
        return img[target_top:target_bottom, target_left:target_right]
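
An illustrative use of the class, assuming cv2 for image I/O and a hypothetical input file 'portrait.jpg':

import cv2

img = cv2.imread('portrait.jpg')
cropper = Cropper()
thumb = cropper.crop(img, target_width=200, target_height=200)  # centered on detected faces, if any
cv2.imwrite('portrait_thumb.jpg', thumb)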
Example no. 46
    def __init__(self, parent=None):
        super(Window, self).__init__(parent)

        self.detector = Detector()
        self.mediaThread = MediaThread(self)
        sys.stdout = common.EmittingStream(textWritten=self.normalOutputWritten)
        self.debugSignal.connect(self.debugTable)
        self.currentFrame = None
        self.bgColor = QColor(255, 255, 255)
        self.bgPath = ''

        self.classifiersParameters = {}

        self.setupUI()
        self.populateUI()
        self.connectUI()
        self.initUI()
Example no. 47
 def __init__(self,
              camera_id=None,
              show_display=False,
              width=FRAME_WIDTH,
              height=FRAME_HEIGHT,
              extra_time=EXTRA_TIME,
              video_directory=VIDEO_DIR,
              video_prefix=VIDEO_PREFIX,
              do_compress=DO_COMPRESS):
     self.camera = Camera(camera_id, prop_set={width: width, height: height})
     self.total_pixels = height * width
     self.detector = Detector()
     self.display = None
     if show_display:
         self.display = Display((width, height))
     self.extra_time_delta = timedelta(seconds=extra_time)
     self.video_prefix = os.path.join(video_directory, video_prefix)
     self.do_compress = do_compress
Example no. 48
 def populateTree(self, node, parent):
     """Create a QTreeView node under 'parent'.
     """
     item = QtGui.QStandardItem(node)
     h, s, v = Detector.getDefaultHSVColor(node)
     color = QColor()
     color.setHsvF(h, s, v)
     item.setIcon(self.getIcon(color))
     # Unique hash for QStandardItem
     key = hash(item)
     while key in self.classifiersParameters:
         key += 1
     item.setData(key)
     cp = ClassifierParameters(item.data(), node, node, color,
                               self.SHAPE_RECT, self.FILL_OUTLINE)
     self.classifiersParameters[key] = cp
     parent.appendRow(item)
     return item
Example no. 49
class Spectrometer(object):
    '''This class represents the whole spectrometer.

    It bundles the detector and source in one object.

    :params detector: the selected detector (0 - builtin, default)
    :type detector: int
    :params source: the selected light source ('blue' is default)
    :type source: str

    '''

    def __init__(self, **kwargs):
        self.init_components(**kwargs)

    def init_components(self, **kwargs):
        print('Setting up spectrometer')

        source = kwargs.get('source', 'blue')
        detector = kwargs.get('detector', 0)
        assert isinstance(source, str)
        assert isinstance(detector, int)

        self.source = Source(source)
        self.detector = Detector(detector)

    def measure(self, num_frames, num_dropped_frames, kind, **kwargs):
        '''Executes a measurement.

        :params num_frames: number of captured frames
        :type num_frames: int
        :params num_dropped_frames: number of frames to drop before collecting spectrum frames
        :type num_dropped_frames: int
        :params kind: The type of measurement. Choices are: 'background', 'spectrum', 'stream'
        :type kind: str

        '''
        if kind == 'spectrum':
            #self.source_on()
            self.detector.measure_spectrum(num_frames, num_dropped_frames, **kwargs)
            #self.source_off()
        elif kind == 'background':
            #self.source_on()
            self.detector.measure_background(num_frames, num_dropped_frames, **kwargs)
            #self.source_off()
        elif kind == 'stream':
            #self.source_on()
            self.detector.stream()
            #self.source_off()
        else:
            sys.exit('Measurement type unknown.')
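
A sketch of a typical measurement sequence with this class; the frame counts below are illustrative, not values from the original project:

spec = Spectrometer(source='blue', detector=0)
spec.measure(num_frames=10, num_dropped_frames=2, kind='background')
spec.measure(num_frames=10, num_dropped_frames=2, kind='spectrum')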
Example no. 50
class Tracker(object):
  """
  This is the main program which gives a high-level view
  of all the running subsystems. It connects camera input with
  output in form of "actions" (such as keyboard shortcuts on the users behalf).
  This is done by locating a hand in an image and detecting features,
  like the number of fingers, and trying to match that data with a
  known gesture.
  """

  def __init__(self):
    """
    Configuration
    """

    # Camera settings
    self.FRAME_WIDTH = 341
    self.FRAME_HEIGHT = 256
    self.flip_camera = True # Mirror image
    self.camera = cv2.VideoCapture(1)

    # ...you can also use a test video for input
    #video = "/Users/matthiasendler/Code/snippets/python/tracker/final/assets/test_video/10.mov"
    #self.camera = cv2.VideoCapture(video)
    #self.skip_input(400) # Skip to an interesting part of the video

    if not self.camera.isOpened():
        print "couldn't load webcam"
        return
    #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.FRAME_WIDTH)
    #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.FRAME_HEIGHT)

    self.filters_dir = "filters/" # Filter settings in trackbar
    self.filters_file = "filters_default"

    # Load filter settings
    current_config = self.filters_dir + self.filters_file
    self.filters = Filters(current_config)

    # No actions will be triggered in test mode
    # (can be used to adjust settings at runtime)
    self.test_mode = False

    # Create a hand detector
    # In fact, this is a wrapper for many detectors
    # to increase detection confidence
    self.detector = Detector(self.filters.config)

    # Knowledge base for all detectors
    self.kb = KB()
    # Create gesture recognizer.
    # A gesture consists of a motion and a hand state.
    self.gesture = Gesture()

    # The action module executes keyboard and mouse commands
    self.action = Action()

    # Show output of detectors
    self.output = Output()

    self.run()

  def run(self):
    """
    In each step: Read the input image and keys,
    process it and react on it (e.g. with an action).
    """
    while True:
      img = self.get_input()
      hand = self.process(img)
      ref = self.action.get_reference_point()
      self.output.show(img, hand, ref)

  def process(self, img):
    """
    Process input
    """
    # Run detection
    hand = self.detector.detect(img)
    # Store result in knowledge base
    self.kb.update(hand)
    if not self.test_mode:
      # Try to interpret as a gesture
      self.interprete(hand)
    return hand

  def interprete(self, hand):
    """
    Try to interpret the input as a gesture
    """
    self.gesture.add_hand(hand)
    operation = self.gesture.detect_gesture()
    self.action.execute(operation)

  def get_input(self):
    """
    Get input from camera and keyboard
    """
    self.get_key()
    _, img = self.camera.read()
    img = cv2.resize(img, (self.FRAME_WIDTH, self.FRAME_HEIGHT))
    if self.flip_camera:
      img = cv2.flip(img, 1)
    return img


  def get_key(self):
    """
    Read keyboard input
    """
    key = cv2.waitKey(self.filters.config["wait_between_frames"])
    if key == ord('+'):
      # Reduce program speed
      self.filters.config["wait_between_frames"] += 500
    if key == ord('-'):
      # Increase program speed
      if self.filters.config["wait_between_frames"] >= 500:
        self.filters.config["wait_between_frames"] -= 500
    #if key == ord('s'):
    # Save config
    #  self.filters.save()
    if key == ord('r'):
      # Reset all detectors
      self.detector.reset()
      self.action.reset()
    if key == ord('d'):
      # Make a screenshot
      self.output.make_screenshot()
    if key == ord('p') or key == ord(' '):
      # Pause
      cv2.waitKey()
    if key == ord('t'):
      # Test mode
      self.test_mode = not self.test_mode
    if key == ord('1'):
      self.output.toggle_estimate()
    if key == ord('2'):
      self.output.toggle_detectors()
    if key == ord('3'):
      self.output.toggle_skin()
    if key == ord('f'):
      self.toggle_filters()
    if key == 63235: # Right arrow
      self.skip_input(20)
    if key == 27 or key == ord('q'):
      # Abort program on ESC or q
      exit()

  def toggle_filters(self):
    """
    Load the next filter settings
    """
    self.filters_file = self.next_filters_file()
    current_config = self.filters_dir + self.filters_file
    self.filters.set_config(current_config)

  def next_filters_file(self):
    """
    Get the next filter settings
    """
    filters = listdir(self.filters_dir)
    for i, f in enumerate(filters):
      if f == self.filters_file:
        return filters[(i+1) % len(filters)]

  def skip_input(self, x=1):
    """
    Skip to a different part of a video sequence.
    """
    for i in range(0,x):
      self.camera.grab()
Example no. 51
            os.unlink(filename)


    # DETECTOR           DETECTOR              DETECTOR

    threads = {}

    for key in config['detectors']:
        cameraName = config['detectors'][key]['camera']

        if cameraName not in config['cameras']:
            raise KeyError('Failed to find "{0}" in camera listing for detection'.format(cameraName))

        camera = Camera.create(config, cameraName)
        detector = Detector.create(config, camera)
        threads['detector_{0}'.format(cameraName)] = detector

        detector.FaceDetected += OnFaceDetection

        fps = config['detectors'][key]['fps']
        area = (config['detectors'][key]['area.x'], config['detectors'][key]['area.y'], config['detectors'][key]['area.h'], config['detectors'][key]['area.w'])
        print 'Creating detector for {0}'.format(cameraName)
        thread = threading.Thread(target=detector.faces, args=(fps, area))
        thread.daemon = True
        thread.start()

    for key in config['motion']:
        cameraName = config['motion'][key]['camera']

        if cameraName not in config['cameras']:
Example no. 52
def execute(inputfile, extractor_out, detector_out=None, sorter_out=None):
    detector = Detector(inputfile, outpath=detector_out)
    sorter = SlideSorter(sources.ListSource(detector.detect_slides()), outpath=sorter_out)
    extractor = ContentExtractor(sources.ListSource(sorter.sort()), output_dir=extractor_out)
    extractor.analyze()
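
A hypothetical call, with all file and folder names made up for illustration:

execute('lecture.mp4', extractor_out='slides_out/',
        detector_out='detected_slides/', sorter_out='sorted_slides/')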
Example no. 53
weight_path = '../caffe_layers_value.pickle'
model_path = '../models/caltech256/model-2'
jpg_folder_path = "../img_test"

imgPath = [join(jpg_folder_path, f) for f in listdir(jpg_folder_path) if isfile(join(jpg_folder_path, f))]

# load the caltech model
f = open(model_label_path,"rb")
label_dict = pickle.load(f)
n_labels = len(label_dict)
batch_size = 1

images_tf = tf.placeholder( tf.float32, [None, 224, 224, 3], name="images")
labels_tf = tf.placeholder( tf.int64, [None], name='labels')

detector = Detector( weight_path, n_labels )
c1,c2,c3,c4,conv5, conv6, gap, output = detector.inference( images_tf )
classmap = detector.get_classmap( labels_tf, conv6 )

sess = tf.InteractiveSession()
saver = tf.train.Saver()

saver.restore( sess, model_path )


# Forward the test images through the network & save the results
for idx,imgP in enumerate(imgPath):
    img = Image.open(imgP)
    img = img.resize([224,224])
    img = np.array(img)
    img = img.reshape(1,224,224,3)
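    # The loop is cut off after the reshape; a typical continuation for this
    # CAM-style setup (a sketch, not the original code) would run the class map
    # for the predicted label so it can be saved next to the input image.
    output_val = sess.run(output, feed_dict={images_tf: img})
    pred_label = output_val.argmax(axis=1)
    classmap_val = sess.run(classmap,
                            feed_dict={images_tf: img, labels_tf: pred_label})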
Esempio n. 54
0
E-mail      :   [email protected]

"""

from config import TEST_IMG
from matplotlib import image
from matplotlib import pyplot
import pylab
from time import time
from detector import Detector

start_time = time()

img = image.imread(TEST_IMG)

if len(img.shape) == 3:
    imgSingleChannel = img[:,:, 1]  # use the green channel of the RGB image
else:
    imgSingleChannel = img

det = Detector()

rectangles = det.scanImgOverScale(imgSingleChannel)

end_time = time()

print "Number of rectangles: ", len(rectangles)
print "Cost time: ", end_time - start_time

det.showResult(img, rectangles)
    def __init__(self):
        super(Cropper, self).__init__()
        self.detector = Detector()
Esempio n. 56
0
class Watcher(object):

    def __init__(self,
                 camera_id=None,
                 show_display=False,
                 width=FRAME_WIDTH,
                 height=FRAME_HEIGHT,
                 extra_time=EXTRA_TIME,
                 video_directory=VIDEO_DIR,
                 video_prefix=VIDEO_PREFIX,
                 do_compress=DO_COMPRESS):
        self.camera = Camera(camera_id, prop_set={'width': width, 'height': height})
        self.total_pixels = height * width
        self.detector = Detector()
        self.display = None
        if show_display:
            self.display = Display((width, height))
        self.extra_time_delta = timedelta(seconds=extra_time)
        self.video_prefix = os.path.join(video_directory, video_prefix)
        self.do_compress = do_compress

    def get_frame(self):
        """
        Get a frame with the camera, transform to greyscale and convert to Numpy
        array.

        """
        now = datetime.now()
        image = self.camera.getImage().toGray()
        return VideoFrame(image=image, timestamp=now)

    def main_loop(self):
        frame_pre = self.get_frame()
        frame = self.get_frame()
        frame_post = self.get_frame()
        video_buffer = []
        record_until = datetime.now()
        while True:
            detect = self.detector.detect_motion(frame_pre, frame, frame_post)
            if detect['has_motion']:
                record_until = frame.timestamp + self.extra_time_delta

            if self.display:
                detect['difference_image'].save(self.display)

            if frame.timestamp < record_until:
                video_buffer.append(frame)
            else:
                if video_buffer:
                    buffer_fname = self.save_video(video_buffer)
                    if self.do_compress and buffer_fname:
                        proc = Process(target=self.compress_video,
                                       args=(buffer_fname,))
                        proc.start()
                video_buffer = []

            frame_pre = frame
            frame = frame_post
            frame_post = self.get_frame()

    def save_video(self, video_buffer):
        """Use avconv to process the video buffer."""
        buffer_fname = None
        if not video_buffer:
            return None
        start = video_buffer[0].timestamp
        end = video_buffer[-1].timestamp
        delta = (end - start).seconds
        if delta:
            fps = len(video_buffer) / delta
            print len(video_buffer), "frames in", delta, "seconds:", fps
            timestamp = video_buffer[0].timestamp.isoformat()
            timestamp = timestamp[:timestamp.rfind('.')]
            buffer_fname = '%s%s.avi' % (self.video_prefix, timestamp)
            video_streamer = VideoStream(fps=fps,
                                         filename=buffer_fname,
                                         framefill=False)
            map(video_streamer.writeFrame, [frame.image for frame in video_buffer])
            print "Saved video:", buffer_fname
        return buffer_fname


    def compress_video(self, input_fname):
        output_fname = input_fname + '.mp4'
        params = '-i %s -c:a copy -c:v libx264 -crf 23 -s:v 640x480 %s' % (
            input_fname, output_fname)
        call('avconv ' + params, shell=True)
        os.remove(input_fname)
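
A sketch of how the Watcher above might be started; the constructor defaults (FRAME_WIDTH, VIDEO_DIR and so on) are assumed to be module-level constants, as the signature suggests.

if __name__ == '__main__':
    # camera_id=None lets the capture library pick the default camera.
    watcher = Watcher(camera_id=None, show_display=True)
    watcher.main_loop()  # loops until the process is interrupted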
Esempio n. 57
0
def main():
    main_parser = argparse.ArgumentParser(prog="cosmicpi", description="CosmicPi acquisition process", add_help=False)
    main_parser.add_argument("--config", help="Path to configuration file", default="/etc/cosmicpi.yaml")
    args, remaining_argv = main_parser.parse_known_args()

    # Merge the default config with the configuration file
    config = load_config(args.config)

    # Parse the command line for overrides
    parser = argparse.ArgumentParser(parents=[main_parser])
    parser.set_defaults(**config)

    parser.add_argument("-i", "--host",       **arg("broker.host",          "Message broker host"))
    parser.add_argument("-p", "--port",       **arg("broker.port",          "Message broker port", type=int))
    parser.add_argument("-a", "--username",   **arg("broker.username",      "Message broker username"))
    parser.add_argument("-b", "--password",   **arg("broker.password",      "Message broker password"))
    parser.add_argument("-n", "--no-publish", **arg("broker.enabled",       "Disable event publication"))
    parser.add_argument("-u", "--usb",        **arg("usb.device",           "USB device name"))
    parser.add_argument("-d", "--debug",      **arg("debug",                "Enable debug mode"))
    parser.add_argument("-o", "--log-config", **arg("logging.config",       "Path to logging configuration"))
    parser.add_argument("-l", "--no-log",     **arg("logging.enabled",      "Disable file logging"))
    parser.add_argument("-v", "--no-vib",     **arg("monitoring.vibration", "Disable vibration monitoring"))
    parser.add_argument("-w", "--no-weather", **arg("monitoring.weather",   "Disable weather monitoring"))
    parser.add_argument("-c", "--no-cosmics", **arg("monitoring.cosmics",   "Disable cosmic ray monitoring"))
    parser.add_argument("-k", "--patk",       **arg("patok",                "Server push notification token"))

    options = parser.parse_args()

    log_config = options.logging["config"]
    print ("INFO: using logging configuration from %s" % log_config)
    logging.config.fileConfig(log_config, disable_existing_loggers=False)
    console = logging.getLogger(__name__)

    if options.debug:
        print_config(options)

    try:
        publisher = EventPublisher(options)
    except Exception:
        console.error("Exception: Can't connect to broker")
        sys.exit(1)

    try:
        usb = UsbHandler(options.usb['device'], 9600, 60)
        usb.open()
    except Exception as e:
        console.error("Exception: Can't open USB device: %s" % e)
        sys.exit(1)

    detector = Detector(usb, publisher, options)

    try:
        detector.start()
        command_handler = CommandHandler(detector, usb, options)
        command_handler.start()

        while True:
            time.sleep(1)

    except Exception as e:
        console.info("Exception: main: %s" % e)
        traceback.print_exc()

    finally:
        detector.stop()
        console.info("Quitting ...")
        time.sleep(1)
        usb.close()
        publisher.close()
        sys.exit(0)
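
The arg() helper used when declaring the options above is not part of this snippet. The sketch below is one plausible implementation that resolves a dotted key such as "broker.host" in the merged configuration and turns it into add_argument keyword arguments; the name is the one used above, but the body is an assumption, not the project's actual code.

def arg(path, help_text, **kwargs):
    # 'config' is assumed to be visible here, e.g. the dict returned by load_config().
    node = config
    for part in path.split('.'):
        node = node[part]
    return dict(default=node, help=help_text, **kwargs)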
Esempio n. 58
0
#! /usr/bin/env python
# Main near-duplicate detection "runner"
#
# Written by Parker Moore (pjm336)
# http://www.parkermoore.de

import operator, copy
from detector import Detector

if __name__ == "__main__":
    # run the program
    detector = Detector('./test')
    print "Checking for duplicates using NDD..."
    duplicates = detector.check_for_duplicates()
    if duplicates:
        print "Duplicates found (Jaccard coefficient > 0.5):"
        print duplicates
    
    print "Printing three nearest neighbors of the first 10 files..."
    filenames_of_first_ten = [detector.filename("file%02d.txt") % (f,) for f in xrange(10)]
    filenames_of_first_one_hundred = [detector.filename("file%02d.txt") % (f,) for f in xrange(100)]
    for index1, j in enumerate(filenames_of_first_ten):
        jaccard_coefficients = [0] * 100
        for index2, d in enumerate(filenames_of_first_one_hundred):
            if index1 != index2:
                jaccard_coefficients[index2] = detector.index.get_jaccard(j, d)
        three_nearest = []
        nearest_count = -1
        jcos = copy.deepcopy(jaccard_coefficients)
        while len(three_nearest) < 3:
            index,coefficient = max(enumerate(jcos), key=operator.itemgetter(1))
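            # The snippet is cut off here; one plausible continuation (a sketch,
            # not the original) records the current maximum and masks it out so
            # the next iteration finds the following nearest neighbour:
            #     three_nearest.append((index, coefficient))
            #     jcos[index] = -1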
trainset = pd.DataFrame({'image_path': image_paths_train})
testset  = pd.DataFrame({'image_path': image_paths_test })

trainset['label_name'] = trainset['image_path'].map(lambda x: x.split('/')[-2])
testset['label_name'] = testset['image_path'].map(lambda x: x.split('/')[-2])

trainset['label'] = trainset['label_name'].map( label_dict )
testset['label'] = testset['label_name'].map( label_dict )

train_phase = tf.placeholder( tf.bool )
learning_rate = tf.placeholder( tf.float32, [])
images_tf = tf.placeholder( tf.float32, [None, 224, 224, 3], name="images")
labels_tf = tf.placeholder( tf.int64, [None], name='labels')

detector = Detector(weight_path, n_labels)

p1,p2,p3,p4,conv5, conv6, gap, output = detector.inference(images_tf)
loss_tf = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( output, labels_tf ))

weights_only = filter( lambda x: x.name.endswith('W:0'), tf.trainable_variables() )
weight_decay = tf.reduce_sum(tf.pack([tf.nn.l2_loss(x) for x in weights_only])) * weight_decay_rate
loss_tf += weight_decay

sess = tf.InteractiveSession()
saver = tf.train.Saver( max_to_keep=50 )

#optimizer = tf.train.RMSPropOptimizer( learning_rate )
optimizer = tf.train.MomentumOptimizer( learning_rate, momentum )
grads_and_vars = optimizer.compute_gradients( loss_tf )
grads_and_vars = [(tf.clip_by_value(gv[0], -5., 5.), gv[1]) for gv in grads_and_vars]
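
The snippet stops after clipping the gradients; with this pre-1.0 TensorFlow API the usual continuation (sketched here, not taken from the original) is to apply them and run training steps with a feed dict.

train_op = optimizer.apply_gradients(grads_and_vars)
tf.initialize_all_variables().run()

# One illustrative step; batch_images and batch_labels are placeholders for a
# mini-batch drawn from trainset, and 0.01 is an arbitrary learning rate.
# _, loss_val = sess.run([train_op, loss_tf],
#                        feed_dict={images_tf: batch_images,
#                                   labels_tf: batch_labels,
#                                   learning_rate: 0.01,
#                                   train_phase: True})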
Esempio n. 60
0
class Window(QWidget, WindowUI):

    SOURCE_FILE = 'File'
    SOURCE_CAMERA = 'Camera'

    DISPLAY_INPUT = 'Input'
    DISPLAY_PREPROCESSED = 'Pre-processed'

    SHAPE_RECT = 'Rectangle'
    SHAPE_ELLIPSE = 'Ellipse'

    FILL_NONE = 'None'
    FILL_OUTLINE = 'Outline'
    FILL_COLOR = 'Color'
    FILL_BLUR = 'Blur'
    FILL_IMAGE = 'Image'
    FILL_MASK = 'Mask'

    BG_INPUT = 'Input'
    BG_COLOR = 'Color'
    BG_TRANSPARENT = 'Transparent'
    BG_IMAGE = 'Image'

    __sourceModes = [SOURCE_FILE, SOURCE_CAMERA]
    __displayModes = [DISPLAY_INPUT, DISPLAY_PREPROCESSED]
    __shapeModes = [SHAPE_RECT, SHAPE_ELLIPSE]
    __fillModes = [FILL_NONE, FILL_OUTLINE, FILL_BLUR, FILL_IMAGE, FILL_MASK, FILL_COLOR]
    __bgModes = [BG_INPUT, BG_COLOR, BG_TRANSPARENT, BG_IMAGE]

    IMAGE_FILTERS = '*.jpg *.png *.jpeg *.bmp'
    VIDEO_FILTERS = '*.avi *.mp4 *.mov'
    MASK_PATH = 'other/mask.png'

    debugSignal = QtCore.pyqtSignal(object, int)

    def __init__(self, parent=None):
        super(Window, self).__init__(parent)

        self.detector = Detector()
        self.mediaThread = MediaThread(self)
        sys.stdout = common.EmittingStream(textWritten=self.normalOutputWritten)
        self.debugSignal.connect(self.debugTable)
        self.currentFrame = None
        self.bgColor = QColor(255, 255, 255)
        self.bgPath = ''

        self.classifiersParameters = {}

        self.setupUI()
        self.populateUI()
        self.connectUI()
        self.initUI()

    def __del__(self):
        sys.stdout = sys.__stdout__

    def keyPressEvent(self, e):
        if e.key() == QtCore.Qt.Key_Escape:
            self.close()

    def populateUI(self):
        self.availableObjectsList.addItems(Detector.getDefaultAvailableObjects())
        for sourceMode in self.__sourceModes:
            self.sourceCBox.addItem(sourceMode)
        for displayMode in self.__displayModes:
            self.displayCBox.addItem(displayMode)
        for shapeMode in self.__shapeModes:
            self.shapeCBox.addItem(shapeMode)
        for fillMode in self.__fillModes:
            self.fillCBox.addItem(fillMode)
        for bgMode in self.__bgModes:
            self.bgCBox.addItem(bgMode)
        model = QtGui.QStandardItemModel(self)
        func = lambda node, parent: self.populateTree(node, parent)
        Detector.getDefaultObjectsTree().map(model, func)
        self.objectsTree.setModel(model)

    def connectUI(self):
        self.hsplitter.splitterMoved.connect(self.splitterMoved)
        self.vsplitter.splitterMoved.connect(self.splitterMoved)
        self.showDetails.clicked[bool].connect(self.toggleDebugInfo)
        self.addButton.clicked.connect(self.addObject)
        self.removeButton.clicked.connect(self.removeObject)
        self.sourceCBox.currentIndexChanged.connect(self.togglePath)
        self.sourcePathButton.clicked.connect(self.loadMedia)
        self.playButton.clicked.connect(self.play)
        self.refreshButton.clicked.connect(self.refresh)
        self.nextFrameButton.clicked.connect(self.nextFrame)
        self.objectsTree.customSelectionChanged.connect(self.showClassifierParameters)
        self.colorPicker.clicked.connect(self.colorDialog)
        self.classifierName.textChanged.connect(self.updateClassifierParameters)
        self.scaleFactor.valueChanged.connect(self.updateScaleFactor)
        self.minNeighbors.valueChanged.connect(self.updateMinNeighbors)
        self.minWidth.valueChanged.connect(self.updateMinWidth)
        self.minHeight.valueChanged.connect(self.updateMinHeight)
        self.shapeCBox.currentIndexChanged.connect(self.updateShape)
        self.fillCBox.currentIndexChanged.connect(self.updateFill)
        self.autoNeighbors.clicked.connect(self.calcNeighbors)
        self.stabilize.stateChanged.connect(self.updateStabilize)
        self.tracking.stateChanged.connect(self.updateTracking)
        self.showName.stateChanged.connect(self.updateShowName)
        self.fillPath.clicked.connect(self.loadFillPath)
        self.bgColorPicker.clicked.connect(self.bgColorDialog)
        self.bgPathButton.clicked.connect(self.bgPathDialog)
        self.bgCBox.currentIndexChanged.connect(self.toggleBgParams)

    def initUI(self):
        self.showClassifierParameters(None, None)
        self.equalizeHist.setChecked(True)
        self.toggleFillPath()
        self.toggleBgParams(self.bgCBox.currentIndex())
        common.setPickerColor(self.bgColor, self.bgColorPicker)
        self.togglePlayButton(False)

    ########################################################
    # Utils

    def getIcon(self, color):
        """Returns a colored icon.
        """
        pixmap = QPixmap(14, 14)
        pixmap.fill(color)
        return QIcon(pixmap)

    def populateTree(self, node, parent):
        """Create a QTreeView node under 'parent'.
        """
        item = QtGui.QStandardItem(node)
        h, s, v = Detector.getDefaultHSVColor(node)
        color = QColor()
        color.setHsvF(h, s, v)
        item.setIcon(self.getIcon(color))
        # Unique hash for QStandardItem
        key = hash(item)
        while key in self.classifiersParameters:
            key += 1
        item.setData(key)
        cp = ClassifierParameters(item.data(), node, node, color,
                                  self.SHAPE_RECT, self.FILL_OUTLINE)
        self.classifiersParameters[key] = cp
        parent.appendRow(item)
        return item

    def getMask(self, pixmap, x, y, w, h, shape, overlay,
                bg=QtCore.Qt.transparent, fg=QtCore.Qt.black, progressive=False):
        """Create a shape mask with the same size of pixmap.
        """
        mask = QPixmap(pixmap.width(), pixmap.height())
        mask.fill(bg)
        path = QtGui.QPainterPath()

        if progressive:
            progressive = QPixmap(pixmap.width(), pixmap.height())
            progressive.fill(QtCore.Qt.transparent)
            progressiveMask = QPixmap(self.MASK_PATH)
            progressiveMask = progressiveMask.scaled(w, h, QtCore.Qt.IgnoreAspectRatio)
            progressivePainter = QtGui.QPainter(progressive)
            progressivePainter.drawPixmap(x, y, progressiveMask)
            del progressivePainter
            fg = QtGui.QBrush(progressive)

        painter = QtGui.QPainter(mask)
        if shape == self.SHAPE_ELLIPSE:
            path.addEllipse(x, y, w, h)
            painter.fillPath(path, fg)
        elif shape == self.SHAPE_RECT:
            painter.fillRect(x, y, w, h, fg)

        painter.setCompositionMode(QtGui.QPainter.CompositionMode_SourceAtop)
        if overlay:
            painter.drawPixmap(0, 0, overlay)
        return mask

    def drawTracking(self, painter, tracking, scale):
        """Draw lines between each position in tracking list.
        """
        if not tracking:
            return
        hashTable = {}
        # Group tracks by item hash
        for track in tracking:
            for rect, hash in track:
                x, y, w, h = common.scaleRect(rect, scale)
                x, y = x + w / 2, y + h / 2
                if hash in hashTable:
                    hashTable[hash].append((x, y))
                else:
                    hashTable[hash] = [(x, y)]
        # Draw
        for hash, points in hashTable.iteritems():
            prev = None
            for point in points:
                if prev:
                    x1, y1 = point
                    x2, y2 = prev
                    painter.drawLine(x1, y1, x2, y2)
                prev = point

    def drawBackground(self, pixmap):
        """Draw background in pixmap.
        """
        w, h = pixmap.width(), pixmap.height()
        mode = self.__bgModes[self.bgCBox.currentIndex()]
        source = QPixmap(pixmap)
        painter = QtGui.QPainter(pixmap)
        if mode == self.BG_COLOR:
            painter.fillRect(0, 0, w, h, self.bgColor)
        if mode == self.BG_TRANSPARENT or mode == self.BG_IMAGE or mode == self.BG_INPUT:
            painter.drawPixmap(0, 0, common.checkerboard(pixmap.size()))
        if mode == self.BG_IMAGE and self.bgPath:
            bgPixmap = QPixmap(self.bgPath)
            if not bgPixmap.isNull():
                bgPixmap = bgPixmap.scaled(w, h, QtCore.Qt.IgnoreAspectRatio)
                painter.drawPixmap(0, 0, bgPixmap)
        if mode == self.BG_INPUT:
            painter.drawPixmap(0, 0, source)

    def fillMask(self, pixmap, painter, roi, param, source):
        """Draw mask in roi.
        """
        x, y, w, h = roi
        masked = self.getMask(pixmap, x, y, w, h, param.shape,
                              source, progressive=True)
        painter.drawPixmap(0, 0, masked)

    def fillBlur(self, pixmap, painter, roi, param, source):
        """Draw blur in roi.
        """
        x, y, w, h = roi
        blurred = self.getMask(pixmap, x, y, w, h, param.shape,
                               common.blurPixmap(pixmap, 20))
        painter.drawPixmap(0, 0, blurred)


    def fillImage(self, pixmap, painter, roi, param, source):
        """Draw image in roi.
        """
        x, y, w, h = roi
        fillPixmap = QPixmap(param.fillPath)
        if not fillPixmap.isNull():
            fillPixmap = fillPixmap.scaled(w, h, QtCore.Qt.IgnoreAspectRatio)
            mask = self.getMask(pixmap, x, y, w, h, param.shape, None,
                                QtCore.Qt.white, QtCore.Qt.black)
            painter.setClipRegion(QtGui.QRegion(QtGui.QBitmap(mask)))
            painter.drawPixmap(x, y, fillPixmap)
            painter.setClipping(False)

    def fillColor(self, pixmap, painter, roi, param, source):
        """Draw color in roi.
        """
        x, y, w, h = roi
        if param.shape == self.SHAPE_ELLIPSE:
            path = QtGui.QPainterPath()
            path.addEllipse(x, y, w, h)
            painter.fillPath(path, param.color)
        elif param.shape == self.SHAPE_RECT:
            painter.fillRect(x, y, w, h, param.color)

    def fillOutline(self, pixmap, painter, roi, param, source):
        """Draw outline in roi.
        """
        x, y, w, h = roi
        drawFunc = painter.drawRect
        if param.shape == self.SHAPE_ELLIPSE:
            drawFunc = painter.drawEllipse
        drawFunc(x, y, w, h)

    def drawRects(self, source, rectsTree, scale):
        """Draw rectangles in 'rectsTree' on 'source'.
        """
        pixmap = QPixmap(source)
        self.drawBackground(pixmap)

        def drawRect(node, parentHash):
            painter = QtGui.QPainter(pixmap)
            roi, param, tracking = node.data
            x, y, w, h = common.scaleRect(roi, scale)
            painter.setPen(param.color)
            funcTable = {
                self.FILL_MASK: self.fillMask,
                self.FILL_BLUR: self.fillBlur,
                self.FILL_IMAGE: self.fillImage,
                self.FILL_COLOR: self.fillColor,
                self.FILL_OUTLINE: self.fillOutline
            }
            for fill, func in funcTable.iteritems():
                if param.fill == fill:
                    func(pixmap, painter, (x, y, w, h), param, source)
            if param.tracking:
                self.drawTracking(painter, tracking, scale)
            if param.showName:
                painter.drawText(x, y, param.name)
            return param.hash

        h, w = pixmap.height(), pixmap.width()
        rectsTree.map(None, drawRect)
        painter = QtGui.QPainter(source)
        painter.drawPixmap(0, 0, pixmap)

    def debugTable(self, args, append=False):
        """Display debug info into table.
        """
        cols = len(args)
        if not append:
            self.debugCursor = self.debugText.textCursor()
            format = QtGui.QTextTableFormat()
            constraints = [QtGui.QTextLength(QtGui.QTextLength.FixedLength,
                                             size) for arg, size in args]
            format.setColumnWidthConstraints(constraints)
            format.setBorder(0)
            format.setCellPadding(2)
            format.setCellSpacing(0)
            self.debugCursor.insertTable(1, cols, format)
        scroll = self.debugText.verticalScrollBar()
        for arg, size in args:
            if arg:
                self.debugCursor.insertText(arg)
                self.debugCursor.movePosition(QtGui.QTextCursor.NextCell)
                scroll.setSliderPosition(scroll.maximum())

    def debugEmitter(self, args, append=False):
        """Debug signal to allow threaded debug infos.
        """
        self.debugSignal.emit(args, append)

    def detect(self, img, autoNeighbors=False, autoNeighborsParam=None):
        """Detect objects in img.
        """
        self.currentFrame = img
        item = None
        indexes = self.objectsTree.selectedIndexes()
        if autoNeighbors and indexes:
            item = self.objectsTree.model().itemFromIndex(indexes[0])
        # Detect on full size image
        tree, extracted = common.getObjectsTree(self.objectsTree,
                                                self.classifiersParameters,
                                                indexes, item)
        equalizeHist = self.equalizeHist.isChecked()
        rectsTree = self.detector.detect(img, tree, equalizeHist,
                                         self.debugEmitter, extracted, autoNeighborsParam)
        return rectsTree

    def displayImage(self, img):
        """Display numpy 'img' in 'mediaLabel'.
        """
        rectsTree = self.detect(img)
        displayMode = self.__displayModes[self.displayCBox.currentIndex()]
        if displayMode == self.DISPLAY_PREPROCESSED:
            img = self.detector.preprocessed
        pixmap = common.np2Qt(img)
        w = float(pixmap.width())
        # Scale image
        pixmap = common.fitImageToScreen(pixmap)
        scaleFactor = pixmap.width() / w
        # Draw scaled rectangles
        self.drawRects(pixmap, rectsTree, scaleFactor)
        self.mediaLabel.setPixmap(pixmap)
        self.mediaLabel.setFixedSize(pixmap.size())

    def displayMedia(self, path):
        """Load and display media.
        """
        if self.mediaThread.isRunning():
            self.mediaThread.stop()
            self.mediaThread.wait()
        sourceMode = self.__sourceModes[self.sourceCBox.currentIndex()]
        self.mediaThread.start()
        self.togglePlayButton(True)

    def getSourceMode(self):
        """Return the current source mode.
        """
        return self.__sourceModes[self.sourceCBox.currentIndex()]

    def getCurrentClassifierParameters(self):
        """Return current classifier parameters.
        """
        index = self.objectsTree.currentIndex()
        item = index.model().itemFromIndex(index)
        param = self.classifiersParameters[item.data()]
        return item, param

    def toggleFillPath(self):
        """Display fill path dialog button if needed.
        """
        index = self.fillCBox.currentIndex()
        fillMode = self.__fillModes[index]
        if fillMode == self.FILL_IMAGE:
            self.fillPath.show()
        else:
            self.fillPath.hide()

    def togglePlayButton(self, play=True):
        """Switch between play and pause icons.
        """
        if play is True:
            self.playButton.setIcon(QIcon('assets/pause.png'))
        else:
            self.playButton.setIcon(QIcon('assets/play.png'))

    ########################################################
    # Signals handlers

    def normalOutputWritten(self, text):
        """Append text to debug infos.
        """
        cursor = self.debugText.textCursor()
        cursor.movePosition(QtGui.QTextCursor.End)
        cursor.insertText(text)
        self.debugText.setTextCursor(cursor)
        QApplication.processEvents()

    def play(self):
        """Play current media.
        """
        if self.mediaThread.isRunning():
            self.mediaThread.stop()
            self.mediaThread.wait()
        else:
            self.displayMedia(self.sourcePath.text())

    def refresh(self):
        """Refresh current media.
        """
        if self.mediaThread.isRunning():
            self.mediaThread.stop()
        self.mediaThread.clean()
        self.displayMedia(self.sourcePath.text())

    def nextFrame(self):
        """Go to next frame of current media.
        """
        self.mediaThread.setNextFrameMode(True)
        self.displayMedia(self.sourcePath.text())

    def loadFillPath(self):
        """Display file dialog to choose fill image.
        """
        filters = self.tr('Image (' + self.IMAGE_FILTERS + ')')
        path, filters = QFileDialog.getOpenFileName(self, self.tr('Open file'),
                                                    '.', filters)
        if path:
            item, param = self.getCurrentClassifierParameters()
            param.fillPath = path

    def loadMedia(self):
        """Load image or video.
        """
        filters = ""
        filters = self.tr('Image (' + self.IMAGE_FILTERS + self.VIDEO_FILTERS + ')')
        path, filters = QFileDialog.getOpenFileName(self, self.tr('Open file'), '.',
                                                    filters)
        if path:
            self.sourcePath.setText(path)
            self.mediaThread.stop()
            self.mediaThread.clean()
            self.displayMedia(path)

    def togglePath(self, index):
        """Hide path for camera mode.
        """
        if self.__sourceModes[index] == self.SOURCE_CAMERA:
            self.sourcePath.hide()
            self.sourcePathButton.hide()
            self.displayMedia(self.sourcePath.text())
        else:
            self.sourcePath.show()
            self.sourcePathButton.show()

    def colorDialog(self):
        """Open color dialog to pick a color.
        """
        color = QColorDialog.getColor()

        if color.isValid():
            common.setPickerColor(color, self.colorPicker)
            self.updateClassifierParameters(color=color)

    def bgColorDialog(self):
        """Open color dialog to pick a color.
        """
        color = QColorDialog.getColor()
        if color.isValid():
            self.bgColor = color
            common.setPickerColor(color, self.bgColorPicker)

    def bgPathDialog(self):
        """Open file dialog to select background image.
        """
        filters = self.tr('Image (' + self.IMAGE_FILTERS + ')')
        path, filters = QFileDialog.getOpenFileName(self, self.tr('Open file'),
                                                    '.', filters)
        if path:
            self.bgPath = path

    def toggleBgParams(self, index):
        """Hide / Show color picker and button path for background.
        """
        mode = self.__bgModes[index]
        if mode == self.BG_COLOR:
            self.bgColorPicker.show()
        else:
            self.bgColorPicker.hide()
        if mode == self.BG_IMAGE:
            self.bgPathButton.show()
        else:
            self.bgPathButton.hide()

    def calcNeighbors(self):
        """Automatically calculate minimum neighbors.
        """
        running = False
        if self.mediaThread.isRunning():
            running = True
            self.mediaThread.stop()
            self.mediaThread.wait()
        self.detect(self.currentFrame, autoNeighbors=True,
                    autoNeighborsParam=self.autoNeighborsParam.value())
        self.showClassifierParameters(None, None)
        if running:
            self.displayMedia(self.sourcePath.text())
        else:
            self.displayImage(self.currentFrame)

    def updateClassifierParameters(self, name=None, color=None):
        """Update classifier parameters.
        """
        item, param = self.getCurrentClassifierParameters()
        if name:
            item.setText(name)
            param.name = name
        if color:
            item.setIcon(self.getIcon(color))
            param.color = color

    def updateStabilize(self, checked):
        """Update stabilize classifier parameter.
        """
        running = False
        if self.mediaThread.isRunning():
            running = True
            self.mediaThread.stop()
            self.mediaThread.wait()
        item, param = self.getCurrentClassifierParameters()
        param.stabilize = checked
        self.detect(self.currentFrame)
        if running:
            self.displayMedia(self.sourcePath.text())
        else:
            self.displayImage(self.currentFrame)

    def updateTracking(self, checked):
        """Update tracking classifier parameter.
        """
        item, param = self.getCurrentClassifierParameters()
        param.tracking = checked

    def updateScaleFactor(self, value):
        """Update scale factor classifier parameter.
        """
        item, param = self.getCurrentClassifierParameters()
        param.scaleFactor = value

    def updateShape(self, index):
        """Update shape classifier parameter.
        """
        item, param = self.getCurrentClassifierParameters()
        param.shape = self.__shapeModes[index]

    def updateFill(self, index):
        """Update fill classifier parameter.
        """
        item, param = self.getCurrentClassifierParameters()
        param.fill = self.__fillModes[index]
        self.toggleFillPath()

    def updateShowName(self, checked):
        """Update show name classifier parameter.
        """
        item, param = self.getCurrentClassifierParameters()
        param.showName = checked

    def updateMinNeighbors(self, value):
        """Update min neighbors classifier parameter.
        """
        item, param = self.getCurrentClassifierParameters()
        param.minNeighbors = value

    def updateMinWidth(self, value):
        """Update minimum width classifier parameter.
        """
        item, param = self.getCurrentClassifierParameters()
        w, h = param.minSize
        param.minSize = (value, h)

    def updateMinHeight(self, value):
        """Update minimum height classifier parameter.
        """
        item, param = self.getCurrentClassifierParameters()
        w, h = param.minSize
        param.minSize = (w, value)

    def showClassifierParameters(self, selected, deselected):
        """Show the selected classifier parameters.
        """
        selectedCount = len(self.objectsTree.selectedIndexes())
        if selectedCount == 1:
            self.displayBox.setEnabled(True)
            self.parametersBox.setEnabled(True)
            index = self.objectsTree.currentIndex()
            item = index.model().itemFromIndex(index)
            param = self.classifiersParameters[item.data()]
            self.classifierName.setText(param.name)
            common.setPickerColor(param.color, self.colorPicker)
            self.classifierType.setText('(' + param.classifier + ')')
            self.scaleFactor.setValue(param.scaleFactor)
            self.minNeighbors.setValue(param.minNeighbors)
            w, h = param.minSize
            self.minWidth.setValue(w)
            self.minHeight.setValue(h)
            self.shapeCBox.setCurrentIndex(self.__shapeModes.index(param.shape))
            self.fillCBox.setCurrentIndex(self.__fillModes.index(param.fill))
            self.stabilize.setChecked(param.stabilize)
            self.showName.setChecked(param.showName)
            self.tracking.setChecked(param.tracking)
        else:
            self.displayBox.setEnabled(False)
            self.parametersBox.setEnabled(False)

    def addObject(self):
        """Add object to detect.
        """
        selected = self.availableObjectsList.selectedItems()
        for item in selected:
            row = (self.availableObjectsList.row(item) + 1) % self.availableObjectsList.count()
            self.availableObjectsList.setCurrentRow(row)
            node = item.text()
            self.populateTree(node, self.objectsTree.model())

    def removeObject(self):
        """Remove object to detect.
        """
        selected = self.objectsTree.selectedIndexes()
        while selected and selected[0].model():
            index = selected[0]
            item = index.model().itemFromIndex(index)
            del self.classifiersParameters[item.data()]
            if item.parent():
                index.model().removeRow(item.row(), item.parent().index())
            else:
                index.model().removeRow(item.row())
            selected = self.objectsTree.selectedIndexes()

    def splitterMoved(self, pos, index):
        """Avoid segfault when QListWidget has focus and
        is going to be collapsed.
        """
        focusedWidget = QApplication.focusWidget()
        if focusedWidget:
            focusedWidget.clearFocus()

    def toggleDebugInfo(self, pressed):
        """Toggle debug infos widget.
        """
        if pressed:
            self.debugText.show()
            self.showDetails.setText(self.tr('Details <<<'))
        else:
            self.debugText.hide()
            self.showDetails.setText(self.tr('Details >>>'))
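
A minimal way to launch the window above, assuming the module's own PyQt imports (QApplication is already used elsewhere in the class):

if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())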