Exemplo n.º 1
0
    def __init__(self):
        """Configure detection windows and a permissive Tracker."""
        super(Detector, self).__init__()

        # Accepted dimension windows for candidate objects.
        self.dim1Lower, self.dim1Upper = 40, 100
        self.dim2Lower, self.dim2Upper = 50, 200
        self.numObjects = -1

        # Effectively unbounded tracking region; zero lifetimes.
        self.tracker = Tracker(upperBound=9999,
                               lowerBound=-9999,
                               rightBound=9999,
                               leftBound=-9999,
                               timeToDie=0,
                               timeToLive=0)

        self.clock = 1
        self.guiMode = False
        self.averageColour = [0, 0, 0]
        self.averageSize = 0

        # Horizontal extent of the belt region.
        self.beltXmin = 0
        self.beltXmax = 500
Exemplo n.º 2
0
    def __init__(self, env, n_people, rng, x_range, y_range, start_time, init_percent_sick, Human):
        """Build the simulated city: locations, humans, preferences, tracker.

        env: simulation environment exposing a `timestamp` attribute.
        n_people: number of humans to instantiate.
        rng: random number generator used during initialisation.
        x_range, y_range: (min, max) spatial extents; their product is the
            total area.
        start_time: simulation start time.
        init_percent_sick: fraction of the population initially sick.
        Human: class used to create each person.
        """
        self.env = env
        self.rng = rng
        self.x_range = x_range
        self.y_range = y_range
        self.total_area = (x_range[1] - x_range[0]) * (y_range[1] - y_range[0])
        self.n_people = n_people
        self.start_time = start_time
        self.init_percent_sick = init_percent_sick
        # daily test bookkeeping starts at the env's current date
        self.last_date_to_check_tests = self.env.timestamp.date()
        self.test_count_today = defaultdict(int)
        # test type names ordered by their 'preference' value (names only)
        self.test_type_preference = list(zip(*sorted(TEST_TYPES.items(), key=lambda x:x[1]['preference'])))[0]
        print("Initializing locations ...")
        self.initialize_locations()

        self.humans = []
        self.households = OrderedSet()
        print("Initializing humans ...")
        self.initialize_humans(Human)

        self.log_static_info()

        print("Computing their preferences")
        self._compute_preferences()
        self.tracker = Tracker(env, self)
        self.tracker.track_initialized_covid_params(self.humans)
Exemplo n.º 3
0
    def __init__(self, gps=False, servo_port=SERVO_PORT):
        """Wire up devices, navigation components and the tracker.

        gps: GPS device instance, or False when none is attached; stored as
            self._gps while wiring below reads self.gps -- presumably a
            property defined elsewhere in the class; TODO confirm.
        servo_port: serial port for the rudder servo.
        """
        # devices
        self._gps = gps
        self.windsensor = WindSensor(I2C(WINDSENSOR_I2C_ADDRESS))
        self.compass = Compass(I2C(COMPASS_I2C_ADDRESS),
                               I2C(ACCELEROMETER_I2C_ADDRESS))
        self.red_led = GpioWriter(17, os)
        self.green_led = GpioWriter(18, os)

        # Navigation
        self.globe = Globe()
        self.timer = Timer()
        self.application_logger = self._rotating_logger(APPLICATION_NAME)
        self.position_logger = self._rotating_logger("position")
        self.exchange = Exchange(self.application_logger)
        self.timeshift = TimeShift(self.exchange, self.timer.time)
        self.event_source = EventSource(self.exchange, self.timer,
                                        self.application_logger,
                                        CONFIG['event source'])

        # main sensor suite writes to the position log
        self.sensors = Sensors(self.gps, self.windsensor, self.compass,
                               self.timer.time, self.exchange,
                               self.position_logger, CONFIG['sensors'])
        self.gps_console_writer = GpsConsoleWriter(self.gps)
        self.rudder_servo = Servo(serial.Serial(servo_port),
                                  RUDDER_SERVO_CHANNEL, RUDDER_MIN_PULSE,
                                  RUDDER_MIN_ANGLE, RUDDER_MAX_PULSE,
                                  RUDDER_MAX_ANGLE)
        self.steerer = Steerer(self.rudder_servo, self.application_logger,
                               CONFIG['steerer'])
        self.helm = Helm(self.exchange, self.sensors, self.steerer,
                         self.application_logger, CONFIG['helm'])
        self.course_steerer = CourseSteerer(self.sensors, self.helm,
                                            self.timer,
                                            CONFIG['course steerer'])
        self.navigator = Navigator(self.sensors, self.globe, self.exchange,
                                   self.application_logger,
                                   CONFIG['navigator'])
        self.self_test = SelfTest(self.red_led, self.green_led, self.timer,
                                  self.rudder_servo, RUDDER_MIN_ANGLE,
                                  RUDDER_MAX_ANGLE)

        # Tracking
        # a second Sensors instance logs to the dedicated track log
        self.tracking_logger = self._rotating_logger("track")
        self.tracking_sensors = Sensors(self.gps, self.windsensor,
                                        self.compass, self.timer.time,
                                        self.exchange, self.tracking_logger,
                                        CONFIG['sensors'])
        self.tracker = Tracker(self.tracking_logger, self.tracking_sensors,
                               self.timer)
Exemplo n.º 4
0
    def __init__(self, ):
        """Detector with an unbounded region and one-tick object lifetimes."""
        super(Detector, self).__init__()

        # Effectively infinite tracking region; objects appear/expire in 1 tick.
        self.tracker = Tracker(upperBound=9999,
                               lowerBound=-9999,
                               rightBound=9999,
                               leftBound=-9999,
                               timeToDie=1,
                               timeToLive=1)

        # Size window starts closed; callers configure it later.
        self.sizeLower = 0
        self.sizeUpper = 0
Exemplo n.º 5
0
    def __init__(self, ):
        """Detector tuned for the Line30 sample set."""
        super(Detector,
              self).__init__(pathToCached='~/Samples/cached/Line30',
                             pathToSamples='~/Samples/samples/Line30')

        # Vertical band 70..200, unrestricted horizontally.
        self.tracker = Tracker(upperBound=200,
                               lowerBound=70,
                               rightBound=9999,
                               leftBound=-9999,
                               timeToDie=5,
                               timeToLive=3)

        # Size window starts closed; callers configure it later.
        self.sizeLower = 0
        self.sizeUpper = 0
Exemplo n.º 6
0
    def __init__(self,gps=False,servo_port=SERVO_PORT):
        """Assemble devices, navigation components and the tracker."""
        # devices
        self._gps = gps
        self.windsensor = WindSensor(I2C(WINDSENSOR_I2C_ADDRESS))
        self.compass = Compass(I2C(COMPASS_I2C_ADDRESS),
                               I2C(ACCELEROMETER_I2C_ADDRESS))
        self.red_led = GpioWriter(17, os)
        self.green_led = GpioWriter(18, os)

        # Navigation
        self.globe = Globe()
        self.timer = Timer()
        self.application_logger = self._rotating_logger(APPLICATION_NAME)
        self.position_logger = self._rotating_logger("position")
        self.exchange = Exchange(self.application_logger)
        self.timeshift = TimeShift(self.exchange, self.timer.time)
        self.event_source = EventSource(self.exchange, self.timer,
                                        self.application_logger,
                                        CONFIG['event source'])

        self.sensors = Sensors(self.gps, self.windsensor, self.compass,
                               self.timer.time, self.exchange,
                               self.position_logger, CONFIG['sensors'])
        self.gps_console_writer = GpsConsoleWriter(self.gps)
        self.rudder_servo = Servo(serial.Serial(servo_port),
                                  RUDDER_SERVO_CHANNEL, RUDDER_MIN_PULSE,
                                  RUDDER_MIN_ANGLE, RUDDER_MAX_PULSE,
                                  RUDDER_MAX_ANGLE)
        self.steerer = Steerer(self.rudder_servo, self.application_logger,
                               CONFIG['steerer'])
        self.helm = Helm(self.exchange, self.sensors, self.steerer,
                         self.application_logger, CONFIG['helm'])
        self.course_steerer = CourseSteerer(self.sensors, self.helm,
                                            self.timer,
                                            CONFIG['course steerer'])
        self.navigator = Navigator(self.sensors, self.globe, self.exchange,
                                   self.application_logger,
                                   CONFIG['navigator'])
        self.self_test = SelfTest(self.red_led, self.green_led, self.timer,
                                  self.rudder_servo, RUDDER_MIN_ANGLE,
                                  RUDDER_MAX_ANGLE)

        # Tracking
        self.tracking_logger = self._rotating_logger("track")
        self.tracking_sensors = Sensors(self.gps, self.windsensor,
                                        self.compass, self.timer.time,
                                        self.exchange, self.tracking_logger,
                                        CONFIG['sensors'])
        self.tracker = Tracker(self.tracking_logger, self.tracking_sensors,
                               self.timer)
Exemplo n.º 7
0
    def __init__(self, cfg):
        """Assemble the pipeline: features, matcher, tracker, EKF and DB."""
        self.cfg_ = self.build_cfg(cfg)
        # FAST corners + ORB descriptors (SURF alternative kept commented)
        self.detector_ = cv2.FastFeatureDetector_create(threshold=19,
                                                        nonmaxSuppression=True)
        self.extractor_ = cv2.ORB_create(2048, edgeThreshold=19)
        #self.extractor_ = cv2.xfeatures2d.SURF_create()
        self.matcher_ = Matcher(ex=self.extractor_)
        # pLK: presumably Lucas-Kanade optical-flow params -- TODO confirm
        self.tracker_ = Tracker(pLK=cfg['pLK'])
        self.kf_ = build_ekf()
        self.db_ = self.build_db()
        self.state_ = PipelineState.INIT

        # higher-level handles?
        # note: the initializer receives its own separate DB instance
        self.initializer_ = MapInitializer(db=self.build_db(),
                                           matcher=self.matcher_,
                                           tracker=self.tracker_,
                                           cfg=self.cfg_)
Exemplo n.º 8
0
    def test_should_call_update_stats(self):
        """A timer tick makes the tracker push a stats update to sensors."""
        timer = StubTimer()
        sensors = Mock()
        tracker = Tracker(Mock(), sensors, timer)

        tracker.track(300)
        timer.signal_time_elapsed()

        sensors.update_averages.assert_called_once_with(None)
Exemplo n.º 9
0
    def __init__(self, dim1Lower, dim1Upper, dim2Lower, dim2Upper, name,
                 initialRoi, **kwargs):
        """Generic detector configured by dimension bounds and strategy name.

        name: detection strategy name; the subclass must provide methods
            `self.<name>` and `self.<name>Debug`.
        initialRoi: (x1, y1, x2, y2) initial region of interest.
        **kwargs: forwarded to Tracker; may also carry the optional
            'dimensionTracking' / 'colourTracking' flags.
        """
        self.dim1Lower = dim1Lower
        self.dim1Upper = dim1Upper
        self.dim2Lower = dim2Lower
        self.dim2Upper = dim2Upper
        # Direct call / getattr replace the original eval() lookups --
        # identical behaviour without evaluating constructed strings.
        self.transformer = Transformer(name)
        self.detect = getattr(self, name)
        self.detectDebug = getattr(self, name + 'Debug')
        self.numObjects = -1
        self.tracker = Tracker(**kwargs)

        self.counter = 10000
        self.roiX1, self.roiY1, self.roiX2, self.roiY2 = initialRoi
        self.guiMode = False
        self.averageColour = [0, 0, 0]
        self.averageSize = 0
        # optional feature flags (None when absent from kwargs)
        self.dimTracking = kwargs.get("dimensionTracking")
        self.colourTracking = kwargs.get("colourTracking")
Exemplo n.º 10
0
    def test_should_log_current_position_callback_every_time_callback_fires(
            self):
        """Each timer tick should produce exactly one position log entry."""
        timer = StubTimer()
        sensors = Mock()
        Tracker(Mock(), sensors, timer).track(300)

        for _ in range(3):
            timer.signal_time_elapsed()

        self.assertEqual(sensors.log_values.call_count, 3)
Exemplo n.º 11
0
    def test_should_log_welcome_message_and_column_headers(self):
        """Tracker.track logs a dated welcome line, then the CSV headers."""
        # NOTE(review): the date is captured just before track() runs; an
        # exact-match assertion could flake if the day rolls over between
        # the two calls.
        now = datetime.datetime.now()
        mock_logger = Mock()
        mock_sensors = Mock()

        Tracker(mock_logger, mock_sensors, StubTimer()).track(300)

        mock_logger.info.assert_has_calls([
            call('Pi-Nav starting tracking ' + now.strftime("%Y-%m-%d")),
            call(
                'latitude, longitute, +-lat, +-long, speed, track, +-speed, +-track, |, wind, avg wind, abs wind, |, comp, avg comp'
            )
        ])
Exemplo n.º 12
0
    def __init__(self):
        """Detector tuned for the Line11 sample set."""
        super(Detector,
              self).__init__(pathToCached='~/Samples/cached/Line11',
                             pathToSamples='~/Samples/samples/Line11')

        # Accepted object size window.
        self.sizeLower = 60
        self.sizeUpper = 200

        # Tracking region restricted to the belt area; short lifetimes.
        self.tracker = Tracker(upperBound=300,
                               lowerBound=220,
                               rightBound=270,
                               leftBound=30,
                               timeToDie=1,
                               timeToLive=0)

        self.guiMode = False
        self.averageColour = [0, 0, 0]
        self.averageSize = 0
Exemplo n.º 13
0
    def __init__(self):
        """Build the simulated boat: vehicle, sensors, steering, tracking."""
        self.globe = Globe()
        self.console_logger = self._console_logger()
        self.exchange = Exchange(self.console_logger)
        self.gps = SimulatedGPS(CHORLTON.position, 0, 0.1)
        self.vehicle = SimulatedVehicle(self.gps, self.globe,
                                        self.console_logger,
                                        single_step=False)
        self.timeshift = TimeShift(self.exchange, self.vehicle.timer.time)
        self.event_source = EventSource(self.exchange, self.vehicle.timer,
                                        self.console_logger,
                                        CONFIG['event source'])
        self.sensors = Sensors(self.vehicle.gps, self.vehicle.windsensor,
                               self.vehicle.compass, self.vehicle.timer.time,
                               self.exchange, self.console_logger,
                               CONFIG['sensors'])
        self.steerer = Steerer(self.vehicle.rudder, self.console_logger,
                               CONFIG['steerer'])
        self.helm = Helm(self.exchange, self.sensors, self.steerer,
                         self.console_logger, CONFIG['helm'])
        self.course_steerer = CourseSteerer(self.sensors, self.helm,
                                            self.vehicle.timer,
                                            CONFIG['course steerer'])
        self.navigator_simulator = Navigator(self.sensors, self.globe,
                                             self.exchange,
                                             self.console_logger,
                                             CONFIG['navigator'])

        # tracking runs off its own timer
        self.tracking_timer = Timer()
        self.tracker_simulator = Tracker(self.console_logger, self.sensors,
                                         self.tracking_timer)
Exemplo n.º 14
0
    def __init__(self, env, n_people, rng, x_range, y_range, start_time,
                 init_percent_sick, Human, sim_days):
        """Build the simulated city: locations, humans and a stats tracker.

        env: simulation environment.
        n_people: number of humans to instantiate.
        rng: random number generator used during initialisation.
        x_range, y_range: (min, max) spatial extents; their product is the
            total area.
        start_time: simulation start time.
        init_percent_sick: fraction of the population initially sick.
        Human: class used to create each person.
        sim_days: number of simulated days to run.
        """
        self.env = env
        self.rng = rng
        self.x_range = x_range
        self.y_range = y_range
        self.total_area = (x_range[1] - x_range[0]) * (y_range[1] - y_range[0])
        self.n_people = n_people
        self.start_time = start_time
        self.init_percent_sick = init_percent_sick
        self.sim_days = sim_days
        print("Initializing locations ...")
        self.initialize_locations()

        self.humans = []
        self.households = OrderedSet()
        print("Initializing humans ...")
        self.initialize_humans(Human)

        print("Computing their preferences")
        self._compute_preferences()
        self.tracker = Tracker(env, self)
Exemplo n.º 15
0
def all_overlaps():
    '''Run the overlap test on every JSON fixture and exit with the
    number of failures (0 == all passed).

    For each ``<name>.json`` under ``TEST_CONF['videoPath']`` the matching
    ``<name>.mp4`` video is analysed; the average per-frame overlap against
    the expected data below 50% counts as a failure.
    '''
    test_files = []
    fails = 0
    for root, _dirs, files in os.walk(TEST_CONF['videoPath']):
        for f in files:
            if f.endswith('.json'):
                test_files.append(os.path.join(root, f))

    for f in test_files:
        # 'x.json' -> 'x.mp4' (strip 'json', keep the dot, append 'mp4')
        video_file = f[:-4] + 'mp4'
        # close the fixture file promptly instead of leaking the handle
        with open(f, 'r') as fixture:
            expected_data = json.load(fixture)
        actual_data = Tracker(video_file, 853, 480).analyse()
        overlap_pcts = test.generate_frame_overlaps(actual_data, expected_data)
        avg = round(sum(overlap_pcts) / len(overlap_pcts), 1)
        # report name: last two path components without the '.json' suffix
        name = '/'.join(f[:-5].split('/')[-2:])
        if avg < 50:
            fails += 1
            linestart = RED + CROSS
        else:
            linestart = GREEN + TICK
        # print() call works on both Python 2 and 3 for a single argument
        print(linestart + ' ' + str(avg) + '%' + ' ' + name + ENDC)

    sys.exit(fails)
Exemplo n.º 16
0
class PAYGService(object):
    """D-Bus facade for the PAYGO (pay-as-you-go) service.

    Reads go through ``self.tracker.query(...)``; writes use the blocking
    D-Bus ``SetValue`` call. Queries may return None when the service or
    path is unavailable, and every getter handles that case explicitly.
    """

    SERVICE_NAME = 'com.victronenergy.paygo'

    def __init__(self, conn):
        self.conn = conn
        self.tracker = Tracker()

    def service_available(self):
        """Return True when the PAYGO service answers the enabled query."""
        payg_enabled = self.tracker.query(self.conn, self.SERVICE_NAME,
                                          "/Status/PaygoEnabled")
        return payg_enabled is not None

    def is_active(self):
        """Return True when the device reports itself currently active."""
        is_active = self.tracker.query(self.conn, self.SERVICE_NAME,
                                       "/Status/CurrentlyActive")
        return bool(is_active)

    def is_payg_enabled(self):
        """Return the enabled flag, defaulting to True when unavailable."""
        payg_enabled = self.tracker.query(self.conn, self.SERVICE_NAME,
                                          "/Status/PaygoEnabled")
        if payg_enabled is None:
            return True
        return payg_enabled

    def token_entry_allowed(self):
        """Return True unless token entry is blocked until a future date."""
        # query once instead of twice (the original issued two D-Bus reads)
        blocked_until = self._get_blocked_until_date()
        if not blocked_until:
            return True
        return datetime.now() >= blocked_until

    def get_minutes_of_token_block(self):
        """Minutes remaining in the token-entry block (0 when not blocked)."""
        blocked_until = self._get_blocked_until_date()
        if not blocked_until:
            return 0
        td = blocked_until - datetime.now()
        minutes_left = int(round(float(td.seconds) / 60, 0))
        return minutes_left + (td.days * 60 * 24)

    def get_number_of_days_and_hours_left(self):
        """Return (days, hours) until expiration; (0, 0) when unknown."""
        expiration = self._get_expiration_date()
        if not expiration:
            return 0, 0
        td = expiration - datetime.now()
        days_left = td.days
        hours_left = int(round(float(td.seconds) / 3600, 0))
        if hours_left == 24:  # rounding pushed us to a full extra day
            days_left += 1
            hours_left = 0
        return days_left, hours_left

    def update_device_status_if_code_valid(self, token):
        """Submit `token` and report whether the service accepted it."""
        self._dbus_write(self.SERVICE_NAME, "/Tokens/SetToken", token)
        return self.tracker.query(self.conn, self.SERVICE_NAME,
                                  "/Tokens/LastTokenValid")

    def update_lvd_value(self, new_lvd_volts):
        """Write a new low-voltage-disconnect threshold."""
        self._dbus_write(self.SERVICE_NAME, "/LVD/Threshold", new_lvd_volts)
        return True

    def get_lvd_value(self):
        """Return the low-voltage-disconnect threshold, or None."""
        return self.tracker.query(self.conn, self.SERVICE_NAME,
                                  "/LVD/Threshold")

    def _get_expiration_date(self):
        """Expiration as a datetime, or None when not reported."""
        expiration_date = self.tracker.query(self.conn, self.SERVICE_NAME,
                                             "/Status/ActiveUntilDate")
        if expiration_date is None:
            return None
        return self._datetime_from_unix_timestamp(expiration_date)

    def _get_blocked_until_date(self):
        """Token-entry block end as a datetime, or None when not blocked."""
        blocked_until_date = self.tracker.query(
            self.conn, self.SERVICE_NAME, "/Tokens/EntryBlockedUntilDate")
        if blocked_until_date is None:
            return None
        return self._datetime_from_unix_timestamp(blocked_until_date)

    def _dbus_write(self, service_name, path, value):
        # values are always serialised as strings ('s' signature)
        return self.conn.call_blocking(service_name, path, None, "SetValue",
                                       's', [str(value)])

    def _datetime_from_unix_timestamp(self, timestamp):
        """Convert a seconds-since-epoch timestamp to a naive datetime."""
        return datetime(1970, 1, 1) + timedelta(seconds=timestamp)
Exemplo n.º 17
0
class Pipeline(object):
    def __init__(self, cfg):
        """Build the visual pipeline from a raw configuration dict."""
        self.cfg_ = self.build_cfg(cfg)
        # FAST corner detector + ORB descriptors (SURF alternative commented)
        self.detector_ = cv2.FastFeatureDetector_create(threshold=19,
                                                        nonmaxSuppression=True)
        self.extractor_ = cv2.ORB_create(2048, edgeThreshold=19)
        #self.extractor_ = cv2.xfeatures2d.SURF_create()
        self.matcher_ = Matcher(ex=self.extractor_)
        # pLK: presumably Lucas-Kanade optical-flow params -- TODO confirm
        self.tracker_ = Tracker(pLK=cfg['pLK'])
        self.kf_ = build_ekf()
        self.db_ = self.build_db()
        self.state_ = PipelineState.INIT

        # higher-level handles?
        # the initializer gets its own separate DB instance
        self.initializer_ = MapInitializer(db=self.build_db(),
                                           matcher=self.matcher_,
                                           tracker=self.tracker_,
                                           cfg=self.cfg_)

    def build_cfg(self, cfg):
        """Return a copy of `cfg` augmented with derived, scale-adjusted values.

        The caller's dict is left untouched. Note that the returned 'K0'
        entry aliases the incoming camera matrix (shallow reference), while
        'K' is a freshly scaled copy.
        """
        # apply scale to the image size and camera intrinsics
        w = int(cfg['scale'] * cfg['w'])
        h = int(cfg['scale'] * cfg['h'])
        K0 = cfg['K']
        K = cfg['scale'] * cfg['K']
        K[2, 2] = 1.0  # homogeneous row must stay unscaled

        # image shape
        shape = (h, w, 3)  # TODO : handle monochrome

        # shallow copy so the argument is not mutated
        cfg = dict(cfg)

        # insert derived values
        cfg['w'] = w
        cfg['h'] = h
        cfg['shape'] = shape
        cfg['K0'] = K0
        cfg['K'] = K

        return cfg

    def build_db(self):
        """Create a DB sized for this pipeline's images and descriptors."""
        cfg = self.cfg_
        ex = self.extractor_
        # image buffer format: configured (h, w, c) shape, uint8 pixels
        img_fmt = (cfg['shape'], np.uint8)
        # descriptor dtype follows the extractor (binary uint8 vs float32)
        dsc_t = (np.uint8 if ex.descriptorType() == cv2.CV_8U else np.float32)
        dsc_fmt = (self.extractor_.descriptorSize(), dsc_t)
        return DB(img_fmt=img_fmt, dsc_fmt=dsc_fmt)

    def motion_model(self, f0, f1, stamp, use_kalman=False):
        """Predict the state at `stamp` from the two most recent frames.

        f0, f1: frame records exposing 'pose', 'cov' and 'stamp'; the pose
            vector carries translation at L_POS and euler angles at A_POS.
        Returns (x, P): predicted pose vector and covariance.
        """
        if not use_kalman:
            # simple `repetition` model
            txn0, rxn0 = f0['pose'][L_POS], f0['pose'][A_POS]
            txn1, rxn1 = f1['pose'][L_POS], f1['pose'][A_POS]
            # NOTE(review): R0/R1 are computed but never used below.
            R0 = tx.euler_matrix(*rxn0)
            R1 = tx.euler_matrix(*rxn1)

            T0 = tx.compose_matrix(angles=rxn0, translate=txn0)
            T1 = tx.compose_matrix(angles=rxn1, translate=txn1)

            # relative motion between the frames ...
            Tv = np.dot(T1, vm.inv(T0))  # Tv * T0 = T1
            # ... replayed once more to extrapolate the next pose
            T2 = np.dot(Tv, T1)

            txn = tx.translation_from_matrix(T2)
            rxn = tx.euler_from_matrix(T2)

            # keep f1's state/covariance; overwrite position and angles
            x = f1['pose'].copy()
            P = f1['cov'].copy()
            x[0:3] = txn
            x[9:12] = rxn
            return x, P
        else:
            # dt MUST NOT BE None
            # seed the EKF with f0, use f1's pose as a measurement, then
            # predict forward to the requested stamp
            self.kf_.x = f0['pose']
            self.kf_.P = f0['cov']
            dt = (f1['stamp'] - f0['stamp'])
            self.kf_.predict(dt)

            txn, rxn = f1['pose'][L_POS], f1['pose'][A_POS]
            z = np.concatenate([txn, rxn])
            self.kf_.update(z)
            dt = (stamp - f1['stamp'])
            self.kf_.predict(dt)
            return self.kf_.x.copy(), self.kf_.P.copy()

    def is_keyframe(self, frame):
        """Heuristic keyframe test: keep frames with a rich feature set.

        TODO : more robust keyframe heuristic
        == possibly filter for richness of tracking features?
        """
        feat = frame['feat'].item()
        n_kpt = len(feat.kpt)
        return n_kpt > 100  # TODO: arbitrary threshold

    def build_frame(self, img, stamp):
        """Build a frame record (index, pose prior, features) for `img`.

        The pose prior comes from the motion model once at least two frames
        exist; otherwise it is zero with a tiny covariance.
        """
        # automatic index assignment
        # NOTE: multiple frames will have the same index
        # if not appended to self.db_.frame
        # TODO : separate out feature processing parts?
        index = self.db_.frame.size

        # by default, not a keyframe
        is_kf = False

        # extract features
        kpt = self.detector_.detect(img)
        kpt, dsc = self.extractor_.compute(img, kpt)
        # kpt, dsc = self.extractor_.detectAndCompute(img, None)
        feat = Feature(kpt, dsc, cv2.KeyPoint.convert(kpt))

        # apply motion model? initialize pose anyway
        # NOTE(review): reads self.db_.frame_ here but self.db_.frame above
        # -- presumably two views of the same table; confirm against DB.
        if self.db_.frame_.size >= 2:
            print('motion-model')
            x, P = self.motion_model(f0=self.db_.frame_[-2],
                                     f1=self.db_.frame_[-1],
                                     stamp=stamp,
                                     use_kalman=True)
        else:
            x = np.zeros(self.cfg_['state_size'])
            P = 1e-6 * np.eye(self.cfg_['state_size'])

        frame = (index, stamp, img, x, P, is_kf, feat)
        res = np.array(frame, dtype=self.db_.frame.dtype)
        return res

    def transition(self, new_state):
        """Log the state change and record the new pipeline state."""
        old_state = self.state_
        print('[state] ({} -> {})'.format(old_state, new_state))
        self.state_ = new_state

    def init_map(self, img, stamp, data):
        """Initialize the map from the current frame.

        Feeds the new frame to the initializer; on success, merges the
        initializer's database into self.db_, transitions to TRACK and
        fills `data` with debug visualisations.
        """
        # fetch prv+cur frames
        # populate frame from motion model
        frame = self.build_frame(img, stamp)
        suc = self.initializer_.compute(frame, data)
        # print(data['dbg-tv'])
        if not suc:
            return

        self.db_.extend(self.initializer_.db_)
        self.transition(PipelineState.TRACK)

        #print self.db_.landmark_['pos'][self.db_.landmark_['tri']][:5]
        #print self.initializer_.db_.landmark_['pos'][self.db_.landmark_['tri']][:5]

        if True:  # debug visualisation block
            frame0 = self.db_.frame_[0]
            frame1 = self.db_.frame_[1]
            img0, img1 = frame0['image'], frame1['image']
            feat0, feat1 = frame0['feat'], frame1['feat']
            # matched keypoint coordinates in the two frames
            pt0m, pt1m = feat0.pt[data['mi0']], feat1.pt[data['mi1']]
            msk = data['msk_cld']

            print('frame pair : {}-{}'.format(frame0['index'],
                                              frame1['index']))

            viz0 = cv2.drawKeypoints(img0, feat0.kpt, None)
            viz1 = cv2.drawKeypoints(img1, feat1.kpt, None)
            viz = draw_matches(viz0, viz1, pt0m[msk], pt1m[msk])
            data['viz'] = viz

            # == if cfg['dbg-cloud']:
            # dense reconstruction, for visualisation only
            dr_data = {}
            cld_viz, col_viz = DenseRec(self.cfg_['K']).compute(img0,
                                                                img1,
                                                                P1=data['P0'],
                                                                P2=data['P1'],
                                                                data=dr_data)
            # drop the farthest 5% of points to keep the cloud readable
            cdist = vm.norm(cld_viz)
            data['cld_viz'] = cld_viz[cdist < np.percentile(cdist, 95)]
            data['col_viz'] = col_viz[cdist < np.percentile(cdist, 95)]
        self.initializer_.reset()
        return

    def bundle_adjust(self, frame0, frame1):
        """Run windowed bundle adjustment ending at frame1.

        The window covers at most the 8 frames preceding frame1 (never
        earlier than frame0). On success the optimized poses and landmark
        positions are written back into the database.
        """
        idx0, idx1 = max(frame0['index'], frame1['index'] - 8), frame1['index']
        #idx0, idx1 = keyframe['index'], frame1['index']
        obs = self.db_.observation
        # observations whose source frame lies inside the window
        msk = np.logical_and(idx0 <= obs['src_idx'], obs['src_idx'] <= idx1)

        # parse observation
        i_src = obs['src_idx'][msk]
        #print('i_src', i_src)
        i_lmk = obs['lmk_idx'][msk]
        p_obs = obs['point'][msk]

        # index pruning relevant sources
        # remap sparse global indices to a dense 0..k range for the solver
        i_src_alt, i_a2s, i_s2a = index_remap(i_src)
        i_lmk_alt, i_a2l, i_l2a = index_remap(i_lmk)

        # 1. select targets based on new index
        i_src = i_s2a
        i_lmk = i_l2a
        frames = self.db_.frame[i_a2s[i_src_alt]]
        landmarks = self.db_.landmark[i_a2l[i_lmk_alt]]

        # parse data
        txn = frames['pose'][:, L_POS]
        rxn = frames['pose'][:, A_POS]
        lmk = landmarks['pos']

        data_ba = {}
        # NOTE : txn/rxn will be internally inverted to reduce duplicate compute.
        suc = BundleAdjustment(
            i_src,
            i_lmk,
            p_obs,  # << observation
            txn,
            rxn,
            lmk,
            self.cfg_['K'],
            axa=True).compute(data=data_ba)  # << data

        if suc:
            # TODO : apply post-processing kalman filter?
            #print('{}->{}'.format(txn, data_ba['txn']))
            #print('{}->{}'.format(rxn, data_ba['rxn']))
            # write the optimized values back into the global tables
            txn = data_ba['txn']
            rxn = data_ba['rxn']
            lmk = data_ba['lmk']
            self.db_.frame['pose'][i_a2s[i_src_alt], L_POS] = txn
            self.db_.frame['pose'][i_a2s[i_src_alt], A_POS] = rxn
            self.db_.landmark['pos'][i_a2l[i_lmk_alt]] = lmk

    @profile(sort='cumtime')
    def track(self, img, stamp, data={}):
        """ Track landmarks"""
        # unroll data
        # fetch frame pair
        # TODO : add landmarks along the way
        # TODO : update landmarks through optimization
        mapframe = self.db_.keyframe[0]  # first keyframe = map frame
        keyframe = self.db_.keyframe[-1]  # last **keyframe**
        frame0 = self.db_.frame[-1]  # last **frame**
        frame1 = self.build_frame(img, stamp)
        # print('prior position',
        #         frame1['pose'][L_POS], frame1['pose'][A_POS])
        landmark = self.db_.landmark

        img1 = frame1['image']
        feat1 = frame1['feat'].item()

        # bypass match_local for already tracking points ...
        pt0_l = landmark['pt'][landmark['track']]
        pt1_l, msk_t = self.tracker_.track(frame0['image'],
                                           img1,
                                           pt0_l,
                                           return_msk=True)

        # apply tracking mask
        pt0_l = pt0_l[msk_t]
        pt1_l = pt1_l[msk_t]

        # update tracking status
        landmark['track'][landmark['track'].nonzero()[0][~msk_t]] = False
        landmark['pt'][landmark['track']] = pt1_l

        # search additional points
        cld0_l = landmark['pos'][~landmark['track']]
        dsc_l = landmark['dsc'][~landmark['track']]

        msk_prj = None
        if len(cld0_l) >= 128:
            # merge with projections
            pt0_cld_l = project_to_frame(cld0_l,
                                         source_frame=mapframe,
                                         target_frame=frame1,
                                         K=self.cfg_['K'],
                                         D=self.cfg_['D'])

            # in-frame projection mask
            msk_prj = np.logical_and.reduce([
                0 <= pt0_cld_l[..., 0],
                pt0_cld_l[..., 0] < self.cfg_['w'],
                0 <= pt0_cld_l[..., 1],
                pt0_cld_l[..., 1] < self.cfg_['h'],
            ])

        pt0 = pt0_l
        pt1 = pt1_l
        cld0 = landmark['pos'][landmark['track']]
        obs_lmk_idx = landmark['index'][landmark['track']]

        search_map = (False and len(cld0) <= 256 and (msk_prj is not None)
                      and (msk_prj.sum() >= 16))

        if search_map:
            # sample points from the map
            mi0, mi1 = match_local(
                pt0_cld_l[msk_prj],
                feat1.pt,
                dsc_l[msk_prj],
                feat1.dsc,
                hamming=(not feat1.dsc.dtype == np.float32),
            )

            # collect all parts
            pt0 = np.concatenate([pt0, pt0_cld_l[msk_prj][mi0]], axis=0)
            pt1 = np.concatenate([pt1, feat1.pt[mi1]], axis=0)
            cld0 = np.concatenate(
                [cld0, landmark['pos'][~landmark['track']][msk_prj][mi0]],
                axis=0)

            obs_lmk_idx = np.concatenate([
                obs_lmk_idx,
                landmark['index'][~landmark['track']][msk_prj][mi0]
            ],
                                         axis=0)

        # debug ...
        # pt_dbg = project_to_frame(
        #        landmark['pos'][landmark['track']],
        #        frame1,
        #        self.cfg_['K'], self.cfg_['D'])
        ##img_dbg = draw_points(img1.copy(), pt_dbg, color=(255,0,0) )
        ##draw_points(img_dbg, pt1, color=(0,0,255) )
        #img_dbg = draw_matches(img1, img1, pt_dbg, pt1)
        #cv2.imshow('dbg', img_dbg)
        #print_ratio(len(pt0_l), len(pt0), name='point source')

        # if len(mi0) <= 0:
        #    viz1 = draw_points(img1.copy(), pt0)
        #    viz2 = draw_points(img1.copy(), feat1.pt)
        #    viz = np.concatenate([viz1, viz2], axis=1)
        #    cv2.imshow('pnp', viz)
        #    return False

        #print_ratio(len(mi0), len(pt0))
        # suc, rvec, tvec = cv2.solvePnP(
        #        cld0[:, None], pt1[:, None],
        #        self.cfg_['K'], self.cfg_['D'],
        #        flags = cv2.SOLVEPNP_EPNP
        #        ) # T(rv,tv) . cld = cam
        #inl = None
        #print 'euler', tx.euler_from_matrix(cv2.Rodrigues(rvec)[0])

        T_i = tx.inverse_matrix(
            tx.compose_matrix(translate=frame1['pose'][L_POS],
                              angles=frame1['pose'][A_POS]))
        rvec0 = cv2.Rodrigues(T_i[:3, :3])[0]
        tvec0 = T_i[:3, 3:]

        if len(pt1) >= 1024:
            # prune
            nmx_idx = non_max(pt1, landmark['rsp'][obs_lmk_idx])
            print_ratio(len(nmx_idx), len(pt1), name='non_max')
            cld_pnp, pt1_pnp = cld0[nmx_idx], pt1[nmx_idx]
        else:
            cld_pnp, pt1_pnp = cld0, pt1

        # hmm ... pose-only BA vs. PnP
        if False:
            data_pnp = {}
            #print('txn-prv', frame0['pose'][L_POS])
            #print('rxn-prv', frame0['pose'][A_POS])
            #print('txn-in', frame1['pose'][L_POS])
            #print('rxn-in', frame1['pose'][A_POS])
            suc = BundleAdjustment(i_src=np.full(len(cld_pnp), 0),
                                   i_lmk=np.arange(len(cld_pnp)),
                                   p_obs=pt1_pnp,
                                   txn=frame1['pose'][L_POS][None, ...],
                                   rxn=frame1['pose'][A_POS][None, ...],
                                   lmk=cld_pnp,
                                   K=self.cfg_['K'],
                                   pose_only=True).compute(crit=dict(
                                       loss='soft_l1',
                                       xtol=1e-8,
                                       f_scale=np.sqrt(5.991)),
                                                           data=data_pnp)
            #print('txn-out', data_pnp['txn'][0])
            #print('rxn-out', data_pnp['rxn'][0])
            T = tx.compose_matrix(translate=data_pnp['txn'][0],
                                  angles=data_pnp['rxn'][0])
            Ti = tx.inverse_matrix(T)
            rxn_pnp = np.float32(tx.euler_from_matrix(Ti))
            txn_pnp = tx.translation_from_matrix(Ti)
            rvec = cv2.Rodrigues(Ti[:3, :3])[0]
            tvec = txn_pnp
            prj = cvu.project_points(cld_pnp, rvec, tvec, self.cfg_['K'],
                                     self.cfg_['D'])
            err = vm.norm(prj - pt1_pnp)
            inl = np.where(err <= 1.0)[0]
        else:
            # == if(cfg['dbg_pnp']):
            #print 'frame1-pose', frame1['pose']
            # dbg = draw_matches(img1, img1,
            #        project_to_frame(cld_pnp, source_frame=mapframe, target_frame=frame1,
            #        K=self.cfg_['K'], D=self.cfg_['D']),
            #        pt1_pnp)
            #cv2.imshow('pnp', dbg)
            # cv2.waitKey(0)

            suc, rvec, tvec, inl = cv2.solvePnPRansac(
                cld_pnp[:, None],
                pt1_pnp[:, None],
                self.cfg_['K'],
                self.cfg_['D'],
                useExtrinsicGuess=True,
                rvec=rvec0,
                tvec=tvec0,
                iterationsCount=1024,
                reprojectionError=1.0,
                confidence=0.999,
                flags=cv2.SOLVEPNP_EPNP
                # flags=cv2.SOLVEPNP_DLS
                # flags=cv2.SOLVEPNP_ITERATIVE
                # minInliersCount=0.5*_['pt0']
            )

        n_pnp_in = len(cld_pnp)
        n_pnp_out = len(inl) if (inl is not None) else 0
        #print 'inl', inl
        print n_pnp_in
        print n_pnp_out

        suc = (suc and (inl is not None)
               and (n_pnp_out >= 128 or n_pnp_out >= 0.25 * n_pnp_in))
        print('pnp success : {}'.format(suc))
        if inl is not None:
            print_ratio(n_pnp_out, n_pnp_in, name='pnp')

        # visualize match statistics
        viz_pt0 = project_to_frame(
            cld0,
            source_frame=mapframe,
            target_frame=keyframe,  # TODO: keyframe may no longer be true?
            K=self.cfg_['K'],
            D=self.cfg_['D'])
        viz_msk = np.logical_and.reduce([
            0 <= viz_pt0[:, 0],
            viz_pt0[:, 0] < self.cfg_['w'],
            0 <= viz_pt0[:, 1],
            viz_pt0[:, 1] < self.cfg_['h'],
        ])
        viz1 = draw_points(img1.copy(), feat1.pt)
        viz = draw_matches(keyframe['image'], viz1, viz_pt0[viz_msk],
                           pt1[viz_msk])
        data['viz'] = viz

        # obtained position!
        R = cv2.Rodrigues(rvec)[0]
        t = np.float32(tvec)
        R, t = vm.Rti(R, t)
        rxn = np.reshape(tx.euler_from_matrix(R), 3)
        txn = t.ravel()

        if suc:
            # print('pnp-txn', t)
            # print('pnp-rxn', tx.euler_from_matrix(R))
            # motion_update()
            if self.cfg_['kalman']:
                # kalman_update()
                self.kf_.x = frame0['pose']
                self.kf_.P = frame0['cov']
                self.kf_.predict(frame1['stamp'] - frame0['stamp'])
                self.kf_.update(np.concatenate([txn, rxn]))
                frame1['pose'] = self.kf_.x
                frame1['cov'] = self.kf_.P
            else:
                # hard_update()
                frame1['pose'][L_POS] = t.ravel()
                frame1['pose'][A_POS] = tx.euler_from_matrix(R)

            self.db_.observation.extend(
                dict(
                    # observation frame source
                    src_idx=np.full_like(obs_lmk_idx, frame1['index']),
                    lmk_idx=obs_lmk_idx,  # landmark index
                    point=pt1))
        self.db_.frame.append(frame1)
        x = 1

        need_kf = np.logical_or.reduce([
            not suc,  # PNP failed -- try new keyframe
            # PNP was decent but would be better to have a new frame
            suc and (n_pnp_out < 128),
            # = frame is somewhat stale
            (frame1['index'] - keyframe['index'] > 32) and (msk_t.sum() < 256)
        ]) and self.is_keyframe(frame1)
        #need_kf = False

        # ?? better criteria for running BA?
        run_ba = (frame1['index'] % 8) == 0
        #run_ba = False
        #run_ba = need_kf

        if run_ba:
            self.bundle_adjust(keyframe, frame1)

        if need_kf:
            for index in reversed(range(keyframe['index'], frame1['index'])):
                feat0, feat1 = self.db_.frame[index]['feat'], frame1[
                    'feat'].item()
                mi0, mi1 = self.matcher_.match(feat0.dsc,
                                               feat1.dsc,
                                               lowe=0.8,
                                               fold=False)
                data_tv = {}
                suc_tv, det_tv = TwoView(feat0.pt[mi0], feat1.pt[mi1],
                                         self.cfg_['K']).compute(data=data_tv)

                if suc_tv:
                    print('======================= NEW KEYFRAME ===')
                    xfm0 = pose_to_xfm(self.db_.frame[index]['pose'])
                    xfm1 = pose_to_xfm(frame1['pose'])
                    scale_ref = np.linalg.norm(
                        tx.translation_from_matrix(vm.Ti(xfm1).dot(xfm0)))
                    scale_tv = np.linalg.norm(data_tv['t'])
                    # TODO : does NOT consider "duplicate" landmark identities

                    # IMPORTANT: frame1  is a `copy` of "last_frame"
                    #frame1['is_kf'] = True
                    self.db_.frame[-1]['is_kf'] = True

                    lmk_idx0 = self.db_.landmark.size
                    print('lmk_idx0', lmk_idx0)
                    msk_cld = data_tv['msk_cld']
                    cld1 = data_tv['cld1'][msk_cld] * (scale_ref / scale_tv)
                    cld = transform_cloud(
                        cld1,
                        source_frame=frame1,
                        target_frame=mapframe,
                    )
                    col = extract_color(frame1['image'],
                                        feat1.pt[mi1][msk_cld])

                    local_map = dict(
                        index=lmk_idx0 + np.arange(len(cld)),  # landmark index
                        src=np.full(len(cld), frame1['index']),  # source index
                        dsc=feat1.dsc[mi1][msk_cld],  # landmark descriptor
                        rsp=[
                            feat1.kpt[i].response
                            for i in np.arange(len(feat1.kpt))[mi1][msk_cld]
                        ],  # response "strength"

                        # tracking point initialization
                        pt0=feat1.pt[mi1][msk_cld],
                        invd=1.0 / cld1[..., 2],
                        depth=cld1[..., 2],
                        pos=cld,  # landmark position [[ map frame ]]
                        # tracking status
                        track=np.ones(len(cld), dtype=np.bool),

                        # tracking point initialization
                        pt=feat1.pt[mi1][msk_cld],
                        tri=np.ones(len(cld), dtype=np.bool),
                        col=col,  # debug : point color information
                    )
                    # hmm?
                    self.db_.landmark.extend(local_map)
                    break
            else:
                print('Attempted new keyframe but failed')

    def process(self, img, stamp, data=None):
        """Dispatch one incoming frame to the current pipeline stage.

        Parameters:
            img: input image for this frame.
            stamp: timestamp associated with `img`.
            data: optional dict the stage handler fills with per-frame
                results; a fresh dict is created when omitted.

        Returns whatever the stage handler returns, or None when idle.
        """
        # NOTE: the original signature used a mutable default (data={}),
        # which silently shared a single dict across every call that
        # omitted the argument; a None sentinel avoids that.
        if data is None:
            data = {}
        if self.state_ == PipelineState.IDLE:
            return
        if self.state_ == PipelineState.INIT:
            # bootstrap the map from the first frames
            return self.init_map(img, stamp, data)
        elif self.state_ == PipelineState.TRACK:
            # normal frame-to-map tracking
            return self.track(img, stamp, data)

    def save(self, path):
        """Persist the pipeline configuration and database under `path`."""
        if not os.path.exists(path):
            os.makedirs(path)
        # The configuration is a single .npy file; the database writes
        # its own files into the same directory.
        np.save(os.path.join(path, 'config.npy'), self.cfg_)
        self.db_.save(path)
Exemplo n.º 18
0
    def test_log_method_should_return_true_to_ensure_logging_continues(self):
        """log_position() must report True so the logging loop keeps running."""
        mock_sensors = Mock()
        subject = Tracker(Mock(), mock_sensors, Mock())
        result = subject.log_position()
        self.assertTrue(result)
Exemplo n.º 19
0
class Detector(BaseDetector):
    """Contour-based object detector feeding a positional Tracker.

    Pipeline: crop (resize) -> binary mask (transform) -> external
    contours -> bounding-box ROIs -> tracker update.
    """

    def __init__(self, ):
        super(Detector, self).__init__()

        # Effectively unbounded tracking region; objects expire after
        # one frame without a match (timeToDie/timeToLive = 1).
        trackerArgs = {
            'upperBound': 9999,
            'lowerBound': -9999,
            'rightBound': 9999,
            'leftBound': -9999,
            'timeToDie': 1,
            'timeToLive': 1,
        }

        self.tracker = Tracker(**trackerArgs)
        # size thresholds (unused by the methods visible here)
        self.sizeLower = 0
        self.sizeUpper = 0

    def transform(self, img):
        """Return a binary mask of candidate objects from a BGR image.

        Pixels inside the fixed BGR colour band are kept, then a tall
        rectangular erosion and a median blur suppress noise. All cv2
        calls after inRange operate in place on `contrast`.
        """
        contrast = cv2.inRange(img,
                               lowerb=(120, 120, 80),
                               upperb=(200, 200, 150))
        # cv2.bitwise_not(src=contrast, dst=contrast)
        # 3x15 kernel: erodes thin horizontal structures more than vertical
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 15))
        cv2.morphologyEx(src=contrast,
                         dst=contrast,
                         op=cv2.MORPH_ERODE,
                         kernel=kernel,
                         iterations=1)
        cv2.medianBlur(src=contrast, dst=contrast, ksize=11)
        return contrast

    def resize(self, img, ymin=200, ymax=400, xmin=300, xmax=1000):
        """Crop the frame to the region of interest (y, then x)."""
        return img[ymin:ymax, xmin:xmax]

    def detect(self, feed):
        """Detect and track objects in one frame.

        Returns (annotated image, binary mask, []).
        """
        img = self.resize(feed)
        contrast = self.transform(img)
        # RETR_EXTERNAL: outer contours only; h (hierarchy) is unused
        contours, h = cv2.findContours(contrast, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        rois = []
        for c in contours:
            approx = cv2.approxPolyDP(c, 0.01 * cv2.arcLength(c, True), True)
            x, y, w, h = cv2.boundingRect(c)
            # NOTE(review): len(approx) < 1 is only true for an empty
            # approximation, so this filter almost never triggers
            if len(approx) < 1:
                continue
            x1 = x
            x2 = x1 + w
            y1 = y
            y2 = y1 + h
            rois.append([x1, y1, x2, y2])

        tracked, newRois = self.tracker.track(rois)
        self.numObjects = self.tracker.N

        # draw detections: rectangle, centroid, and width label
        for roi in rois:
            ImgUtils.drawRect(roi, img)
            detectedCentroid = ImgUtils.findRoiCentroid(roi)
            ImgUtils.drawCircle(detectedCentroid, img, colour=(255, 0, 0))
            ImgUtils.putText(coords=(roi[0] + 50, roi[1] + 50),
                             text=str(roi[2] - roi[0]),
                             img=img,
                             colour=(255, 255, 0),
                             fontSize=3)

        # draw tracked objects with their (modulo-1000) ids
        for objectId, centroid in tracked.items():
            ImgUtils.drawCircle((centroid[0], centroid[1]), img)
            ImgUtils.putText(coords=centroid,
                             text=str(objectId % 1000),
                             img=img,
                             colour=(0, 255, 0))

        # for roi in newRois:
        #     print(roi[3]-roi[1], roi[2]-roi[0])

        return img, contrast, []

    def detectDebug(self, img):
        """Return raw ROIs for debugging, splitting tall boxes into
        roughly `targetHeight`-pixel slices."""
        # img = img[350:, 350:-400, :]
        contrast = np.copy(img)
        contrast = self.transform(contrast)
        contours, h = cv2.findContours(contrast, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        rois = []
        for c in contours:
            approx = cv2.approxPolyDP(c, 0.01 * cv2.arcLength(c, True), True)
            x, y, w, h = cv2.boundingRect(c)
            # discard small detections
            if len(approx) < 1 or w < 130 or h < 60:
                continue
            x1 = x
            x2 = x1 + w
            y1 = y
            y2 = y1 + h
            if w > 250 or h > 250:
                print(w, h)
            if x2 < 250:  # or y2 < 100:
                continue
            # split boxes taller than targetHeight into vertical slices
            targetHeight = 130
            numParts = h // targetHeight
            if numParts < 1:
                rois.append([x1, y1, x2, y2])
            else:
                step = (h % targetHeight)
                y = y1
                for i in range(0, numParts):
                    r = [x1, y, x2, y + targetHeight + step]
                    y += (step + targetHeight)
                    rois.append(r)
        return rois
Exemplo n.º 20
0
# Ask user which of the input clumps we actually wish to track on this run
ctrack = input('Select which of clumps %s to track: ' % params[3])

try:
    # if only one clump chosen
    ctrack = [int(ctrack)]
except (ValueError, TypeError):
    # if more than one clump chosen; materialize the list so ctrack stays
    # subscriptable (under Python 3, map() returns a one-shot iterator)
    ctrack = list(map(int, ctrack))

params[3] = ctrack

# Run clump tracker routine

tracks = Tracker.runTracker(params)

# working directory for all output
wd = params[0]

# Iterate through dumps, create analyser instances, and plot as desired...

discs = []
# column 1 of the selected clump's track holds the per-dump clump indices
iclumps = tracks[ctrack[0]][:, 1]
iclumps = list(map(int, iclumps))

# Check necessary directories for saving plots exist
if not os.path.exists('%s/clumpfiles' % wd):
    os.mkdir('%s/clumpfiles' % wd)
if not os.path.exists('%s/fragplots' % wd):
    os.mkdir('%s/fragplots' % wd)
Exemplo n.º 21
0
    # Intersection over Union (Jaccard) used for scoring
    score_func = db_eval_iou
    
    # get the DAVIS 2016 data loaders
    loaders = {k: DataLoader(DAVIS(p["/"],p[k], s)) for k in ['train','val']}

    # get model and load pre-trained weights
    model = load_model( STM(new_arch=new_arch), p["weights"])

    # set trainable parameters
    select_params( model, contains=weight_hint)

    # loss function
    criterion = CrossEntropyLoss()
    
    # optimizier
    optimizer = Adam(model.parameters(), lr=learning_rate)

    # create logger
    log = Tracker()
    log.create_dir()
    log.set_save_string('state_dict_M_{}_E_{}_J_{:5f}_T_{}.pth' )
    
    # train model and validate after each epoch
    train_model(loaders, model, criterion, optimizer, log, score_func,
                batch_size=batch_size, mode=mode,num_epochs=num_epochs)

    # plot log file for statistics
    log.plot()
    
Exemplo n.º 22
0
class Detector(BaseDetector):
    """Hough-circle object detector with adaptive conveyor-belt bounds.

    Pipeline: crop (resize) -> binary mask (transform) -> Hough circle
    detection -> tracker update, with a periodic re-estimate of the
    belt's horizontal extent.
    """

    def __init__(self):
        super(Detector, self).__init__()
        # dim1*: Hough radius bounds; dim2*: contour dimension bounds
        self.dim1Lower = 40
        self.dim1Upper = 100
        self.dim2Lower = 50
        self.dim2Upper = 200
        self.numObjects = -1

        # effectively unbounded tracking region; objects never persist
        # past an unmatched frame (timeToDie/timeToLive = 0)
        trackerArgs = {
            'upperBound': 9999,
            'lowerBound': -9999,
            'rightBound': 9999,
            'leftBound': -9999,
            'timeToDie': 0,
            'timeToLive': 0,
        }
        self.tracker = Tracker(**trackerArgs)

        self.clock = 1            # frame counter delaying belt re-detection
        self.guiMode = False      # draw debug overlays when True
        self.averageColour = [0, 0, 0]
        self.averageSize = 0

        # current estimate of the belt's horizontal extent (pixels)
        self.beltXmin = 0
        self.beltXmax = 500

    def transform(self, img):
        """Return a binary mask of dark objects from a BGR image.

        inRange and morphologyEx operate in place on `contrast`.
        """
        contrast = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # keep only dark pixels (intensity <= 160)
        cv2.inRange(src=contrast, dst=contrast, lowerb=0, upperb=160)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        # single erosion pass removes speckle noise
        cv2.morphologyEx(src=contrast,
                         dst=contrast,
                         op=cv2.MORPH_ERODE,
                         kernel=kernel,
                         iterations=1)
        return contrast

    def resize(self, img, ymin=0, ymax=270, xmin=0, xmax=500):
        """Crop the frame to the working region.

        NOTE(review): the keyword arguments are ignored; the crop is
        hard-coded. Kept as-is for behavioral compatibility.
        """
        return img[150:-100, 100:-100]

    def detectOld(self, img):
        """Legacy contour-based detection path.

        Returns (annotated image, binary mask, []).
        """
        self.clock += 1
        hBefore, wBefore, _ = img.shape
        img = self.resize(img)

        contrast = self.transform(img)
        # 'heigthUpper' [sic] matches the callee's parameter spelling
        rois = DetectionUtils.detectContours(contrast,
                                             widthLower=self.dim1Lower,
                                             widthUpper=self.dim1Upper,
                                             heightLower=self.dim2Lower,
                                             heigthUpper=self.dim2Upper)
        tracked, _ = self.tracker.track(rois)
        self.numObjects = self.tracker.N
        if self.guiMode:
            for roi in rois:
                ImgUtils.drawRect(roi, img)
                detectedCentroid = ImgUtils.findRoiCentroid(roi)
                ImgUtils.drawCircle(detectedCentroid, img, colour=(255, 0, 0))
            for objectId, centroid in tracked.items():
                ImgUtils.drawCircle((centroid[0], centroid[1]), img)
                ImgUtils.putText(coords=centroid,
                                 text=str(objectId % 1000),
                                 img=img,
                                 colour=(255, 0, 0))

        return img, contrast, []

    def detect(self, img):
        """Detect circular objects, update tracking, and adapt belt bounds.

        Returns (annotated image, binary mask, []).
        """
        hBefore, wBefore, _ = img.shape
        img = self.resize(img)
        contrast = self.transform(img)
        rois, radii = DetectionUtils.houghDetect(contrast,
                                                 radiusMin=self.dim1Lower,
                                                 radiusMax=self.dim1Upper)
        tracked, newRois = self.tracker.track(rois)
        self.numObjects = self.tracker.N

        # Re-adjust in case the belt has moved; wait ~300 frames first
        if self.clock < 300:
            self.clock += 1
        else:
            xmin, xmax = DetectionUtils.getBeltCoordinates(img)
            if xmin is None and xmax is None:  # no belt detected at all
                pass
            elif xmin is None:  # only the right border was detected
                self.beltXmax = xmax
            elif xmax is None:  # only the left border was detected
                # BUGFIX: this case previously fell into the comparison
                # below and raised TypeError (abs of int minus None)
                self.beltXmin = xmin
            else:
                if abs(self.beltXmax - xmax) < 100 and abs(
                        self.beltXmin - xmin) < 100:  # sanity check
                    self.beltXmin = xmin
                    self.beltXmax = xmax

        if self.guiMode:
            for roi in rois:
                ImgUtils.drawRect(roi, img)
                detectedCentroid = ImgUtils.findRoiCentroid(roi)
                ImgUtils.drawCircle(detectedCentroid, img, colour=(255, 0, 0))
                ImgUtils.putText(coords=(roi[0] + 50, roi[1] + 50),
                                 text=str(roi[2] - roi[0]),
                                 img=img,
                                 colour=(255, 255, 0),
                                 fontSize=3)
            for objectId, centroid in tracked.items():
                ImgUtils.drawCircle((centroid[0], centroid[1]), img)
                ImgUtils.putText(coords=centroid,
                                 text=str(objectId % 1000),
                                 img=img,
                                 colour=(255, 0, 0))

        out = []
        return img, contrast, out

    def detectDebug(self, img):
        """Alternative mask + Hough detection used for offline debugging."""
        contrast = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:, :, 2]
        contrast = cv2.bitwise_not(contrast)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
        contrast = cv2.morphologyEx(contrast,
                                    cv2.MORPH_DILATE,
                                    kernel,
                                    iterations=2)
        contrast = cv2.threshold(src=contrast,
                                 maxval=255,
                                 thresh=200,
                                 type=cv2.THRESH_BINARY)[1]
        rois, _ = DetectionUtils.houghDetect(contrast, 70, 140)
        return rois
Exemplo n.º 23
0
    ## choose input source
    source_code = 3  # 1 from camera, 2 from computer camera, 3 from video file
    if source_code == 1:
        url = "rtsp://*****:*****@192.168.0.7:554/"
        cap = cv2.VideoCapture(url)
    elif source_code == 2:
        cap = cv2.VideoCapture(0)  # capture from camera
    elif source_code == 3:
        video_path = "./videos/first.mp4"
        cap = cv2.VideoCapture(video_path)

    cap.set(cv2.CAP_PROP_FPS, 15)

    # people = None
    preframe_num = 5
    tracker = Tracker(frame_num=preframe_num)
    current_lable = None
    index = 0
    count = 0
    while True:
        ret, img_raw = cap.read()
        if img_raw is None:
            continue
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        dets = detect_face(net, img_raw)
        if len(dets) == 0:
            continue
        count += 1
        boxes = [x[0:4] for x in dets]
        print("\n *******  count: ", count, "   ***************")
Exemplo n.º 24
0
class Detection:
    """Live-camera detection/tracking driver.

    NOTE(review): camera, camera2, D and T are class attributes, so every
    Detection instance shares the same capture devices and tracker; kept
    as-is for behavioral compatibility with existing callers.
    """
    camera = cv2.VideoCapture()
    camera2 = cv2.VideoCapture()
    D = StandardDetector()
    T = Tracker()
    liveFeed = None
    frame = None
    rois = None

    def __init__(self):
        # credentials are elided from the stream URL
        self.camera.open('rtsp://*****:*****@10.150.10.155/1')

    def main(self, draw=False):
        """Main loop: read frames, detect, track, and display until
        'q' or Esc is pressed."""
        clock = 0
        while True:
            clock += 1
            _, self.liveFeed = self.camera.read()
            if self.liveFeed is None:
                continue

            self.frame = StandardDetectionTrans.prepareResized(self.liveFeed)
            # detect
            contrast = StandardDetectionTrans.prepareMono(self.liveFeed)

            rois = self.D.detect(contrast)
            if draw:
                for roi in rois:
                    ImgUtils.drawRect(roi, self.frame, colour=(0, 255, 255))

            # track
            self.T.track(rois)
            print('--', self.T.N)

            ###
            ImgUtils.show("Live", self.frame, 800, 0)
            ImgUtils.show("Frame", contrast, 0, 0)
            # BUGFIX: cv2.waitKey returns an int key code; comparing it
            # with the string 'q' was always False, so 'q' never quit
            keyboard = cv2.waitKey(30)
            if keyboard == ord('q') or keyboard == 27:
                break

    def showFeed(self):
        """Display the raw camera feed until 'q' or Esc is pressed."""
        while True:
            _, feed = self.camera.read()
            if feed is None:
                continue

            ImgUtils.show("1", feed, 0, 0)

            keyboard = cv2.waitKey(30)
            if keyboard == ord('q') or keyboard == 27:
                break

    def test(self):
        """Show each mono channel alongside the resized feed for
        visual inspection, until 'q' or Esc is pressed."""
        while True:
            _, feed = self.camera.read()
            if feed is None:
                continue

            frame = StandardDetectionTrans.prepareMono(feed)
            for i in range(3):
                ImgUtils.show(str(i), frame[:, :, i], 0, 300 * i)

            ImgUtils.show('Feed', StandardDetectionTrans.prepareResized(feed),
                          800, 0)

            keyboard = cv2.waitKey(30)
            if keyboard == ord('q') or keyboard == 27:
                break
Exemplo n.º 25
0
class City(object):
    """Simulation world: builds locations and humans, allocates households,
    and rations daily virus tests.

    Relies on module-level globals defined elsewhere in this file
    (TEST_TYPES, LOCATION_DISTRIBUTION, HUMAN_DISTRIBUTION,
    HOUSE_SIZE_PREFERENCE, OTHERS_WORKPLACE_CHOICE, MIN_AVG_HOUSE_AGE).
    """

    def __init__(self, env, n_people, rng, x_range, y_range, start_time, init_percent_sick, Human):
        """Create the city, its locations and its population.

        env: simulation environment (provides .timestamp)
        n_people: population size
        rng: numpy-style random generator
        x_range, y_range: spatial extent of the city
        start_time: timestamp used for initially-infected humans
        init_percent_sick: probability a human starts infected
        Human: class used to instantiate each person
        """
        self.env = env
        self.rng = rng
        self.x_range = x_range
        self.y_range = y_range
        self.total_area = (x_range[1] - x_range[0]) * (y_range[1] - y_range[0])
        self.n_people = n_people
        self.start_time = start_time
        self.init_percent_sick = init_percent_sick
        # daily test bookkeeping: counters reset when the date changes
        self.last_date_to_check_tests = self.env.timestamp.date()
        self.test_count_today = defaultdict(int)
        # test type names sorted by ascending 'preference' value
        self.test_type_preference = list(zip(*sorted(TEST_TYPES.items(), key=lambda x:x[1]['preference'])))[0]
        print("Initializing locations ...")
        self.initialize_locations()

        self.humans = []
        self.households = OrderedSet()
        print("Initializing humans ...")
        self.initialize_humans(Human)

        self.log_static_info()

        print("Computing their preferences")
        self._compute_preferences()
        self.tracker = Tracker(env, self)
        self.tracker.track_initialized_covid_params(self.humans)

    def create_location(self, specs, type, name, area=None):
        """Instantiate a Location subclass chosen from `type`, placed
        at a random lat/lon within the city bounds."""
        _cls = Location
        if type in ['household', 'senior_residency']:
            _cls = Household
        if type == 'hospital':
            _cls = Hospital

        return   _cls(
                        env=self.env,
                        rng=self.rng,
                        name=f"{type}:{name}",
                        location_type=type,
                        lat=self.rng.randint(*self.x_range),
                        lon=self.rng.randint(*self.y_range),
                        area=area,
                        social_contact_factor=specs['social_contact_factor'],
                        capacity= None if not specs['rnd_capacity'] else self.rng.randint(*specs['rnd_capacity']),
                        surface_prob = specs['surface_prob']
                        )
    @property
    def tests_available(self):
        """True if any test type still has capacity today; resets the
        daily counters when the date has rolled over."""
        if self.last_date_to_check_tests != self.env.timestamp.date():
            self.last_date_to_check_tests = self.env.timestamp.date()
            for k in self.test_count_today.keys():
                self.test_count_today[k] = 0
        return any(self.test_count_today[test_type] < TEST_TYPES[test_type]['capacity'] for test_type in self.test_type_preference)

    def get_available_test(self):
        """Consume and return the most-preferred test type with remaining
        capacity today, or None if all are exhausted."""
        for test_type in self.test_type_preference:
            if self.test_count_today[test_type] < TEST_TYPES[test_type]['capacity']:
                self.test_count_today[test_type] += 1
                return test_type

    def initialize_locations(self):
        """Create every non-household location type and expose each group
        as an attribute (e.g. self.hospitals, self.schools)."""
        for location, specs in LOCATION_DISTRIBUTION.items():
            if location in ['household']:
                continue

            # one location per specs["n"] people, with a random area share
            n = math.ceil(self.n_people/specs["n"])
            area = _get_random_area(n, specs['area'] * self.total_area, self.rng)
            locs = [self.create_location(specs, location, i, area[i]) for i in range(n)]
            setattr(self, f"{location}s", locs)

    def initialize_humans(self, Human):
        """Create the population and assign residences and workplaces."""
        # allocate humans to houses such that (unsolved)
        # 1. average number of residents in a house is (approx.) 2.6
        # 2. not all residents are below 15 years of age
        # 3. age occupancy distribution follows HUMAN_DSITRIBUTION.residence_preference.house_size

        # current implementation is an approximate heuristic

        # make humans
        count_humans = 0
        house_allocations = {2:[], 3:[], 4:[], 5:[]}
        n_houses = 0
        for age_bin, specs in HUMAN_DISTRIBUTION.items():
            # number of humans in this age bin and their sampled ages
            n = math.ceil(specs['p'] * self.n_people)
            ages = self.rng.randint(*age_bin, size=n)

            senior_residency_preference = specs['residence_preference']['senior_residency']

            professions = ['healthcare', 'school', 'others', 'retired']
            p = [specs['profession_profile'][x] for x in professions]
            profession = self.rng.choice(professions, p=p, size=n)

            for i in range(n):
                count_humans += 1
                age = ages[i]

                # residence: seniors may be placed in a senior residency
                res = None
                if self.rng.random() < senior_residency_preference:
                    res = self.rng.choice(self.senior_residencys)
                # workplace depends on the sampled profession
                if profession[i] == "healthcare":
                    workplace = self.rng.choice(self.hospitals + self.senior_residencys)
                elif profession[i] == 'school':
                    workplace = self.rng.choice(self.schools)
                elif profession[i] == 'others':
                    type_of_workplace = self.rng.choice([0,1,2], p=OTHERS_WORKPLACE_CHOICE, size=1).item()
                    type_of_workplace = [self.workplaces, self.stores, self.miscs][type_of_workplace]
                    workplace = self.rng.choice(type_of_workplace)
                else:
                    # retired: "workplace" is the residence itself
                    workplace = res

                self.humans.append(Human(
                        env=self.env,
                        rng=self.rng,
                        name=count_humans,
                        age=age,
                        household=res,
                        workplace=workplace,
                        profession=profession[i],
                        rho=0.3,
                        gamma=0.21,
                        infection_timestamp=self.start_time if self.rng.random() < self.init_percent_sick else None
                        )
                    )

        # assign houses
        # stores tuples - (location, current number of residents, maximum number of residents allowed)
        remaining_houses = []
        for human in self.humans:
            if human.household is not None:
                continue
            if len(remaining_houses) == 0:
                # open a new house with a randomly sampled capacity
                cap = self.rng.choice(range(1,6), p=HOUSE_SIZE_PREFERENCE, size=1)
                x = self.create_location(LOCATION_DISTRIBUTION['household'], 'household', len(self.households))

                remaining_houses.append((x, cap))

            # get_best_match
            # NOTE(review): n_vacancy is a loop-local copy, so the
            # decrement below is never written back to the stored tuple;
            # houses only leave the pool when their original cap was 1.
            # TODO confirm whether this is intended.
            res = None
            for  c, (house, n_vacancy) in enumerate(remaining_houses):
                new_avg_age = (human.age + sum(x.age for x in house.residents))/(len(house.residents) + 1)
                if new_avg_age > MIN_AVG_HOUSE_AGE:
                    res = house
                    n_vacancy -= 1
                    if n_vacancy == 0:
                        remaining_houses = remaining_houses[:c] + remaining_houses[c+1:]
                    break

            if res is None:
                # no suitable house: create one sized by this human's
                # age-bin house-size preference
                for i, (l,u) in enumerate(HUMAN_DISTRIBUTION.keys()):
                    if l <= human.age < u:
                        bin = (l,u)
                        break

                house_size_preference = HUMAN_DISTRIBUTION[(l,u)]['residence_preference']['house_size']
                cap = self.rng.choice(range(1,6), p=house_size_preference, size=1)
                res = self.create_location(LOCATION_DISTRIBUTION['household'], 'household', len(self.households))
                if cap - 1 > 0:
                    remaining_houses.append((res, cap-1))

            # FIXME: there is some circular reference here
            res.residents.append(human)
            human.assign_household(res)
            self.households.add(res)

        # assign area to house
        area = _get_random_area(len(self.households), LOCATION_DISTRIBUTION['household']['area'] * self.total_area, self.rng)
        for i,house in enumerate(self.households):
            house.area = area[i]

    def log_static_info(self):
        """Emit a static-info Event for every human."""
        for h in self.humans:
            Event.log_static_info(self, h, self.env.timestamp)

    @property
    def events(self):
        """All events from every human, flattened into one list."""
        return list(itertools.chain(*[h.events for h in self.humans]))

    def pull_events(self):
        """Drain and return every human's pending events."""
        return list(itertools.chain(*[h.pull_events() for h in self.humans]))

    def _compute_preferences(self):
        """ compute preferred distribution of each human for park, stores, etc."""
        # preference is inverse distance from the human's household
        for h in self.humans:
            h.stores_preferences = [(compute_distance(h.household, s) + 1e-1) ** -1 for s in self.stores]
            h.parks_preferences = [(compute_distance(h.household, s) + 1e-1) ** -1 for s in self.parks]
Exemplo n.º 26
0
    def test_log_method_should_return_true_to_ensure_logging_continues(self):
        """Ensure log_position() signals the logging loop to continue."""
        tracker = Tracker(Mock(), Mock(), Mock())
        self.assertTrue(tracker.log_position())
Exemplo n.º 27
0
class Wiring():
    """Composition root for the Pi-Nav sailing controller.

    Constructs every device, navigation and tracking component and wires
    them together; `follow`, `track` and `showgps` are the entry points.
    """
    def __init__(self, gps=False, servo_port=SERVO_PORT):
        """Wire up all components.

        gps: an already-constructed GPS reader, or False to lazily create
             a GpsReader daemon thread on first use (see the `gps` property).
        servo_port: serial device path used to drive the rudder servo.
        """
        # devices
        self._gps = gps
        self.windsensor = WindSensor(I2C(WINDSENSOR_I2C_ADDRESS))
        self.compass = Compass(I2C(COMPASS_I2C_ADDRESS),
                               I2C(ACCELEROMETER_I2C_ADDRESS))
        self.red_led = GpioWriter(17, os)
        self.green_led = GpioWriter(18, os)

        # Navigation
        self.globe = Globe()
        self.timer = Timer()
        self.application_logger = self._rotating_logger(APPLICATION_NAME)
        self.position_logger = self._rotating_logger("position")
        self.exchange = Exchange(self.application_logger)
        self.timeshift = TimeShift(self.exchange, self.timer.time)
        self.event_source = EventSource(self.exchange, self.timer,
                                        self.application_logger,
                                        CONFIG['event source'])

        self.sensors = Sensors(self.gps, self.windsensor, self.compass,
                               self.timer.time, self.exchange,
                               self.position_logger, CONFIG['sensors'])
        self.gps_console_writer = GpsConsoleWriter(self.gps)
        self.rudder_servo = Servo(serial.Serial(servo_port),
                                  RUDDER_SERVO_CHANNEL, RUDDER_MIN_PULSE,
                                  RUDDER_MIN_ANGLE, RUDDER_MAX_PULSE,
                                  RUDDER_MAX_ANGLE)
        self.steerer = Steerer(self.rudder_servo, self.application_logger,
                               CONFIG['steerer'])
        self.helm = Helm(self.exchange, self.sensors, self.steerer,
                         self.application_logger, CONFIG['helm'])
        self.course_steerer = CourseSteerer(self.sensors, self.helm,
                                            self.timer,
                                            CONFIG['course steerer'])
        self.navigator = Navigator(self.sensors, self.globe, self.exchange,
                                   self.application_logger,
                                   CONFIG['navigator'])
        self.self_test = SelfTest(self.red_led, self.green_led, self.timer,
                                  self.rudder_servo, RUDDER_MIN_ANGLE,
                                  RUDDER_MAX_ANGLE)

        # Tracking: a second Sensors instance that logs to the "track" log.
        self.tracking_logger = self._rotating_logger("track")
        self.tracking_sensors = Sensors(self.gps, self.windsensor,
                                        self.compass, self.timer.time,
                                        self.exchange, self.tracking_logger,
                                        CONFIG['sensors'])
        self.tracker = Tracker(self.tracking_logger, self.tracking_sensors,
                               self.timer)

    def _rotating_logger(self, appname):
        """Return a logger writing to /var/log/pi-nav/<appname>, rotated
        at midnight and keeping 30 days of backups."""
        logHandler = TimedRotatingFileHandler("/var/log/pi-nav/" + appname,
                                              when="midnight",
                                              backupCount=30)
        logHandler.setFormatter(logging.Formatter(LOGGING_FORMAT))
        logger = logging.getLogger(appname)
        logger.addHandler(logHandler)
        logger.setLevel(CONFIG['wiring']['logging level'])
        return logger

    @property
    def gps(self):
        """Lazily construct and start a GpsReader daemon on first access."""
        if not self._gps:
            self._gps = GpsReader()
            self._gps.setDaemon(True)
            self._gps.start()
        return self._gps

    def showgps(self):
        """Print GPS readings to the console every 5 seconds until interrupted."""
        try:
            self.timer.call(self.gps_console_writer.write).every(5)
        except (KeyboardInterrupt, SystemExit):
            # Shut the GPS reader thread down cleanly on Ctrl-C / exit.
            self.gps.running = False
            self.gps.join()

    def follow(self, waypoints):
        """Run the self-test, centre the rudder, then navigate `waypoints`."""
        self.application_logger.info(
            '**************************************************************')
        self.application_logger.info(
            '*** Pi-Nav starting navigation: ' +
            datetime.datetime.now().strftime("%Y-%m-%d"))
        self.application_logger.info(
            '**************************************************************')
        self.self_test.run()
        self.rudder_servo.set_position(0)
        self.follower = Follower(self.exchange, waypoints,
                                 self.application_logger)
        self.event_source.start()

    def track(self):
        """Run the self-test, then log position fixes every 10 seconds."""
        self.self_test.run()
        self.tracker.track(10)
Exemplo n.º 28
0
    # Log to file (append mode) and echo everything to the console.
    logging.basicConfig(filename=log_path,
                        filemode='a',
                        format='%(asctime)s %(message)s')
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    logger.addHandler(console)

    # Get parameter file from arguments or from default source
    if args.config is not None:
        assert args.config.split(
            '.')[-1] == 'py', 'Config path must be a python script file'
        config_name = os.path.split(args.config)[-1].split('.')[0]
        # NOTE(review): the `imp` module is deprecated (removed in Python
        # 3.12); importlib is the modern replacement — confirm the target
        # runtime before upgrading.
        params = imp.load_source(config_name, args.config).Parameters().params
    else:
        logger.info(
            'No custom config file specified. Using existing config file in {}'
            .format(dname))
        params = Parameters().params

    # Train model
    if args.train:
        trainer = Trainer(logger=logger, params=params)
        train_performance = trainer.train()

    # Run tractography using a trained model
    if args.track:
        tracker = Tracker(logger=logger, params=params)
        tractogram = tracker.track()
Exemplo n.º 29
0
class Wiring():
    """Builds and connects every Pi-Nav component (devices, navigation,
    tracking); `follow`, `track` and `showgps` are the entry points."""

    def __init__(self, gps=False, servo_port=SERVO_PORT):
        # --- devices ---
        self._gps = gps
        self.windsensor = WindSensor(I2C(WINDSENSOR_I2C_ADDRESS))
        self.compass = Compass(
            I2C(COMPASS_I2C_ADDRESS), I2C(ACCELEROMETER_I2C_ADDRESS))
        self.red_led = GpioWriter(17, os)
        self.green_led = GpioWriter(18, os)

        # --- navigation ---
        self.globe = Globe()
        self.timer = Timer()
        self.application_logger = self._rotating_logger(APPLICATION_NAME)
        self.position_logger = self._rotating_logger("position")
        self.exchange = Exchange(self.application_logger)
        self.timeshift = TimeShift(self.exchange, self.timer.time)
        self.event_source = EventSource(
            self.exchange, self.timer, self.application_logger,
            CONFIG['event source'])

        self.sensors = Sensors(
            self.gps, self.windsensor, self.compass, self.timer.time,
            self.exchange, self.position_logger, CONFIG['sensors'])
        self.gps_console_writer = GpsConsoleWriter(self.gps)
        self.rudder_servo = Servo(
            serial.Serial(servo_port), RUDDER_SERVO_CHANNEL,
            RUDDER_MIN_PULSE, RUDDER_MIN_ANGLE, RUDDER_MAX_PULSE,
            RUDDER_MAX_ANGLE)
        self.steerer = Steerer(
            self.rudder_servo, self.application_logger, CONFIG['steerer'])
        self.helm = Helm(
            self.exchange, self.sensors, self.steerer,
            self.application_logger, CONFIG['helm'])
        self.course_steerer = CourseSteerer(
            self.sensors, self.helm, self.timer, CONFIG['course steerer'])
        self.navigator = Navigator(
            self.sensors, self.globe, self.exchange,
            self.application_logger, CONFIG['navigator'])
        self.self_test = SelfTest(
            self.red_led, self.green_led, self.timer, self.rudder_servo,
            RUDDER_MIN_ANGLE, RUDDER_MAX_ANGLE)

        # --- tracking (separate Sensors instance logging to "track") ---
        self.tracking_logger = self._rotating_logger("track")
        self.tracking_sensors = Sensors(
            self.gps, self.windsensor, self.compass, self.timer.time,
            self.exchange, self.tracking_logger, CONFIG['sensors'])
        self.tracker = Tracker(
            self.tracking_logger, self.tracking_sensors, self.timer)

    def _rotating_logger(self, appname):
        """Create a midnight-rotating file logger under /var/log/pi-nav/."""
        handler = TimedRotatingFileHandler(
            "/var/log/pi-nav/" + appname, when="midnight", backupCount=30)
        handler.setFormatter(logging.Formatter(LOGGING_FORMAT))
        logger = logging.getLogger(appname)
        logger.addHandler(handler)
        logger.setLevel(CONFIG['wiring']['logging level'])
        return logger

    @property
    def gps(self):
        """Start a GpsReader daemon thread on first use and reuse it after."""
        if not self._gps:
            self._gps = GpsReader()
            self._gps.setDaemon(True)
            self._gps.start()
        return self._gps

    def showgps(self):
        """Dump GPS readings to the console every five seconds until interrupted."""
        try:
            self.timer.call(self.gps_console_writer.write).every(5)
        except (KeyboardInterrupt, SystemExit):
            self.gps.running = False
            self.gps.join()

    def follow(self, waypoints):
        """Run the self-test, centre the rudder, then steer along `waypoints`."""
        banner = '**************************************************************'
        self.application_logger.info(banner)
        self.application_logger.info(
            '*** Pi-Nav starting navigation: '
            + datetime.datetime.now().strftime("%Y-%m-%d"))
        self.application_logger.info(banner)
        self.self_test.run()
        self.rudder_servo.set_position(0)
        self.follower = Follower(
            self.exchange, waypoints, self.application_logger)
        self.event_source.start()

    def track(self):
        """Run the self-test, then log position fixes every ten seconds."""
        self.self_test.run()
        self.tracker.track(10)
Exemplo n.º 30
0
 def __init__(self, conn):
     """Store the given connection and create a fresh Tracker instance."""
     self.conn = conn
     self.tracker = Tracker()
Exemplo n.º 31
0
class Detector(BaseDetector):
    """Hough-circle detector for production Line 11.

    Each frame is cached/sampled via BaseDetector, cropped to the belt
    region, binarised on the HLS saturation channel; circular objects
    are detected and handed to a centroid Tracker.
    """

    def __init__(self):
        super(Detector,
              self).__init__(pathToCached='~/Samples/cached/Line11',
                             pathToSamples='~/Samples/samples/Line11')
        # Accepted circle radius range (pixels) for Hough detection.
        self.sizeLower = 60
        self.sizeUpper = 200

        # Tracking region bounds (pixels) and object lifetimes (frames).
        trackerArgs = {
            'upperBound': 300,
            'lowerBound': 220,
            'rightBound': 270,
            'leftBound': 30,
            'timeToDie': 1,
            'timeToLive': 0,
        }
        self.tracker = Tracker(**trackerArgs)

        self.guiMode = False
        self.averageColour = [0, 0, 0]
        self.averageSize = 0

    def transform(self, img):
        """Return a binary image of the saturation channel, denoised and thresholded."""
        contrast = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:, :, 2]
        # BUGFIX: medianBlur takes (src, ksize[, dst]); the original call
        # passed a spurious third positional argument (9) where OpenCV
        # expects an output array, which the bindings reject.
        contrast = cv2.medianBlur(contrast, 13)
        contrast = cv2.threshold(src=contrast,
                                 maxval=255,
                                 thresh=70,
                                 type=cv2.THRESH_BINARY)[1]
        return contrast

    def resize(self, img, ymin=150, ymax=1000, xmin=530, xmax=830):
        """Crop the frame to the conveyor-belt region of interest."""
        return img[ymin:ymax, xmin:xmax]

    def detect(self, img):
        """Detect and track circular objects in `img`.

        Side effects: updates the frame cacher/sampler and self.numObjects.
        Returns (cropped frame, binary contrast image, []).
        """
        self.cacher.update(img)
        self.sampler.update(img)
        img = self.resize(img)
        contrast = self.transform(img)
        rois, _radii = DetectionUtils.houghDetect(contrast,
                                                  radiusMin=self.sizeLower,
                                                  radiusMax=self.sizeUpper)
        tracked, newRois = self.tracker.track(rois)
        self.numObjects = self.tracker.N

        if self.guiMode:
            self._annotate(img, rois, tracked)

        return img, contrast, []

    def _annotate(self, img, rois, tracked):
        """Draw detection boxes, centroids and tracked object ids onto `img`."""
        for roi in rois:
            ImgUtils.drawRect(roi, img)
            detectedCentroid = ImgUtils.findRoiCentroid(roi)
            ImgUtils.drawCircle(detectedCentroid, img, colour=(255, 0, 0))
            ImgUtils.putText(coords=(roi[0] + 50, roi[1] + 50),
                             text=str(roi[2] - roi[0]),
                             img=img,
                             colour=(255, 255, 0),
                             fontSize=3)

        for objectId, centroid in tracked.items():
            ImgUtils.drawCircle((centroid[0], centroid[1]), img)
            ImgUtils.putText(coords=centroid,
                             text=str(objectId % 1000),
                             img=img,
                             colour=(255, 0, 0))

    def detectDebug(self, feed):
        """Debug helper: run the Hough pass directly and return detected boxes."""
        radiusMin = self.sizeLower
        radiusMax = self.sizeUpper
        img = self.transform(feed)

        circles = cv2.HoughCircles(img,
                                   cv2.HOUGH_GRADIENT,
                                   1,
                                   150,
                                   param1=101,
                                   param2=11,
                                   minRadius=radiusMin,
                                   maxRadius=radiusMax)
        dets = []
        if circles is not None:
            circles = np.uint16(np.around(circles))
            for i in circles[0, :]:
                center = (i[0], i[1])
                cv2.circle(img, center, 1, (0, 100, 100), 3)
                radius = i[2]
                if radiusMin < radius < radiusMax:
                    cv2.circle(img, center, radius, (255, 0, 255), 3)
                    dets.append(ImgUtils.findBoxAroundCircle(center, radius))
        return dets
Exemplo n.º 32
0
import cv2
import numpy as np
from track import Tracker, Object
import requests
import json

# Module-level tracker instance.
# NOTE(review): its use is not visible in this excerpt (the __main__ loop
# is truncated) — presumably shared by the YOLO detection loop below.
nTracker = Tracker()

def post_to_frontend(payload):
    """POST the JSON `payload` to the local frontend's queue endpoint.

    Returns the `requests.Response` object from the server.
    """
    url = "http://0.0.0.0:5000/push_to_queue"
    headers = {"Content-Type": "application/json"}
    return requests.request("POST", url, headers=headers, data=payload)


def process(img):
    """Return the frame scaled down to 50% along each axis."""
    half = cv2.resize(img, dsize=(0, 0), fx=0.5, fy=0.5)
    return half

if __name__ == "__main__":
    
    # Read YOLO pretrain weights
    net = cv2.dnn.readNet("yolo/yolov3.weights", "yolo/yolov3.cfg")
    
    classes = []
    with open("yolo/coco.names", "r") as f:
        classes = [line.strip() for line in f.readlines()]
    layer_names = net.getLayerNames()
    output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    # Demo output:
    output = cv2.VideoWriter("output.avi", cv2.VideoWriter_fourcc(*'XVID'), 20,
Exemplo n.º 33
0
class Detector(BaseDetector):
    def __init__(self, ):
        """Detector for production Line 30: wires up caching/sampling via
        BaseDetector and a centroid tracker for the belt region."""
        super(Detector,
              self).__init__(pathToCached='~/Samples/cached/Line30',
                             pathToSamples='~/Samples/samples/Line30')

        # Vertical tracking band 70..200 px; effectively unbounded
        # horizontally. Objects die after 5 frames, live after 3.
        self.tracker = Tracker(upperBound=200,
                               lowerBound=70,
                               rightBound=9999,
                               leftBound=-9999,
                               timeToDie=5,
                               timeToLive=3)
        # Radius thresholds zeroed for this line.
        self.sizeLower = 0
        self.sizeUpper = 0

    def transform(self, img):
        """Binarise the frame: keep dark pixels, erode vertically, median-denoise."""
        mask = cv2.inRange(img, lowerb=(0, 0, 0), upperb=(220, 180, 180))
        cv2.bitwise_not(src=mask, dst=mask)
        # Tall, thin rectangular kernel erodes horizontal noise streaks.
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 15))
        cv2.morphologyEx(src=mask, dst=mask, op=cv2.MORPH_ERODE,
                         kernel=kernel, iterations=1)
        cv2.medianBlur(src=mask, dst=mask, ksize=11)
        return mask

    def resize(self, img):
        """Downscale the frame to half size in both dimensions."""
        return cv2.resize(img, dsize=(0, 0), fx=0.5, fy=0.5)

    def detect(self, img):
        """Find rectangular objects via contour analysis, track and annotate them.

        Returns (annotated half-size frame, binary contrast image, []).
        Side effects: updates the frame cacher/sampler and self.numObjects.
        """
        self.cacher.update(img)
        self.sampler.update(img)
        img = self.resize(img)

        contrast = self.transform(np.copy(img))
        # NOTE(review): `h` (hierarchy) is shadowed by the boundingRect
        # unpack inside the loop below; it is never used as a hierarchy.
        contours, h = cv2.findContours(contrast, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        rois = []
        for c in contours:
            approx = cv2.approxPolyDP(c, 0.01 * cv2.arcLength(c, True), True)
            x, y, w, h = cv2.boundingRect(c)
            # Skip degenerate/small contours (noise).
            if len(approx) < 1 or w < 100 or h < 40:
                continue
            elif h > 140:
                # A very tall box is treated as two stacked objects and
                # split horizontally into two equal halves.
                x1 = x
                x2 = x1 + w
                y1 = y
                y2 = y1 + int(h / 2)
                rois.append([x1, y1, x2, y2])

                rois.append([x1, y2, x2, y2 + int(h / 2)])
            else:
                x1 = x
                x2 = x1 + w
                y1 = y
                y2 = y1 + h
                rois.append([x1, y1, x2, y2])
            # if y1 < 250 or x2 < 100:
            #     continue

        tracked, newRois = self.tracker.track(rois)
        self.numObjects = self.tracker.N

        # Draw raw detections: rectangle, blue centroid, width label.
        for roi in rois:
            ImgUtils.drawRect(roi, img)
            detectedCentroid = ImgUtils.findRoiCentroid(roi)
            ImgUtils.drawCircle(detectedCentroid, img, colour=(255, 0, 0))
            ImgUtils.putText(coords=(roi[0] + 50, roi[1] + 50),
                             text=str(roi[2] - roi[0]),
                             img=img,
                             colour=(255, 255, 0),
                             fontSize=3)

        # Draw tracked objects: centroid plus id (mod 1000 for brevity).
        for objectId, centroid in tracked.items():
            ImgUtils.drawCircle((centroid[0], centroid[1]), img)
            ImgUtils.putText(coords=centroid,
                             text=str(objectId % 1000),
                             img=img,
                             colour=(0, 255, 0))

        # for roi in newRois:
        #     print(roi[3]-roi[1], roi[2]-roi[0])

        return img, contrast, []