Ejemplo n.º 1
0
def execute():
	"""Restrict the working collection to Poland-related articles and render a heat map."""
	mongo = Mongo('ztis', 'ztis-test')

	# Keep only documents whose locations mention Poland.
	poland_filter = {"locations": {"$in": ["Poland", "PL"]}}
	mongo.collection = mongo.findCustom(poland_filter)

	heatMap = HeatMap(mongo.mapReduceLocations())
	heatMap.setMap("map2.png")
Ejemplo n.º 2
0
def execute():
    """Build a location heat map for articles tagged with Poland.

    Replaces the Mongo working collection with a Poland-only subset,
    then map-reduces locations and writes the rendered map to map2.png.
    """
    mongo = Mongo('ztis', 'ztis-test')

    query = {"locations": {"$in": ["Poland", "PL"]}}
    mongo.collection = mongo.findCustom(query)

    locations = mongo.mapReduceLocations()
    HeatMap(locations).setMap("map2.png")
Ejemplo n.º 3
0
Archivo: plot.py Proyecto: sshi27/plot
    def work(data):
      """Parse *data* into a dict and dispatch to the matching chart drawer.

      Accepts a JSON string, a Python-literal string, or a dict.  Raises
      Exception when the input cannot be parsed, is not a dict, or names
      an unsupported chart type.
      """
      if isinstance(data, str):
        try:
          data = rapidjson.loads(data)
        except Exception as json_err:
          # Fall back to Python literal syntax (single quotes, tuples, ...).
          try:
            data = ast.literal_eval(data)
          except Exception as literal_err:
            print(json_err)
            print(literal_err)
            raise Exception("Please input a valid json or python object string")

      if not isinstance(data, dict):
        raise Exception("Please input a valid json or python object string, or an object")

      plt.rc('text', usetex=data.get('usetex', False))

      # Dispatch table keeps the supported types and the error message in
      # sync, and avoids shadowing the builtin `type`.
      drawers = {
        'bar': ParallelBars,
        'line': MultipleLines,
        'cdf': Cdf,
        'annotated_bar': AnnotatedBars,
        'violin': Violin,
        'heatmap': HeatMap,
      }
      chart_type = data.get('type', None)
      if chart_type not in drawers:
        # Bug fix: the old message omitted annotated_bar, violin and heatmap.
        raise Exception("Please specify type in json. Supported: " +
                        ", ".join(sorted(drawers)))
      drawers[chart_type]().draw(data, fig, ax)
Ejemplo n.º 4
0
 def process_batter(self, batterId, px, pz, pitchType, pitchResult,
                    paResult, hand):
     """Record one pitch for *batterId*, creating its HeatMap on first sight.

     Rows whose batter id or pitch coordinates are not numeric are skipped.
     """
     try:
         batterId = int(batterId)
         px, pz = float(px), float(pz)
     except (TypeError, ValueError):
         # Malformed row (e.g. missing coordinates) -- ignore it.  A bare
         # except here used to swallow even KeyboardInterrupt.
         return
     # Create the per-batter map on demand, then record the pitch exactly once.
     if batterId not in self.season:
         self.season[batterId] = HeatMap()
     self.season[batterId].process_pitch(px, pz, pitchType, pitchResult,
                                         paResult, hand)
Ejemplo n.º 5
0
class Heatmap_unittest(unittest.TestCase):
    """Exercise HeatMap key bookkeeping, incremental counts and probabilities."""

    def setUp(self):
        # Use a context manager so the fixture file is closed promptly
        # (the old code leaked the file handle).
        with open(test_data, 'r') as fixture:
            self.rows = load(fixture)
        self.heatmap = HeatMap()
        for idx in range(5):
            params = self.rows[idx]
            self.heatmap.process_pitch(*params)

    def test_keys(self):
        """ Test keys are correct.
        """
        keys = self.heatmap.maps['R'].keys()
        key_answer = ['SI', 'SL']
        # assertItemsEqual existed only on Python 2; assertCountEqual is the
        # Python 3 name for the same order-insensitive comparison.
        self.assertCountEqual(key_answer, keys)

    def test_add(self):
        """ Test incremental adding
        """
        # Out recorded
        self.heatmap.process_pitch(*self.rows[12])
        first = self.heatmap.maps['R']['SI'][5, 2, 3]
        self.assertEqual(first, 1)
        # Add same pitch
        self.heatmap.process_pitch(*self.rows[12])
        second = self.heatmap.maps['R']['SI'][5, 2, 3]
        self.assertEqual(second, 2)

    def test_prob(self):
        """ Test foul probability before and after adding a swinging strike. """
        foul = self.heatmap.prob_foul(0.647, 2.325, 'SL', 'R')
        answer = 1.0
        self.assertEqual(foul, answer)
        # Add new pitch, should increment swings, changing probability
        self.heatmap.process_pitch(0.647, 2.325, 'SL', 'SS', '', 'R')
        foul = self.heatmap.prob_foul(0.647, 2.325, 'SL', 'R')
        answer = 0.5
        self.assertEqual(foul, answer)
Ejemplo n.º 6
0
def process_frame(img):
    """Detect vehicles in one frame and return the frame with labeled boxes."""
    detections = find_cars(img, clf, scaler, parameter)

    # Accumulate overlapping detections and drop weak responses.
    from heatmap import HeatMap
    heat = HeatMap(threshold=3)
    heat.add_heat(detections)
    heat.apply_threshold()

    # Connected components of the thresholded heat map become car regions.
    from scipy.ndimage.measurements import label
    labeled_regions = label(heat.get_headmap())

    return draw_labeled_bboxes(np.copy(img), labeled_regions)
# Load every test image from the sample directory.
test_images = list(
    map(lambda img: read_image(img), glob.glob('./test_images/*.jpg')))

# Restore the trained classifier and its feature scaler.
model_file = './data/model.p'
print('Loading classifier model from file', model_file)
clf, scaler = load_model(model_file)
parameter = FeatureParameter()

# For each test image collect three panels: raw detections, heat map,
# and final labeled bounding boxes.
box_imgs = []
for img in test_images:
    car_boxes = find_cars(img, clf, scaler, parameter)

    car_boxes_img = draw_cars(img, car_boxes)
    box_imgs.append(car_boxes_img)

    from heatmap import HeatMap

    # Accumulate overlapping detections; threshold filters weak responses.
    heatmap = HeatMap(threshold=2)
    heatmap.add_heat(car_boxes)
    heatmap.apply_threshold()
    heatmap_img = heatmap.get_headmap()

    from scipy.ndimage.measurements import label
    # Connected components of the heat map become the final car regions.
    labels = label(heatmap_img)

    box_imgs.append(heatmap_img)
    label_box_img = draw_labeled_bboxes(np.copy(img), labels)
    box_imgs.append(label_box_img)

plot_images(box_imgs)
Ejemplo n.º 8
0
# The extractor gets the data (public-toilet coordinates from a CSV export).
toilet_extr = ToiletExtractor(
    './data/oeffentlichetoilettenmuenchen2016-06-28.csv', 'latitude',
    'longitude')

# The renderer deals with visual aspects of the map
renderer = DefaultRenderer(center=marienplatz,
                           zoom=12,
                           opacity=0.35,
                           color_scale=custom_color_scale,
                           tiles='cartodbdark_matter')

# Create a new heatmap over the geo_json area, sampled on a 500 m grid.
# load_intermediate_results resumes from cached partial computations.
h_map = HeatMap(None,
                geo_json,
                filename='toilet',
                square_size=500,
                num_threads=100,
                load_intermediate_results=True)
h_map.generate(toilet_extr.get_value)
h_map.normalize()
# Generate the polygon areas within 1km of a public toilet
# (lin_value defaults to 999 so cells without data never qualify).
h_map.generate_polygon(lambda v: v.get('lin_value', 999) < 1.0,
                       dash_array=[5, 5],
                       color='#0ef',
                       opacity=0.5,
                       weight=2)
# Render and save
h_map.render(renderer, before_saving=toilet_extr.add_markers)
Ejemplo n.º 9
0
def reset():
    """Replace both global heat maps with fresh, empty ones."""
    global clicks_hm, moves_hm
    clicks_hm = HeatMap(grid_resolution)
    moves_hm = HeatMap(grid_resolution)
Ejemplo n.º 10
0
from threading import Lock
from flask import session, request, copy_current_request_context

from datetime import datetime
import re

from heatmap import HeatMap
import json

# Cells per axis of the click/move heat maps.
grid_resolution = (50, 50)

# NOTE(review): Flask and SocketIO are used below but not imported in this
# excerpt -- presumably imported in a part of the file not shown here.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
thread_lock = Lock()
# Shared mutable state: guarded by thread_lock in the background task.
clicks_hm = HeatMap(grid_resolution)
moves_hm = HeatMap(grid_resolution)


def bck():
    """Background task: periodically snapshot both heat maps.

    Runs forever; every 2 seconds it serializes the click and move maps
    under the lock.  (The emit/consume step is outside this excerpt.)
    """
    global clicks_hm
    global moves_hm
    global thread_lock

    while True:
        socketio.sleep(2)
        with thread_lock:
            # Calling the HeatMap objects appears to yield array snapshots
            # (they expose .tolist()) -- confirm against the HeatMap class.
            c = clicks_hm()
            m = moves_hm()

            hms = [c.tolist(), m.tolist()]
Ejemplo n.º 11
0
    return contents


def bikable_zone(unit):
    """Return True when *unit* is noticeably faster to reach by bike.

    A unit qualifies when the public-transport-to-bike time ratio
    ('original_value') exceeds 1.5 and the bike ride itself ('bike_val')
    is under 25 minutes.  Units missing either measurement are rejected.
    """
    if unit.get('original_value') is None:
        return False
    bike_val = unit.get('bike_val')
    if bike_val is None:
        # Bug fix: a missing 'bike_val' used to raise TypeError (None < 25).
        return False
    return unit.get('original_value') > 1.5 and bike_val < 25


# Visual configuration for the rendered map, centered on `home`.
renderer = DefaultRenderer(center=home,
                           zoom=12,
                           opacity=0.5,
                           label=make_label,
                           color_scale=custom_color_scale)
# Heat map over the geo_json area on an 800 m grid; cached intermediate
# results are reused across runs.
h_map = HeatMap(home,
                geo_json,
                filename='bike_vs_ubahn',
                square_size=800,
                num_threads=100,
                load_intermediate_results=True)
h_map.generate(calc_time)
h_map.normalize(normalize_log2_scale)
# Outline the zones where cycling clearly beats public transport.
h_map.generate_polygon(bikable_zone,
                       color='#0012b3',
                       opacity=0.6,
                       weight=5,
                       dash_array=[1, 6])
# Render, marking the home location with a small circle before saving.
h_map.render(
    renderer,
    before_saving=lambda r, _: r.add_circle(home, color='#0012b3', radius=20))
Ejemplo n.º 12
0
    def update_heatmap(self):
        """Fold the current frame's detections into the running heat map."""
        if self.heatmap is None:
            # Lazily created: the cropped image size is only known once the
            # first frame has been processed.
            self.heatmap = HeatMap(self.cropped_image_size)

        self.heatmap.add_detections(self.detections)
        self.heatmap.update_map()
Ejemplo n.º 13
0
class VehicleDetector(object):
    """Track vehicles across video frames using multi-scale window classifiers.

    Each frame is downscaled, cropped to the road band, scanned with sliding
    windows, and positive detections are accumulated in a heat map from which
    per-car bounding boxes are derived and tracked over time.
    """

    def __init__(self,
                 save_false_positives=False,
                 use_multires_classifiers=True,
                 use_hires_classifier=False,
                 frame_skip=0):
        """Configure the detector.

        save_false_positives -- dump suspected false-positive crops to disk.
        use_multires_classifiers -- use one classifier per window size
            (ignored when use_hires_classifier is set).
        use_hires_classifier -- use the single high-resolution classifier.
        frame_skip -- frames to interpolate between full detection scans.
        """
        super().__init__()
        # The hires classifier replaces the multi-resolution ensemble.
        self.use_multires_classifiers = use_multires_classifiers and not use_hires_classifier
        self.use_hires_classifier = use_hires_classifier
        # Input frames are downscaled by this factor before scanning.
        self.scale = 2
        # Relative vertical band of the frame expected to contain the road.
        self.crop_y_rel = np.array((0.55, 0.90))
        self.crop_y = None
        self.grid = None
        self.load_classifier()
        self.heatmap = None
        self.pool = Pool(8)
        self.save_false_positives = save_false_positives
        self.detected_cars = []
        self.hog_y_1 = None
        self.hog_y_2 = None
        self.annotate = True
        self.annotated_heatmap = None
        self.frame_count = 0
        # Stored as "scan every Nth frame", hence the +1.
        self.frame_skip = frame_skip + 1

        if self.save_false_positives:
            self.false_positive_dir_name = "false_positives_%s" % Utils.date_file_name(
            ).split(".")[0]
            for size in (64, 32, 16):
                Utils.mkdir("%s/%d" % (self.false_positive_dir_name, size))

    def load_classifier(self):
        """Load the pickled classifier(s) and scaler(s) from disk.

        Populates self.classifier and self.scaler (dicts keyed by window
        size) and self.classifier_sizes (the sizes, largest first).
        """
        filename = "svc_multires.pickle" if not self.use_hires_classifier else "xgb_hires.pickle"

        # NOTE(review): pickle.load executes arbitrary code when unpickling;
        # only load trusted model files.
        with open(filename, "rb") as f:
            self.classifier, self.scaler = pickle.load(f)
            self.classifier_sizes = sorted(list(self.classifier.keys()),
                                           reverse=True)

    def process(self, frame):
        """Process one video frame and return it annotated with detections.

        Runs a full scan every self.frame_skip frames; in between, tracked
        car positions are interpolated instead of re-scanned.
        """
        self.input_frame = frame
        self.output_frame = copy_img(frame)
        # Work at reduced resolution on the road band only.
        scaled_frame = scale_img(self.input_frame, 1 / self.scale)
        self.cropped_frame = self.crop_frame(scaled_frame)
        self.cropped_frame_yuv = bgr2yuv(self.cropped_frame)

        if self.frame_count % self.frame_skip == 0:
            self.initialize_scan()
            self.scan_edges()
            self.scan_vehicle_bboxes()
            #self.sliding_window()
            self.update_heatmap()
            self.update_car_detections()
        else:
            # Cheap frame: move existing tracks instead of re-scanning.
            self.interpolate_car_detections()

        self.draw_detected_cars()
        self.draw_evaluated_windows()
        self.draw_detections()
        self.draw_bboxes()
        #self.draw_grid_on_cropped_img()
        #self.calc_test_hog()
        self.annotate_heatmap()

        self.frame_count += 1

        return self.output_frame

    def draw_grid_on_cropped_img(self):
        """Lazily build a perspective reference grid and blend it onto the frame.

        The grid (horizon lines plus lane rays toward a vanishing point) is
        drawn once and cached in self.grid.
        """
        # Bug fix: `self.grid == None` compares elementwise once the grid is
        # an image array, which raises on truth-testing; use `is None`.
        if self.grid is None:
            w, h = img_size(self.cropped_frame)
            self.grid = new_img((w, h))
            # Horizontal reference lines at h/2, h/4, h/8.
            y = h // 2
            for i in range(0, 3):
                draw_line(self.grid, (0, y), (w, y),
                          color=cvcolor.white,
                          thickness=1,
                          antialias=False)
                y //= 2

            vanishing_point = (w // 2, h // 4 - h // 8)
            dw1 = w // 4
            dw2 = w // 2 + w // 4
            w11 = w // 2 - dw1
            w12 = w // 2 + dw1
            w21 = w // 2 - dw2
            w22 = w // 2 + dw2

            # Four rays from the bottom edge converging at the vanishing point.
            draw_line(self.grid, (w11, h),
                      vanishing_point,
                      color=cvcolor.white,
                      antialias=False)
            draw_line(self.grid, (w12, h),
                      vanishing_point,
                      color=cvcolor.white,
                      antialias=False)
            draw_line(self.grid, (w21, h),
                      vanishing_point,
                      color=cvcolor.white,
                      antialias=False)
            draw_line(self.grid, (w22, h),
                      vanishing_point,
                      color=cvcolor.white,
                      antialias=False)

            # np.int was removed in NumPy 1.24; plain int is equivalent here.
            draw_line(self.grid,
                      self.left_edge[0].astype(int),
                      self.left_edge[1].astype(int),
                      color=cvcolor.pink,
                      antialias=False)

        if self.annotate:
            self.cropped_frame = blend_img(self.cropped_frame, self.grid, 0.25)

    def crop_frame(self, scaled_frame):
        """Return the road band of *scaled_frame*, computing crop bounds once.

        On the first call this also derives the cropped image size, frame
        rectangle, vanishing point and left-edge reference line.
        """
        w, h = img_size(scaled_frame)
        # Bug fix: once crop_y is a numpy array, `== None` compares
        # elementwise and truth-testing it raises; `is None` is correct.
        if self.crop_y is None:
            y1, y2 = (self.crop_y_rel * h)
            # Fixed crop height: four 64px classifier rows at working scale.
            h_cropped = 64 * 4 // self.scale
            y1 = y2 - h_cropped
            # np.int was removed in NumPy 1.24; plain int is equivalent here.
            self.crop_y = np.array((y1, y2), int)
            self.cropped_image_size = np.array((h_cropped, w))
            self.cropped_frame_rect = Rectangle(pos=(0, 0),
                                                size=(w, h_cropped))
            self.vanishing_point = Point(w // 2,
                                         h_cropped // 4 - h_cropped // 8)
            self.left_edge = [
                Point(0, h_cropped // 4 + h_cropped // 16),
                self.vanishing_point
            ]

        return scaled_frame[self.crop_y[0]:self.crop_y[1]]

    def initialize_scan(self):
        self.detections = []
        self.evaluated_windows = []
        self.false_positive_count = 0

    def sliding_window(self):
        """Run the classic sliding-window scan at several window sizes."""
        # (window size, y start, y end, vertical step) in 64px reference units.
        scan_plan = (
            (48, 16, 16, 24),
            (32, 16, 16, 8),
            (24, 12, 12, 12),
            (16, 0, 0, 8),
        )
        for size, y_start, y_end, step in scan_plan:
            positives = self.sliding_window_impl(size, y_start, y_end, step)
            for window_rect, i_score in positives:
                self.detections.append((window_rect, i_score))

    def sliding_window_impl(self, window_size, y1, y2, delta_y):
        """Collect window positions for one size and classify them.

        Arguments are in 64px reference coordinates and are rescaled to the
        working resolution here.  Returns the positive (rect, score) pairs
        from evaluate_windows().  (Removed a dead `X = []` local.)
        """
        window_size = window_size * 4 // self.scale
        ppc = 16 * window_size // 64
        window_positions = []
        y1 = y1 * 4 // self.scale
        y2 = y2 * 4 // self.scale
        delta_y = delta_y * 4 // self.scale

        y = y1
        while y <= y2:
            # The hog-slice argument is unused by the horizontal scan.
            window_positions_row = self.sliding_window_horizontal(
                None, window_size, ppc, y)
            window_positions.extend(window_positions_row)
            y += delta_y

        return self.evaluate_windows(window_positions)

    def sliding_window_horizontal(self, hog_for_slice, window_size, ppc, y):
        """Window positions for one row at height *y*, stepping a quarter window.

        hog_for_slice and ppc are currently unused; they are kept so the
        signature stays compatible with existing callers.  (Removed a dead
        `X = []` local.)
        """
        h, w = self.cropped_image_size
        delta_x = window_size // 4
        x = 0
        window_positions = []

        while x <= w - window_size:
            window_positions.append(Rectangle(pos=(x, y), size=window_size))
            x += delta_x

        return window_positions

    def evaluate_window(self, window):
        """Classify a single window crop; returns the raw predicted label."""
        window_size = window.height()
        # Crop the YUV frame to the window (coordinates may be floats).
        window_yuv = self.cropped_frame_yuv[int(window.y1):int(window.y2),
                                            int(window.x1):int(window.x2)]
        X = np.array(extract_features(window_yuv, window_size))
        # Scaler and classifier are selected by window size.
        normalized_feature_vector = self.scaler[window_size].transform(X)
        return self.classifier[window_size].predict(
            normalized_feature_vector)[0]

    def evaluate_windows_of_size(self, windows, window_size):
        """Batch-classify *windows* with the classifier for *window_size*.

        Returns a list of (rect, score) pairs for the positively classified
        windows.  All evaluated windows are appended to
        self.evaluated_windows; suspected false positives are optionally
        saved to disk for later hard-negative mining.
        """
        X = []
        for w in windows:
            window_yuv = self.cropped_frame_yuv[int(w.y1):int(w.y2),
                                                int(w.x1):int(w.x2)]
            # Windows of a different size are resampled to the classifier's
            # native resolution.
            if w.width() != window_size or w.height() != window_size:
                window_yuv = cv2.resize(window_yuv, (window_size, window_size))

            # Pixels-per-cell differs between the hires and multires models.
            ppc = 8 if self.use_hires_classifier else 16
            X.append(extract_features(window_yuv, window_size, ppc=ppc))
        X = np.array(X)

        windows = np.array(windows)
        normalized_feature_vector = self.scaler[window_size].transform(X)
        score = self.classifier[window_size].predict(normalized_feature_vector)
        pos_window_indexes = np.where(score == 1.0)[0]
        pos_windows = windows[pos_window_indexes]
        pos_window_scores = score[pos_window_indexes]

        result = []
        self.evaluated_windows.extend(windows)
        # NOTE(review): `score` is rebound by the loop variable below,
        # shadowing the prediction array -- intentional but easy to misread.
        for r, score in zip(pos_windows, pos_window_scores):
            result.append((r, score))
            if self.save_false_positives and self.is_false_positive_candidate(
                    r):
                window_img = crop_img(self.cropped_frame, r.x1, r.y1, r.x2,
                                      r.y2)
                save_img(
                    window_img, "%s/%d/%04d-%04d" %
                    (self.false_positive_dir_name, window_size,
                     self.frame_count, self.false_positive_count))
                self.false_positive_count += 1

        return result

    def evaluate_windows(self, windows):
        """Classify *windows*, batching them per classifier size.

        With the multi-resolution ensemble, windows are grouped by height
        and any window without a matching classifier falls back to the
        64px model.  Returns the positive (rect, score) pairs.
        """
        if not self.use_multires_classifiers:
            result = []
            result.extend(self.evaluate_windows_of_size(windows, 64))
            return result

        result = []
        for size in self.classifier_sizes:
            same_size = [w for w in windows if int(w.height()) == size]
            if same_size:
                result.extend(self.evaluate_windows_of_size(same_size, size))

        leftovers = [
            w for w in windows if w.height() not in self.classifier_sizes
        ]
        if leftovers:
            result.extend(self.evaluate_windows_of_size(leftovers, 64))

        return result

    def scan_edges(self):
        """Scan the frame borders (left, mirrored right, top) for vehicles."""
        h, w = self.cropped_image_size
        windows = self.left_edge_windows()
        # Mirror the left-edge layout across the vertical center line so the
        # right edge is covered identically.
        windows.extend(rect.mirror_x(w // 2) for rect in list(windows))
        windows.extend(self.top_edge_windows())
        for window_rect, i_score in self.evaluate_windows(windows):
            self.detections.append((window_rect, i_score))

    def left_edge_windows(self):
        """Fixed window layout covering the left image border."""
        result = []
        h, w = self.cropped_image_size
        frame_rect = Rectangle(pos=(0, 0), size=(w, h))

        # (width, height, x, y) in 64-pixel reference coordinates.
        layout = (
            (48, 48, 0, 16),
            (32, 32, 0, 0),
            (32, 32, 8, 0),
            (32, 32, 16, 0),
            (32, 32, 0, 8),
            (32, 32, 8, 8),
            (32, 32, 16, 8),
        )
        for width, height, x, y in layout:
            window = Rectangle(pos=(x, y),
                               size=(width, height)) * 4 // self.scale
            assert frame_rect.contains(window)
            result.append(window)
        return result

    def top_edge_windows(self):
        """Small windows along the top edge between the two corner regions."""
        h, w = self.cropped_image_size
        frame_rect = Rectangle(pos=(0, 0), size=(w, h))
        ws = 16 * 4 // self.scale
        left_edge_ws = 48 * 4 // self.scale
        result = []
        # Skip the corner areas already handled by left_edge_windows().
        x1, x2 = left_edge_ws, w - ws - left_edge_ws
        x, y = x1, 4 * 4 // self.scale
        while x <= x2:
            window = Rectangle(pos=(x, y), size=ws)
            assert frame_rect.contains(window)
            result.append(window)
            # Bug fix: "ws / 2" made x (and the window positions) drift to
            # float; integer division keeps coordinates integral.
            x += ws // 2

        return result

    def window_size_for_rect(self, rect):
        for ws in reversed(self.classifier_sizes):
            if ws >= 0.65 * rect.height():
                return ws
        return 64

    def scan_vehicle_bboxes(self):
        """Re-scan around previously tracked cars with dense local windows.

        For each tracked car, a search region slightly larger than its last
        bounding box is tiled with windows of an appropriate classifier
        size; positives are appended to self.detections.
        """
        h, w = self.cropped_image_size
        windows = []
        for d in self.detected_cars:
            # Map the full-frame car rect into cropped/scaled coordinates.
            d_rect = (d.current_rect() // self.scale).translate(
                (0, -self.crop_y[0]))

            #if d_rect.aspect_ratio() >= 0.75:
            #    windows.append(d_rect.intersect(self.cropped_frame_rect))

            ws = self.window_size_for_rect(d_rect)
            # Step a quarter window in each direction.
            dx = ws // 4
            dy = ws // 4

            pos = d_rect.center()
            rect_w, rect_h = d_rect.size()
            # The search region is never smaller than one window.
            rect_w = max(rect_w, ws)
            rect_h = max(rect_h, ws)
            w_rect = Rectangle(center=pos, size=(rect_w, rect_h))
            # Widen more horizontally than vertically; clip to the frame.
            w_rect = w_rect.expand(2 * dx,
                                   dy // 2).intersect(self.cropped_frame_rect)

            y = w_rect.y1
            while y <= w_rect.y2 - ws:
                x = w_rect.x1
                while x <= w_rect.x2 - ws:
                    r = Rectangle(pos=(x, y), size=ws)
                    windows.append(r)
                    x += dx
                y += dy

        if windows:
            result = self.evaluate_windows(windows)
            for window_rect, i_score in result:
                self.detections.append((window_rect, i_score))

    def is_false_positive_candidate(self, window_rect):
        """Heuristically decide whether a positive window is likely spurious.

        Used only for collecting hard-negative training crops.  The frame
        ranges and positions below appear tuned to one specific project
        video (where real cars are known to be) -- treat them as
        video-specific magic numbers.
        """
        w, h = img_size(self.input_frame)
        r = self.transform_rect(window_rect)

        if self.frame_count < 125:
            # Early frames: everything is treated as a candidate.
            return True
        elif r.x2 < w - w // 3:
            # Detections left of the right third are candidates.
            return True
        elif any(
                map(lambda d: d.current_rect().intersects(r),
                    self.detected_cars)):
            # Overlaps a tracked car -- presumably a true positive.
            return False
        elif self.frame_count < 150 and r.x1 > w * 3 // 4:
            return False
        elif self.frame_count > 675 and self.frame_count < 700 and r.x1 > w * 3 // 4:
            return False
        return True

    def draw_detections(self):
        """Overlay raw per-window detections on the output and debug frames."""
        if not self.annotate:
            return
        self.detections_frame = copy_img(self.cropped_frame)
        # Detections are in cropped/scaled coordinates; shift and rescale
        # them back into the full output frame.
        offset = Point(0, self.crop_y[0])
        for rect, _ in self.detections:
            draw_rectangle(self.output_frame,
                           rect.translate(offset) * self.scale,
                           color=cvcolor.light_blue)
            draw_rectangle(self.detections_frame, rect, color=cvcolor.pink)

    def draw_evaluated_windows(self):
        """Overlay every evaluated scan window on the output and debug frames."""
        if not self.annotate:
            return
        self.sliding_windows_frame = copy_img(self.cropped_frame)
        # Windows are in cropped/scaled coordinates; shift and rescale them
        # back into the full output frame.
        offset = Point(0, self.crop_y[0])
        for window in self.evaluated_windows:
            draw_rectangle(self.output_frame,
                           window.translate(offset) * self.scale,
                           color=cvcolor.gray70)
            draw_rectangle(self.sliding_windows_frame,
                           window,
                           color=cvcolor.gray70)

    def draw_bboxes(self):
        """Draw heat-map boxes (green) and confirmed car tracks (orange)."""
        if self.annotate:
            self.annotated_detected_cars = copy_img(self.cropped_frame)
            # Raw heat-map bounding boxes, mapped to full-frame coordinates.
            for r in map(self.transform_rect, self.heatmap.get_bboxes()):
                draw_rectangle(self.output_frame, r, color=cvcolor.green)

        for d in self.detected_cars:
            # Skip tentative tracks that have not been confirmed yet.
            if not d.is_real:
                continue
            r = d.current_rect()
            draw_rectangle(self.output_frame,
                           r,
                           color=cvcolor.orange,
                           thickness=2)

            if self.annotate:
                # Same rect in cropped/scaled coordinates for the debug frame.
                r1 = (r // self.scale).translate(Point(0, -self.crop_y[0]))
                draw_rectangle(self.annotated_detected_cars,
                               r1,
                               color=cvcolor.orange,
                               thickness=2)

    def draw_detected_cars(self):
        """Paste thumbnails of tracked cars down the left side of the output."""
        i = 0
        for d in self.detected_cars:
            r = d.current_rect()
            # Ignore tentative tracks and degenerate (tiny) rectangles.
            if not d.is_real or r.width() <= 4 or r.height() <= 4:
                continue
            car_img = crop_img(self.input_frame, r.x1, r.y1, r.x2, r.y2)
            size, margin = 128, 32
            car_img = cv2.resize(car_img, (size, size))
            # Stack thumbnails vertically, one slot per drawn car.
            paste_img(self.output_frame, car_img,
                      (margin, (size + margin) * i + margin))
            i += 1

    def update_heatmap(self):
        """Fold the current frame's detections into the running heat map."""
        if self.heatmap is None:
            # Lazily created: the cropped image size is only known once the
            # first frame has been processed (see crop_frame).
            self.heatmap = HeatMap(self.cropped_image_size)

        self.heatmap.add_detections(self.detections)
        self.heatmap.update_map()

    def transform_rect(self, rect):
        """Map *rect* from cropped/scaled coordinates back to full-frame ones."""
        return rect.translate(Point(0, self.crop_y[0])) * self.scale

    def update_car_detections(self):
        """Match heat-map bounding boxes against tracked cars; spawn new tracks.

        Each existing track consumes the candidate rectangles it matches;
        leftovers become new VehicleDetection tracks, and stale tracks are
        dropped.
        """
        rect_list = [self.transform_rect(r) for r in self.heatmap.get_bboxes()]
        # Age every existing track by one frame (plain loop instead of a
        # side-effect-only list comprehension).
        for d in self.detected_cars:
            d.tick()

        # sort existing car detections by area: bigger rectangles represent
        # closer cars, which get first pick of the candidate rectangles
        self.detected_cars = sorted(self.detected_cars,
                                    key=lambda d: d.current_area(),
                                    reverse=True)

        for d in self.detected_cars:
            rect_list = d.update(rect_list)

        # remaining rectangles are newly detected vehicles:
        for r in rect_list:
            self.detected_cars.append(VehicleDetection(r, self.frame_skip))

        # remove old detections
        self.detected_cars = [d for d in self.detected_cars if d.is_alive()]

    def interpolate_car_detections(self):
        [d.interpolate() for d in self.detected_cars]

    def annotate_heatmap(self):
        """Blend the raw and thresholded heat maps over the cropped frame.

        Produces self.annotated_heatmap and
        self.annotated_thresholded_heatmap for debugging/visualization.
        """
        # Scale heat values into the visible 8-bit range (x16 gain).
        heatmap = (self.heatmap.map * 16).astype(np.uint8)
        heatmap = expand_channel(heatmap)
        self.annotated_heatmap = blend_img(self.cropped_frame,
                                           heatmap,
                                           1.0,
                                           beta=0.5)
        # The thresholded map appears to be binary (0/1), hence the x255.
        thresholded_heatmap = (self.heatmap.thresholded_map * 255).astype(
            np.uint8)
        thresholded_heatmap = expand_channel(thresholded_heatmap)
        self.annotated_thresholded_heatmap = blend_img(self.cropped_frame,
                                                       thresholded_heatmap,
                                                       0.5,
                                                       beta=0.5)

    def calc_test_hog(self):
        """Debug helper: compare whole-frame HOG against per-window HOG.

        Computes the HOG visualization of the full Y channel (hog_y_1), the
        same built window-by-window (hog_y_2), and their absolute difference
        (hog_y_12), all scaled into uint8 images.  (Removed the unused
        `delta_ij` local.)
        """
        import skimage.feature

        def my_hog(img):
            # NOTE(review): `visualise`/`normalise` are the old skimage
            # spellings; newer releases use `visualize`. Pin skimage or
            # update if this helper is revived.
            return skimage.feature.hog(img,
                                       orientations=9,
                                       pixels_per_cell=(ppc, ppc),
                                       cells_per_block=(2, 2),
                                       visualise=True,
                                       transform_sqrt=False,
                                       feature_vector=False,
                                       normalise=None)[1]

        ppc = 16
        ws = 64
        w, h = img_size(self.cropped_frame)
        self.hog_y_1 = my_hog(self.cropped_frame_yuv[:, :, 0])
        self.hog_y_2 = np.zeros_like(self.hog_y_1)
        for y in range(0, h, ws):
            for x in range(0, w, ws):
                hog = my_hog(self.cropped_frame_yuv[y:y + ws, x:x + ws, 0])
                self.hog_y_2[y:y + ws, x:x + ws] = hog

        self.hog_y_12 = (np.abs(self.hog_y_1 - self.hog_y_2) * 128.0).astype(
            np.uint8)
        self.hog_y_1 = (self.hog_y_1 * 32).astype(np.uint8)
        self.hog_y_2 = (self.hog_y_2 * 32).astype(np.uint8)
Ejemplo n.º 14
0
 def setUp(self):
     """Load fixture rows and seed a HeatMap with the first five pitches."""
     # Context manager ensures the fixture file is closed promptly
     # (the old code leaked the file handle).
     with open(test_data, 'r') as fixture:
         self.rows = load(fixture)
     self.heatmap = HeatMap()
     for idx in range(5):
         params = self.rows[idx]
         self.heatmap.process_pitch(*params)
Ejemplo n.º 15
0
        loss_value = loss_fn(label, logits)

        lastc = model.last_conv_value

    guide = loss_tape.gradient(loss_value, image)
    grads = logits_tape.gradient(logits, lastc)

    GAP_pool = tf.keras.layers.AveragePooling2D(
        (lastc.shape[1], lastc.shape[2]), padding='valid',
        strides=(1, 1))(grads)

    grad_c = tf.zeros(lastc.shape[1:3], tf.float32)
    for idx in range(0, GAP_pool.shape[3]):
        grad_c = tf.nn.relu(grad_c +
                            lastc[0, :, :, idx] * GAP_pool[0, :, :, idx])

# Build the Grad-CAM heat map from the class-weighted activations and the
# guided gradients.
grad_cam, heatmap = HeatMap(grad_c, guide, dims=2)

# Overlay the heat map on the input image.
image = image.numpy() * 255  #rescale to original
image = np.squeeze(np.uint8(image))

# The grayscale input needs three channels to blend with the colored map.
RGB_img = cv.cvtColor(image, cv.COLOR_GRAY2BGR)  #convert to "RGB" (size)

heatmap_img = cv.applyColorMap(np.uint8(heatmap), cv.COLORMAP_JET)

# 70% heat map, 30% original image.
fin = cv.addWeighted(heatmap_img, 0.7, RGB_img, 0.3, 0)

plt.imshow(fin)
#cv.imshow('image_w_heatmap', fin)
Ejemplo n.º 16
0
def video_stream():
    """Main webcam loop: track head pose and accumulate gaze heat maps.

    Maintains a global heat map and a per-user ("instantaneous") heat map
    of where viewers look on a calibrated shop-window area.  Interactive
    keys: q quit, s save maps, r reset user map, b/e begin/end single-user
    reading, 7/9/1/3 capture calibration corners, 5 reset calibration,
    p print calibration, c compute the window dimensions.
    """

    # construct the argument parse and parse the arguments
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-p", "--shape-predictor", required=True,
    #	help="path to facial landmark predictor")
    # ap.add_argument("-r", "--picamera", type=int, default=-1,
    #	help="whether or not the Raspberry Pi camera should be used")
    # args = vars(ap.parse_args())

    # initialize the video stream and allow the camera sensor to warm up
    global sup_esq, sup_dir, inf_esq, inf_dir, vitrine_larg_altu, divisao_colum_row, comecar_leitura_unica

    # Default calibration corners (top-left/right, bottom-left/right).
    sup_esq = (330, 10)
    sup_dir = (-330, 10)
    inf_esq = (330, -170)
    inf_dir = (-330, -170)
    vitrine_larg_altu = None
    comecar_leitura_unica = False

    print("[INFO] Preparando a câmera...")
    cap = cv2.VideoCapture(1)

    set_resolution_480(cap)

    time.sleep(1.0)

    land_mark = LandMark()
    head_pose = HeadPose(cv2)
    heatmap_global = HeatMap("Global")
    heatmap_usuario = HeatMap("Instantaneo")

    # Frame loop over the video stream.
    while True:
        # Grab the next frame.
        ret, frame = cap.read()

        key = cv2.waitKey(100) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
        if key == ord("s"):
            heatmap_global.salve_map()
            heatmap_usuario.salve_map()
            print("[INFO] HeatMap salvo!!")

        if key == ord("r"):
            heatmap_usuario.reset_map()
            # heatmap_global.reset_map()
            print("[INFO] HeatMap resetaqdo!!")

        if key == ord("b"):
            comecar_leitura_unica = True
            print("[INFO] Leitura de usuário começado!!")
        if key == ord("e"):
            comecar_leitura_unica = False
            print("[INFO] Leitura de usuário parado!!")

        # Capture calibration corners from the current head-pose target.
        # NOTE(review): `!= None` on a tuple works but `is not None` would
        # be the idiomatic (and safer) comparison.
        if (head_pose.vitrine_points != None):
            if key == ord("7"):
                sup_esq = head_pose.vitrine_points
                print("[INFO] Ponto superior esquerdo capturado!!")
            if key == ord("9"):
                sup_dir = head_pose.vitrine_points
                print("[INFO] Ponto superior direito capturado!!")
            if key == ord("1"):
                inf_esq = head_pose.vitrine_points
                print("[INFO] Ponto inferor esquerdo capturado!!")
            if key == ord("3"):
                inf_dir = head_pose.vitrine_points
                print("[INFO] Ponto inferior direito capturado!!")

        if key == ord("5"):
            sup_esq = None
            sup_dir = None
            inf_esq = None
            inf_dir = None
            vitrine_larg_altu = None
            print("[INFO] Calibração resetada!!")

        if key == ord("p"):
            print("[INFO] Sup Esquerda:")
            print(sup_esq)
            print("[INFO] Sup Direita:")
            print(sup_dir)
            print("[INFO] Inf Esquerda:")
            print(inf_esq)
            print("[INFO] Inf Direita:")
            print(inf_dir)
            print("[INFO] Largura ; Altura:")
            print(vitrine_larg_altu)

        if key == ord("c"):
            # All four corners are required before the window dimensions
            # and the heat-map cell size can be derived.
            if (sup_esq != None and sup_dir != None and inf_esq != None
                    and inf_dir != None):
                vitrine_larg_altu = calibrar_vitrine(sup_esq, sup_dir, inf_esq,
                                                     inf_dir)
                divisao_colum_row = [0, 0]
                divisao_colum_row[0] = int(vitrine_larg_altu[0] /
                                           heatmap_global.width)
                divisao_colum_row[1] = int(vitrine_larg_altu[1] /
                                           heatmap_global.higth)
                print("[INFO] Calibração feita!!")

        if (None is frame):
            print("[ERROR] FALHA NA CAPTURA DO VIDEO!!")
            print("[ERROR] TENTANDO NOVAMENTE")
            continue

        # Mirror the frame so movements match the viewer's perspective.
        frame = cv2.flip(frame, 1)

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        land_mark.set_frame(gray)
        head_pose.set_frame(gray)

        mapa = land_mark.get_land_mark()

        primeiro_elemento = True

        if (mapa.__len__() != 0):  # Found some faces

            # for face in land_mark.shape:  # loop over the detected faces
            for face in mapa:  # loop over the detected faces
                for (x, y) in face:
                    cv2.circle(frame, (int(x), int(y)), 2, (0, 0, 255), -1)

                points = head_pose.get_line_points(face)

                if (points == None):
                    continue

                cv2.arrowedLine(frame, points[0], points[1], (255, 0, 0), 2)

                # TODO: add a time rule so counting only starts after a delay
                if (head_pose.vitrine_points != None):

                    if (sup_esq != None and vitrine_larg_altu != None):
                        coordenada_heat = global2heat(sup_esq,
                                                      head_pose.vitrine_points)
                        heatmap_global.incrementa(coordenada_heat)

                        # Only the first detected face feeds the single-user map.
                        if (primeiro_elemento and comecar_leitura_unica):
                            primeiro_elemento = False
                            heatmap_usuario.incrementa(coordenada_heat)
        else:
            # No faces visible: the single-user session is over.
            heatmap_usuario.reset_map()

        # show the frame
        cv2.imshow("Frame", frame)

    # Persist both maps before shutting down.
    heatmap_global.salve_map()
    heatmap_usuario.salve_map()

    cv2.destroyAllWindows()
    cap.release()

    return
Ejemplo n.º 17
0
# Load the Munich area boundaries used as the heat-map canvas.
with open('./geo/muenchen.json', 'r') as fp:
    geo_json = json.load(fp)


def calc_time(pt1, pt2):
    """Average UBahn travel time between two points, in whole minutes.

    Returns None when no connection is known.
    """
    travel_seconds = mvg.average_time_between(pt1, pt2, datetime.now())
    if not travel_seconds:
        return None
    minutes = int(travel_seconds / 60.0)
    # The value will be normalized later, so keep a second copy under
    # 'time' for the human-readable label.
    return {'value': minutes, 'time': minutes}


# Visual configuration for the rendered map, centered on `home`.
renderer = DefaultRenderer(center=home,
                           zoom=12,
                           color_scale=color,
                           label=label)
# Heat map over the geo_json area on a 250 m grid; cached intermediate
# results are reused across runs.
h_map = HeatMap(home,
                geo_json,
                square_size=250,
                filename='mvg',
                load_intermediate_results=True)
h_map.generate(calc_time)
# Normalize on a log2 scale
h_map.normalize(lambda v, _1, _2: log2(v))
# Then again on a 0 to 1 range
h_map.normalize()
h_map.render(renderer)