Example 1
def draw_box(
        out_img: np.ndarray,
        box: Box,
        labels: Iterable[Tuple[str, Point]],
        color: Color = Color.red(),
        line_thickness: int = 2,
) -> np.ndarray:
    """Draw `box` on `out_img`, rendering each label at its offset from the box's bottom-left corner."""
    cv.rectangle(
        img=out_img,
        pt1=box.top_left,
        pt2=box.bottom_right,
        color=color.to_bgr(),
        thickness=line_thickness,
    )
    for text, translation in labels:
        text_loc: Point = translate_point(
            Point(box.top_left_x, box.bottom_right_y), translation)
        cv.putText(
            img=out_img,
            text=text,
            org=text_loc,
            fontFace=cv.FONT_HERSHEY_SIMPLEX,
            fontScale=0.5,
            color=Color.orange().to_bgr(),
            thickness=2,
        )

    return out_img
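
A minimal usage sketch of draw_box, assuming the Box, Point, and Color helpers come from this project's model module and that draw_box lives in utils alongside write_boxes; the Box constructor arguments shown here are an assumption, not the confirmed signature.

import numpy as np

from model import Box, Color, Point  # assumed project helpers
from utils import draw_box           # assumed location, matching write_boxes

# Blank canvas standing in for a real frame.
canvas = np.zeros((480, 640, 3), dtype=np.uint8)

# Box construction is hypothetical; the real signature lives in model.py.
box = Box(top_left=Point(50, 50), bottom_right=Point(200, 150))

annotated = draw_box(
    out_img=canvas,
    box=box,
    labels=[("Person 1", Point(3, 14)), ("M: 92.5%", Point(3, 30))],
)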
Example 2
def create_color(name):
    """Create color"""

    color = Color(name=name)
    db.session.add(color)
    db.session.commit()

    return color
Example 3
    def detect(
        self,
        img: np.ndarray,
        out_img: Optional[np.ndarray] = None,
        color: Color = Color.yellow(),
        line_thickness: int = 2,
    ) -> np.ndarray:
        """
        Detect self.template in img and draw a box around it.

        :param img: The image to detect the template in
        :param out_img: The image to draw on (should be at least as large as img)
        :param color: The color of the bounding box
        :param line_thickness: The thickness of the bounding box line
        :return: out_img
        """

        # TODO: Make this scale invariant.
        #  see https://www.pyimagesearch.com/2015/01/26/multi-scale-template-matching-using-python-opencv/
        if out_img is None:
            out_img = img.copy()
        else:
            assert all(
                img_dim <= out_img_dim
                for img_dim, out_img_dim in zip(img.shape, out_img.shape))

        similarity_map: np.ndarray = self._compute_similarity_map(img)
        match_xy_indices: Iterable[Tuple[
            int, int]] = self._get_match_xy_indices(similarity_map)

        # For each match, draw the bounding box
        for x, y in match_xy_indices:
            top_left = x, y
            bottom_right = x + self.temp_w, y + self.temp_h
            cv.rectangle(
                img=out_img,
                pt1=top_left,
                pt2=bottom_right,
                color=color.to_bgr(),
                thickness=line_thickness,
            )

        return out_img
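
A short usage sketch of detect; the class that owns this method is not shown in the snippet, so `detector` below stands for an already-constructed instance, and the file paths are illustrative.

import cv2 as cv

from model import Color  # assumed project helper

img = cv.imread("frames/frame_0001.png")  # illustrative path
boxed = detector.detect(img, color=Color.yellow(), line_thickness=3)
cv.imwrite("frames/frame_0001_boxed.png", boxed)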
Example 4
def add_color_data(hex_code, color_name):
    """Assume hex_code is a 7-character string that's a hex code.
    Add it to the database."""

    color = Color(hex_code=hex_code.rstrip().lower(),
                  color_name=color_name.rstrip().lower())
    db.session.add(color)
    try:
        db.session.commit()
    except exc.SQLAlchemyError as e:
        # SQLAlchemyError already covers InvalidRequestError and IntegrityError;
        # roll back so the session is usable after a failed commit
        db.session.rollback()
        print(hex_code + '\n' + str(e))
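
A quick illustration of the normalization this function performs before inserting; the values are made up.

# Trailing whitespace/newlines are stripped and case is folded before insert:
add_color_data("#FF0000\n", "Red ")
# stored as hex_code="#ff0000", color_name="red"

# A duplicate hex_code would trip the table's uniqueness constraint (if one exists);
# the SQLAlchemyError handler above logs the offending hex and rolls back.
add_color_data("#FF0000", "crimson")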
Example 5
def write_boxes(
        out_path: str,
        out_img: np.ndarray,
        boxes: Iterable[Box],
        labelss: Iterable[Iterable[Tuple[str, Point]]] = tuple(),
        color: Color = Color.red(),
        line_thickness: int = 2,
):
    # fillvalue=() lets trailing boxes without a label list still be drawn
    for box, labels in zip_longest(boxes, labelss, fillvalue=()):
        draw_box(out_img, box, labels, color, line_thickness)

    cv.imwrite(filename=out_path, img=out_img)
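
A minimal sketch of calling write_boxes with fewer label lists than boxes; thanks to fillvalue=() the unlabeled box is still drawn. Box construction is assumed as in the earlier sketch.

import numpy as np

from model import Box, Color, Point  # assumed project helpers

canvas = np.zeros((480, 640, 3), dtype=np.uint8)
boxes = [
    Box(top_left=Point(10, 10), bottom_right=Point(60, 60)),    # labeled
    Box(top_left=Point(100, 10), bottom_right=Point(150, 60)),  # unlabeled
]
labelss = [[("match", Point(3, 14))]]  # only one label list for two boxes

write_boxes("out/annotated.png", canvas, boxes, labelss)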
Example 6
def load_colors():
    """Load colors/hex from css3 dict into database."""

    print "Color"

    for color_hex, color_name in css3_hex_to_names.items():
        color = Color(color_hex=color_hex, color_name=color_name)

        db.session.add(color)

    db.session.commit()
Example 7
def load_color():
    "Load colors from colors into database"

    print "Colors"

    for row in open("seed_data/colors"):
        color_name = row.rstrip()

        # Use a distinct name so the Color row doesn't shadow the string it wraps
        color = Color(color=color_name)

        db.session.add(color)

    db.session.commit()
Example 8
    def __init__(self, view):
        self.view = view
        self.draw_mode = 'select'
        self.viewable_size = 512
        self.selected_shape = None
        self.draw_color = Color(0.0, 0.0, 0.0, 1.0)
        self.selected_draw_color = Color(0.0, 0.0, 0.0, 1.0)
        self.view.update_color_indicator(*self.draw_color.rgba())
        self.zoom_list = [.25, .5, 1.0, 2.0, 4.0]
        self.zoom_list.sort(reverse=True)
        self.zoom_level = 2
        self.set_v_scrollbar_size(512)
        self.set_h_scrollbar_size(512)
        self.camera = Camera(2048, 2048)
        self.threeD_mode = False
        self.img_mode = False
        self.t = None
        self.waiters = []
        self.current_image = None

        for w in QApplication.topLevelWidgets():
            if isinstance(w, QMainWindow):
                self.main_window = w
                break
Example 9
def load_colors(color_filename):
    """Load colors from seed_data/generic_colors into database."""

    # Delete all rows in table, so if we need to run this a second time,
    # we won't be trying to add duplicate colors
    Color.query.delete()

    # Read generic_colors file and insert data
    for row in open(color_filename):
        row = row.rstrip()
        color_id, color = row.split("|")

        # color_id from the seed file is unused; the database assigns its own key
        color_obj = Color(color=color)

        # We need to add to the session or it won't ever be stored
        db.session.add(color_obj)

    # Once we're done, we should commit our work
    db.session.commit()

    print("Colors inserted")
Example 10
    def track_faces(
        self,
        clip_dir: str,
        out_base_dir: str,
        draw_on_dir: Optional[str] = None,
        detect_only: bool = False,
    ):
        # Setup
        # load image paths
        frames: List[os.DirEntry] = load_and_sort_dir(clip_dir)
        draw_on_frames: List[os.DirEntry] = load_and_sort_dir(draw_on_dir)
        assert len(draw_on_frames) in (0, len(frames))

        # create output directory
        out_dir: str = create_output_dir(out_base_dir)

        # initialize variables required for object tracking
        new_face_id: Iterator[int] = count(start=1)
        tracked_faces: Dict[int, TrackedFace] = {}

        # Iterate Through Video Frames
        for frame, draw_on_frame in zip_longest(frames, draw_on_frames):
            # load new frame
            img = cv.imread(frame.path)

            # load out_img
            out_img: np.ndarray = (img.copy() if draw_on_frame is None else
                                   cv.imread(draw_on_frame.path))

            # ensure out_img is at least as large as img
            assert len(img.shape) == len(out_img.shape) and all(
                out_dim >= in_dim
                for in_dim, out_dim in zip(img.shape, out_img.shape))

            detected_face_boxes: List[Box] = self.detect_face_boxes(img)

            # If tracking is disabled, draw the boxes and move to next frame
            if detect_only:
                write_boxes(
                    out_path=os.path.join(out_dir, frame.name),
                    out_img=out_img,
                    boxes=detected_face_boxes,
                )
                continue

            detected_faces: List[GenderedFace] = gender_faces(
                img=img,
                faces=[
                    self.recognize_face(img, detected_face_box)
                    for detected_face_box in detected_face_boxes
                ],
            )

            current_face_ids: Set[int] = set()
            lost_face_ids: Set[int] = set()

            # Iterate over the known (tracked) faces
            for tracked_face in tracked_faces.values():
                matched_detected_faces: List[GenderedFace] = [
                    detected_face for detected_face in detected_faces
                    if self.faces_match(tracked_face, detected_face)
                ]

                if not matched_detected_faces:
                    # Tracked face was not matched to any detected face
                    # Increment staleness since we didn't detect this face
                    tracked_face.staleness += 1
                    # Update tracker with img and get confidence
                    tracked_confidence: float = tracked_face.tracker.update(
                        img)
                    if (tracked_face.staleness < self.tracking_expiry
                            and tracked_confidence >= self.tracking_threshold):
                        # Assume face is still in frame but we failed to detect
                        # Update box with predicted location box
                        predicted_box: Box = Box.from_dlib_rect(
                            tracked_face.tracker.get_position())
                        tracked_face.box = predicted_box
                        current_face_ids.add(tracked_face.id_)
                    else:
                        # Assume face has left frame because either it is too stale or confidence is too low
                        if self.remember_identities:
                            # Set effectively infinite staleness to force tracker reset if face is found again later
                            tracked_face.staleness = sys.maxsize
                        else:
                            lost_face_ids.add(tracked_face.id_)
                    continue

                # Tracked face was matched to one or more detected faces
                # Multiple matches should rarely happen if faces in frame are distinct. We take closest to prev location
                # TODO: Handle same person multiple times in frame
                matched_detected_face = min(
                    matched_detected_faces,
                    key=lambda face: tracked_face.box.distance_to(face.box),
                )
                # Update tracked_face
                tracked_face.descriptor = matched_detected_face.descriptor
                tracked_face.shape = matched_detected_face.shape
                tracked_face.box = matched_detected_face.box
                if tracked_face.staleness >= self.tracking_expiry:
                    # Face was not present in last frame so reset tracker
                    tracked_face.tracker = dlib.correlation_tracker()
                    tracked_face.tracker.start_track(
                        image=img,
                        bounding_box=tracked_face.box.to_dlib_rect())
                else:
                    # Face was present in last frame so just update guess
                    tracked_face.tracker.update(
                        image=img, guess=tracked_face.box.to_dlib_rect())
                tracked_face.staleness = 0
                tracked_face.gender = matched_detected_face.gender
                tracked_face.gender_confidence = matched_detected_face.gender_confidence
                # Add tracked_face to current_ids to reflect that it is in the frame
                current_face_ids.add(tracked_face.id_)
                # remove matched_detected_face from detected_faces
                detected_faces.remove(matched_detected_face)

            # Delete all faces that were being tracked but are now lost
            # lost_face_ids will always be empty if self.remember_identities is True
            for id_ in lost_face_ids:
                del tracked_faces[id_]

            for new_face in detected_faces:
                # This is a new face (previously unseen)
                id_ = next(new_face_id)
                tracker: dlib.correlation_tracker = dlib.correlation_tracker()
                tracker.start_track(image=img,
                                    bounding_box=new_face.box.to_dlib_rect())
                tracked_faces[id_] = TrackedFace(
                    box=new_face.box,
                    descriptor=new_face.descriptor,
                    shape=new_face.shape,
                    id_=id_,
                    tracker=tracker,
                    gender=new_face.gender,
                    gender_confidence=new_face.gender_confidence,
                )
                current_face_ids.add(id_)

            write_boxes(
                out_path=os.path.join(out_dir, frame.name),
                out_img=out_img,
                boxes=[tracked_faces[id_].box for id_ in current_face_ids],
                labelss=[[
                    (
                        f'Person {id_}',
                        Point(3, 14),
                    ),
                    (
                        f'{tracked_faces[id_].gender.name[0].upper()}: {round(100 * tracked_faces[id_].gender_confidence, 1)}%',
                        Point(3, 30),
                    ),
                ] for id_ in current_face_ids],
                color=Color.yellow(),
            )

            print(
                f"Processed {frame.name}.  Currently tracking {len(tracked_faces)} faces"
            )
        return out_dir
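
A minimal driver sketch for track_faces; the FaceTracker constructor is truncated in the next example, so the no-argument construction below assumes the model paths shown there are its defaults, and the directory names are illustrative.

tracker = FaceTracker()  # constructor args truncated in the source; defaults assumed

out_dir = tracker.track_faces(
    clip_dir="clips/episode_01/frames",  # one image per video frame
    out_base_dir="output",
    detect_only=False,                   # track identities, not just detections
)
print(f"Annotated frames written to {out_dir}")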
Example 11
import os
import sys
from dataclasses import dataclass
from itertools import count, zip_longest
from typing import Dict, Iterator, List, Optional, Set

import cv2 as cv
import dlib
import numpy as np

from gender_clasification import gender_faces
from logo_detector import run_template_detector
from model import Color, Box, Point, Face, GenderedFace
from utils import create_output_dir, load_and_sort_dir, write_boxes

DETECTION_MODEL = "models/face_detection/res10_300x300_ssd_iter_140000.caffemodel"
DETECTION_MODEL_CONFIG = "models/face_detection/deploy.prototxt.txt"
RECOGNITION_MODEL = 'models/face_recognition/dlib_face_recognition_resnet_model_v1.dat'
SHAPE_MODEL = 'models/face_recognition/shape_predictor_5_face_landmarks.dat'
DETECTION_NETWORK_INPUT_SIZE = 300, 300
# Value from https://towardsdatascience.com/face-detection-models-which-to-use-and-why-d263e82c302c
BLOB_MEAN_SUBTRACTION: Color = Color(r=123, b=104, g=117)
"""
Challenge track robust to cuts
"""


@dataclass
class TrackedFace(GenderedFace):
    id_: int = None
    tracker: dlib.correlation_tracker = None
    staleness: int = 0


class FaceTracker:
    def __init__(
        self,
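BLOB_MEAN_SUBTRACTION above feeds OpenCV's DNN preprocessing. The snippet never shows the call itself, so the following is a sketch of how such constants are conventionally used with cv.dnn (standard API, but its use here is inferred; note blobFromImage expects the mean in the image's BGR channel order).

import cv2 as cv
import numpy as np

net = cv.dnn.readNetFromCaffe(DETECTION_MODEL_CONFIG, DETECTION_MODEL)

img = np.zeros((720, 1280, 3), dtype=np.uint8)  # stand-in BGR frame
blob = cv.dnn.blobFromImage(
    cv.resize(img, DETECTION_NETWORK_INPUT_SIZE),
    scalefactor=1.0,
    size=DETECTION_NETWORK_INPUT_SIZE,
    mean=(104, 117, 123),  # the (b, g, r) values of BLOB_MEAN_SUBTRACTION
)
net.setInput(blob)
detections = net.forward()  # shape (1, 1, N, 7): [_, _, confidence, x1, y1, x2, y2]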
Example 12
class Controller:
    def __init__(self, view):
        self.view = view
        self.draw_mode = 'select'
        self.viewable_size = 512
        self.selected_shape = None
        self.draw_color = Color(0.0, 0.0, 0.0, 1.0)
        self.selected_draw_color = Color(0.0, 0.0, 0.0, 1.0)
        self.view.update_color_indicator(*self.draw_color.rgba())
        self.zoom_list = [.25, .5, 1.0, 2.0, 4.0]
        self.zoom_list.sort(reverse=True)
        self.zoom_level = 2
        self.set_v_scrollbar_size(512)
        self.set_h_scrollbar_size(512)
        self.camera = Camera(2048, 2048)
        self.threeD_mode = False
        self.img_mode = False
        self.t = None
        self.waiters = []
        self.current_image = None

        for w in QApplication.topLevelWidgets():
            if isinstance(w, QMainWindow):
                self.main_window = w
                break

    def close_down(self):
        if self.t:
            self.t.quit()

        for w in self.waiters:
            w.quit()
        return True

    def update_progress(self, v):
        self.main_window.update_progress.emit(v)

    def process_finished(self, img):
        if img:
            self.main_window.process_finished.emit(True)
            self.view.draw_image(img)
        else:
            self.main_window.process_finished.emit(False)
            self.view.draw_image()

    def color_button_hit(self, r, g, b, a):
        color = Color(r, g, b, a)
        if self.selected_shape:
            self.view.clear()
            self.selected_draw_color = color
            self.selected_shape.color = color
            self.view.update_color_indicator(*self.selected_shape.color.rgba())
            print('selected color', self.selected_shape.color, id(self.selected_shape))
            self.view.canvas.updateGL()
        else:
            self.draw_color = color
            self.view.update_color_indicator(*self.draw_color.rgba())
            print('draw color', self.draw_color)

    def alpha_slider_changed(self, alpha):
        if self.selected_shape:
            self.selected_draw_color.a = alpha
            self.color_button_hit(*self.selected_draw_color.rgba())
        else:
            self.draw_color.a = alpha
            self.color_button_hit(*self.draw_color.rgba())

    def triangle_button_hit(self):
        self.view.clear_state()
        self.draw_mode = 'triangle'

    def square_button_hit(self):
        self.view.clear_state()
        self.draw_mode = 'square'

    def rectangle_button_hit(self):
        self.view.clear_state()
        self.draw_mode = 'rectangle'

    def circle_button_hit(self):
        self.view.clear_state()
        self.draw_mode = 'circle'

    def ellipse_button_hit(self):
        self.view.clear_state()
        self.draw_mode = 'ellipse'

    def line_button_hit(self):
        self.view.clear_state()
        self.draw_mode = 'line'

    def select_button_hit(self):
        self.view.clear_state()
        self.draw_mode = 'select'

    def zoomIn_button_hit(self):
        if self.zoom_in():
            self.update_zoom()
            zoom_width = (512/self.zoom_amount())/2
            newPos = min(self.h_scrollbar_position()+zoom_width, 2048)
            self.set_h_scrollbar_position(newPos)
            self.set_v_scrollbar_position(newPos)

    def zoomOut_button_hit(self):
        if self.zoom_out():
            self.update_zoom()
            zoom_width = (512/self.zoom_amount())/4
            newPos = max(self.h_scrollbar_position()-zoom_width, 0)
            self.set_h_scrollbar_position(newPos)
            self.set_v_scrollbar_position(newPos)

    def update_zoom(self):
        self.view.viewport.set_scale(self.zoom_amount())
        view_zoom = 512*1/self.zoom_amount()
        self.set_h_maximum(2048-view_zoom)
        self.set_v_maximum(2048-view_zoom)
        self.set_v_scrollbar_size(view_zoom)
        self.set_h_scrollbar_size(view_zoom)

    def set_h_maximum(self, m):
        self.view.parent().ui.horizontalScrollBar.setMaximum(m)

    def set_v_maximum(self, m):
        self.view.parent().ui.verticalScrollBar.setMaximum(m)

    def h_scrollbar_changed(self, value):
        self.view.viewport.set_offset(value, self.view.viewport.y)
        self.view.draw()
        self.view.canvas.updateGL()

    def v_scrollbar_changed(self, value):
        if self.img_mode:
            self.view.viewport.set_offset(self.view.viewport.x,
                                          self.view.parent().ui.horizontalScrollBar.maximum()-value)
        else:
            self.view.viewport.set_offset(self.view.viewport.x, value)
        self.view.draw()
        self.view.canvas.updateGL()

    def toggle_3D_model_display(self):
        self.threeD_mode = not self.threeD_mode
        if self.threeD_mode:
            self.view.canvas.clear_color = (0, 0, 0, 0)
        else:
            self.view.canvas.clear_color = (1, 1, 1, 1)
        self.view.draw()
        self.view.canvas.updateGL()

    def key_pressed(self, event):
        if not self.threeD_mode:
            return

        key = event.key()
        move_amount = .5
        if key == Qt.Key_A:
            self.camera.move_left(move_amount)
        elif key == Qt.Key_D:
            self.camera.move_right(move_amount)
        elif key == Qt.Key_W:
            self.camera.move_forward(move_amount)
        elif key == Qt.Key_S:
            self.camera.move_backward(move_amount)
        elif key == Qt.Key_Q:
            self.camera.turn_left(move_amount+2)
        elif key == Qt.Key_E:
            self.camera.turn_right(move_amount+2)
        elif key == Qt.Key_R:
            self.camera.move_up(move_amount)
        elif key == Qt.Key_F:
            self.camera.move_down(move_amount)
        elif key == Qt.Key_H:
            self.camera.reset()

        self.camera.set_camera()
        self.view.canvas.updateGL()

    def do_edge_detection(self):
        img = QImage(self.view.image.qimage.width(), self.view.image.qimage.height(), QImage.Format_ARGB32)
        img_hor = QImage(self.view.image.qimage.width(), self.view.image.qimage.height(), QImage.Format_ARGB32)
        img_ver = QImage(self.view.image.qimage.width(), self.view.image.qimage.height(), QImage.Format_ARGB32)
        args = (img_hor, img_ver)
        limg = QImage(self.view.image.qimage.width(), self.view.image.qimage.height(), QImage.Format_ARGB32)

        # make luminance
        self.apply_filter('Applying luminance for edge detect',
                          limg, self.luminance_filter)

        self.apply_filter('Doing edge detect', img, self.edge_detect_filter,
                          args=args, read_image=limg)

    def edge_detect_filter(self, read_image, x, y, img_hor, img_ver):
        # Sobel x kernel
        xconstants = [-1, 0, 1,
                      -2, 0, 2,
                      -1, 0, 1]

        xr, xg, xb = self.spacial_filter(read_image, x, y, xconstants)

        # Sobel y kernel
        yconstants = [-1, -2, -1,
                      0,  0,  0,
                      1,  2,  1]

        yr, yg, yb = self.spacial_filter(read_image, x, y, yconstants)
        v = sqrt(xr**2 + yr**2)
        return self.clip(v, v, v)

    def luminance_filter(self, read_image, x, y):
        c = self.pixel_at(read_image, x, y)
        v = (c.red() + c.green() + c.blue()) // 3
        return v, v, v

    def do_sharpen(self):
        constants = (0, -1, 0,
                     -1, 6, -1,
                     0, -1, 0)
        args = (constants,)
        kwargs = {'scale': 2}
        img = QImage(self.view.image.qimage.width(), self.view.image.qimage.height(), QImage.Format_ARGB32)
        self.main_window.track_progress('Doing sharpen', img.height())
        self.apply_filter('Doing sharpen', img, self.spacial_filter, args=args, kwargs=kwargs)

    def do_median_blur(self):
        img = QImage(self.view.image.qimage.width(), self.view.image.qimage.height(), QImage.Format_ARGB32)
        self.apply_filter('Doing median blur', img, self.median_filter)

    def do_uniform_blur(self):
        constants = (1, 1, 1,
                     1, 1, 1,
                     1, 1, 1)
        args = (constants,)
        kwargs = {'scale': 9}
        img = QImage(self.view.image.qimage.width(), self.view.image.qimage.height(), QImage.Format_ARGB32)
        self.apply_filter('Doing uniform blur', img, self.spacial_filter, args, kwargs)

    def do_change_contrast(self, contrast_amount_num):
        print('changing contrast by', contrast_amount_num)
        img = QImage(self.view.image.qimage.width(), self.view.image.qimage.height(), QImage.Format_ARGB32)
        args = (float(contrast_amount_num),)
        self.apply_filter('Doing contrast change', img, self.contrast_operation, args, {})

    def do_change_brightness(self, brightness_amount_num):
        print('changing brightness by', brightness_amount_num)
        constants = [0, 0, 0,
                     0, 1, 0,
                     0, 0, 0]
        img = QImage(self.view.image.qimage.width(), self.view.image.qimage.height(), QImage.Format_ARGB32)
        args = (constants,)
        kwargs = {'offset': brightness_amount_num}
        self.apply_filter('Doing change brightness', img, self.spacial_filter, args, kwargs)

    def do_load_image(self, open_image):
        self.view.image = Image(None, open_image, Point(1080, 1080), open_image.width(), open_image.height())
        if self.img_mode:
            self.view.draw_image(open_image)

    def apply_filter(self, label, write_img, func, args=(), kwargs={}, read_image=None):
        read_image = read_image or self.view.image.qimage
        # print write_img, self.view.image.qimage, write_img == self.view.image.qimage
        t = Filter_Thread(self, label, range(write_img.height()),
                          read_image, write_img, func, args, kwargs)
        startnew = False
        if self.t:
            if self.t.isRunning():
                w = Waiter(self, t)
                self.main_window.process_finished.connect(w.wake_up)
                self.waiters.append(w)
            else:
                startnew = True
        else:
            startnew = True

        if startnew:
            self.t = t
            self.t.start()
        return write_img

    def spacial_filter(self, read_image, x, y, constants, scale=1, offset=0):
        rtot = 0
        gtot = 0
        btot = 0
        for i, e in enumerate(self.neighbors(read_image, x, y)):
            if e:
                rtot += e.red()*constants[i]
                gtot += e.green()*constants[i]
                btot += e.blue()*constants[i]

        rtot = rtot // scale + offset
        gtot = gtot // scale + offset
        btot = btot // scale + offset
        return self.clip(rtot, gtot, btot)

    def median_filter(self, read_image, x, y):
        r = []
        g = []
        b = []
        for n in self.neighbors(read_image, x, y):
            if n:
                r.append(n.red())
                g.append(n.green())
                b.append(n.blue())
        r.sort()
        g.sort()
        b.sort()
        return r[len(r) // 2], g[len(g) // 2], b[len(b) // 2]

    def clip(self, rtot, gtot, btot):
        rtot = max(0,   rtot)
        rtot = min(255, rtot)
        gtot = max(0,   gtot)
        gtot = min(255, gtot)
        btot = max(0,   btot)
        btot = min(255, btot)
        return rtot, gtot, btot

    def contrast_operation(self, read_image, x, y, c):
        p = self.pixel_at(read_image, x, y)
        r = ((c+100.0)/100.0)**4*(p.red()-128)+128
        g = ((c+100.0)/100.0)**4*(p.green()-128)+128
        b = ((c+100.0)/100.0)**4*(p.blue()-128)+128
        return self.clip(r, g, b)

    def neighbors(self, read_image, x, y):
        nw = self.pixel_at(read_image, x-1, y+1)
        nn = self.pixel_at(read_image, x,   y+1)
        ne = self.pixel_at(read_image, x+1, y+1)
        ww = self.pixel_at(read_image, x-1, y)
        cc = self.pixel_at(read_image, x,   y)
        ee = self.pixel_at(read_image, x+1, y)
        sw = self.pixel_at(read_image, x-1, y-1)
        ss = self.pixel_at(read_image, x,   y-1)
        se = self.pixel_at(read_image, x+1, y-1)
        return nw, nn, ne, ww, cc, ee, sw, ss, se

    def pixel_at(self, read_image, x, y):
        # if self.view.image.qimage.valid(x, y):
        if (x >= 0) and (x < self.view.image.fullw) and (y >= 0) and (y < self.view.image.fullh):
            return QColor(read_image.pixel(x, y))
        else:
            return None

    def toggle_background_display(self):
        self.img_mode = not self.img_mode
        if self.img_mode:
            self.view.canvas.hide()
            self.view.draw_image()
        else:
            self.view.canvas.show()
            self.view.canvas.updateGL()

    # scroll bar interaction

    def v_scrollbar_size(self):
        return self.view.parent().ui.verticalScrollBar.pageStep()

    def h_scrollbar_size(self):
        return self.view.parent().ui.horizontalScrollBar.pageStep()

    def v_scrollbar_position(self):
        return self.view.parent().ui.verticalScrollBar.sliderPosition()

    def h_scrollbar_position(self):
        return self.view.parent().ui.horizontalScrollBar.sliderPosition()

    def set_v_scrollbar_size(self, size):
        self.view.parent().ui.verticalScrollBar.setPageStep(size)

    def set_h_scrollbar_size(self, size):
        self.view.parent().ui.horizontalScrollBar.setPageStep(size)

    def set_v_scrollbar_position(self, position):
        self.view.parent().ui.verticalScrollBar.setSliderPosition(position)
        self.v_scrollbar_changed(position)

    def set_h_scrollbar_position(self, position):
        self.view.parent().ui.horizontalScrollBar.setSliderPosition(position)
        self.h_scrollbar_changed(position)

    def zoom_amount(self):
        return self.zoom_list[self.zoom_level]

    def zoom_in(self):
        if self.zoom_level > 0:
            self.zoom_level -= 1
            return True
        else:
            return False

    def zoom_out(self):
        if self.zoom_level < len(self.zoom_list)-1:
            self.zoom_level += 1
            return True
        else:
            return False

    def house_lines(self):
        ret = []
        for i in range(-64, 64, 8):
            lines = house_lines(offset=(i, 0, 0))
            ret_lines = []
            for l in lines:
                ret_lines.append([self.camera.to_camera(*l[0]), self.camera.to_camera(*l[1])])
            ret.append(ret_lines)
        return ret
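
For reference, spacial_filter plus edge_detect_filter amount to a 3x3 convolution followed by a gradient magnitude. A minimal NumPy restatement of that arithmetic on a single-channel array (names here are illustrative; the Controller instead works per-pixel through Qt):

import numpy as np

# Same kernels as xconstants/yconstants above.
SOBEL_X = np.array([[-1, 0, 1],
                    [-2, 0, 2],
                    [-1, 0, 1]], dtype=np.float64)
SOBEL_Y = SOBEL_X.T

def gradient_magnitude(gray: np.ndarray) -> np.ndarray:
    """Sobel gradient magnitude over a 2-D luminance array, clipped to [0, 255]."""
    h, w = gray.shape
    out = np.zeros((h, w), dtype=np.float64)
    for y in range(1, h - 1):
        for x in range(1, w - 1):
            patch = gray[y - 1:y + 2, x - 1:x + 2]
            gx = float((patch * SOBEL_X).sum())
            gy = float((patch * SOBEL_Y).sum())
            out[y, x] = np.hypot(gx, gy)  # sqrt(gx**2 + gy**2), as in edge_detect_filter
    return np.clip(out, 0, 255).astype(np.uint8)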