Code Example #1
File: yolo3.py Project: rasql/opencv-tutorial
def trackbar(x):
    global img
    conf = x / 100
    img = img0.copy()
    post_process(img, outputs, conf)
    cv.displayOverlay('window', f'confidence level={conf}')
    cv.imshow('window', img)
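This callback assumes the window and its trackbar already exist (the same wiring appears, commented out, in Example #27 below). A minimal setup sketch, assuming `img0`, `outputs` and `post_process` are defined as in Example #2:

cv.namedWindow('window')
cv.createTrackbar('confidence', 'window', 50, 100, trackbar)
trackbar(50)          # draw the initial state at confidence 0.50
cv.waitKey(0)
cv.destroyAllWindows()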
Code Example #2
def load_image(path):
    global img, img0, outputs, ln

    img0 = cv.imread(path)
    img = img0.copy()

    blob = cv.dnn.blobFromImage(img,
                                1 / 255.0, (416, 416),
                                swapRB=True,
                                crop=False)

    net.setInput(blob)
    t0 = time.time()
    outputs = net.forward(ln)
    t = time.time() - t0

    # combine the 3 output groups into 1 (10647, 85)
    # large objects (507, 85)
    # medium objects (2028, 85)
    # small objects (8112, 85)
    outputs = np.vstack(outputs)

    post_process(img, outputs, 0.5)
    cv.imshow('window', img)
    cv.displayOverlay('window', f'forward propagation time={t:.3}')
    cv.waitKey(0)
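`load_image` relies on two globals created elsewhere: `net` (the loaded network) and `ln` (the names of its output layers). A hedged initialization sketch, assuming YOLOv3 config and weight files are available locally (file names and image path are placeholders):

import time
import cv2 as cv
import numpy as np

# File names below are assumptions; use your own YOLOv3 files.
net = cv.dnn.readNetFromDarknet('yolov3.cfg', 'yolov3.weights')
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)

# Names of the unconnected (output) layers passed to net.forward(ln).
ln = net.getUnconnectedOutLayersNames()

load_image('images/horse.jpg')   # hypothetical image path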
Code Example #3
File: record_games.py Project: ironbar/orangutan
def _update_window(info, level_idx, n_steps):
    frame, speed, previous_action, reward = _unpack_info(info)

    cv2.imshow('img', _add_hud(frame[:, :, [2, 1, 0]]))
    msg = 'Games played: %i     n_steps: %i   Reward: %.2f   Speed: %s Text observations: %s' % (
        level_idx, n_steps, reward, speed, info.text_observations[0])
    cv2.displayOverlay('img', msg)
Code Example #4
def trackbar(x):
    ret, img1 = cv.threshold(img, x, 255, cv.THRESH_BINARY)
    ret, img2 = cv.threshold(img, x, 255, cv.THRESH_BINARY_INV)
    cv.imshow('window', np.hstack([img, img1, img2]))

    text = f'threshold={x}, mode=BINARY, BINARY_INV'
    cv.displayOverlay('window', text, 1000)
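Like the other threshold examples, this expects `img` to be a single-channel (grayscale) image and the function to be registered as a trackbar callback. A minimal setup sketch (the file name is a placeholder):

import cv2 as cv
import numpy as np

img = cv.imread('image.png', cv.IMREAD_GRAYSCALE)   # assumed grayscale input
cv.namedWindow('window')
cv.createTrackbar('threshold', 'window', 100, 255, trackbar)
trackbar(100)
cv.waitKey(0)
cv.destroyAllWindows()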
Code Example #5
File: line4.py Project: rasql/opencv-tutorial
def mouse(event, x, y, flags, param):
    if flags == 1:
        p1 = x, y
        cv.displayOverlay('window', f'p1=({x}, {y})')
        img[:] = 0
        cv.line(img, p0, p1, GREEN, 10)
        cv.imshow('window', img)
Code Example #6
File: threshold2.py Project: rasql/opencv-tutorial
def trackbar(x):
    """Trackbar callback function."""
    text = f'threshold={x}, mode=TOZERO, TOZERO_INV'
    cv.displayOverlay('window', text, 1000)

    ret, img1 = cv.threshold(img, x, 255, cv.THRESH_TOZERO)
    ret, img2 = cv.threshold(img, x, 255, cv.THRESH_TOZERO_INV)
    cv.imshow('window', np.hstack([img, img1, img2]))
Code Example #7
File: viewer_tool.py Project: cyoukaikai/ahc_ete
    def init_WithQT(self):
        try:
            cv2.namedWindow('Test')
            cv2.displayOverlay('Test', 'Test QT', 500)
            self.WITH_QT = True
        except cv2.error:
            print('-> Please ignore this error message\n')
        cv2.destroyAllWindows()
Code Example #8
def change_class_index(x):
    global class_index
    class_index = x
    cv2.displayOverlay(
        WINDOW_NAME, "Selected class "
        "" + str(class_index) + "/"
        "" + str(last_class_index) + ""
        "\n " + class_list[class_index], 3000)
Code Example #9
File: trackbar_rgb.py Project: rasql/opencv-tutorial
def rgb(x):
    """Trackbar callback function."""
    r = cv.getTrackbarPos('red', 'window')
    g = cv.getTrackbarPos('green', 'window')
    b = cv.getTrackbarPos('blue', 'window')
    img[:] = [b, g, r]
    cv.displayOverlay('window', f'Red={r}, Green={g}, Blue={b}')
    cv.imshow('window', img)
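The `rgb` callback reads three trackbars named 'red', 'green' and 'blue' and fills `img` with the selected colour. A minimal setup sketch (the canvas size is an assumption):

import cv2 as cv
import numpy as np

img = np.zeros((200, 300, 3), np.uint8)   # blank BGR canvas
cv.namedWindow('window')
for name in ('red', 'green', 'blue'):
    cv.createTrackbar(name, 'window', 0, 255, rgb)
rgb(0)
cv.waitKey(0)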
Code Example #10
File: record_games.py Project: ironbar/orangutan
def record_games(args):
    env = _create_environment(args.config_filepath)
    output_folder = _prepare_output_folder(args.config_filepath,
                                           args.output_path)

    info = env.reset()['Learner']
    level_idx = _get_initial_level_idx(output_folder)
    n_steps = 0
    cv2.namedWindow('img', cv2.WINDOW_NORMAL)
    cv2.namedWindow('map', cv2.WINDOW_NORMAL)
    level_storage = LevelStorage()

    while 1:
        _update_window(info, level_idx, n_steps)
        _show_map(info)
        action = _get_action_from_keyboard()
        if isinstance(action, str):
            if action == 'break':
                break
            elif action == 'reset':
                info = env.reset()['Learner']
                level_storage = LevelStorage()
                n_steps = 0
                _transition_between_levels()
                continue
            elif action == 'save':
                info = env.reset()['Learner']
                level_storage.save(
                    os.path.join(output_folder, '%05d.npz' % level_idx))
                level_storage = LevelStorage()
                level_idx += 1
                n_steps = 0
                _transition_between_levels()
                continue

        info_next = env.step(vector_action=action)['Learner']
        level_storage.add(info, info_next, action)
        info = info_next

        n_steps += 1
        is_level_ended = info.max_reached[0] or info.local_done[0]
        if is_level_ended:
            reward = _unpack_info(info)[-1]
            if reward > 0:
                level_storage.save(
                    os.path.join(output_folder, '%05d.npz' % level_idx))
                level_idx += 1
            else:
                msg = 'Not saving level because of negative reward'
                print(msg)
                cv2.displayOverlay('img', msg)
            level_storage = LevelStorage()
            n_steps = 0
            _transition_between_levels()

    cv2.destroyAllWindows()
    cv2.waitKey(1)
    env.close()
Code Example #11
def trackbar(x):
    n = 2 * x + 1
    kernel = np.ones((n, n), np.uint8)

    img1 = cv.morphologyEx(img, cv.MORPH_GRADIENT, kernel)
    cv.imshow('window', np.hstack([img, img1]))

    text = f'morphological gradient, kernel={n}x{n}'
    cv.displayOverlay('window', text)
Code Example #12
def change_img_index(x):
    global img_index, img
    img_index = x
    img_path = image_list[img_index]
    img = cv2.imread(img_path)
    cv2.displayOverlay(
        WINDOW_NAME, "Showing image "
        "" + str(img_index) + "/"
        "" + str(last_img_index), 1000)
Code Example #13
File: morph1.py Project: rasql/opencv-tutorial
def trackbar(x):
    n = 2 * x + 1
    kernel = np.ones((n, n), np.uint8)

    img1 = cv.erode(img, kernel, iterations=1)
    cv.imshow('window', np.hstack([img, img1]))

    text = f'erode, kernel={n}x{n}'
    cv.displayOverlay('window', text)
Code Example #14
File: morph4.py Project: rasql/opencv-tutorial
def trackbar(x):
    n = 2 * x + 1
    kernel = np.ones((n, n), np.uint8)

    img1 = cv.morphologyEx(img, cv.MORPH_CLOSE, kernel)
    cv.imshow('window', np.hstack([img, img1]))

    text = f'close, kernel={n}x{n}'
    cv.displayOverlay('window', text)
Code Example #15
def draw(x):
    d = cv.getTrackbarPos('thickness', 'window')
    d = -1 if d == 0 else d
    i = cv.getTrackbarPos('color', 'window')
    color = colors[i]
    img[:] = img0
    cv.polylines(img, np.array([pts]), True, color, d)
    cv.imshow('window', img)
    text = f'color={color}, thickness={d}'
    cv.displayOverlay('window', text)
Code Example #16
File: run.py Project: wangqiqi/OpenLabeling
def change_class_index(x):
    global class_index
    class_index = x
    if WITH_QT:
        cv2.displayOverlay(WINDOW_NAME, "Selected class "
                                "" + str(class_index) + "/"
                                "" + str(last_class_index) + ""
                                "\n " + class_list[class_index],3000)
    else:
        print("Selected class :" + class_list[class_index])
Code Example #17
File: utils.py Project: taicaile/OpenLabeling
def with_qt_test():
    try:
        import cv2
        WINDOW_NAME = "__WITH_QT_TEST__"
        cv2.namedWindow(WINDOW_NAME)
        cv2.displayOverlay(WINDOW_NAME, 'Test QT', 500)
        cv2.destroyWindow(WINDOW_NAME)
        return True
    except cv2.error:
        return False
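A typical use of `with_qt_test` is to gate every `displayOverlay` call and fall back to `print` when OpenCV was built without Qt, the same pattern shown in Examples #16 and #25. A short usage sketch (the helper name is an assumption):

WITH_QT = with_qt_test()

def show_message(window_name, msg, delay_ms=1000):
    # Show msg as an overlay if Qt is available, otherwise print it.
    if WITH_QT:
        cv2.displayOverlay(window_name, msg, delay_ms)
    else:
        print(msg)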
Code Example #18
File: convolution1.py Project: rasql/opencv-tutorial
def trackbar(x):
    """Trackbar callback function."""
    d = 2 * x + 1
    kernel = np.ones((d, d), 'float32') / (d**2)

    img1 = cv.filter2D(img, -1, kernel)
    cv.imshow('window', np.hstack([img, img1]))

    text = f'kernel=({d}x{d})'
    cv.displayOverlay('window', text)
Code Example #19
File: run.py Project: blackbird71SR/OpenLabeling
def change_class_index(x):
    global class_index
    class_index = x
    if WITH_QT:
        cv2.displayOverlay(WINDOW_NAME, "Selected class "
                                "" + str(class_index) + "/"
                                "" + str(last_class_index) + ""
                                "\n " + class_list[class_index],3000)
    else:
        print("Selected class :" + class_list[class_index])
Code Example #20
def trackbar(x):
    """Trackbar callback function."""
    text = f'threshold={x}'
    cv.displayOverlay('window', text, 1000)

    ret, img1 = cv.threshold(img, x, 255, cv.THRESH_BINARY)
    img2 = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_MEAN_C,
                                cv.THRESH_BINARY, 11, 2)
    # img2 = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
    cv.imshow('window', np.hstack([img, img1, img2]))
Code Example #21
File: cvlib_old.py Project: rasql/opencv-tutorial
    def show_object(self):
        """Cycle through the objects and display in Overlay."""
        n = len(self.objects)
        self.obj_id %= n
        self.objects[self.obj_id].selected = False
        self.obj_id += 1
        self.obj_id %= n
        self.objects[self.obj_id].selected = True
        text = str(self.objects[self.obj_id])
        cv.displayOverlay(self.win, text, 1000)
        cv.imshow(self.win, self.img)
Code Example #22
def draw(x):
    global p0, p1
    d = cv.getTrackbarPos('thickness', 'window')
    d = -1 if d == 0 else d
    i = cv.getTrackbarPos('color', 'window')
    color = colors[i]
    img[:] = img0
    cv.rectangle(img, p0, p1, color, d)
    cv.imshow('window', img)
    text = f'color={color}, thickness={d}'
    cv.displayOverlay('window', text)
Code Example #23
def trackbar2(x):
    confidence = x / 100
    r = r0.copy()
    for output in np.vstack(outputs):
        if output[4] > confidence:
            x, y, w, h = output[:4]
            p0 = int((x - w / 2) * 416), int((y - h / 2) * 416)
            p1 = int((x + w / 2) * 416), int((y + h / 2) * 416)
            cv.rectangle(r, p0, p1, 1, 1)
    cv.imshow('blob', r)
    text = f'Bbox confidence={confidence}'
    cv.displayOverlay('blob', text)
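`r0` and `outputs` are assumed to come from the blob and forward pass of Example #2; `r0` would then be a 416x416 slice of the input blob (for instance `blob[0, 0]`), which is why the box coordinates are rescaled by 416. A hedged setup sketch:

r0 = blob[0, 0]          # assumption: one channel of the 416x416 input blob
r = r0.copy()
cv.namedWindow('blob')
cv.createTrackbar('confidence', 'blob', 50, 100, trackbar2)
trackbar2(50)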
Code Example #24
File: threshold0.py Project: rasql/opencv-tutorial
def trackbar(x):
    """Trackbar callback function."""
    text = f'threshold={x}'
    cv.displayOverlay('window', text, 1000)

    ret, img1 = cv.threshold(img, x, 255, cv.THRESH_BINARY)
    ret, img2 = cv.threshold(img, x, 255, cv.THRESH_BINARY_INV)
    ret, img3 = cv.threshold(img, x, 255, cv.THRESH_TRUNC)
    ret, img4 = cv.threshold(img, x, 255, cv.THRESH_TOZERO)
    ret, img5 = cv.threshold(img, x, 255, cv.THRESH_TOZERO_INV)

    cv.imshow('window', np.vstack([img, img1, img2, img3, img4, img5]))
Code Example #25
File: run.py Project: wangqiqi/OpenLabeling
def change_img_index(x):
    global img_index, img
    img_index = x
    img_path = image_list[img_index]
    img = cv2.imread(img_path)
    if WITH_QT:
        cv2.displayOverlay(WINDOW_NAME, "Showing image "
                                    "" + str(img_index) + "/"
                                    "" + str(last_img_index), 1000)
    else:
        print("Showing image "
                "" + str(img_index) + "/"
                "" + str(last_img_index) + " path:" + img_path)
Code Example #26
File: run.py Project: blackbird71SR/OpenLabeling
def change_img_index(x):
    global img_index, img
    img_index = x
    img_path = image_list[img_index]
    img = cv2.imread(img_path)
    if WITH_QT:
        cv2.displayOverlay(WINDOW_NAME, "Showing image "
                                    "" + str(img_index) + "/"
                                    "" + str(last_img_index), 1000)
    else:
        print("Showing image "
                "" + str(img_index) + "/"
                "" + str(last_img_index) + " path:" + img_path)
Code Example #27
def trackbar(x):
    global img
    conf = x / 100
    img = img0.copy()
    post_process(img, outputs, conf)
    cv.displayOverlay('window', f'confidence level={conf}')
    cv.imshow('window', img)


#cv.namedWindow('window')
#cv.createTrackbar('confidence', 'window', 50, 100, trackbar)

#cv.destroyAllWindows()
Code Example #28
def trackbar(x):
    global minSize, minNeighbors, scaleFactor
    i = cv.getTrackbarPos('size', 'window')
    d = (24, 30, 60, 120)[i]
    minSize = (d, d)

    n = cv.getTrackbarPos('neighbors', 'window') + 1
    minNeighbors = n

    i = cv.getTrackbarPos('scale', 'window')
    s = (1.05, 1.1, 1.4, 2)[i]
    scaleFactor = s

    text = f'minNeighbors={n}, minSize={d}, scaleFactor={s}'
    cv.displayOverlay('window', text)
    detect()
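The globals set here feed a `detect()` function that is not shown. A hedged sketch of what it might look like, assuming a Haar cascade `face_cascade` and a grayscale copy `gray` of the current image (both names are assumptions):

def detect():
    # Hypothetical detector driven by the trackbar parameters above.
    faces = face_cascade.detectMultiScale(gray,
                                          scaleFactor=scaleFactor,
                                          minNeighbors=minNeighbors,
                                          minSize=minSize)
    img1 = img.copy()
    for (x, y, w, h) in faces:
        cv.rectangle(img1, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv.imshow('window', img1)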
Code Example #29
File: line5.py Project: rasql/opencv-tutorial
def mouse(event, x, y, flags, param):
    global p0, p1

    if event == cv.EVENT_LBUTTONDOWN:
        p0 = x, y
        p1 = x, y

    elif event == cv.EVENT_MOUSEMOVE and flags == 1:
        p1 = x, y

    elif event == cv.EVENT_LBUTTONUP:
        p1 = x, y

    img[:] = 0
    cv.line(img, p0, p1, RED, 10)
    cv.imshow('window', img)
    cv.displayOverlay('window', f'p0={p0}, p1={p1}')
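Mouse callbacks like this one are registered with `cv.setMouseCallback`; a minimal setup sketch (the image size and the `RED` constant are assumptions):

import cv2 as cv
import numpy as np

RED = (0, 0, 255)                        # BGR
img = np.zeros((480, 640, 3), np.uint8)
p0 = p1 = (0, 0)

cv.namedWindow('window')
cv.setMouseCallback('window', mouse)
cv.imshow('window', img)
cv.waitKey(0)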
Code Example #30
File: scope.py Project: wmarchewka/Scope_New
    def contours_calibration_check(self):
        # we now want to find the distance between the center of the first and next contour
        # make sure the contours are sorted from least to most across the X axis
        # keep a running total of found spaces and a running average
        cal_lines = 0
        self.cal_line_distance_total = 0
        self.cal_line_distance_avg = 0
        # TODO: make this without loop
        last_center_horz = 0
        if len(self.contours_found) == 11:
            for c in self.contours_found:
                ext_left = tuple(c[c[:, :, 0].argmin()][0])
                ext_right = tuple(c[c[:, :, 0].argmax()][0])
                contour_width = tuple(np.subtract(ext_right, ext_left))
                center_horz = int((ext_left[0] + (contour_width[0] / 2)))
                diff = abs(last_center_horz - center_horz)
                cal_lines += 1
                if 2 < diff < 200:
                    if cal_lines > 1:
                        self.cal_line_distance_total = self.cal_line_distance_total + diff
                last_center_horz = center_horz
            if self.cal_line_distance_total != 0:
                self.cal_line_distance_total = self.cal_line_distance_total / (
                    cal_lines - 1)
                self.calibration_ok = True
                self.px_mm_conversion = 1 / self.cal_line_distance_total
        else:
            self.calibration_ok = False
            cv2.displayOverlay(self.mainwindow_name,
                               "Please complete calibration", 1000)
        if self.cal_mode:
            if self.grabbed_cal_value is False:
                self.hold_px_mm_conversion = self.px_mm_conversion
                self.hold_cal_line_distance_total = self.cal_line_distance_total
                self.grabbed_cal_value = True
            else:
                self.px_mm_conversion = self.hold_px_mm_conversion
                self.cal_line_distance_total = self.hold_cal_line_distance_total
        else:
            pass

        self.lines_draw()
Code Example #31
File: cvlib_old.py Project: rasql/opencv-tutorial
    def mouse(self, event, x, y, flags, param):
        """Mouse callback."""
        text = '{} in ({}, {}) flags={}, param={}'.format(
            event, x, y, flags, param)
        cv.displayStatusBar(self.win, text, 1000)

        if event == cv.EVENT_LBUTTONDOWN:
            # draw_selection objects under mouse click
            self.select_obj_at(event, x, y, flags, param)

            App.win = self
            self.p0 = x, y
            self.p1 = x, y
            self.text = 'p0 = ({}, {})'.format(x, y)
            cv.displayStatusBar(self.win, self.text, 2000)
            cv.displayOverlay(self.win, self.text, 1000)

            # draw rectangle if ALT key is pressed
            if flags == cv.EVENT_FLAG_ALTKEY:
                rect = Rectangle(self.img, (x, y), (x, y), RED, 3)
                self.objects.append(Rectangle(rect))

            if flags == cv.EVENT_FLAG_SHIFTKEY:
                self.pos0 = x, y

        elif event == cv.EVENT_MOUSEMOVE:
            if flags == cv.EVENT_FLAG_ALTKEY:
                print('ALT')
                self.objects[-1].set_p1(x, y)

            elif flags == cv.EVENT_FLAG_CTRLKEY:
                print('CTRL KEY')

            elif flags == cv.EVENT_FLAG_SHIFTKEY:
                self.obj.pos = x, y

        elif event == cv.EVENT_LBUTTONUP:
            cv.displayOverlay(self.win, 'Mouse released', 1000)

        self.draw()
Code Example #32
def next_img_index():
    global img_index, sample, img_objects
    try:
        img_index = next(img_indexes_iter)
    except StopIteration:
        print('Samples ended')

    sample = folder_manager_load.load_sample_using_absolute_count(
        img_index, use_thread=False)
    sample.set_pretty_semantic_image(sample.get_semantic_image().copy())
    sample.create_pretty_semantic_image(color=Color(red=0, blue=0, green=255))

    img_objects = [
        (0, *box)
        for box in sample.get_bboxes_from_semantic_image(threshold=0.03)
    ]

    # Apply criterion to filter bounding boxes:
    # A bounding box that indicates a door that is too close is discarded. The threshold is 0.5 m.
    threshold = 0.3
    new_img_object = []
    for label, x1, y1, width, height in img_objects:
        depth_data = sample.get_depth_data()
        mask = np.zeros(list(depth_data.shape), dtype=np.uint8)
        points = np.array([[[x1, y1], [x1 + width, y1],
                            [x1 + width, y1 + height], [x1, y1 + height]]],
                          dtype=np.int32)
        cv2.fillPoly(mask, points, 255)
        pixels = depth_data[mask == 255]
        mean = np.mean(pixels)

        if mean >= threshold:
            new_img_object.append((label, x1, y1, width, height))
        else:
            print('DISCARDED')

    img_objects = new_img_object

    cv2.displayOverlay(WINDOW_NAME, "Showing image " + str(img_index), 1000)
Code Example #33
File: run.py Project: wangqiqi/OpenLabeling
import argparse
import textwrap
import glob
import os

import numpy as np
import cv2


WITH_QT = True
try:
    cv2.namedWindow("Test")
    cv2.displayOverlay("Test", "Test QT", 1000)
except cv2.error:
    WITH_QT = False
cv2.destroyAllWindows()

parser = argparse.ArgumentParser(description='YOLO v2 Bounding Box Tool')
parser.add_argument('--format', default='yolo', type=str, choices=['yolo', 'voc'], help="Bounding box format")
parser.add_argument('--sort', action='store_true', help="If true, shows images in order.")
parser.add_argument('--cross-thickness', default='1', type=int, help="Cross thickness")
parser.add_argument('--bbox-thickness', default='1', type=int, help="Bounding box thickness")
args = parser.parse_args()

class_index = 0
img_index = 0
img = None
img_objects = []
bb_dir = "bbox_txt/"

# selected bounding box