Example #1
 def __init__(self, d):
     self.d = d
     self.camera = Camera(self.d)
     self.switchcamera = []
     self.camera_report_details = []
     self.path = image_comparison.get_path()
     self.facility = adb.read_adb("adb shell getprop ro.product.model")
Example #2
 def __init__(self):
     self.gui = GUI()
     self.camera = Camera()
     self.camera.startVideo()
     self.detectCanny = False
     self.detectFace = False
     self.detectEye = False
Example #3
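# Webcam emotion-detection loop: each frame runs through face and emotion nets;
# 'p' plays a track matching the first detected emotion and overlays its title, 'q' quits.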
def main():

    face_net_arch = config["NNParameters"]["face"]["architecture"]
    face_net_model = config["NNParameters"]["face"]["model"]
    face_net_confidence = config["NNParameters"]["face"]["confidence"]

    emotion_net_arch = config["NNParameters"]["emotion"]["architecture"]
    emotion_net_model = config["NNParameters"]["emotion"]["model"]
    emotion_net_confidence = config["NNParameters"]["emotion"]["confidence"]
    emotion_classes = config["NNParameters"]["emotion"]["classes"]

    face_net = cv2.dnn.readNetFromCaffe(face_net_arch, face_net_model)
    emotion_net = cv2.dnn.readNetFromCaffe(emotion_net_arch, emotion_net_model)

    camera = Camera(camera_idx=CAM,
                    fps=FPS,
                    net=face_net,
                    detection_classes=emotion_classes,
                    confidence=float(face_net_confidence),
                    detection_period_in_seconds=0.3)

    camera.start()

    track_info = ''

    while True:
        rg_frame = camera.emotion_detection(emotion_net=emotion_net)

        if len(camera.detected_objects) > 0:
            emotion = camera.detected_objects[0].label

        # if len(emotion_detection) > 0:
        #     emotion_confidence = max(emotion_detection[0])
        #     idx = emotion_detection[0].tolist().index(emotion_confidence)
        #     emotion = camera.emotion_classes[idx]

            if cv2.waitKey(1) & 0xFF == ord('p'):
                track_author, track_name = play_music(emotion)
                track_info = 'Track: {} - {}'.format(track_author, track_name)

        # img_pil = Image.fromarray(rg_frame)
        # font = ImageFont.truetype("Arial Unicode.ttf", size=20)
        # draw = ImageDraw.Draw(img_pil)
        # draw.text((5, 10), track_info, font=font, fill=(0, 0, 255, 0))
        # rg_frame = np.array(img_pil)

        cv2.putText(img=rg_frame,
                    text=track_info,
                    org=(5, 20),
                    fontFace=cv2.FONT_HERSHEY_DUPLEX,
                    fontScale=0.6,
                    color=[0, 0, 255],
                    thickness=1)

        cv2.imshow('frame', rg_frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            camera.stop()
            break
    cv2.destroyAllWindows()
Example #4
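# Live SuperPoint feature demo: 's' freezes the current features as a matching
# reference, 'b' toggles blur, 't' is wired for model tracing (currently commented
# out), and 'q' quits.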
def run_inference(opt, settings):
    camera = Camera(opt.camid)
    print('Loading pre-trained network...')
    net = InferenceWrapper(weights_path=opt.weights_path, settings=settings)
    print('Successfully loaded pre-trained network.')
    win_name = 'SuperPoint features'
    cv2.namedWindow(win_name)
    prev_frame_time = 0

    stop_img = None
    stop_features = None
    do_blur = False

    while True:
        frame, ret = camera.get_frame()
        if ret:
            if do_blur:
                frame = cv2.blur(frame, (3, 3))
            img_size = (opt.W, opt.H)
            new_img = frame * 255
            new_img = np.ascontiguousarray(new_img, dtype=np.uint8)

            query_img = make_query_image(frame, img_size)

            features = get_features(query_img, net)
            if stop_features is None:
                draw_features(features, new_img, img_size)
                stop_img = new_img
            else:
                correspondences, indices = get_best_correspondences(stop_features, features)
                draw_features(correspondences, new_img, img_size, indices)

            # combine images
            res_img = np.hstack((stop_img, new_img))

            # draw FPS
            new_frame_time = time.perf_counter()
            time_diff = new_frame_time - prev_frame_time
            prev_frame_time = new_frame_time

            draw_fps(time_diff, res_img)

            cv2.imshow(win_name, res_img)
            key = cv2.waitKey(delay=1)
            if key == ord('q'):
                print('Quitting, \'q\' pressed.')
                break
            if key == ord('s'):
                stop_features = features
                stop_img = (frame * 255.).astype('uint8')
                draw_features(stop_features, stop_img, img_size)
            if key == ord('b'):
                do_blur = not do_blur
            if key == ord('t'):
                # net.trace(frame, opt.out_file_name)
                print('Model saved, \'t\' pressed.')
        else:
            break
    camera.close()
    cv2.destroyAllWindows()
Example #5
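# Full calibration routine: white balance, lens calibration, calibration-target
# capture, tilt alignment, per-hue baselines, then parameters saved to cameraData.json.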
def main():

    # Calibration parameters
    targetDimensions = (6, 9)  # Calibration target dimensions in squares
    exposure = 70000  # Exposure (gain). Should be high to increase depth of field
    numTargets = 8  # Number of calibration targets to collect

    worldCorners = [
        (38, 210),    # Top Left
        (1140, 25),   # Top Right
        (1140, 997),  # Bottom Right
        (38, 809),    # Bottom Left
    ]

    # Used in the mouse-click callback function
    global hueLocs, calibrating

    # Instantiate camera
    cam = Camera(targetDimensions, exposure)

    # Store white balance coefficients
    cam.white_balance()

    # Set focus and exposure
    cam.calibrate_lens()

    # Collect points for calibration target
    cam.capture_calibration_targets(numTargets)

    # Generate camera model
    cal.calibrate_camera(cam, targetDimensions)

    # Show blockout regions for aligning workspace
    cal.align_tilt(cam, worldCorners)

    # Generate values for the tops of all objects to be incorporated into work space
    cam.calibrationParams['red'], hueLocs = generate_baselines(
        cam, hueLocs, "SELECT RED TOPS")
    calibrating = True
    cam.calibrationParams['green'], hueLocs = generate_baselines(
        cam, hueLocs, "SELECT GREEN TOPS")
    calibrating = True
    cam.calibrationParams['blue'], hueLocs = generate_baselines(
        cam, hueLocs, "SELECT BLUE TOPS")
    calibrating = True
    cam.calibrationParams['yellow'], hueLocs = generate_baselines(
        cam, hueLocs, "SELECT YELLOW TOPS")
    calibrating = True
    cam.calibrationParams['purple'], hueLocs = generate_baselines(
        cam, hueLocs, "SELECT CARD BACKS")

    #cam.stream(rectify=True)

    # Save camera parameters
    jsonFile = os.path.join(os.path.dirname(__file__), 'cameraData.json')
    print "CALIBRATION COMPLETE, SAVING CAMERA PARAMETERS to : ", jsonFile
    with open(jsonFile, 'w') as fp:
        json.dump(cam.calibrationParams, fp, cls=NumpyEncoder)

    exit(0)
Example #6
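# Pygame main loop: F1 toggles a debug overlay; every event is forwarded to both
# the camera and the simulation before they are updated and drawn each frame.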
def run(window_size):
    pygame.init()
    cells = {}
    camera = Camera(window_size, cells)
    Cell.static_init(camera, cells)
    simulation = Simulation(camera, cells)
    clock = pygame.time.Clock()
    debug_mode = False

    while True:
        dt = clock.tick(FPS) / 1000
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    return
                elif event.key == pygame.K_F1:
                    debug_mode = not debug_mode
            camera.process_event(event)
            simulation.process_event(event)
        camera.update(dt)
        simulation.update(dt)
        camera.draw()
        if debug_mode:
            show_debug_info(camera, simulation, clock.get_fps())
        pygame.display.flip()
Example #7
 def level(self, target_lvl, biome: str):
     """
     Utilizes the Forge class to build and returns a level with the desired specifications.
     If the level is "gen", a new level will be generated. Otherwise, Forge will attempt to
     load a map from the specified file.
     """
     for sprite in self.sprite_grouping.all_sprites:
         if sprite != self.player and sprite != self.player.legs:
             sprite.kill()
     for wall in self.sprite_grouping.walls:
         wall.kill()
     for spawner in self.sprite_grouping.spawners:
         spawner.kill()
     if target_lvl == "gen":
         lvl_pieces, surf_w, surf_h = (
             self.settings["lvl"]["pieces"],
             self.settings["lvl"]["tiles_wide"],
             self.settings["lvl"]["tiles_high"],
         )
     else:
         lvl_pieces, surf_w, surf_h = 1, 128, 128
     if self.init_player:
         self.init_player = False
         self.player = Player(
             self.settings,
             self.data.player_img[self.character]["magic"],
             self.data.player_img[self.character]["move"],
         )
         self.sprite_grouping.all_sprites.add(self.player, self.player.legs)
         self.sprite_grouping.player_sprite.add(self.player)
         self.sprite_grouping.legs_sprite.add(self.player.legs)
     self.map = Forge(
         self.settings,
         self.sprite_grouping,
         self.data,
         self.character,
         self.player,
         lvl_pieces,
     )
     if target_lvl == "gen":
         self.map.load_all()
     else:
         self.map.load(target_lvl)
         if target_lvl == "temple.txt" and not self.init_player:
             self.player.hp = self.player.max_hp
     self.map.new_surface(surf_w, surf_h)
     self.map.build_lvl(biome)
     self.map_img = self.map.make_map()
     self.map_rect = self.map_img.get_rect()
     self.cursor = Cursor(
         self.settings,
         self.data.cursor_img,
     )
     self.sprite_grouping.all_sprites.add(self.cursor)
     self.sprite_grouping.cursor_sprite.add(self.cursor)
     self.camera = Camera(self.settings, self.map.width, self.map.height,
                          self.cursor)
     self.player = self.map.player
     self.mob_count, self.mob_max = 0, self.settings["gen"]["mob_max"]
Example #8
    def __init__(self, game):
        super().__init__(game)

        self.camera = Camera(100, 100)
        self.star_map = StarMap(self.camera)

        self.last_map_pos = tuple(self.camera.d_pos)
        self.mouse_pos_when_press = None
Example #9
 def __init__(self, levelname: str, world: esper.World):
     super().__init__()
     self.scene = self
     self.map: Map = Map(settings.BASE_DIR + "\\" + levelname)
     self.camera: Camera = Camera()
     self.entities = []
     self.player = world.create_entity()
     self.world = world
     self.player: Player = Player(world, player_name="ShuzZzle")
     self.player.create((Velocity(velx=10, vely=10), Position(x=0, y=0)))
Example #10
    def reset(self):
        # state
        self.running = True
        self.playing = False
        self.complete = False
        self.pause = False

        # keys
        self.up_key = False
        self.down_key = False
        self.enter_key = False
        self.back_key = False
        self.esc_key = False
        self.p_key = False

        # hud
        self.lives = 4
        self.bonus_life = 0
        self.gold = 0
        self.kills = 0
        self.distance = 0

        # player
        self.player = Player(self)
        self.alive = True

        # player spawn location
        self.spawn_x = 16
        self.spawn_y = 100
        self.player.position.x = self.spawn_x
        self.player.position.y = self.spawn_y

        # save location
        self.save_x = 16
        self.save_y = 100
        self.camera_x = 0
        self.camera_y = 0
        self.camera_speed = 0

        # camera
        self.camera = Camera(self.player)
        self.auto_scroll = Auto(self.camera, self.player)
        self.camera.set_method(self.auto_scroll)
        self.start_scrolling = False

        # map
        self.map = TileMap(self, 'assets/maps/level 1.csv')

        # menu
        self.main_menu = MainMenu(self)
        self.pause_screen = PauseScreen(self)
        self.gameover_screen = GameOver(self)
        self.gamecomplete_screen = GameComplete(self)
        self.current_menu = self.main_menu
Example #11
    def __init__(self, d, phone_number):
        self.d = d
        self.chrome = Chrome(d)
        self.camera = Camera(self.d)
        self.settings = Settings(self.d)
        self.message = Message1(self.d)
        self.path = os.path.abspath(os.path.join(
            os.getcwd(), "../..")) + "\\enter_resource\\"
        self.init_operation = Init_Operation()
        self.language = self.settings.change_language()

        self.phone_number = phone_number
Example #12
    def __init__(self, path=""):
        self.configpath = path
        self.root = Tk()
        self.cameras = Camera()
        self.parser = Parser()
        self.gui = GUI(root=self.root)
        self.callback = Callback(self.root)

        self.isImage_active = False
        self.isVideo_active = False
        self.isCamera_active = False
        self.isObject_active = False
Example #13
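# Ray-traced scene: the camera is positioned with a look-at view transform and the
# rendered canvas is written out as a PPM image.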
def run() -> None:
    room = create_room()
    objects = create_objects()
    world = World()
    world.light = PointLight(point(-10, 10, -10), Color(1, 1, 1))
    world.objects.extend(room)
    world.objects.extend(objects)
    camera = Camera(400, 200, math.pi / 3)
    camera.transform = view_transform(point(0, 1.5, -5), point(0, 1, 0),
                                      vector(0, 1, 0))
    canvas = camera.render(world)
    PPM(canvas).save_to_file("scene.ppm")
Example #14
 def __init__(self, simulation_manager, client_id):
     threading.Thread.__init__(self)
     self.pepper = simulation_manager.spawnPepper(client_id,
                                                  spawn_ground_plane=True)
     self.duck_finder = keras.models.load_model("./model/classifier_V2.h5")
     self.threads = [
         Camera(self, self.pepper, "top", self.duck_finder),
     ]
     self.initialPosture()
     for thread in self.threads:
         thread.start()
     self.killed = False
     self.stop = False
Example #15
 def __init__(self, d):
     self.d = d
     self.nictalk = NicTalk(self.d)
     self.camtalk = CamTalk(self.d)
     self.qq = Tx_QQ(self.d)
     self.clock = Clock(self.d)
     self.camera = Camera(self.d)
     self.settings = Settings(self.d)
     self.message = Message1(self.d)
     self.path = os.path.abspath(os.path.join(
         os.getcwd(), "../..")) + "\\enter_resource\\"
     self.language = self.settings.change_language()
     self.settings.display_setting()
Example #16
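# Reprojection-error objective: rt is a flattened 3x4 [R|t] pose that is split back
# into rotation and translation before projecting the 3D points.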
def reprojection_distance(rt, points_3d, points_2d):
    Rt = rt.reshape((3, 4))
    R, t, _ = np.hsplit(Rt, np.array((3, 4)))
    t = t.reshape((1, 3))

    camera = Camera()

    projected = cv2.projectPoints(points_3d, R, t, camera.K,
                                  camera.distortion)[0]

    d = euclidean(projected.reshape(projected.shape[0] * 2),
                  points_2d.reshape(points_2d.shape[0] * 2))

    return d
Example #17
    def __init__(self, cam=None):
        if cam is None:
            self.mainCamera = Camera()
        else:
            self.mainCamera = cam

        Render.camera = self.mainCamera

        self.objects = {}
        self.active = False
Example #18
 def __init__(self, d):
     self.d = d
     self.chrome = Chrome(d)
     self.music = Music(d)
     self.init_operation = Init_Operation()
     self.nictalk = NicTalk(self.d)
     self.camtalk = CamTalk(self.d)
     self.qq = Tx_QQ(self.d)
     self.clock = Clock(self.d)
     self.camera = Camera(self.d)
     self.settings = Settings(self.d)
     self.message = Message1(self.d)
     self.path = os.path.abspath(os.path.join(os.getcwd(), "../..")) + "\\enter_resource\\"
Example #19
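# Video lane-detection pipeline: the camera is calibrated once, the region of
# interest comes from the first frame, and the lane finder is mapped over every frame.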
def create_video():
    ir = ImageReader(read_mode='RGB')
    cc = Camera(ir, None)
    cc.calibrate()
    ta = ThresholdApplier()

    output_video_name = '../output_videos/project_video_result.mp4'
    input_video = VideoFileClip("../project_video.mp4")

    image = input_video.get_frame(0)
    undistorted = cc.undistort(image)
    llf = LaneLineFinder(ta, cc, (cc.get_region_of_interest(image)),
                         undistorted)
    output_video = input_video.fl_image(llf.run)
    output_video.write_videofile(output_video_name, audio=False)
Example #20
    def __init__(self):
        super().__init__()

        # get customised PiCamera instance
        self.camera = Camera()

        # get daisy driver object, disable manual movement section if not available
        try:
            self.DD = DaisyDriver()
            self.DDconnected = True
        except SerialException:
            self.DDconnected = False
            self.DD = DaisyDriver(connected=False)

        # initialise user interface
        self.initUI()
Example #21
 def __init__(self):
     self.args = self.create_parser().parse_args()
     self.db = DataBase()
     self.utils = Utils()
     self.face_recognition = FaceRecognition()
     self.camera = Camera(
         int(self.args.camera) if str(self.args.camera).isdigit()
         else self.args.camera)
     self.lock = mraa.Gpio(20)
     self.green_light = mraa.Gpio(32)
     self.is_door_opened = mraa.Gpio(26)
     self.quit = mraa.Gpio(18)
     self.remember_new_face = mraa.Gpio(16)
     self.pin = mraa.Gpio(12)
     self.exit_code = 0
     self.log_folder = None
     self.create_logger()
Example #22
    def __init__(self):
        # Initializing Pygame window
        os.environ['SDL_VIDEO_CENTERED'] = '1'

        pygame.mixer.pre_init(44100, 16, 2, 4096)
        pygame.mixer.init()
        pygame.init()

        pygame.display.set_caption(CAPTION)
        self.surface = pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT),
                                               0, 32)

        # Import here to avoid
        # pygame.error: cannot convert without pygame.display initialized
        from src.camera import Camera
        from src.level.tutorial_one import TutorialOne
        from src.level.main_menu import MainMenu
        from src.level.tutorial_two import TutorialTwo
        from src.level.ledge import Ledge
        from src.level.deathrun import DeathRun

        levels = [
            MainMenu,
            TutorialOne,
            TutorialTwo,
            Ledge,
            DeathRun,
        ]

        self.entities = {ALL_SPRITES: pygame.sprite.Group()}
        self.fps_clock = pygame.time.Clock()
        self.events = pygame.event.get()

        screen = pygame.Rect(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT)
        self.camera = Camera(self, screen)
        self.font = pygame.font.Font('src//font//font.otf', 30)

        self.sfxs = SoundManager()
        self.background_color = None

        self.paused = False
        self.levels = itertools.cycle(levels)
        self.build_next_level()

        self.run()
Example #23
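# Person detection on a webcam stream via an OpenCV DNN model; 'q' releases the
# output video and stops the camera.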
def main():
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(net_arch, net_model)

    camera = Camera(camera_idx=CAM,
                    fps=FPS,
                    net=net,
                    detection_classes=classes,
                    confidence=float(net_confidence),
                    detection_period_in_seconds=1)
    camera.start()

    video_out, v_filename = create_video()
    colors = np.random.uniform(0, 255, size=(len(classes), 3))

    while True:

        rg_frame = camera.person_detection_on_video()

        # rg_frame, jpeg, detection_status, person_in_image = camera.motion_detect(running=True,
        #                                                                          video_file=video_out,
        #                                                                          show_edges=False,
        #                                                                          dnn_detection_status=True,
        #                                                                          net=net,
        #                                                                          classes=classes,
        #                                                                          colors=colors,
        #                                                                          given_confidence=float(confidence),
        #                                                                          min_area=int(min_area),
        #                                                                          blur_size=int(blur_size),
        #                                                                          blur_power=int(blur_power),
        #                                                                          threshold_low=int(threshold_low))

        cv2.imshow('frame', rg_frame)

        # if len(detected_objects) > 0:
        #     for obj in detected_objects:
        #         image = detected_objects[obj]
        #         # TODO: Save crop images just with persons

        if cv2.waitKey(1) & 0xFF == ord('q'):
            video_out.release()
            camera.stop()
            break
    cv2.destroyAllWindows()
Example #24
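# Runs the lane-finding pipeline over still test images and plots each overlay.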
def test_with_images():
    ir = ImageReader(read_mode='RGB')
    ip = ImagePlotter(images_to_show_count=10)
    cc = Camera(ir, ip)
    cc.calibrate()
    ir.regex = '../test_images/test*.jpg'
    ir.set_image_names()
    ta = ThresholdApplier()

    for image_file in ir.images():
        image = image_file.image
        ip.add_to_plot(image, image_file.name, 'out', False)
        undistorted = cc.undistort(image)
        llf = LaneLineFinder(ta, cc, (cc.get_region_of_interest(image)), image)
        result = llf.run(undistorted)
        # ip.add_to_plot(llf._sliding_window_frame, image_file.name + ' sliding windows', 'out', False)
        # ip.add_to_plot(result, image_file.name + 'result', 'out', False)
        ip.add_to_plot(llf._overlay, image_file.name + ' overlay', 'out',
                       False)
    ip.plot('out', 2)
Example #25
    def __init__(self):
        super().__init__()

        # get customised PiCamera instance
        self.camera = Camera()

        # get daisy driver object, disable manual movement section if not available
        try:
            self.DD = DaisyDriver()
            self.DDconnected = True
        except SerialException:
            self.DDconnected = False
            self.DD = DaisyDriver(connected=False)

        # initialise user interface
        self.initUI()

        # connect camera finishing timer signal to UI to re-enable start button
        self.camera.callbackemitter.timer_finished_signal.connect(
            self.cameratimer.BB.sreset.onfinish)
Example #26
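# LED distance estimation: blob centers are thresholded per frame, the focal length
# is solved once from a target of known size, then reused to estimate distance.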
def main():
    images = img_loader.load(const.path4)
    camera = Camera()
    focal_calculated = False
    TEST_HEIGHT = 600
    KNOWN_SIZE = 200
    for t, i in images:

        i = resize_img(i, 1000)

        binary_img = led_thresh(i, adapt_mask=False)
        key_points = get_points_by_moment(binary_img)

        # draw circles
        for p in key_points:
            cv2.circle(i, (p[0], p[1]), 30, (0, 0, 255))
        # Fit a bounding rectangle around the detected centers
        x, y, w, h = cv2.boundingRect(np.array(key_points))
        cv2.rectangle(i, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # Manually measure the segment between the first two key points
        x = key_points[0][0] - key_points[1][0]
        y = key_points[0][1] - key_points[1][1]
        length = math.sqrt(x**2 + y**2)
        if not focal_calculated:
            camera.calculate_f_length(length, KNOWN_SIZE, TEST_HEIGHT)
            focal_calculated = True

        distance = camera.calculate_distance_from_camera(200, length)

        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(i, t, (30, 30), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.putText(i, "%.0f" % distance + "mm", (30, 80), font, 1, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.putText(i, "rectangle %dx%d" % (h, w) + "mm", (30, 120), font, 0.7, (0, 0, 255), 1, cv2.LINE_AA)

        cv2.imshow("image", i)
        k = cv2.waitKey(0) & 0xFF
        if k == ord("q"):
            cv2.destroyAllWindows()
            break
Example #27
    def __init__(self, screen_w=800, screen_h=600):
        pygame.init()

        self.screen_w = screen_w
        self.screen_h = screen_h

        # Cursor handling
        self.visible = False
        self.esc = False
        pygame.mouse.set_visible(self.visible)
        pygame.event.set_grab(not self.visible)
        pygame.mouse.set_pos((screen_w / 2, screen_h / 2))

        # Initialize camera
        self.camera = Camera()
        self.camera.set_pos(Vec3(0, 0, -32)) # Set far away to help performance

        self.renderer = Renderer(screen_w, screen_h)

        # Only generate cube mesh once
        self.cube = Cube()

        # Load images and define grass block
        self.media = {
            "grass_block_side": pygame.image.load("media/grass_block_side.png"),
            "grass": pygame.image.load("media/grass.png"),
            "dirt": pygame.image.load("media/dirt.png")
        }

        self.texture = [
            self.media["grass_block_side"],
            self.media["grass_block_side"],
            self.media["grass_block_side"],
            self.media["grass_block_side"],
            self.media["grass"],
            self.media["dirt"]
        ]

        self.map = Map(8, False) # Change False to True for testing mode
Example #28
def reprojection_distance(params, points_3d, points_2d):
    # Unpack the six pose parameters: a Rodrigues rotation vector and a translation
    r1, r2, r3, t1, t2, t3 = params

    R = cv2.Rodrigues(np.array([r1, r2, r3]))[0]

    t = np.array([[t1], [t2], [t3]])

    # Rt = input.reshape((3, 4))
    # R, t, _ = np.hsplit(Rt, np.array((3, 4)))
    # t = t.reshape((1, 3))

    camera = Camera()

    projected = cv2.projectPoints(points_3d, R, t, camera.K,
                                  camera.distortion)[0]

    vec1 = projected.reshape(projected.shape[0] * 2)
    vec2 = points_2d.reshape(points_2d.shape[0] * 2)

    res = 0
    for v1, v2 in zip(vec1, vec2):
        res += pow(v1 - v2, 2)

    # print(list(vec1))
    # print(list(vec2))
    # print(res)

    # Alternative distance computations kept from debugging; only res is returned
    d = euclidean(projected.reshape(projected.shape[0] * 2),
                  points_2d.reshape(points_2d.shape[0] * 2))

    d2 = cdist(np.array([projected.reshape(projected.shape[0] * 2)]),
               np.array([points_2d.reshape(points_2d.shape[0] * 2)]),
               'sqeuclidean')

    # print(d2)
    # print(d)

    return res
Example #29
import cv2
import numpy as np

from src.camera import Camera

img1 = cv2.imread("img10.png")
img2 = cv2.imread("img20.png")

height1, width1, _ = img1.shape
height2, width2, _ = img2.shape

#img1 = cv2.resize(img1, (int(width1/2), int(height1/2))) 
#img2 = cv2.resize(img2, (int(width2/2), int(height2/2))) 


initial_camera = Camera()
initial_camera.R = np.eye(3, 3)
initial_camera.t = np.array([0.0, 0.0, 0.0])

camera = Camera()
camera.compute_camera_extrinsic(img1, img2)


def generate_cube(scale=1, shifting=[0, 0, 0]):
    """ Generates cube in homogenious coordinates """

    world_coords = [
        [-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1],
        [-1, -1, 1], [1, -1, 1 ], [1, 1, 1], [-1, 1, 1]
    ]
Example #30
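# Fragment: sky-gradient fallback of a ray-color function, followed by a
# multi-sample (antialiased) render loop over every pixel.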
    else:
        unit_direction: Vec3 = ray.direction.unit_vector()
        t: float = 0.5 * (unit_direction.y + 1)
        return ((1.0 - t) * UNIT_VEC3) + (t * Vec3(0.5, 0.7, 1.0))


if __name__ == "__main__":
    width = 400
    height = 200
    sampling_size = 200
    ppm: PPM = PPM(width, height)
    lower_left_corner: Vec3 = Vec3(-2, -1, -1)
    h_movement: Vec3 = Vec3(4, 0, 0)
    v_movement: Vec3 = Vec3(0, 2, 0)
    origin: Vec3 = Vec3(0, 0, 0)
    cam = Camera(lower_left_corner, h_movement, v_movement, origin)

    hittables: List[Hittable] = [
        Sphere(Vec3(0, 0, -1), 0.5),
        Sphere(Vec3(0, -100.5, -1), 100)
    ]
    world: HittableList = HittableList(hittables)

    for j in range(height - 1, -1, -1):
        for i in range(width):
            print("Tracing on row %s, col %s" % (j, i))
            print("antialiasing...", end="")
            accumulator: Vec3 = Vec3(0, 0, 0)
            for sample in range(sampling_size):
                # In this instance, instead of u and v being mere ratios to
                # our distance from the edges, they feature a random "jitter"