示例#1
0
class Face:
  """A pygame-rendered cartoon face with two eyes, a mouth and a photo helper."""

  def __init__(self,resolution,color=(0, 0, 0)):
    """Open the display at *resolution*, paint it *color*, build the
    facial features around the horizontal centre and draw once."""
    self.screen=pygame.display.set_mode(resolution)

    # Solid backdrop sized to the window, converted for fast blitting.
    self.background = pygame.Surface(self.screen.get_size())
    self.background = self.background.convert()
    self.background.fill(color)

    mid_x = self.background.get_width()/2

    self.left_eye=Eye(mid_x-250,250)
    self.right_eye=Eye(mid_x+250,250)
    self.mouth=Mouth(mid_x,500)
    self.photo=Photo()

    self.draw()

  def draw(self):
    """Blit the background, then every feature, then refresh the display."""
    self.screen.blit(self.background,(0,0))
    for feature in (self.left_eye, self.right_eye, self.mouth):
      feature.draw(self.screen)
    pygame.display.flip()

  def _draw_eyes(self, left_mode, right_mode):
    """Redraw the eye pair (left first) with the given drawing modes."""
    self.left_eye.draw(self.screen, left_mode)
    self.right_eye.draw(self.screen, right_mode)

  def surprise(self):
    self._draw_eyes("up", "up")

  def sad(self):
    self._draw_eyes("brownleft", "brownright")
    self.mouth.draw(self.screen,"sad")

  def angry(self):
    # Brows mirrored relative to sad().
    self._draw_eyes("brownright", "brownleft")

  def look_left(self):
    self._draw_eyes("left", "left")

  def look_right(self):
    self._draw_eyes("right", "right")

  def blink_left(self):
    self.left_eye.blink(self.screen)

  def blink_right(self):
    self.right_eye.blink(self.screen)

  def close_both(self):
    self._draw_eyes("close", "close")

  def open_both(self):
    self.left_eye.status="open"
    self.right_eye.status="open"
    self.left_eye.draw(self.screen)
    self.right_eye.draw(self.screen)

  def blink_both(self):
    """Close both eyes briefly, then reopen them."""
    self.close_both()
    time.sleep(0.10)
    self.open_both()
class Brain(Agent):
    """Driver agent of the program; it coordinates every other agent."""

    def __init__(self):
        """Register under the name "brain" and create the speech helpers."""
        super(Brain, self).__init__("brain")
        self.message = "Hello World!"
        self.mouth = Mouth()

    def say_hello_world(self):
        """Delegate speaking to the mouth."""
        self.mouth.speak()
示例#3
0
 def __init__(self, screen):
     """Set up an empty game level bound to *screen*."""
     self.screen = screen
     self.shooter = Shooter(self.screen)
     self.mouth = Mouth(screen)

     # Targets currently alive and the ones already swallowed.
     self.targets = []
     self.eat_food = []
     self.eat_count = 0

     # Scoring and chewing-animation state.
     self.total_score = 0
     self.final_score = 0
     self.eating = False
     self.chew_timer = 0
     self.spawn_count = 0
    def set_objects(self):
        """Load the kv layout and attach the eyes, nose and mouth widgets.

        All three feature widgets share the same bounding box inside the
        game area; each is added at index 1 and activated immediately.
        """
        Builder.load_file("main.kv")
        self.size = (Window.width, Window.height)

        # Shared bounding box: full game-area width, trimmed vertically.
        # Build a fresh list per widget so none of them can mutate a box
        # shared with the others (the original built this list three times).
        def feature_box():
            return [
                self.game_area.x, self.game_area.y +
                65, self.game_area.width, self.game_area.height - 175
            ]

        self.eyes = Eyes(box=feature_box())
        self.nose = Nose(box=feature_box())
        self.mouth = Mouth(box=feature_box())

        # Same add/activate order as before: eyes, mouth, nose.
        for widget in (self.eyes, self.mouth, self.nose):
            self.game_area.add_widget(widget, index=1)
            widget.active = True
        # TODO(review): original note (in Macedonian) suggested something like
        # self.auto_bring_to_front(self.mouth) should eventually be wired here.
示例#5
0
  def __init__(self,resolution,color=(0, 0, 0)):
    """Open the display at *resolution*, paint the background *color*,
    create both eyes, the mouth and the photo helper, then draw once."""

    self.screen=pygame.display.set_mode(resolution)

    # Background surface matching the window, converted for fast blits.
    self.background = pygame.Surface(self.screen.get_size())
    self.background = self.background.convert()
    self.background.fill(color)

    # Horizontal centre of the window; features are placed around it.
    center_x=self.background.get_width()/2

    self.left_eye=Eye(center_x-250,250)
    self.right_eye=Eye(center_x+250,250)
    self.mouth=Mouth(center_x,500)
    self.photo=Photo()

    # Render the initial (neutral) face.
    self.draw()
示例#6
0
    def __init__(self, port=DEFAULT_PORT, logdir='/var/log/xm'):
        """Create a new instance of the body.

        By default it is composed of a thread-safe version of `Legs`,
        plus `Mouth` and `Eyes`.

        Args:
            port (str): serial port `Legs` will connect to.
            logdir (str): path where to store logs.
        """
        self.safe_legs = LockAdapter(Legs(port))

        # Eyes log to a date-stamped file under *logdir*.
        eye_log = '{}-eyes.log'.format(str(datetime.date.today()))
        self.safe_mouth = Mouth()
        self.safe_eye = Eyes(log=open(os.path.join(logdir, eye_log), 'w'))

        self.circuits = {}

        # The four movement circuits all share the same pre-synapse.
        for movement in ('forward', 'backward', 'left', 'right'):
            self.add_circuit(movement,
                             target=getattr(self.safe_legs, movement),
                             pre=[move_synapse])
        self.add_circuit('stop', target=self.safe_legs.stop)
        self.add_circuit('set_speed',
                         target=self.safe_legs.set_speed,
                         pre=[speedvalue_synapse])
        self.add_circuit('set_movetime',
                         target=self.safe_legs.set_movetime,
                         pre=[movetime_synapse])

        # Speech circuits.
        self.add_circuit('say', target=self.safe_mouth.say, pre=[say_synapse])
        self.add_circuit('shutup', target=self.safe_mouth.shutup)

        # Vision circuits.
        self.add_circuit('open_eyes', target=self.safe_eye.open)
        self.add_circuit('close_eyes', target=self.safe_eye.close)
示例#7
0
class Head():
    """Chat-bot "head": runs shell commands and replies through a Mouth."""

    # Commands that all map to the help message.
    _GREETINGS = ('start', 'hello', 'hi', 'what\'s up', 'help')

    def __init__(self, bot):
        # NOTE(review): when *bot* is falsy, self.mouth is never created, so
        # handle_commands/greetings would raise AttributeError — confirm that
        # callers always pass a bot.
        if bot:
            self.mouth = Mouth(bot)

    def run_command(self, command):
        """Run *command* in a shell and return its combined stdout/stderr."""
        return subprocess.getoutput(command)

    def get_ip_addr(self):
        """Return the host's first IP address (first token of `hostname -I`)."""
        addr = self.run_command('hostname -I')
        return addr.split(maxsplit=1)[0]

    def start_vncserver(self):
        """Start a VNC server on display :1 and return its output."""
        return self.run_command('vncserver :1')

    def get_command_and_params(self, msg):
        """Split '/cmd params' into ('cmd', 'params'); params is None if absent.

        The command part is lower-cased; only the first '/' is stripped.
        """
        v = msg.replace('/', '', 1).split(maxsplit=1)
        if len(v) > 1:
            return v[0].lower(), v[1]

        return v[0].lower(), None

    def handle_commands(self, msg):
        """Dispatch a chat message to the matching action, replying via mouth."""
        cmd, params = self.get_command_and_params(msg)
        print(cmd, params)
        if cmd in self._GREETINGS:
            return self.mouth.help()

        if cmd == 'run':
            # SECURITY(review): executes arbitrary shell input from the chat.
            return self.mouth.command_output(self.run_command(params))

        if cmd == 'ip':
            return self.mouth.myip(self.get_ip_addr())

        if cmd == 'vnc':
            return self.mouth.command_output(self.start_vncserver())

        # Unknown command.
        return self.mouth.sorry()

    def greetings(self):
        """Send the greeting message, including the host's IP address."""
        return self.mouth.greetings(self.get_ip_addr())
示例#8
0
    def __init__(self, audio, fill_color):
        """Initialise the base Mouth and subscribe to audio callbacks.

        Args:
            audio: audio source; must provide connect_wave/connect_idle hooks.
            fill_color: forwarded unchanged to Mouth.__init__.
        """
        Mouth.__init__(self, audio, fill_color)
        # __wave_cb / __idle_cb are name-mangled callbacks defined elsewhere
        # on this class.
        audio.connect_wave(self.__wave_cb)
        audio.connect_idle(self.__idle_cb)
        # Latest waveform samples pushed by the audio source.
        self.wave = []
示例#9
0
 def speak(self, session_id=None):
     """Speak for the given session, defaulting to the default session.

     NOTE(review): *session_id* is resolved here but never passed on —
     Mouth.process_input() is called with no arguments. This looks like a
     bug; confirm the intended signature of Mouth.process_input.
     """
     if not session_id:
         session_id = str(self.default_session.uuid)
     Mouth.process_input()
 def __init__(self):
     """Default constructor: register as the "brain" agent and build helpers."""
     super(Brain, self).__init__("brain")
     # Output device and the canned greeting it will speak.
     self.mouth = Mouth()
     self.message = "Hello World!"
示例#11
0
 def __init__(self, bot):
     """Bind a Mouth to *bot*.

     NOTE(review): when *bot* is falsy, self.mouth is never created —
     confirm callers always pass a bot.
     """
     if bot:
         self.mouth = Mouth(bot)
示例#12
0
    def __init__(self):
        """Build the main window: load the .ui layout, wire every button and
        menu action, create the image widgets, then start the robot and
        mouth workers."""
        QWidget.__init__(self)

        # loading ui from xml
        uic.loadUi(os.path.join(DIRPATH, 'app.ui'), self)

        # FIXME - libpng warning: iCCP: known incorrect sRGB profile
        self.setWindowIcon(QIcon("./images/robot_icon.png"))

        # keep the window fixed sized
        self.setFixedSize(self.size())

        # button event handlers
        self.btnStartCaptureForVideoAnalysis.clicked.connect(
            self.start_capture_for_video_analysis)
        self.btnStopCaptureForVideoAnalysis.clicked.connect(
            self.stop_capture_for_video_analysis)

        self.btnChooseClassifierXML.clicked.connect(
            self.choose_classifier_file)

        self.btnChooseImage.clicked.connect(self.choose_image_for_analysis)

        self.setup_tray_menu()

        # populate both combo boxes with camera ids 0..10
        for i in range(0, 11):
            self.cboxCameraIds.addItem(str(i))
            self.cboxCameraIds1.addItem(str(i))

        # setting up handlers for menubar actions
        self.actionAbout.triggered.connect(self.about)
        self.actionExit.triggered.connect(qApp.quit)
        self.actionPreferences.triggered.connect(self.show_preferences)

        # video analysis image widget
        self.img_widget_vid_analysis = ImageWidget()
        self.hlayoutVideoAnalysis.addWidget(self.img_widget_vid_analysis)

        # face training image widget
        self.img_widget_face_training = ImageWidget()
        self.hlayoutFaceTrainingImg.addWidget(self.img_widget_face_training)

        # face identification image widget
        self.img_widget_identify_face = ImageWidget()
        self.hlayoutIdentifyFace.addWidget(self.img_widget_identify_face)

        # image analysis image widget, pre-loaded with a placeholder image
        self.img_widget_img_analysis = ImageWidget()
        self.hlayoutImageAnalysis.addWidget(self.img_widget_img_analysis)
        img = cv2.imread("images/human.png")
        self.img_widget_img_analysis.handle_image_data(img)

        # camera capture feeds frames into process_image_data_from_camera
        self.vid_capture = VideoCapture()
        self.vid_capture.got_image_data_from_camera.connect(
            self.process_image_data_from_camera)

        # checkbox state + change handlers
        self.highlight_faces = self.chkHighlightFaces.isChecked()
        self.chkHighlightFaces.stateChanged.connect(
            self.highlight_faces_checkbox_changed)
        self.chckGrayscale.stateChanged.connect(
            self.grayscale_checkbox_changed)

        # face trainer dataset browser btn handler
        self.btnBrowseDatasetForFaceTrainer.clicked.connect(
            self.browse_dataset_for_face_trainer)
        self.btnBrowseClassifierForFaceTrainer.clicked.connect(
            self.browse_classifier_file_for_face_trainer)
        self.btnStartFaceTrainer.clicked.connect(self.start_face_trainer)

        self.btnBrowseIdentifyFace.clicked.connect(self.browse_identify_face)

        self.btnTalk.clicked.connect(self.lets_talk)

        # create and start robot
        self.robot = Robot(self.lblRobot)

        self.mouth = Mouth()

        # connect global signals to slots
        g_emitter().feed_mouth.connect(self.mouth.feed_text)
        g_emitter().set_speaking_state.connect(self.robot.set_speaking_state)
        g_emitter().set_idle_state.connect(self.robot.set_idle_state)

        # start both worker threads
        self.robot.start()
        self.mouth.start()
示例#13
0
class AppWindow(QMainWindow):
    """
    Main GUI class for application.

    Loads the Qt Designer layout, wires all widget signals, and owns the
    long-lived workers (video capture, robot animation, speaking mouth).
    """
    def __init__(self):
        """Build the main window, wire every widget, start the workers."""
        QWidget.__init__(self)

        # loading ui from xml
        uic.loadUi(os.path.join(DIRPATH, 'app.ui'), self)

        # FIXME - libpng warning: iCCP: known incorrect sRGB profile
        self.setWindowIcon(QIcon("./images/robot_icon.png"))

        # keep the window fixed sized
        self.setFixedSize(self.size())

        # button event handlers
        self.btnStartCaptureForVideoAnalysis.clicked.connect(
            self.start_capture_for_video_analysis)
        self.btnStopCaptureForVideoAnalysis.clicked.connect(
            self.stop_capture_for_video_analysis)

        self.btnChooseClassifierXML.clicked.connect(
            self.choose_classifier_file)

        self.btnChooseImage.clicked.connect(self.choose_image_for_analysis)

        self.setup_tray_menu()

        # populate both combo boxes with camera ids 0..10
        for i in range(0, 11):
            self.cboxCameraIds.addItem(str(i))
            self.cboxCameraIds1.addItem(str(i))

        # setting up handlers for menubar actions
        self.actionAbout.triggered.connect(self.about)
        self.actionExit.triggered.connect(qApp.quit)
        self.actionPreferences.triggered.connect(self.show_preferences)

        # video analysis image widget
        self.img_widget_vid_analysis = ImageWidget()
        self.hlayoutVideoAnalysis.addWidget(self.img_widget_vid_analysis)

        # face training image widget
        self.img_widget_face_training = ImageWidget()
        self.hlayoutFaceTrainingImg.addWidget(self.img_widget_face_training)

        # face identification image widget
        self.img_widget_identify_face = ImageWidget()
        self.hlayoutIdentifyFace.addWidget(self.img_widget_identify_face)

        # image analysis image widget, pre-loaded with a placeholder image
        self.img_widget_img_analysis = ImageWidget()
        self.hlayoutImageAnalysis.addWidget(self.img_widget_img_analysis)
        img = cv2.imread("images/human.png")
        self.img_widget_img_analysis.handle_image_data(img)

        # camera capture feeds frames into process_image_data_from_camera
        self.vid_capture = VideoCapture()
        self.vid_capture.got_image_data_from_camera.connect(
            self.process_image_data_from_camera)

        self.highlight_faces = self.chkHighlightFaces.isChecked()
        self.chkHighlightFaces.stateChanged.connect(
            self.highlight_faces_checkbox_changed)
        self.chckGrayscale.stateChanged.connect(
            self.grayscale_checkbox_changed)

        # face trainer dataset browser btn handler
        self.btnBrowseDatasetForFaceTrainer.clicked.connect(
            self.browse_dataset_for_face_trainer)
        self.btnBrowseClassifierForFaceTrainer.clicked.connect(
            self.browse_classifier_file_for_face_trainer)
        self.btnStartFaceTrainer.clicked.connect(self.start_face_trainer)

        self.btnBrowseIdentifyFace.clicked.connect(self.browse_identify_face)

        self.btnTalk.clicked.connect(self.lets_talk)

        # create and start robot
        self.robot = Robot(self.lblRobot)

        self.mouth = Mouth()

        # connect global signals to slots
        g_emitter().feed_mouth.connect(self.mouth.feed_text)
        g_emitter().set_speaking_state.connect(self.robot.set_speaking_state)
        g_emitter().set_idle_state.connect(self.robot.set_idle_state)

        self.robot.start()
        self.mouth.start()

    def lets_talk(self):
        """Feed the typed text to the mouth and clear the input box."""
        text = self.teTalk.toPlainText()
        self.teTalk.setText("")
        g_emitter().emit_signal_to_feed_mouth(text)

    def browse_identify_face(self):
        """Pick an image file and show it in the identify-face widget."""
        fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
        self.teIdentifyFace.setText(fname[0])

        img = cv2.imread(fname[0])
        self.img_widget_identify_face.handle_image_data(img)

    def start_face_trainer(self):
        """Kick off a FaceTrainer run with the configured dataset/classifier."""
        dataset_dir = self.teFaceTrainerDataset.toPlainText()
        classifier_xml = self.teFaceTrainerClassifier.toPlainText()
        log.info(
            "starting face trainer with classifier '%s' and dataset '%s'" %
            (classifier_xml, dataset_dir))

        ft = FaceTrainer(classifier_xml, dataset_dir)
        ft.processing_image.connect(self.processing_image_for_training)
        ft.face_training_finished.connect(self.face_training_finished)
        ft.start()
        self.lblFaceTrainingStatus.setText("FACE TRAINING UNDER PROGRESS")

    def face_training_finished(self):
        """Update the status label and announce completion via the mouth."""
        self.lblFaceTrainingStatus.setText("FACE TRAINING FINISHED")
        g_emitter().emit_signal_to_feed_mouth("face training finished")

    def processing_image_for_training(self, label, fname):
        """Show the image currently being learned; tolerate unreadable files."""
        log.info("processing image for training: '%s'" % label)
        self.lblFaceTrainerCurImg.setText("Learning face of: '%s' " % label)

        try:
            img = cv2.imread(fname)
            self.img_widget_face_training.handle_image_data(img)
        except Exception as exp:
            log.warning("failed while processing image '%s' while training" %
                        fname)
            log.warning("Exception: %s" % str(exp))

    def browse_dataset_for_face_trainer(self):
        """Pick the dataset directory for the face trainer."""
        dataset_dir = str(
            QFileDialog.getExistingDirectory(self,
                                             'Select directory for dataset',
                                             '/home'))
        log.info("dataset dir file: %s" % dataset_dir)
        self.teFaceTrainerDataset.setText(dataset_dir)

    def browse_classifier_file_for_face_trainer(self):
        """Pick the classifier XML file for the face trainer."""
        classifier_xml = QFileDialog.getOpenFileName(self, 'Open file',
                                                     '/home')
        log.info("classifier xml file: %s" % classifier_xml[0])
        self.teFaceTrainerClassifier.setText(classifier_xml[0])

    def grayscale_checkbox_changed(self):
        """Re-render the analysis image in grayscale or colour."""
        fname = self.teImage.toPlainText()
        print(fname)
        img = cv2.imread(fname)
        if self.chckGrayscale.isChecked():
            # convert image to grayscale
            pil_image = Image.open(fname).convert("L")

            # convert grayscale image to numpy array
            image_array = np.array(pil_image, "uint8")

            # FIXME - code crashes here !!!
            self.img_widget_img_analysis.handle_image_data(image_array)
        else:
            self.img_widget_img_analysis.handle_image_data(img)

    def highlight_faces_checkbox_changed(self):
        """Debug trace for the highlight-faces checkbox."""
        if self.chkHighlightFaces.isChecked():
            print("yes")
        else:
            print("no")

    def choose_classifier_file(self):
        """Pick the Haar classifier XML used for video analysis."""
        fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
        log.info("chose classfier xml file: %s" % fname[0])
        self.teClassifierXML.setText(fname[0])

    def choose_image_for_analysis(self):
        """Pick an image and show it in the analysis widget."""
        fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
        log.info("chose imagefile: %s, for analysis" % fname[0])
        self.teImage.setText(fname[0])

        img = cv2.imread(fname[0])
        self.img_widget_img_analysis.handle_image_data(img)

    def start_capture_for_video_analysis(self):
        """Start the camera capture worker."""
        log.debug("start video capture")
        self.vid_capture.start()

    def stop_capture_for_video_analysis(self):
        """Stop the camera capture worker and clear the video widget."""
        # BUGFIX: the original logged "start video capture" here.
        log.debug("stop video capture")
        self.vid_capture.stop()
        self.img_widget_vid_analysis.reset()

    def detect_face_in_image_data(self, image_data):
        """
        function detects faces in image data,
        draws rectangle for faces in image data,
        and returns this updated image data with highlighted face/s
        """
        self._red = (0, 0, 255)
        self._width = 2
        self._min_size = (30, 30)

        # haarclassifiers work better in black and white
        gray_image = cv2.cvtColor(image_data, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.equalizeHist(gray_image)

        # path to Haar face classfier's xml file
        # NOTE(review): the classifier is re-loaded from disk on every frame;
        # consider caching it if video analysis is slow.
        face_cascade_xml = './cascades/haarcascades_cuda/haarcascade_frontalface_default.xml'
        self.classifier = cv2.CascadeClassifier(face_cascade_xml)
        faces = self.classifier.detectMultiScale(gray_image,
                                                 scaleFactor=1.3,
                                                 minNeighbors=4,
                                                 flags=cv2.CASCADE_SCALE_IMAGE,
                                                 minSize=self._min_size)

        for (x, y, w, h) in faces:
            cv2.rectangle(image_data, (x, y), (x + w, y + h), self._red,
                          self._width)

        return image_data

    def process_image_data_from_camera(self, image_data):
        """Optionally highlight faces, then display the camera frame."""
        if self.chkHighlightFaces.isChecked():
            image_data = self.detect_face_in_image_data(image_data)
        self.img_widget_vid_analysis.handle_image_data(image_data)

    def about(self):
        """Show the About dialog."""
        ad = AboutDialog()
        ad.display()

    def show_preferences(self):
        """Show the Preferences dialog."""
        print("preferences")
        pd = PrefsDialog()
        pd.display()

    def setup_tray_menu(self):
        """Create the system-tray icon with show/hide/exit actions."""
        # setting up QSystemTrayIcon
        self.tray_icon = QSystemTrayIcon(self)
        self.tray_icon.setIcon(QIcon("./images/robot_icon.png"))

        # tray actions
        show_action = QAction("Show", self)
        quit_action = QAction("Exit", self)
        hide_action = QAction("Hide", self)

        # action handlers
        show_action.triggered.connect(self.show)
        hide_action.triggered.connect(self.hide)
        quit_action.triggered.connect(qApp.quit)

        # tray menu
        tray_menu = QMenu()
        tray_menu.addAction(show_action)
        tray_menu.addAction(hide_action)
        tray_menu.addAction(quit_action)
        self.tray_icon.setContextMenu(tray_menu)
        self.tray_icon.show()

    def closeEvent(self, event):
        """Minimise to tray instead of quitting; stop the robot worker.

        NOTE(review): only the robot thread is stopped here — the mouth
        thread keeps running; confirm whether it should be stopped too.
        """
        try:
            event.ignore()
            self.hide()
            self.tray_icon.showMessage("RoboVision",
                                       "RoboVision was minimized to Tray",
                                       QSystemTrayIcon.Information, 2000)
            self.robot.stop()
            self.robot.join()
        except Exception as exp:
            log.warning("app close exp: %s" % str(exp))

    def ok_pressed(self):
        """Debug handler: show a confirmation message box."""
        log.debug("[AppWindow] :: ok")
        self.show_msgbox("AppWindow", "Its ok")

    def show_msgbox(self, title, text):
        """
        Function for showing error/info message box
        """
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText(text)
        msg.setWindowTitle(title)
        msg.setStandardButtons(QMessageBox.Ok)

        retval = msg.exec_()
        print("[INFO] Value of pressed message box button:", retval)
示例#14
0
def handle(clock, event_handler):
    """Run the demo/attract screen, then hand off to an automated game.

    Builds 16 bugs away from the maze centre, draws the maze, the big
    decorative bugs and the score texts, wipes the frame in, holds the
    magnifier over the mouth for two clock ticks, and finally starts
    play_game with this setup.
    """
    # Make the bugs (not too close to center)
    bugs = []
    for _ in range(16):
        while True:
            cx = random.randint(0, 19)
            cy = random.randint(0, 15)
            # Reject cells in the central 8x8 region.
            if 6 <= cx < 14 and 6 <= cy < 14:
                continue
            break
        bugs.append(Bug(cx * 4 + 25, cy * 4 + 17, random.randint(0, 3)))

    pic = Frame()

    # Draw the maze the bugs live in.
    mega = Maze(20, 16, 192)
    mode_game.draw_maze(mega._maze, pic)

    # Draw the big decorative bugs on either side.
    bugimage = GR.BIG_BUG
    pic.draw_image(3, 32, bugimage['standing'])
    pic.draw_image(108, 32, bugimage['standing'])

    # The mouth starts mid-maze facing a random direction.
    mouth = Mouth(25 + 10 * 4, 17 + 8 * 4, random.randint(0, 3))

    # Don't start on a dot
    pic.set_pixel(mouth.x, mouth.y, 0)

    hs = str(mode_game.HIGH_SCORE).rjust(4, '0')
    text.draw_text(pic, 19, 4, 'High Score ' + hs, GR.COLOR_SCORE)
    text.draw_text(pic, 26, 84, 'Play Giga-Bug', GR.COLOR_SCORE)

    transitions.wipe_in(pic)

    # (A large block of commented-out demo-animation code was removed here.)

    # Two beats with the magnifier held over the mouth before play begins.
    clock.tick(0.75)
    nf = MAG.draw_magnifier(pic, mouth.x - 8, mouth.y - 8, 17)
    MAG.draw_mouth_on_magnifier(nf, mouth)
    hardware.render_frame(nf)
    clock.tick(0.75)

    # Play this setup
    return play_game(clock, pic, mouth, bugs, event_handler)
示例#15
0
class GameLevel:
    """One level of the shoot/eat game: spawns targets and tracks the score."""

    def __init__(self, screen):
        self.targets = []        # targets currently on screen
        self.eat_food = []       # eaten targets awaiting the bonus pass
        self.eat_count = 0
        self.screen = screen
        self.shooter = Shooter(self.screen)
        self.mouth = Mouth(screen)
        self.total_score = 0
        self.final_score = 0
        self.eating = False      # chewing animation in progress
        self.chew_timer = 0
        self.spawn_count = 0

    def create_target(self, x_coor=0):
        """Spawn a new target at *x_coor*; counts against SPAWN_LIMIT."""
        new_target = Target(self.screen, x_coor)
        self.targets.append(new_target)
        self.spawn_count += 1

    def update_state(self, tick):
        """Advance the level by one frame.

        Returns True when the level is over (capacity reached, or all
        spawned targets are gone), False otherwise.
        """
        self.mouth.draw()
        if len(
                self.targets
        ) < TARGET_LIMIT and tick == TARGET_UPDATE_DELAY and self.spawn_count < SPAWN_LIMIT:
            self.create_random_target()

        # BUGFIX: iterate over a snapshot — the original removed entries
        # from self.targets while iterating it, which skips the element
        # right after every removal.
        for entry in list(self.targets):
            entry.update_target_state()
            if entry.eaten:
                self.total_score += entry.score
                self.eat_food.append(entry)
                self.targets.remove(entry)
                self.eating = True
                self.eat_count += 1
            elif entry.end:
                self.targets.remove(entry)

        if self.eat_count == TIM_CAPACITY:
            # BUGFIX: the original removed from eat_food while iterating it,
            # so only every other eaten target was scored and cleared.
            new_score = sum(entry.score * 10 for entry in self.eat_food)
            self.eat_food.clear()
            self.total_score += new_score
            return True

        # Same snapshot fix for bullets leaving the top of the screen.
        for bullet in list(self.shooter.bullets):
            bullet.move()
            if bullet.y_pos < 0:
                self.shooter.bullets.remove(bullet)

        self.shooter.update_shooter()

        # Chewing animation: keep the mouth closed for CHEW_DELAY frames.
        if self.eating:
            if self.chew_timer < CHEW_DELAY:
                self.mouth.close()
                self.chew_timer += 1
            else:
                self.mouth.open()
                self.chew_timer = 0
                self.eating = False

        self.check_collision()

        # self.final_score = round((self.total_score - self.spawn_count / (SPAWN_LIMIT / TIM_CAPACITY)) * 10)
        self.final_score = self.total_score * 10

        if self.spawn_count >= SPAWN_LIMIT and len(self.targets) == 0:
            return True

        return False

    def check_collision(self):
        """Explode targets hit by bullets; each bullet is spent on one target."""
        for bullet in list(self.shooter.bullets):
            for target in self.targets:
                if target.exploding:
                    continue
                # Bullet must be inside the target's padded hit box.
                y_hit = (target.y_coor + HIT_BOX_PADDING <= bullet.y_pos
                         <= target.y_coor + target.size - HIT_BOX_PADDING)
                x_hit = (target.x_coor + HIT_BOX_PADDING <= bullet.x_pos
                         <= target.x_coor + target.size - HIT_BOX_PADDING)
                if y_hit and x_hit:
                    self.shooter.bullets.remove(bullet)
                    target.exploding = True
                    self.total_score += (target.score * -1)
                    # BUGFIX: the original `continue`d to the next target with
                    # an already-removed bullet, risking a second remove()
                    # and a ValueError; the bullet is spent, so stop here.
                    break

    def create_random_target(self):
        """Spawn a target at a random x position within the screen."""
        random_x_coor = random.randint(0, SCREEN_WIDTH - 100)
        self.create_target(random_x_coor)