Code Example #1
 def __init__(self, text_to_speech, speech_to_text):
     Feature.__init__(self)
     Speaking.__init__(self, text_to_speech)
     self.speech_to_text = speech_to_text
     self.neural_network = None
     self.background_image = np.array([])
     self.iris_slide = np.array([])
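Note: every snippet on this page overrides methods of a common Feature base class (and often a Speaking mixin) from the host project, neither of which is shown here. The sketch below is only a guess at the minimal interface the SaltwashAR-style examples rely on; the threading details and the is_stop flag are assumptions, not code from the actual repositories. (The Feature in Code Example #3, from ramp, is an unrelated class that wraps dataset columns.)

import threading

class Feature:
    # Assumed base class: each feature does its work on a background thread.
    def __init__(self):
        self.thread = None
        self.is_stop = False  # assumed flag polled by the worker thread

    def start(self, args=None):
        # Assumption: lazily start the worker thread on the first call.
        if self.thread is None:
            self.is_stop = False
            self.thread = threading.Thread(target=self._thread, args=(args,))
            self.thread.start()

    def stop(self):
        # Assumption: signal the worker thread to wind down.
        self.is_stop = True
        self.thread = None

    def _thread(self, args):
        raise NotImplementedError  # subclasses implement the feature's work here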
Code Example #2
File: television.py Project: MYMSK4K/SaltwashAR
    def stop(self):
        Feature.stop(self)
        
        self.background_image = np.array([])

        if self.video_capture.isOpened():
            self.video_capture.release()
Code Example #3
File: configuration.py Project: vibster/ramp
 def set_attrs(self,
               target=None,
               features=None,
               metrics=None,
               model=None,
               column_subset=None,
               prediction=None,
               predictions_name=None,
               actual=None,
               reporters=None):
     if prediction is not None:
         if predictions_name is None:
             raise ValueError(
                 "If you provide a prediction feature, you "
                 "must also specify a _unique_ 'predictions_name'")
     self.target = target if isinstance(
         target, BaseFeature) or target is None else Feature(target)
     self.prediction = prediction if isinstance(
         prediction,
         BaseFeature) or prediction is None else Feature(prediction)
     self.predictions_name = predictions_name
     if actual is None:
         actual = self.target
     self.actual = actual if isinstance(actual,
                                        BaseFeature) else Feature(actual)
     self.features = [
         f if isinstance(f, BaseFeature) else Feature(f) for f in features
     ] if features else None
     self.metrics = metrics or []
     self.model = model
     self.column_subset = column_subset
     self.reporters = reporters or []
     for r in self.reporters:
         r.set_config(self)
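The set_attrs above normalizes every argument with the same idiom: a value that is already a BaseFeature (or None) passes through, anything else is wrapped in Feature. A self-contained illustration of that pattern follows; the BaseFeature and Feature classes and the column names here are stand-ins for this sketch, not ramp's real classes or data.

class BaseFeature:
    def __init__(self, name):
        self.name = name

class Feature(BaseFeature):
    pass

def normalize(value):
    # Pass through None and ready-made features; wrap anything else.
    return value if isinstance(value, BaseFeature) or value is None else Feature(value)

print(normalize(None))                   # None stays None
print(normalize('price').name)           # a plain column name is wrapped: Feature('price')
print(normalize(Feature('total')).name)  # an existing Feature passes through unchanged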
Code Example #4
File: shapes.py Project: drat/SaltwashAR
 def __init__(self, text_to_speech):
     Feature.__init__(self)
     Speaking.__init__(self, text_to_speech)
     self.is_pyramid = False
     self.is_cube = False
     self.rotation = 0
     self.background_image = np.array([])
     self.speech_thread = None
Code Example #5
File: shapes.py Project: drat/SaltwashAR
    def start(self, args=None):
        Feature.start(self, args)
 
        # draw rotating pyramid or cube
        self.rotation += 1

        if self.is_pyramid:
            draw_pyramid(self.rotation)
        elif self.is_cube:
            draw_cube(self.rotation)
Code Example #6
File: slideshow.py Project: PhloxAR/irobot
 def __init__(self, text_to_speech):
     Feature.__init__(self)
     Speaking.__init__(self, text_to_speech)
     self.background_image = np.array([])
     self.slides = []
     self.blurbs = []
     self.current_item = 0
     self.current_slide = np.array([])
     self.blurb_thread = None
     self._get_slides_and_blurbs()
Code Example #7
    def start(self, args=None):
        image = args
        Feature.start(self, args)
 
        # if slide, add to background image
        if self.iris_slide.size > 0:
            slide_offset_and_height = self.SLIDE_OFFSET + self.iris_slide.shape[0]
            slide_offset_and_width = self.SLIDE_OFFSET + self.iris_slide.shape[1] 
        
            image[self.SLIDE_OFFSET:slide_offset_and_height, self.SLIDE_OFFSET:slide_offset_and_width] = self.iris_slide
            self.background_image = image
        else:
            self.background_image = np.array([])
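The start method above pastes the current slide into the camera frame with plain NumPy slicing. A tiny standalone version of that slicing pattern, with made-up sizes and offset, looks like this:

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)       # stand-in camera frame
slide = np.full((120, 160, 3), 255, dtype=np.uint8)    # stand-in slide image
offset = 10                                            # plays the role of SLIDE_OFFSET

# Copy the slide into a same-sized region of the frame, as in the snippet above.
frame[offset:offset + slide.shape[0], offset:offset + slide.shape[1]] = slide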
Code Example #8
File: weather.py Project: drat/SaltwashAR
    def start(self, args=None):
        Feature.start(self, args)
 
        # enable fog if cloudy
        if self.is_cloudy:
            glFogi(GL_FOG_MODE, GL_LINEAR)
            glFogfv(GL_FOG_COLOR, (0.5, 0.5, 0.5, 1.0))
            glFogf(GL_FOG_DENSITY, 0.35)
            glHint(GL_FOG_HINT, GL_NICEST)
            glFogf(GL_FOG_START, 1.0)
            glFogf(GL_FOG_END, 5.0)
            glEnable(GL_FOG)
        else:
            glDisable(GL_FOG)
Code Example #9
    def __init__(self, text_to_speech, speech_to_text):
        Feature.__init__(self)

        # setup AV Table
        self.av_table = GameTable(13, 2)
        if not self.av_table.loadParameters():
            self.av_table.initialize(0.)

        # setup a Q-Learning agent
        learner = Q(0.5, 0.0)
        learner._setExplorer(EpsilonGreedyExplorer(0.0))
        self.agent = LearningAgent(self.av_table, learner)

        # setup game interaction
        self.game_interaction = GameInteraction(text_to_speech, speech_to_text)

        # setup environment
        environment = GameEnvironment(self.game_interaction)

        # setup task
        task = GameTask(environment, self.game_interaction)

        # setup experiment
        self.experiment = Experiment(task, self.agent)
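The constructor above only wires the pieces together; it does not show how the experiment is stepped. One plausible way to drive it, sketched as a hypothetical method (the name _run_episode is invented, but doInteractions, learn and reset are standard PyBrain calls):

    def _run_episode(self):
        # Illustrative only, not taken from the SaltwashAR source.
        self.experiment.doInteractions(1)  # one observation/action/reward cycle
        self.agent.learn()                 # update the Q-table from that interaction
        self.agent.reset()                 # clear the agent's history before the next cycle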
Code Example #10
 def __init__(self, text_to_speech):
     Feature.__init__(self)
     Speaking.__init__(self, text_to_speech)
Code Example #11
File: irisclassifier.py Project: drat/SaltwashAR
 def stop(self):
     Feature.stop(self)
     self.background_image = np.array([])
Code Example #12
 def __init__(self, speech_to_text):
     Feature.__init__(self)
     self.is_speaking = False
     self.speech_to_text = speech_to_text
     self.phrases = self._load_config()
     pygame.mixer.init()
Code Example #13
    def mark_tags(weights):
        """标注全部目标选框"""
        new_image = image.copy()
        drawer = ImageDraw.Draw(new_image)
        print('\nMarking target bounding boxes:')
        final_weight = lambda xy: reduce(
            lambda l, r: l + weights[r] * features[r].weight(xy),
            range(len(features)), 0)
        cap = weights[6] if len(weights) > 6 else 0.5
        is_tag = lambda xy: final_weight(xy) > cap
        visited = {}

        def mark_tag(xy):
            """标注目标选框"""
            x0, y0, x1, y1 = xy[0], xy[1], xy[0], xy[1]
            count = 0
            stack = [xy]
            while len(stack):
                xy = stack.pop()
                visited[xy] = True
                if xy[0] < x0:
                    x0 = xy[0]
                elif xy[0] > x1:
                    x1 = xy[0]
                if xy[1] < y0:
                    y0 = xy[1]
                elif xy[1] > y1:
                    y1 = xy[1]
                # grow the region into nearby pixels
                for x in range(max(0, xy[0] - 10), min(width, xy[0] + 10)):
                    for y in range(max(0, xy[1] - 10), min(height,
                                                           xy[1] + 10)):
                        if (x, y) not in visited and is_tag((x, y)):
                            count += 1
                            stack.append((x, y))
                        visited[(x, y)] = True
            x0, y0, x1, y1 = (
                max(0, x0 - 10),
                max(0, y0 - 10),
                min(width, x1 + 10),
                min(height, y1 + 10),
            )
            for x in range(x0, x1):
                for y in range(y0, y1):
                    visited[(x, y)] = True
            # drop noise: skip regions that are too small
            if (x0 - x1) * (y0 - y1) < 1600 or count < 120:
                return
            # print(x0, y0, x1, y1)
            for ex in range(3):
                drawer.rectangle((x0 + ex, y0 + ex, x1 - ex, y1 - ex),
                                 outline='#f00')

        final_feature = Feature(width, height)

        def each_pix(xy, rgb):
            """每个像素点处理"""
            weight = final_weight(xy)
            if weight > cap and xy not in visited:
                final_feature.point(xy, 255)
                mark_tag(xy)
            else:
                final_feature.point(xy, 255 * weight)
            # if is_tag(xy) and xy not in visited:
            #     mark_tag(xy)

        pix_iter(each_pix)
        print_progress('OK')
        # Thread(target=new_image.show, name=path).start()
        new_image.save(main.outdir + 'result-marked.jpg')
        # final_feature.show()
        final_feature.weight_image.save(main.outdir + 'result-weight.jpg')
Code Example #14
File: weather.py Project: drat/SaltwashAR
 def stop(self):
     Feature.stop(self)
     
     # disable fog
     glDisable(GL_FOG)
     self.is_cloudy = False
Code Example #15
File: television.py Project: MYMSK4K/SaltwashAR
 def __init__(self):
     Feature.__init__(self)
     self.background_image = np.array([])
     self.video_capture = cv2.VideoCapture()
Code Example #16
File: weather.py Project: drat/SaltwashAR
 def __init__(self, text_to_speech, speech_to_text):
     Feature.__init__(self)
     Speaking.__init__(self, text_to_speech)
     self.speech_to_text = speech_to_text
     self.is_cloudy = False
Code Example #17
File: mixingdesk.py Project: PhloxAR/irobot
 def __init__(self, text_to_speech, speech_to_text):
     Feature.__init__(self)
     Speaking.__init__(self, text_to_speech)
     self.speech_to_text = speech_to_text
     self.recognizer = sr.Recognizer()
     pygame.mixer.init(frequency=8000)
Code Example #18
File: audioclassifier.py Project: MYMSK4K/SaltwashAR
 def __init__(self, text_to_speech):
     Feature.__init__(self)
     Speaking.__init__(self, text_to_speech)
     self.neural_network = None