Example #1
    def getText(self, text, colour):
        pixelsArray = []
        resolution = (self.height, self.width)

        lines = None
        if self.effect == 'scroll':
            lines = [text.replace("|", " ")]
        else:
            lines = text.split("|")

        for line in lines:
            image = self.__getImage(line, colour)
            textPixels = Pixels(image)
            if self.effect == 'scroll':
                for i in range(0, image.width + self.width + 1):
                    columns = (i, i + self.width)
                    pixelArray = textPixels.convert(resolution, self.effect,
                                                    columns)
                    pixelsArray.append(pixelArray)
            else:
                frameWidth, letterWidth = self.__getFrameWidth(line)
                frames = 1
                if frameWidth + letterWidth <= image.width:
                    frames = math.ceil(image.width / frameWidth)
                columns = (0, frameWidth)
                for i in range(0, frames):
                    pixelArray = textPixels.convert(resolution, self.effect,
                                                    columns)
                    pixelsArray.append(pixelArray)
                    columns = (columns[0] + frameWidth,
                               columns[1] + frameWidth)

        return pixelsArray
Example #2
    def serve_forever(self):
        # [function] -> None

        self.serve = True

        start_time = time.time()
        fps_frame_time = 1.0 / self.fps

        print '\tsending pixels forever (quit or control-c to exit)...'
        self.is_running = True
        pixels = Pixels(STATE.layout)

        while self.serve:
            # TODO : Does this create lots of GC?
            frame_start_time = time.time()
            t = frame_start_time - start_time
            target_frame_end_time = frame_start_time + fps_frame_time

            # Create the pixels, set all, then put
            pixels[:] = 0
            self.curr_scene.render(pixels, t, self.get_osc_frame())
            # TODO: Check for None and replace with (0, 0, 0)
            self.opc_client.put_pixels(pixels.pixels, channel=0)

            # Crude way of trying to hit target fps
            wait_for_next_frame(target_frame_end_time)

            # TODO: channel?

        self.is_running = False
        print "Scene Manager Exited"
Example #3
    def test2Array(self):
        try:
            if self.config.has_key("2array_mic_hat"):
                sys.path.append(self.config["2array_mic_hat"])
                from pixels import Pixels
                pixels = Pixels()
                pixels.wakeup()
                time.sleep(3)
                pixels.off()
                time.sleep(2)
                pixels.think()
                time.sleep(3)
                for i in range(0, 5):
                    pixels.speak()
                    time.sleep(1)

                pixels.off()
                time.sleep(1)
        except Exception as e:
            print e.message
Example #4
class Pixels:
    from pixels import Pixels
    pixels = Pixels()
    thinking = False

    @classmethod
    def wakeup(cls):
        cls.pixels.wakeup()

    @classmethod
    def think(cls):
        cls.pixels.think()

    @classmethod
    def speak(cls):
        cls.thinking = False
        cls.pixels.spin()

    @classmethod
    def off(cls):
        cls.thinking = False
        cls.pixels.off()
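Because every method on the wrapper above is a classmethod sharing one pixels instance, callers can drive the LEDs without creating their own object. A short usage sketch (assumed for illustration, not part of the source example):

import time

Pixels.wakeup()   # hypothetical usage: start-up animation on the shared LED ring
time.sleep(2)
Pixels.think()    # "thinking" animation
time.sleep(2)
Pixels.off()      # turn the LEDs off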
Example #5
import os
import sys

import requests

sys.path.append('/home/pi/projects/lumin/Lumin_FW_Src/audio_application/python/lumota')
from pixels import Pixels

def check_version():
    fd = os.open('/home/pi/projects/lumin/Lumin_FW_Src/audio_application/python/lumota/mversion.txt', os.O_RDONLY)
    readBytes = os.read(fd, 50)

    os.close(fd)

    return readBytes.decode('utf-8')
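For reference, check_version could be written more idiomatically with a context manager; a sketch of an equivalent (assumed) rewrite of the same function:

def check_version():
    # Equivalent sketch (not the original code): the file object handles
    # closing, and read() already returns str in text mode.
    version_path = '/home/pi/projects/lumin/Lumin_FW_Src/audio_application/python/lumota/mversion.txt'
    with open(version_path, 'r') as fh:
        return fh.read(50)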


if __name__ == '__main__':
    pixels = Pixels()
    pixels.ota()

    # os.system("espeak --stdout 'Updating, please wait.' | aplay -Dsysdefault")

    timezone = None
    while True:
        r = requests.get("http://worldtimeapi.org/api/ip.txt")
        out = r.content.decode('utf-8')

        for line in out.splitlines():
            tokens = line.split(':')
            if tokens[0] == 'timezone':
                timezone = (tokens[-1]).strip()
                os.system("timedatectl set-timezone {0}".format(timezone))
                break
Example #6
def main():

    path = os.path.dirname(sys.argv[0])

    imagesFolderPath = path + "/" + imagesFolder
    animationFolderPath  = path + "/" + animationFolder
    fontFullPath = path + "/" + fontPath

    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--show', default=False, type=bool, help='Show animation')
    parser.add_argument('--update', default=False, type=bool, help='Re-create structures from existing images (default: False). True if images should be re-created')
    parser.add_argument('--delay', default=1000, type=int, help='Set delay in ms between animation frames - image, text, and effect (default: 1000)')
    parser.add_argument('--colour', default=[255, 255, 255], type=int, nargs=3, help='Set font colour in RGB format - 3 values between 0 and 255 (default: 255 255 255)')
    parser.add_argument('--parameters', default=[255, 255, 255], type=int, nargs='*', help='Set parameters for effect. For snakes use colour in RGB format. For rainbow use colour increment speed (integer) and number of frames')
    parser.add_argument('--font_size', default='large', help='Enter size of the font: small or large (default: large)')
    parser.add_argument('--font_effect', default='static', help='Enter effect for font: static or scroll (default: static)')
    parser.add_argument('--image', help='Enter image name to get animation. Required option is delay')
    parser.add_argument('--text', help='Enter text to get animation. Required options are: colour, font and delay')
    parser.add_argument('--effect', help='Enter effect name to get animation. Required options are: parameters and delay')
    parser.add_argument('--images', help='Get list of all available image names to be used in image option')
    parser.add_argument('--effects', help='Get list of all available effect names to be used in effect option')
    parser.add_argument('--test', help='Get test matrix (only one frame)')
    # parser.print_help()

    args = parser.parse_args()
    args.colour = (args.colour[0], args.colour[1], args.colour[2])

    if args.update == True:
        # http://www.iconarchive.com/show/arcade-saturdays-icons-by-mad-science/Bashfull-Inky-icon.html
        # http://www.iconarchive.com/show/square-animal-icons-by-martin-berube.html
        for filename in os.listdir(imagesFolderPath):
            pixels = Pixels(imagesFolderPath, filename)
            pixels.save(resolution, animationFolderPath)

    if args.images != None:
        images = []
        for filename in os.listdir(animationFolderPath):
            file, ext = os.path.splitext(filename)
            name = file.split("_")[0]
            if name not in images:
                images.append(name)
        print(images)
        exit(0)

    if args.effects != None:
        effects = Effects()
        print(effects.list())
        exit(0)

    if args.image != None:
        animationObject = Animation(animationFolderPath, args.image)
        animation = animationObject.get(args.delay)
        animationJson = json.dumps(animation)
        print(animationJson)
        if args.show == True:
            animationObject.show(animation, resolution)
        exit(0)

    if args.text != None:
        fonts = Fonts(fontFullPath, args.font_size, args.font_effect, resolution)
        letterImages = fonts.getText(args.text, args.colour)
        animationObject = Animation(letterImages)
        animation = animationObject.get(args.delay)
        animationJson = json.dumps(animation)
        print(animationJson)
        if args.show == True:
            animationObject.show(animation, resolution)
        exit(0)

    if args.effect != None:
        effects = Effects(args.effect, resolution)
        pixelsArray = effects.get(args.parameters)
        animationObject = Animation(pixelsArray)
        animation = animationObject.get(args.delay)
        animationJson = json.dumps(animation)
        print(animationJson)
        if args.show == True:
            animationObject.show(animation, resolution)
        exit(0)

    if args.test != None:
        test = Test(resolution)
        pixelsArray = test.get()
        animationObject = Animation(pixelsArray)
        animation = animationObject.get(args.delay)
        animationJson = json.dumps(animation)
        print(animationJson)
        if args.show == True:
            animationObject.show(animation, resolution)
        exit(0)
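A hypothetical invocation of the CLI defined above (the script name and argument values are assumptions for illustration):

    python matrix.py --text "HELLO|WORLD" --colour 255 0 0 --font_size large --font_effect scroll --delay 500

This prints the animation frames as JSON; passing --show True additionally previews them. Note that --show and --update are parsed with type=bool, so any non-empty value (including "False") is treated as True.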
Example #7
import _thread
import time
from subprocess import call

import vlc
import RPi.GPIO as GPIO
from pixels import Pixels

#Current state of motion detect
# 0: No motion detected
# 1: Motion detected and remain in frame
state = 0

stop_threads = False
#Setup player to play music
Instance = vlc.Instance()
player = Instance.media_player_new()

#RGB LED
pixels = Pixels()  #To control the RGB LED

BUTTON_PIN = 17  #Power off button on top of ReSpeaker Hat
PIR_PIN = 12  #PIR Sensor Pin connection on ReSpeaker Hat
GPIO.setmode(GPIO.BCM)
GPIO.setup(BUTTON_PIN, GPIO.IN)
GPIO.setup(PIR_PIN, GPIO.IN)


#Function for blinking LED
def ledBlink():
    global pixels
    pixels.wakeup()
    pixels.think()
    time.sleep(8)
    pixels.off()
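The rest of this example is not shown. A sketch of how the pieces above are typically wired together (assumed, not the original code): poll the PIR pin and run ledBlink in a background thread while motion is in frame. The VLC player set up above would also be started here; the sketch leaves that out.

while True:
    if GPIO.input(PIR_PIN) and state == 0:
        # Motion just started: remember it and blink the LEDs in the background.
        state = 1
        _thread.start_new_thread(ledBlink, ())
    elif not GPIO.input(PIR_PIN):
        # No motion in frame any more.
        state = 0
    time.sleep(0.2)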
Example #8
                wait_time = wait_time - 1
                if wait_time == 0:
                    break

            try:
                recognized_text = r.recognize_google(audio)
            except sr.UnknownValueError as e:
                pass
            except sr.RequestError as e:
                logger.error("service is down")
                pass
            os.remove(filename)
            return recognized_text


px = Pixels()  # Initializing the Pixel class for RE-SPEAKER PiHAT LED.
px.wakeup()
time.sleep(2)
px.off()

a = voice()  # Initializing the voice class.
"""
Infinite loop:
    1. Reading microphone for 3 sec and generation .wav file.
    2. Creating thread with voice_command_processor() method for converting speech to text.
    3. IF wake word is detected (in my case Gideon):

        a. Clearing recognized_text global variable.
        b. Turing on the LED.
        c. Audio reply with "how can i help you"
        d. Start reading from pyaudio stream for next 5 sec for question.
"""
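The example is cut off here; the loop the docstring describes is not shown. A minimal sketch of such a loop, assuming the speech_recognition package and the px LED helper initialised above (an illustration, not the original code):

import time
import speech_recognition as sr

WAKE_WORD = "gideon"  # wake word named in the docstring
recognizer = sr.Recognizer()

while True:
    # 1. Record roughly 3 seconds of audio from the default microphone.
    with sr.Microphone() as source:
        audio = recognizer.listen(source, phrase_time_limit=3)
    try:
        # 2. Convert the clip to text (the original offloads this to a thread).
        heard = recognizer.recognize_google(audio).lower()
    except (sr.UnknownValueError, sr.RequestError):
        continue
    if WAKE_WORD in heard:
        # 3. Wake word detected: light the LEDs and capture the question.
        px.wakeup()
        with sr.Microphone() as source:
            question = recognizer.listen(source, phrase_time_limit=5)
        px.off()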
Example #9
def main(id=None):
    upload_path = "static/done/"
    temp_path = "img/"

    entries = (os.path.join(upload_path, fn) for fn in os.listdir(upload_path))
    entries = ((os.stat(path), path) for path in entries)
    entries = ((stat[ST_CTIME], path) for stat, path in entries
               if S_ISREG(stat[ST_MODE]))
    sorted_entries = sorted(entries)
    sorted_ids = [
        cpath.split("/")[-1][:-4] for ctime, cpath in sorted_entries
        if "orig_" not in cpath
    ]

    if len(sorted_entries) > MAX_FILES:
        for i in range(0, len(sorted_entries) - MAX_FILES):
            path = sorted_entries[i][1]
            if "example" not in path:
                os.remove(path)

    if request.method == "POST":
        file = request.files["file"]
        if file:
            id = generate_slug(2)
            orig_file_path = upload_path + "orig_" + id + ".png"
            done_file_path = upload_path + id + ".png"

        img = Image.open(file)
        img = fix_orientation(img)

        if img.width > MAX_WIDTH:
            ratio = img.width / MAX_WIDTH
            img = img.resize((int(img.width / ratio), int(img.height / ratio)))
        elif img.height > MAX_HEIGHT:
            ratio = img.height / MAX_HEIGHT
            img = img.resize((int(img.width / ratio), int(img.height / ratio)))

        img.save(orig_file_path)

        faces = detect_face(orig_file_path)

        os.remove(orig_file_path)

        if len(faces) > 0:
            face = faces[0]
            pixels = Pixels(img, faces)
            pixels.faceSwap()
            emotions = pixels.getEmotions(faces[0])
            max_emotion_level = 0
            max_emotion = "neutral"
            for emotion, level in emotions.items():
                if level > max_emotion_level:
                    max_emotion_level = level
                    max_emotion = emotion

            id = max_emotion + "-" + id

            orig_file_path = upload_path + "orig_" + id + ".png"
            done_file_path = upload_path + id + ".png"

            img.save(orig_file_path)

            img.putdata(pixels.data)
            img.save(done_file_path)
        else:
            img.save(orig_file_path)
            img.save(done_file_path)

        return redirect("/" + id)
    else:
        if len(sorted_ids) > 0:
            if not id:
                return redirect("/" + choice(sorted_ids))
            try:
                curr_idx = sorted_ids.index(id)
                # Wrap around at both ends of the gallery.
                prev_id = sorted_ids[(curr_idx - 1) % len(sorted_ids)]
                next_id = sorted_ids[(curr_idx + 1) % len(sorted_ids)]
            except:
                rand_id = choice(sorted_ids)
                return redirect("/" + rand_id)

            if "-" in id:
                emotion = id.split("-")[0]
            else:
                emotion = "example"
            return render_template("main.html",
                                   emotion=emotion,
                                   id=id,
                                   prev_id=prev_id,
                                   next_id=next_id)
        return render_template("main.html")