Example No. 1
    def setUpClass(cls):
        cls.comm = cm.Communicator([cm.SYNTH_REQ, cm.READY_REP, cm.DEATH_PUB])

        cls.listen = mp.Process(target=listen, args=(True, ))
        cls.listen.start()

        cm.Waiter(cls.comm, [SYNTH_READY])
Example No. 2
    def setUpClass(cls):
        cls.comm = cm.Communicator([ cm.SENSOR_PUSH, cm.LEARNING_MODE_PUSH,
                                     cm.DEATH_PUB ])
        
        cls.processes = []
        cls.processes.append(mp.Process(target=data.inputs.run, args=(True,)))

        for p in cls.processes:
            p.start()
Example No. 3
    def setUpClass(cls):
        cls.comm = cm.Communicator([
            cm.LEARN_REQ, cm.PLAY_REQ, cm.DEATH_PUB, cm.LEARN_COUNT_SUB,
            cm.FILE_IO_REQ
        ])

        cls.processes = []
        cls.processes.append(mp.Process(target=shape.run))

        for p in cls.processes:
            p.start()
Example No. 4
def run(n_classes=10, noise_std=.1):
    comm = cm.Communicator([
        cm.READY_REP, cm.TRAIN_PUSH, cm.SYNTH_REQ, cm.PLAY_REP, cm.MODEL_PULL,
        cm.LEARN_REP, cm.FILE_IO_REP
    ])

    processes = []
    processes.append(mp.Process(target=core.train, args=(
        N_CLASSES,
        False,
    )))
    processes.append(mp.Process(target=synth.interface.listen))

    for p in processes:
        p.start()

    model = None

    for socket, msg in next(comm):
        if socket == cm.MODEL_PULL:
            model = _load_model(msg)

        if socket == cm.PLAY_REP:
            try:
                gesture = msg[np.newaxis, :]
                gesture_prediction, synth_prms_prediction = model.predict(
                    gesture)
                comm.PLAY_REP_SEND([gesture_prediction, synth_prms_prediction])
            except AttributeError as e:
                print('Model not ready')
                comm.PLAY_REP_SEND(None)

        if socket == cm.LEARN_REP:
            comm.TRAIN_PUSH_SEND(msg)
            comm.LEARN_REP_SEND(True)

        if socket == cm.FILE_IO_REP:
            favourite = '{}/favourite/favourite.h5'.format(PROJECT_ROOT)

            try:
                if msg == ins.LOAD:
                    model = _load_model(favourite)
                if msg == ins.SAVE:
                    model.save(favourite, include_optimizer=False)
                    print('Favourite saved to {}'.format(favourite))
                comm.FILE_IO_REP_SEND(True)
            except Exception as e:
                print('Loading/saving error', e)
                comm.FILE_IO_REP_SEND(False)

    for p in processes:
        p.join()

    print('Shape exit')
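
The helper _load_model is not shown in this listing. A minimal sketch of what it might do, assuming a plain Keras model file on disk (compile=False mirrors the include_optimizer=False save in Example No. 6; the real helper may differ):

from tensorflow.keras.models import load_model


def _load_model(path):
    # Assumed implementation: load the model file produced by the training
    # process, skipping optimizer state since it was not serialized.
    return load_model(path, compile=False)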
Example No. 5
def listen(sync=False):

    comm = cm.Communicator([ cm.SYNTH_REP, cm.READY_REQ ])

    if sync:
        comm.READY_REQ_SEND(SYNTH_READY)
        comm.READY_REQ_RECV()

    pool = mp.Pool()
    for _, (parameters, instrument_name, gesture, plot) in next(comm):
        func = partial(play_and_analyze, instrument_name=instrument_name,
                       gesture=gesture, plot=plot)
        outputs = pool.map(func, parameters)
        comm.SYNTH_REP_SEND(outputs)

    print('Synth interface process exit')
Example No. 6
def train(n_classes, sync=False):
    comm = cm.Communicator(
        [cm.TRAIN_PULL, cm.MODEL_PUSH, cm.READY_REQ, cm.LEARN_COUNT_PUB])

    if sync:
        comm.READY_REQ_SEND(TRAIN_READY)
        comm.READY_REQ_RECV()

    model = None

    learn_counts = 0

    for socket, novelty in next(comm):
        x_gesture, y_synth_prms = novelty

        input_dim = x_gesture.shape[1]
        synth_parameters_dim = y_synth_prms.shape[1]

        if model is None:
            print('Gesture mapper created')
            model = GestureMapper(input_dim, n_classes, synth_parameters_dim)
            model.add_datapoint(x_gesture, y_synth_prms)

        else:
            model.add_datapoint(x_gesture, y_synth_prms)
            model.train()

            # Cannot pickle Keras model, so save to disk.
            model_file = ('/shape/trained_models/'
                          '{}_gesture_mapper_{}.h5').format(
                              input_dim, uuid.uuid4())
            model.model.save(model_file, include_optimizer=False)

            print('Training done, sending model')
            comm.MODEL_PUSH_SEND(model_file)

        learn_counts += 1
        comm.LEARN_COUNT_PUB_SEND(learn_counts)

    print('Training process exit')
Example No. 7
    def setUpClass(cls):

        cls.comm = cm.Communicator([cm.TRAIN_PUSH, cm.MODEL_PULL, cm.DEATH_PUB, cm.READY_REP])
        
        cls.processes = []
        cls.n_classes = 5
        cls.synth_parameters_dim = 7
        cls.audio_features_dim = 10

        cls.processes.append(mp.Process(target=core.train, args=(cls.n_classes, cls.synth_parameters_dim,
                                                                 cls.audio_features_dim, True,)))

        cls.test_gestures = [ (fg.circle,
                               np.random.rand(cls.synth_parameters_dim),
                               np.random.rand(cls.audio_features_dim)),
                               (fg.spiral,
                                np.random.rand(cls.synth_parameters_dim),
                                np.random.rand(cls.audio_features_dim)) ]

        for p in cls.processes:
            p.start()

        waiting = [ core.TRAIN_READY ]
        cm.Waiter(cls.comm, waiting)
Example No. 8
def run(select_lowest_mse=False):
    comm = cm.Communicator([
        cm.SENSOR_PULL, cm.LEARNING_MODE_PULL, cm.LEARN_REQ, cm.PLAY_REQ,
        cm.SYNTH_REQ, cm.SYNTH_PLAY_PUSH, cm.FILE_IO_REQ
    ])

    status = CHILL
    recorder = deque(maxlen=200)
    favourite_log = []

    for socket, msg in next(comm):
        if socket == cm.SENSOR_PULL:
            if status == CHILL:
                continue

            recorder.append(msg)

            if status == PLAY and len(recorder):
                gesture = np.stack(recorder)[-HISTORY_LENGTH:]

                if len(gesture) < HISTORY_LENGTH:
                    gesture = np.pad(gesture,
                                     pad_width=((HISTORY_LENGTH - len(gesture),
                                                 0), (0, 0)),
                                     mode='constant',
                                     constant_values=MASK_VALUE)

                comm.PLAY_REQ_SEND(gesture)
                response = comm.PLAY_REQ_RECV()

                if response is not None:
                    gesture_prediction, synth_prms_prediction = response
                    synth_prms_prediction = np.clip(synth_prms_prediction, 0,
                                                    1)
                    print('{}\r'.format(
                        np.around(np.squeeze(gesture_prediction), decimals=2)),
                          end='')
                    comm.SYNTH_PLAY_PUSH_SEND(synth_prms_prediction)

        if socket == cm.LEARNING_MODE_PULL:
            if msg == SAVE:
                json_filename = '{}/favourite/favourite.json'.format(
                    PROJECT_ROOT)

                out = {i: d for i, d in enumerate(favourite_log)}

                with open(json_filename, 'w') as _file:
                    json.dump(out, _file, indent=4, sort_keys=True)

            if msg in [LOAD, SAVE]:
                comm.FILE_IO_REQ_SEND(msg)
                print(msg, ':', comm.FILE_IO_REQ_RECV())
                continue

            if len(recorder) and status == REC and msg in [PLAY, CHILL]:
                print('Recorded {} samples, making suggestions'.format(
                    len(recorder)))
                gesture = np.stack(recorder)

                # Can deal with both mouse (2D) and Myo (4D)
                X = gesture[:, 0]
                Y = gesture[:, 1]

                plt.plot(X, Y)
                plt.xlim(-.1, 1.1)
                plt.ylim(-.1, 1.1)
                gesture_plot = '{}/sounds/_{}.png'.format(
                    PROJECT_ROOT, SYNTH_INSTR.name)
                plt.savefig(gesture_plot, dpi=300)
                plt.clf()

                parameters = [
                    create(gesture, SYNTH_INSTR.n_parameters)
                    for _ in range(N_EXAMPLES)
                ]

                comm.SYNTH_REQ_SEND(
                    [parameters, SYNTH_INSTR.name, gesture, True])

                sounds = comm.SYNTH_REQ_RECV()
                filenames, similarities = zip(*sounds)
                sounds = list(zip(filenames, similarities, parameters))

                sounds = sorted(sounds, key=lambda L: L[1])

                title = SYNTH_INSTR.name
                html = ('<html><title>{}</title><body><h1>{}</h1>'
                        '<img src="_{}.png" width="50%">'
                        '<hr>').format(title, title, SYNTH_INSTR.name)

                for i, (filename, similarity, _) in enumerate(sounds):
                    html += (
                        '<table><tr><td><b>Candidate {}<br>'
                        'Similarity: {} </b><br><br> <audio controls>'
                        '<source src="{}" type="audio/wav"> </audio></td>'
                        '<td><img src="{}.png" width="60%"> </td></tr></table>'
                        '<hr>').format(i, similarity, filename, filename)

                html += '</body></html>'

                html_file = '{}/sounds/{}.html'.format(PROJECT_ROOT,
                                                       SYNTH_INSTR.name)
                with open(html_file, 'w') as out_file:
                    out_file.write(html)

                if not select_lowest_mse:
                    q = 'Open {} and select your favourite:'.format(html_file)
                    favourite = int(input(q))
                else:
                    favourite = 0

                if favourite > -1:
                    print('You chose {}, similarity {}.'.format(
                        favourite, sounds[favourite][1]))

                    filename, similarity, synth_parameters = sounds[favourite]

                    favourite_log.append([filename, similarity])

                    comm.LEARN_REQ_SEND([gesture, synth_parameters])
                    comm.LEARN_REQ_RECV()
                else:
                    print('None selected, continue.')

            status = msg
            print('STATUS:', status)

            recorder.clear()
Example No. 9
#
#    The Shape is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with The Shape package.
#    If not, see <http://www.gnu.org/licenses/>.
"""
Keyboard control of record enable via ZMQ
"""

import threading
import data.communicator as cm
comm = cm.Communicator([cm.LEARNING_MODE_PUSH])
from data.inputs import REC, PLAY, CHILL, SAVE, LOAD
chill = 0


class KeyboardThread(threading.Thread):
    def __init__(self, input_cbk=None, name='keyboard-input-thread'):
        self.input_cbk = input_cbk
        super(KeyboardThread, self).__init__(name=name)
        self.start()

    def run(self):
        while True:
            self.input_cbk(input())  #waits to get input + Return
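
A minimal, hypothetical sketch of how this keyboard thread might be wired up. The callback name and the key-to-mode mapping below are assumptions; only the <SOCKET>_SEND naming convention is taken from the other examples in this listing:

# Hypothetical wiring: map typed commands to the mode constants imported
# above and push them to SHAPE over the learning-mode socket.
MODES = {'r': REC, 'p': PLAY, 'c': CHILL, 's': SAVE, 'l': LOAD}


def push_mode(line):
    mode = MODES.get(line.strip().lower())
    if mode is not None:
        comm.LEARNING_MODE_PUSH_SEND(mode)
    else:
        print('Unknown command:', line)


KeyboardThread(push_mode)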

Example No. 10
"""
Myo-to-ZMQ
Connects to a Myo, then sends EMG and IMU data as ZMQ messages to SHAPE
"""
from collections import deque
from host_io.myo import *
import argparse
import math
import sys
import time
import numpy as np
import threading
import host_io.zmqKeyboard as kbd  # keyboard control of record enable/disable
from utils.constants import GESTURE_SAMPLING_FREQUENCY
import data.communicator as cm

comm = cm.Communicator([cm.SENSOR_PUSH])

parser = argparse.ArgumentParser(
    description=
    'Connects to a Myo, then sends EMG and IMU data as ZMQ messages to SHAPE.'
)
parser.add_argument('-l',
                    '--log',
                    dest='logging',
                    action="store_true",
                    help='Save Myo data to a log file.')
parser.add_argument(
    '-d',
    '--discover',
    dest='discover',
    action='store_true',
Example No. 11
"""
Example for how to connect externally to a running container.
"""

import time

import zmq
import numpy as np

import data.communicator as cm
from core.faux_gestures import trajectories
from core.candidate import create
from utils.constants import ADDITIVE

comm = cm.Communicator([ cm.LEARN_REQ, cm.PLAY_REQ ])

n = 3

for gesture in trajectories[:n]:
    comm.LEARN_REQ_SEND([ gesture, create(gesture, ADDITIVE.n_parameters) ])
    comm.LEARN_REQ_RECV()

ready = False

while not ready:
    comm.PLAY_REQ_SEND(trajectories[0])
    reply = comm.PLAY_REQ_RECV()

    if reply is not None:
        ready = True
Example No. 12
#
#    You should have received a copy of the GNU General Public License
#    along with The Shape package.
#    If not, see <http://www.gnu.org/licenses/>.
"""
ZMQ server: receives synth parameters and sends them over OSC to the synth
"""

import sys
import numpy as np
from pythonosc.dispatcher import Dispatcher
from pythonosc import osc_server
from pythonosc import udp_client

import data.communicator as cm
comm = cm.Communicator([cm.SYNTH_PLAY_PULL])

# OSC client
send_port = 8903
osc_client = udp_client.SimpleUDPClient(
    "127.0.0.1", send_port)  # OSC Client for sending messages.


def send_to_synth(parameters):
    print(parameters[0])
    osc_client.send_message("/shapesynth", parameters)


while True:
    parameters = comm.SYNTH_PLAY_PULL_RECV()
    parameters = np.squeeze(parameters)
    send_to_synth(parameters)