Example 1
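A unittest setUpClass hook: it builds a single GestureMapper sized from the test trajectories and seeds it with one datapoint per trajectory, each paired with synth parameters produced by create().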
    @classmethod
    def setUpClass(cls):
        input_dim = trajectories[0].shape[1]
        n_classes = len(trajectories)
        synth_parameters_dim = ADDITIVE.n_parameters
        cls.model = GestureMapper(input_dim, n_classes, synth_parameters_dim)

        # Seed the model with one datapoint per test trajectory.
        for x in trajectories:
            cls.model.add_datapoint(x, create(x, synth_parameters_dim))
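core.candidate.create() itself is not shown in these examples. Judging from its call sites (it takes a gesture and a parameter count) and from Example 6, where predicted parameters are clipped to [0, 1], it plausibly returns a random parameter vector in that range. A hypothetical stand-in, useful only for reading these tests in isolation:

import numpy as np

def create(gesture, n_parameters):
    # Hypothetical stand-in for core.candidate.create: one random
    # value in [0, 1] per synth parameter.
    return np.random.uniform(0.0, 1.0, size=n_parameters)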
Example 2
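An evaluation helper: for each named test trajectory it saves a PNG plot, asks the synth process to render a batch of candidate parameter sets, and writes one HTML page per (instrument, trajectory) pair embedding the plot and the rendered sounds, ordered by similarity score.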
    def evaluate(self, instrument_name, n_parameters):
        trajectory_names = [
            'zero', 'circle', 'line', 'r_line', 'sine', 'mega_sine', 'spiral',
            'tanh', 'random'
        ]
        for trajectory_name, trajectory in zip(trajectory_names, trajectories):
            X = trajectory[:, 0]
            Y = trajectory[:, 1]
            plt.plot(X, Y)
            plt.xlim(-.1, 1.1)
            plt.ylim(-.1, 1.1)
            gesture_plot = '/shape/sounds/_{}.png'.format(trajectory_name)
            plt.savefig(gesture_plot, dpi=300)
            plt.clf()

            n = 8  # candidate parameter sets to render per trajectory
            parameters = [create(trajectory, n_parameters) for _ in range(n)]

            self.comm.SYNTH_REQ_SEND(
                [parameters, instrument_name, trajectory, True])

            sounds = self.comm.SYNTH_REQ_RECV()
            # Order the rendered candidates by similarity score.
            sounds = sorted(sounds, key=lambda s: s[1])

            title = '{}:{}'.format(instrument_name, trajectory_name)
            html = ('<html><title>{}</title><body><h1>{}</h1>'
                    '<img src="_{}.png" width="50%">'
                    '<hr>').format(title, title, trajectory_name)

            for filename, similarity in sounds:
                html += (
                    '<table><tr><td><b> {} </b><br><br> <audio controls>'
                    '<source src="{}" type="audio/wav"> </audio></td>'
                    '<td><img src="{}.png" width="60%"> </td></tr></table>'
                    '<hr>').format(similarity, filename, filename)

            html += '</body></html>'

            html_file = '/shape/sounds/{}_{}.html'.format(
                instrument_name, trajectory_name)
            with open(html_file, 'w') as out_file:
                out_file.write(html)
Example 3
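An end-to-end test of the learn/play cycle: two trajectories are submitted for learning, the test waits for the learn counter to confirm both, checks that each gesture is classified as its own class, and finally exercises the save/load round trip.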
    def test_learn_predict(self):
        n = 2

        for gesture in trajectories[:n]:
            self.comm.LEARN_REQ_SEND(
                [gesture, create(gesture, ADDITIVE.n_parameters)])
            self.comm.LEARN_REQ_RECV()

        # Block until the learn counter confirms that both
        # datapoints have been absorbed.
        for socket, msg in next(self.comm):
            if socket == cm.LEARN_COUNT_SUB and msg == n:
                break

        for i, gesture in enumerate(trajectories[:n]):
            self.comm.PLAY_REQ_SEND(gesture)
            gesture_prediction, synth_prms_prediction = \
                self.comm.PLAY_REQ_RECV()
            self.assertEqual(i, np.argmax(gesture_prediction))

        self.comm.FILE_IO_REQ_SEND(SAVE)
        self.assertTrue(self.comm.FILE_IO_REQ_RECV())

        self.comm.FILE_IO_REQ_SEND(LOAD)
        self.assertTrue(self.comm.FILE_IO_REQ_RECV())
Example 4
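A check that the synthesis request path accepts 4-D input, built by stacking two of the 2-D test trajectories side by side.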
    def test_4d_input(self):
        # Stack two 2-D trajectories side by side to form a 4-D gesture.
        gesture = np.hstack([trajectories[1], trajectories[3]])
        self.comm.SYNTH_REQ_SEND([[create(gesture, ADDITIVE.n_parameters)],
                                  ADDITIVE.name, gesture, True])
        sounds = self.comm.SYNTH_REQ_RECV()
Example 5
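The same request path exercised with a random 3-D gesture.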
    def test_3d_input(self):
        # A random 3-D gesture: 20 samples, three input dimensions.
        gesture = np.random.rand(20, 3)
        self.comm.SYNTH_REQ_SEND([[create(gesture, ADDITIVE.n_parameters)],
                                  ADDITIVE.name, gesture, True])
        sounds = self.comm.SYNTH_REQ_RECV()
Example 6
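The main interaction loop: it records incoming sensor data, turns a recorded gesture into candidate sounds presented on an HTML page, learns the user's favourite, and in PLAY mode streams clipped synth-parameter predictions for the live gesture to the synthesizer.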
from collections import deque
import json

import matplotlib.pyplot as plt
import numpy as np

import data.communicator as cm
from core.candidate import create
# The constants used below (CHILL, REC, PLAY, SAVE, LOAD, SYNTH_INSTR,
# HISTORY_LENGTH, MASK_VALUE, N_EXAMPLES, PROJECT_ROOT) are project-level;
# presumably they live in utils.constants, as in Example 7.


def run(select_lowest_mse=False):
    comm = cm.Communicator([
        cm.SENSOR_PULL, cm.LEARNING_MODE_PULL, cm.LEARN_REQ, cm.PLAY_REQ,
        cm.SYNTH_REQ, cm.SYNTH_PLAY_PUSH, cm.FILE_IO_REQ
    ])

    status = CHILL
    recorder = deque(maxlen=200)
    favourite_log = []

    for socket, msg in next(comm):
        if socket == cm.SENSOR_PULL:
            if status == CHILL:
                continue

            recorder.append(msg)

            if status == PLAY and len(recorder):
                gesture = np.stack(recorder)[-HISTORY_LENGTH:]

                if len(gesture) < HISTORY_LENGTH:
                    gesture = np.pad(gesture,
                                     pad_width=((HISTORY_LENGTH - len(gesture),
                                                 0), (0, 0)),
                                     mode='constant',
                                     constant_values=MASK_VALUE)

                comm.PLAY_REQ_SEND(gesture)
                response = comm.PLAY_REQ_RECV()

                if response is not None:
                    gesture_prediction, synth_prms_prediction = response
                    synth_prms_prediction = np.clip(synth_prms_prediction, 0,
                                                    1)
                    print('{}\r'.format(
                        np.around(np.squeeze(gesture_prediction), decimals=2)),
                          end='')
                    comm.SYNTH_PLAY_PUSH_SEND(synth_prms_prediction)

        if socket == cm.LEARNING_MODE_PULL:
            if msg == SAVE:
                json_filename = '{}/favourite/favourite.json'.format(
                    PROJECT_ROOT)

                out = {i: d for i, d in enumerate(favourite_log)}

                with open(json_filename, 'w') as _file:
                    json.dump(out, _file, indent=4, sort_keys=True)

            if msg in [LOAD, SAVE]:
                comm.FILE_IO_REQ_SEND(msg)
                print(msg, ':', comm.FILE_IO_REQ_RECV())
                continue

            if len(recorder) and status == REC and msg in [PLAY, CHILL]:
                print('Recorded {} samples, making suggestions'.format(
                    len(recorder)))
                gesture = np.stack(recorder)

                # Works for both mouse (2-D) and Myo (4-D) input;
                # only the first two dimensions are plotted.
                X = gesture[:, 0]
                Y = gesture[:, 1]

                plt.plot(X, Y)
                plt.xlim(-.1, 1.1)
                plt.ylim(-.1, 1.1)
                gesture_plot = '{}/sounds/_{}.png'.format(
                    PROJECT_ROOT, SYNTH_INSTR.name)
                plt.savefig(gesture_plot, dpi=300)
                plt.clf()

                parameters = [
                    create(gesture, SYNTH_INSTR.n_parameters)
                    for _ in range(N_EXAMPLES)
                ]

                comm.SYNTH_REQ_SEND(
                    [parameters, SYNTH_INSTR.name, gesture, True])

                sounds = comm.SYNTH_REQ_RECV()
                filenames, similarities = zip(*sounds)
                # Reattach each candidate's parameter vector to its
                # (filename, similarity) pair, then order by similarity.
                sounds = list(zip(filenames, similarities, parameters))
                sounds = sorted(sounds, key=lambda s: s[1])

                title = SYNTH_INSTR.name
                html = ('<html><title>{}</title><body><h1>{}</h1>'
                        '<img src="_{}.png" width="50%">'
                        '<hr>').format(title, title, SYNTH_INSTR.name)

                for i, (filename, similarity, _) in enumerate(sounds):
                    html += (
                        '<table><tr><td><b>Candidate {}<br>'
                        'Similarity: {} </b><br><br> <audio controls>'
                        '<source src="{}" type="audio/wav"> </audio></td>'
                        '<td><img src="{}.png" width="60%"> </td></tr></table>'
                        '<hr>').format(i, similarity, filename, filename)

                html += '</body></html>'

                html_file = '{}/sounds/{}.html'.format(PROJECT_ROOT,
                                                       SYNTH_INSTR.name)
                with open(html_file, 'w') as out_file:
                    out_file.write(html)

                if not select_lowest_mse:
                    q = 'Open {} and select your favourite:'.format(html_file)
                    favourite = int(input(q))
                else:
                    favourite = 0

                # Entering -1 at the prompt skips learning altogether.
                if favourite > -1:
                    print('You chose {}, similarity {}.'.format(
                        favourite, sounds[favourite][1]))

                    filename, similarity, synth_parameters = sounds[favourite]

                    favourite_log.append([filename, similarity])

                    comm.LEARN_REQ_SEND([gesture, synth_parameters])
                    comm.LEARN_REQ_RECV()
                else:
                    print('None selected, continue.')

            status = msg
            print('STATUS:', status)

            recorder.clear()
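The padding step near the top of run() left-pads a short gesture to HISTORY_LENGTH rows with a mask value, so the model always receives a fixed-length input. A minimal, self-contained illustration of that np.pad call, with stand-in values for the project's HISTORY_LENGTH and MASK_VALUE constants:

import numpy as np

HISTORY_LENGTH = 5   # stand-in for the project constant
MASK_VALUE = -1.0    # stand-in for the project constant

gesture = np.random.rand(3, 2)  # only 3 of the 5 required samples
padded = np.pad(gesture,
                pad_width=((HISTORY_LENGTH - len(gesture), 0), (0, 0)),
                mode='constant',
                constant_values=MASK_VALUE)
print(padded.shape)  # (5, 2); the first two rows are all MASK_VALUE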
Example 7
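A small smoke-test script: it teaches the model three trajectories, then polls the play endpoint until the model is ready to answer.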
import time

import data.communicator as cm
from core.faux_gestures import trajectories
from core.candidate import create
from utils.constants import ADDITIVE

comm = cm.Communicator([cm.LEARN_REQ, cm.PLAY_REQ])

n = 3

for gesture in trajectories[:n]:
    comm.LEARN_REQ_SEND([gesture, create(gesture, ADDITIVE.n_parameters)])
    comm.LEARN_REQ_RECV()

ready = False

while not ready:
    comm.PLAY_REQ_SEND(trajectories[0])
    reply = comm.PLAY_REQ_RECV()

    if reply is not None:
        ready = True
    else:
        print('Model not ready, waiting 5 seconds before trying again.')
        time.sleep(5)
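The readiness check relies on PLAY_REQ_RECV() returning None while the model is still training, just as run() in Example 6 tests the response before forwarding a prediction; once a real reply arrives, the loop exits.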