Exemple #1
0
    def setup(self):
        """Initialise the selected output path: PWM on a GPIO pin, or a DAC."""
        if not self.pwm:
            # Analogue path: drive the output through a DAC device.
            self.dac = DAC(self.io)
            self.dac.setup()
        else:
            # PWM path: configure the pin and start with the output low.
            self.io.setMode(self.pin)
            self.io.write(self.pin, 0)
    def __init__(self, namespace):
        """Build the main application window.

        namespace: parsed CLI arguments; namespace.score is the score file
        opened at startup.
        """
        Tk.__init__(self, className="Koshka")

        # Audio output shared by every sequencer this window opens.
        self.dac = DAC()
        self.dac.start()

        self.score_path = namespace.score

        self.sequencer = None
        self.sequencer_frame = None
        self.mixer_window = None
        self.scale_window = None
        self._open_score(self.score_path)

        menu = Menu(self)
        self.config(menu=menu)
        filemenu = Menu(menu)
        menu.add_cascade(label="File", menu=filemenu)
        filemenu.add_command(label="Open...", command=self.open, accelerator="meta-o")
        filemenu.add_command(label="Save", command=self.save, accelerator="meta-s")
        filemenu.add_command(label="Save As...", command=self.save_as, accelerator="meta-shift-s")
        filemenu.add_separator()
        filemenu.add_command(label="Exit", command=self.quit)

        # BUG FIX: the Help cascade previously reused `filemenu`, so the
        # "Online Help..." entry ended up on the File menu and the Help
        # cascade mirrored File. Give Help its own Menu instance.
        helpmenu = Menu(menu)
        menu.add_cascade(label="Help", menu=helpmenu)
        helpmenu.add_command(label="Online Help...", command=lambda: webbrowser.open_new_tab(URL_HELP_DOC))

        # Note: This is only implemented and tested for Mac OS
        self.bind_all("<Command-o>", self.open)
        self.bind_all("<Command-s>", self.save)
        self.bind_all("<Command-Shift-s>", self.save_as)
        self.bind_all("<Meta-o>", self.open)
        self.bind_all("<Meta-s>", self.save)
        self.bind_all("<Meta-Shift-s>", self.save_as)
def dac_test():
    """Drive the DAC testbench with a short I/Q stimulus and dump a VCD trace."""
    tb = DAC()
    dac_clock = ClockDomain("dac", reset_less=True)
    tb.clock_domains += dac_clock

    def stimulus(dut):
        base_i = 0x28FC
        base_q = 0x3CFC
        # Apply three (I, Q) offset pairs per iteration, one clock each.
        for _ in range(12):
            for d_i, d_q in ((0, 0), (10, -20), (20, -30)):
                yield dut.idata.eq(base_i + d_i)
                yield dut.qdata.eq(base_q + d_q)
                yield
        # Idle cycles to let the pipeline drain before the simulation ends.
        for _ in range(20):
            yield

    run_simulation(
        tb,
        stimulus(tb),
        vcd_name="dac_test.vcd",
        clocks={
            "sys": (8, 0),
            "dac": (4, 0),
        },
    )
Exemple #4
0
 def train_dac_model(self, model_params):
     """Build and train the DAC smoothing model and return it.

     model_params: dict of training hyper-parameters forwarded to DAC.train.
     """
     dac = DAC()
     smoother_model = dac.build_model(hidden_sizes=[64, 64],
                                      seq_len=50,
                                      no_words=40000,
                                      emb_layer=self.embedding_layer,
                                      lr=0.01)
     generator = Generator(sequences=self.sequences,
                           batch_size=SMOOTH_BS,
                           max_words=MAX_NUM_WORDS,
                           max_len=MAX_LEN,
                           split=SMOOTH_SPLIT)
     smoother_model = dac.train(generator,
                                full_model=smoother_model,
                                model_params=model_params,
                                bs=SMOOTH_BS,
                                split=SMOOTH_SPLIT,
                                pretrain_epochs=4,
                                epochs=SMOOTH_EPOCHS)
     # BUG FIX: the trained model was built and trained but never returned,
     # so callers (e.g. `smoother_model = self.train_dac_model(...)`) got None.
     return smoother_model
class TestSampler(unittest.TestCase):
    """Exercise sample playback through a live DAC output."""

    def setUp(self):
        # Fresh audio output for every test case.
        self.dac = DAC(BUFFER_SIZE, SAMPLE_RATE)
        self.dac.start()

    def tearDown(self):
        self.dac.stop()

    def test_instantiation(self):
        # A sampler built from a file on disk must come back non-None.
        self.assertIsNotNone(SingleSoundSampler(SAMPLE_FILE))

    def test_polyphonic_play(self):
        sampler = Sampler()
        for slot in (0, 1):
            sampler.add_sample(slot, SAMPLE_FILE)
        self.dac.connect(sampler.callback)
        # Overlap several triggers to confirm polyphonic playback.
        for slot, pause in ((0, 0.5), (0, 0.5), (1, 2)):
            sampler.on(slot)
            sleep(pause)
        sampler.off(0)
        sampler.off(1)
class TestInstrument(unittest.TestCase):
    """Exercise scale-based synths through a live DAC output."""

    def setUp(self):
        self.scale = PythagSeries(BASE_FREQUENCY)
        self.dac = DAC(BUFFER_SIZE, SAMPLE_RATE)
        self.dac.start()

    def tearDown(self):
        self.dac.stop()

    def test_instantiation(self):
        synth = MonophonicScaleSynth(SAMPLE_RATE, BUFFER_SIZE, self.scale)
        self.assertIsNotNone(synth)

    def test_play(self):
        synth = ScaleSynth(SAMPLE_RATE, BUFFER_SIZE, self.scale)

        self.dac.connect(synth.callback)

        # Single note first, then a two-note chord.
        synth.on(0)
        sleep(.5)
        synth.off(0)
        sleep(.1)
        synth.on(1)
        synth.on(4)
        sleep(.5)
        synth.off(1)
        sleep(.5)
Exemple #7
0
    def run(self, predict_path, smoother_path, eval_path):
        """End-to-end pipeline: parse corpus, train/load models, emit a pun.

        predict_path / smoother_path / eval_path: paths to pre-trained models;
        when a path is None the corresponding model is trained from scratch.
        """
        self._parse_corpus(MIN_SEQ_LEN, TEXT_DATA_DIR + TEXT_DATA)
        self.prepare_emb(EMBEDDING_DIM, MAX_LEN)

        predict_model = None
        if predict_path is None:
            model_params = {
                'lstm': [16],
                'merge_layer': 'concat',
                'dense': {
                    'size': [64, 32],
                    'act': 'elu',
                    'dropout': 0
                },
                'optimizer': 'adam',
                'lr': 0.0005
            }
            predict_model = self.train_predict_model(model_params)
        else:
            pass
            #predict_model = self.load_predict_model(predict_path)

        #smoother_model = None
        if smoother_path is None:
            model_params = {'size': [64, 64], 'lr': 0.01}
            #smoother_model = self.train_dac_model(model_params)
        else:
            self.dac = DAC()
            self.dac.load_model(smoother_path)

        # GENERATE PUN
        # BUG FIX: this previously called the module-level `pungen` object
        # (pungen.form_pun), ignoring the state just set up on *this* instance
        # (self.dac, self.tokenizer, ...). Call our own method instead.
        # NOTE(review): form_pun raises a bare Exception to request a retry
        # with a different retrieved sentence; this loop retries indefinitely.
        while True:
            try:
                final = self.form_pun(eval_path)
                break
            except Exception:
                pass

        print(final)
Exemple #8
0
class Throttle():
    """Throttle output driver.

    Drives the throttle either as a PWM signal on a GPIO pin or through a
    DAC device, selected by the `pwm` constructor flag.
    """

    # Reference voltage used to convert volts into an 8-bit PWM duty cycle.
    vref = 5

    def __init__(self, io, pin, pwm):
        """io: GPIO/DAC backend; pin: output pin number;
        pwm: truthy for PWM output, falsy for DAC output."""
        self.io = io
        self.pin = pin
        self.pwm = pwm
        # Permitted output range, in volts.
        self.maxV = 3.7
        self.minV = 0.0

    def setup(self):
        """Initialise whichever output path was selected at construction."""
        if not self.pwm:
            # DAC path.
            self.dac = DAC(self.io)
            self.dac.setup()
            return
        # PWM path: configure the pin and make sure it starts low.
        self.io.setMode(self.pin)
        self.io.write(self.pin, 0)

    def setVolt(self, volt):
        """Output `volt` volts; raises ThrottleError outside [minV, maxV]."""
        if volt < self.minV or volt > self.maxV:
            raise ThrottleError('Volt out of bound: ', volt, ' V')

        if self.pwm:
            self.pwmWrite(self.calcDuty(volt))
        else:
            self.dacWrite(volt)

    def calcDuty(self, volt):
        """Map a voltage to an 8-bit PWM duty cycle relative to vref."""
        return int(volt / Throttle.vref * (2 ** 8 - 1))

    def off(self):
        """Turn the output off on whichever path is active."""
        if self.pwm:
            self.pwmWrite(0)
        else:
            self.dac.off()

    def pwmWrite(self, duty, freq=1000):
        """Start PWM at `duty` (0-255); a duty of 0 stops PWM entirely."""
        if duty == 0:
            self.io.pwmStop(self.pin)
        else:
            self.io.pwmStart(self.pin, duty, freq)

    def dacWrite(self, volt):
        """Forward the requested voltage to the DAC."""
        self.dac.setVolt(volt)
 def setUp(self):
     # Spin up a shared DAC audio output before each test case.
     self.dac = DAC(BUFFER_SIZE, SAMPLE_RATE)
     self.dac.start()
Exemple #10
0
from dac import DAC
from sequencers.grid_sequencer import GridSequencer

# Score used when no positional score argument is supplied.
DEFAULT_SCORE_FILE = "./scores/score_sample_blank.txt"

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Koshka - MSP project")

    # Optional positional score path; falls back to the blank sample score.
    parser.add_argument("score", nargs="?", type=str, default=DEFAULT_SCORE_FILE)
    # NOTE(review): type=bool is an argparse footgun — any non-empty value
    # (including "False") parses truthy. Only the bare `--no_gui` form (which
    # uses const=True) behaves as intended; consider action="store_true".
    parser.add_argument("--no_gui", type=bool, default=False, const=True, nargs="?")
    # Bare `-l` loops forever (INFINIT_LOOP); `-l N` loops N times; default 0.
    parser.add_argument("-l", "--loop", type=int, default=0, const=GridSequencer.INFINIT_LOOP, nargs="?")
    namespace = parser.parse_args()

    if namespace.no_gui:
        # Headless mode: run the sequencer directly against the DAC.
        dac = DAC()
        dac.start()
        try:
            sequencer = GridSequencer(namespace.score, buffer_size=dac.bufferSize, sample_rate=dac.getSamplerate())
            dac.connect(sequencer.callback)
            sequencer.play(namespace.loop)

            # Poll until playback finishes.
            while sequencer.running:
                sleep(0.1)

        finally:
            # Always release the audio device, even on interrupt.
            dac.stop()

    else:
        # GUI mode. NOTE(review): this fragment appears truncated here —
        # presumably a MainWindow is constructed after the import.
        from gui.main_window import MainWindow
 def setUp(self):
     # Build the scale under test and start a shared DAC audio output.
     self.scale = PythagSeries(BASE_FREQUENCY)
     self.dac = DAC(BUFFER_SIZE, SAMPLE_RATE)
     self.dac.start()
import instruments

import argparse

# Fallback instrument spec: "<synth class> <scale> <base frequency>".
DEFAULT_INSTRUMENT = "ScaleSynth EvenTempered 528"
# Default tempo (presumably beats per minute — TODO confirm against Sequencer).
DEFAULT_SPEED = 120

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='MSP homework 9')
    parser.add_argument('score', type=str)
    parser.add_argument('-i', '--instrument', type=str, default=DEFAULT_INSTRUMENT)
    parser.add_argument('-s', '--speed', type=int, default=DEFAULT_SPEED)
    namespace = parser.parse_args()

    # Start audio output with a 1024-frame buffer at CD sample rate.
    dac = DAC(bufferSize=2 ** 10, rate=44100)
    dac.start()

    # NOTE(review): the matching except/finally for this `try:` is not visible
    # in this excerpt — the fragment appears truncated.
    try:
        sequencer = Sequencer(buffer_size=dac.bufferSize, sample_rate=dac.getSamplerate(), speed=namespace.speed)
        dac.connect(sequencer.callback)

        # Build the instrument from its space-separated textual spec.
        instrument = instruments.parse(namespace.instrument.split(),
                                       buffer_size=dac.bufferSize,
                                       sample_rate=dac.getSamplerate())

        sequencer.add_instrument(instrument)

        # NOTE(review): prefer `with open(...)` — this handle leaks if read() raises.
        file = open(namespace.score, 'r')
        score_string = file.read()
        file.close()
class MainWindow(Tk):
    """Main Koshka window: hosts the sequencer, mixer and scale sub-windows.

    Owns the DAC audio output for the lifetime of the window.
    """

    def __init__(self, namespace):
        """namespace: parsed CLI args; namespace.score is opened at startup."""
        Tk.__init__(self, className="Koshka")

        self.dac = DAC()
        self.dac.start()

        self.score_path = namespace.score

        self.sequencer = None
        self.sequencer_frame = None
        self.mixer_window = None
        self.scale_window = None
        self._open_score(self.score_path)

        menu = Menu(self)
        self.config(menu=menu)
        filemenu = Menu(menu)
        menu.add_cascade(label="File", menu=filemenu)
        filemenu.add_command(label="Open...", command=self.open, accelerator="meta-o")
        filemenu.add_command(label="Save", command=self.save, accelerator="meta-s")
        filemenu.add_command(label="Save As...", command=self.save_as, accelerator="meta-shift-s")
        filemenu.add_separator()
        filemenu.add_command(label="Exit", command=self.quit)

        # BUG FIX: Help previously reused `filemenu`, so "Online Help..."
        # landed on the File menu and the Help cascade mirrored File.
        helpmenu = Menu(menu)
        menu.add_cascade(label="Help", menu=helpmenu)
        helpmenu.add_command(label="Online Help...", command=lambda: webbrowser.open_new_tab(URL_HELP_DOC))

        # Note: This is only implemented and tested for Mac OS
        self.bind_all("<Command-o>", self.open)
        self.bind_all("<Command-s>", self.save)
        self.bind_all("<Command-Shift-s>", self.save_as)
        self.bind_all("<Meta-o>", self.open)
        self.bind_all("<Meta-s>", self.save)
        self.bind_all("<Meta-Shift-s>", self.save_as)

    def __delete__(self, instance):
        # Print statements converted to print() calls: identical output for a
        # single argument on Python 2, and forward-compatible with Python 3.
        print("__delete__")
        self._quit()

    def quit(self):
        print("tk quit")
        Tk.quit(self)

    def _quit(self):
        """Stop audio output and tear the window down completely."""
        print("_quit")
        self.dac.stop()
        self.destroy()
        self.quit()

    def _open_sequencer(self, sequencer):
        """Swap in a new sequencer: stop audio, rebuild child UI, restart audio."""
        print("\nMainWindow: _open_sequencer")

        self.dac.stop()

        # Tear down the previous UI, if any.
        if self.sequencer_frame:
            self.sequencer_frame.destroy()
        if self.mixer_window:
            self.mixer_window.destroy()
        if self.scale_window is not None:
            self.scale_window.destroy()

        # Detach observers so the old sequencer can be released cleanly.
        if self.sequencer is not None:
            self.sequencer.stop()
            self.sequencer.remove_all_observers()
            for i in self.sequencer.instruments:
                i.remove_all_observers()

        self.sequencer = sequencer
        self.dac.connect(self.sequencer.callback)

        # Give each instrument a Tk variable mirroring its display name.
        for i in sequencer.instruments:
            i.id_variable = StringVar()
            i.id_variable.set(i.name_id)

        self.sequencer_frame = SequencerFrame(self, self.sequencer)
        self.sequencer_frame.pack()

        self.mixer_window = mixer_gui.MixerWindow(self, self.sequencer)

        self.scale_window = keyboard.ScaleWindow(self)
        for i in sequencer.instruments:
            self.scale_window.add_instrument(i)

        self.dac.start()

    def _open_score(self, score_path):
        """Load a score file and install a GridSequencer for it."""
        self.score_path = score_path
        sequencer = GridSequencer(score_path, buffer_size=self.dac.bufferSize, sample_rate=self.dac.getSamplerate())

        self._open_sequencer(sequencer)

    def open(self, val=None):
        """Ask the user for a score file and open it (menu command / binding).

        BUG FIX: the default was a shared Tkinter.Event() instance created at
        import time (mutable default argument); the event is unused, so None
        is a safe, backward-compatible default.
        """
        score_path = tkFileDialog.askopenfilename(filetypes=[("Text file", ".txt")])
        if score_path:
            self._open_score(score_path)

    def save(self, val=None):
        """Save the current score back to its current path."""
        self.sequencer.save(self.score_path)

    def save_as(self, val=None):
        """Ask for a new score path, remember it, and save there."""
        self.score_path = tkFileDialog.asksaveasfilename(filetypes=[("Text file", ".txt")])
        self.save()
Exemple #14
0
###############################################################################
# SCHEDULER
##########
# Only speedCoeff should be changed here.
###############################################################################
scheduler = Scheduler(speedCoeff=1)

###############################################################################
# REGISTERS
##########
# Add registers like this: reg = Register("NAME")
###############################################################################

###############################################################################
# ECUS
##########
# Registers must always be added in the same order; otherwise they overlap.
# TODO I want to find out how to add registers with no order constraint
###############################################################################
# One simulated ECU running the GBF demo application, with its timer, DAC,
# two LCDs, two push buttons and a power peripheral.
allEcus = [
    Ecu("../examples/viper2/App-GBF/trampoline", scheduler, [
        Timer("TIMER0", 1, type=timer.AUTO, delay=10),
        DAC("DAC0", 2, position=[0, 0]),
        LCD("LCD1", 3, position=[360, 0]),
        LCD("LCD2", 4, position=[360, 120]),
        BP("BPPlus", 10, position=[360, 240], picture="pictures/BPPlus"),
        BP("BPMinus", 11, position=[460, 240], picture="pictures/BPMinus"),
        Power("POWER", 9),
    ])
]
Exemple #15
0
class Pungen:
    def __init__(self, **kwargs):
        self.filepath = kwargs.get('filepath')
        self.embedding_layer = None

    def _parse_corpus(self, min_seq_len, filepath):
        """Read the corpus at `filepath`, tokenize it, and keep integer
        sequences with at least `min_seq_len` tokens."""
        print('Indexing word vectors.')
        with open(filepath, encoding='utf-8') as fp:
            # Blank lines carry no tokens; drop them up front.
            self.texts = [line for line in fp if line != "\n"]

        self.tokenizer = Tokenizer(num_words=MAX_NUM_WORDS,
                                   filters=TOKEN_FILTER)
        self.tokenizer.fit_on_texts(self.texts)
        # Integer-encode, then discard sequences too short to train on.
        encoded = self.tokenizer.texts_to_sequences(self.texts)
        self.sequences = [seq for seq in encoded if len(seq) >= min_seq_len]
        self.word_index = self.tokenizer.word_index
        print('Found %s unique tokens.' % len(self.word_index))

        print('Found %s texts.' % len(self.sequences))

    def prepare_emb(self, emb_dim, input_length):
        """Load GloVe vectors of size `emb_dim` and build a frozen Embedding layer."""
        print('Indexing word vectors.')

        glove_file = f'glove.6B.{emb_dim}d.txt'

        self.embeddings_index = {}
        with open(os.path.join(GLOVE_DIR, glove_file), encoding='utf-8') as fh:
            for row in fh:
                token, raw = row.split(maxsplit=1)
                self.embeddings_index[token] = np.fromstring(raw, 'f', sep=' ')

        print('Found %s word vectors.' % len(self.embeddings_index))

        # Matrix rows line up with tokenizer indices; words missing from the
        # GloVe vocabulary keep their all-zero rows.
        num_words = MAX_NUM_WORDS
        self.embedding_matrix = np.zeros((num_words, emb_dim))
        for token, idx in self.word_index.items():
            if idx >= num_words:
                continue
            vec = self.embeddings_index.get(token)
            if vec is not None:
                self.embedding_matrix[idx] = vec

        # trainable=False keeps the pre-trained embeddings fixed during training.
        self.embedding_layer = Embedding(num_words,
                                         emb_dim,
                                         embeddings_initializer=Constant(
                                             self.embedding_matrix),
                                         input_length=input_length,
                                         trainable=False)

    def check_generator(self):
        """Sanity-check that decoding the encoded corpus reproduces the input text."""
        decoded = self.tokenizer.sequences_to_texts(self.sequences)

        if len(decoded) != len(self.texts):
            print("Different sizes of texts")
            return

        # Characters the tokenizer strips; mismatches caused solely by them
        # are expected and ignored. (Renamed from `filter` to avoid shadowing
        # the builtin.)
        stripped_chars = set(TOKEN_FILTER)

        for idx, round_trip in enumerate(decoded):
            original = self.texts[idx][:-1]
            if round_trip.lower() == original.lower():
                continue
            if any(ch in stripped_chars for ch in original.lower()):
                continue

            print(round_trip, original)
            print(original.lower())
            print("Tokenizer failed to tokenize properly!")
            return

        print("Tokenizer check was succesfull!")

    def form_pun(self, eval_path):
        """Retrieve a pun pair, substitute words, smooth, and return candidates.

        eval_path: path to the surprisal evaluation model.
        Raises a bare Exception to signal "retry with another retrieved
        sentence" (the caller's retry loop depends on this).
        Returns the combined list of smoothed candidate sentences.
        """
        retrieve = Retrieve(sentence_path=TEXT_DATA_DIR + TEXT_DATA,
                            pun_path=PUN_DATA_DIR + PUN_DATA)
        (pun, sentence, score) = retrieve.retrieve()

        if not sentence:
            print("No sentence with word {} was found. Exiting...".format(
                pun[1]))
            raise Exception()

        text = word_tokenize(sentence)
        tokenized = nltk.pos_tag(text)

        print(tokenized)
        print(sentence, pun[0], pun[1])
        pre = self.tokenizer.texts_to_sequences([sentence])
        wp = self.tokenizer.texts_to_sequences([pun[0]])
        wa = self.tokenizer.texts_to_sequences([pun[1]])

        if (not wa[0]) or (not wp[0]):
            print(
                "The pair of pun and word does not exist in the parsed corpus. Exit..."
            )
            raise Exception()

        # Replace the first occurrence of the alternative word with the pun word.
        index_wa = -1
        for seq in pre[0]:
            index_wa = index_wa + 1
            if seq == wa[0][0]:
                pre[0][index_wa] = wp[0][0]
                break

        wordsimilarity = WordSimilarity()
        wordsimilarity.word2vec()
        wordsimilarity.load()

        # Find a topic word by POS tag, retrying with later words if one is
        # missing from the similarity model's vocabulary.
        try_limit = 5
        try_count = 0
        index_topic = 0
        while True:
            try:
                topic_word = None
                for i in range(index_topic, len(tokenized)):
                    (word, pos) = tokenized[i]
                    if (pos == 'NNP'):
                        topic_word = "man"
                        print(word, pos)
                        index_topic = index_topic + 1
                        break

                    if (pos == 'NN') or (pos == 'PRP') or (pos == 'NNS') or (
                            pos == 'PRP$'):
                        topic_word = word
                        print(word, pos)
                        index_topic = index_topic + 1
                        break
                    index_topic = index_topic + 1

                result = wordsimilarity.getSimilar([topic_word, pun[0]],
                                                   [pun[1]], 10)
                other_result = wordsimilarity.getSimilar([pun[0]], [], 10)

                break
            except KeyError:
                print("Word {} is not in vocabulary, try with the next one".
                      format(topic_word))
                try_count = try_count + 1
                if try_limit == try_count:
                    print("Limit of trys has been reached. Exit...")
                    raise Exception()

        eval_surprisal = Evaluate()
        eval_surprisal.load_model(eval_path)

        # First candidate set: words similar to both topic and pun word.
        finals = []
        mean_amalgam = 0
        for (word, prob) in result:
            swap = self.tokenizer.texts_to_sequences([word])

            context_window = 2
            surprise = eval_surprisal.compute_surpisal(
                sentence=pre[0],
                pun_word=wa[0][0],
                pun_alternative=wp[0][0],
                context_window=context_window)
            mean_amalgam = mean_amalgam + surprise
            print(surprise)

            pre[0][index_topic] = swap[0][0]

            post_simple = self.tokenizer.sequences_to_texts([pre[0]])
            print(post_simple)

            # Zero out neighbours so the smoother can fill them in.
            pre[0][index_topic + 1] = 0
            if index_topic >= 2:
                pre[0][index_topic - 1] = 0

            post_smoothing = self.dac.inference(pre[0])
            post_smoothing = self.tokenizer.sequences_to_texts(
                post_smoothing.tolist())
            finals.append(post_smoothing)
            print(post_smoothing)
        print(finals)
        print(mean_amalgam / 10)

        # Second candidate set: words similar to the pun word alone.
        other_finals = []
        mean_similar = 0
        for (word, prob) in other_result:
            swap = self.tokenizer.texts_to_sequences([word])

            context_window = 2
            surprise = eval_surprisal.compute_surpisal(
                sentence=pre[0],
                pun_word=wa[0][0],
                pun_alternative=wp[0][0],
                context_window=context_window)
            mean_similar = mean_similar + surprise
            print(surprise)

            pre[0][index_topic] = swap[0][0]

            post_simple = self.tokenizer.sequences_to_texts([pre[0]])
            print(post_simple)

            pre[0][index_topic + 1] = 0
            if index_topic >= 2:
                pre[0][index_topic - 1] = 0

            post_smoothing = self.dac.inference(pre[0])
            post_smoothing = self.tokenizer.sequences_to_texts(
                post_smoothing.tolist())
            other_finals.append(post_smoothing)
            print(post_smoothing)
        print(other_finals)
        print(mean_similar / 10)

        # BUG FIX: list.extend() returns None, so callers always received
        # None. Extend in place, then return the combined list.
        finals.extend(other_finals)
        return finals

    def train_predict_model(self, model_params):
        """Build, compile and train the word-prediction model; return it."""
        predictor = WordPredict(max_len=MAX_LEN,
                                max_words=MAX_NUM_WORDS,
                                emb_layer=self.embedding_layer)
        predictor.build_model(**model_params)
        predictor.compile_model(model_params)

        data_gen = Generator(sequences=self.sequences,
                             batch_size=PREDICT_BS,
                             max_words=MAX_NUM_WORDS,
                             max_len=MAX_LEN,
                             split=PREDICT_SPLIT)

        predictor.train(data_gen, PREDICT_BS, PREDICT_SPLIT, PREDICT_EPOCHS)
        return predictor

    def load_predict_model(self, path):
        """Load a previously saved word-prediction model from `path`."""
        return load_model(path)

    def train_dac_model(self, model_params):
        """Build and train the DAC smoothing model and return it.

        model_params: dict of training hyper-parameters forwarded to DAC.train.
        """
        dac = DAC()
        smoother_model = dac.build_model(hidden_sizes=[64, 64],
                                         seq_len=50,
                                         no_words=40000,
                                         emb_layer=self.embedding_layer,
                                         lr=0.01)
        generator = Generator(sequences=self.sequences,
                              batch_size=SMOOTH_BS,
                              max_words=MAX_NUM_WORDS,
                              max_len=MAX_LEN,
                              split=SMOOTH_SPLIT)
        smoother_model = dac.train(generator,
                                   full_model=smoother_model,
                                   model_params=model_params,
                                   bs=SMOOTH_BS,
                                   split=SMOOTH_SPLIT,
                                   pretrain_epochs=4,
                                   epochs=SMOOTH_EPOCHS)
        # BUG FIX: the trained model was never returned, so the (commented)
        # caller `smoother_model = self.train_dac_model(...)` would get None.
        return smoother_model

    def run(self, predict_path, smoother_path, eval_path):
        """End-to-end pipeline: parse corpus, train/load models, emit a pun.

        predict_path / smoother_path / eval_path: paths to pre-trained models;
        when a path is None the corresponding model is trained from scratch.
        """
        self._parse_corpus(MIN_SEQ_LEN, TEXT_DATA_DIR + TEXT_DATA)
        self.prepare_emb(EMBEDDING_DIM, MAX_LEN)

        predict_model = None
        if predict_path is None:
            model_params = {
                'lstm': [16],
                'merge_layer': 'concat',
                'dense': {
                    'size': [64, 32],
                    'act': 'elu',
                    'dropout': 0
                },
                'optimizer': 'adam',
                'lr': 0.0005
            }
            predict_model = self.train_predict_model(model_params)
        else:
            pass
            #predict_model = self.load_predict_model(predict_path)

        #smoother_model = None
        if smoother_path is None:
            model_params = {'size': [64, 64], 'lr': 0.01}
            #smoother_model = self.train_dac_model(model_params)
        else:
            self.dac = DAC()
            self.dac.load_model(smoother_path)

        # GENERATE PUN
        # BUG FIX: this previously called the module-level `pungen` object
        # (pungen.form_pun), ignoring the state just set up on *this* instance
        # (self.dac, self.tokenizer, ...). Call our own method instead.
        # NOTE(review): form_pun raises a bare Exception to request a retry
        # with a different retrieved sentence; this loop retries indefinitely.
        while True:
            try:
                final = self.form_pun(eval_path)
                break
            except Exception:
                pass

        print(final)