Example #1
from optparse import OptionParser
from sys import argv

# Score, Instrument and writerclass come from the project's module-level
# imports, which are not shown in this excerpt.
def main():
    usage= 'usage: %prog [options] infname'
    parser= OptionParser(usage=usage)

    options, args= parser.parse_args(argv[1:])
    if len(args) < 1: parser.error('not enough args')

    infname= args[0]
    outfname= infname.replace('notes', 'mid')
    assert infname != outfname

    score= Score(480)
    instrument= Instrument()
    instrument.patch= 21
    lines= open(infname).read().split('\n')
    notes= []
    for l in lines:
        if len(l) == 0: continue
        [str_note, start, end, pitch]= l.split()
        start= int(start)
        end= int(end)
        duration= end-start
        pitch= int(pitch)

        score.note_played(instrument, pitch, start, duration, 100)

    writer= writerclass()
    writer.dump(score, outfname)
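The parsing loop above expects one note per line with four whitespace-separated fields: a note label, a start tick, an end tick, and a MIDI pitch. A minimal sketch of how such an input file could be produced, assuming nothing beyond the format the loop itself reads (the file name song.notes and the note values are illustrative only):

# Write a tiny input file in the format main() expects; the script would then
# be run as `python thisscript.py song.notes` and would write song.mid
# (via infname.replace('notes', 'mid')).
sample_notes= [
    ('C4', 0, 480, 60),
    ('E4', 480, 960, 64),
    ('G4', 960, 1440, 67),
]
f= open('song.notes', 'w')
for name, start, end, pitch in sample_notes:
    f.write('%s %d %d %d\n' % (name, start, end, pitch))
f.close()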
Example #2
    def compose(self, score, **optional):
        params= bind_params(self.params, optional)
        
        piano= score.instruments[0]
        interval_size= measure_interval_size(score, params['n_measures']) 
        
        # XXX check whether not copying the score causes any problem
        #chords_notes_alg= ScoreHarmonicContext(score)
        chords_notes_alg= ChordHarmonicContext(score)
        rythm_alg= RythmHMM(interval_size, multipart=False, instrument=piano.patch, channel=piano.channel)
        melody_alg= MelodyHMM(instrument=piano.patch, channel=piano.channel)

        rythm_alg.train(score)
        chords_notes_alg.train(score)
        melody_alg.train(score)
        applier= AlgorithmsApplier(chords_notes_alg, rythm_alg, melody_alg)
        
        notes= applier.create_melody(score.duration, params['print_info'])
        instrument= Instrument()
        instrument.patch= params['melody_instrument']

        res= score.copy()
        for n in res.get_notes(skip_silences=True):
            n.volume= 70

        res.notes_per_instrument[instrument]= notes
        return res
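A hedged sketch of how this compose() method might be driven end to end, reusing the parserclass/writerclass pattern from Example #5. The composer class name ComposerWithHMM and the concrete parameter values are assumptions for illustration, not taken from the source; only the parameter names (n_measures, print_info, melody_instrument) appear in the code above:

# Illustrative driver only; ComposerWithHMM is a hypothetical name for the
# class that owns the compose() method shown above.
parser= parserclass()
score= parser.parse('input.mid')           # illustrative input file name

composer= ComposerWithHMM()                # hypothetical constructor
res= composer.compose(score,
                      n_measures=1,        # bound via bind_params(self.params, optional)
                      print_info=False,
                      melody_instrument=74)

writer= writerclass()
writer.dump(res, 'output.mid')             # illustrative output file name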
Example #3
    def compose(self, score, **optional):
        params= bind_params(self.params, optional)
        
        melody_instrument= None
        rythm_instrument= None
        for instrument in score.instruments:
            if self.matches_description(instrument, params['melody_patch'], params['melody_channel']):
                melody_instrument= instrument
            if self.matches_description(instrument, params['rythm_patch'], params['rythm_channel']):
                rythm_instrument= instrument
        # fall back to the first instrument only when the patch/channel lookup failed
        if melody_instrument is None or rythm_instrument is None:
            if len(score.instruments) > 1: raise Exception("which instrument?")
            rythm_instrument= melody_instrument= score.instruments[0]
        
        interval_size= measure_interval_size(score, params['n_measures']) 
        
        #harmonic_context_alg= YamlHarmonicContext('/home/prakuso/tesis/src/electrozart/electrozart/composers/base2.yaml', score.divisions)
        harmonic_context_alg= ChordHarmonicContext(score)
        harmonic_context_alg= PhraseRepetitions(harmonic_context_alg, alpha=7)

        rythm_alg= RythmHMM(interval_size, instrument=rythm_instrument.patch, channel=rythm_instrument.channel)
        phrase_rythm_alg= rythm_alg
        phrase_rythm_alg= ListRythm(rythm_alg)
        phrase_rythm_alg= RythmCacheAlgorithm(ListRythm(rythm_alg), 'phrase_id')

        melody_alg= NarmourHMM(instrument=melody_instrument.patch, channel=melody_instrument.channel)
        phrase_melody_alg= melody_alg
        phrase_melody_alg= ListMelody(melody_alg)
        phrase_melody_alg= CacheAlgorithm(ListMelody(melody_alg), 'phrase_id')

        #rythm_score= score.copy()
        #rythm_score.notes_per_instrument.pop(piano)
        print "todos los intrsumentos"
        for instrument in score.instruments:
            if not instrument.is_drums:
                melody_alg.obsSeqBuilder.builder.patch= instrument.patch
                melody_alg.train(score)
            rythm_alg.obsSeqBuilder.builder.patch= instrument.patch
            rythm_alg.train(score)

        harmonic_context_alg.train(score)

        applier= AlgorithmsApplier(harmonic_context_alg, phrase_rythm_alg, phrase_melody_alg)
        self.applier= applier
        applier.start_creation()
        rythm_alg.draw_model('rythm.png', score.divisions)
        melody_alg.model.draw('melody.png', str)

        duration= score.duration
        #duration= harmonic_context_alg.harmonic_context_alg.chordlist[-1].end
        #duration= harmonic_context_alg.chordlist[-1].end
        
        # octave range
        score_notes= score.get_notes(skip_silences=True)
        mean_pitch= sum(float(n.pitch) for n in score_notes)/len(score_notes)
        std_dev= sqrt(sum((n.pitch-mean_pitch)**2 for n in score_notes)/len(score_notes))
        #import ipdb;ipdb.set_trace()
        octave= int(mean_pitch/12) #+ 1
        min_pitch= octave*12
        max_pitch= (octave+2)*12 + 6
        offset= 0
        min_pitch= int(mean_pitch - std_dev+offset)
        max_pitch= int(mean_pitch + std_dev+offset)

        print "MIN PITCH", min_pitch
        print "MAX PITCH", max_pitch
        general_input= AcumulatedInput()
        general_input.min_pitch= min_pitch
        general_input.max_pitch= max_pitch


        notes= applier.create_melody(duration, params['print_info'], general_input=general_input)
        #seed(time())

        #chord_notes= []
        #for c in harmonic_context_alg.chordlist:
        #    for n in c.notes:
        #        chord_notes.append(PlayedNote(n.pitch+3*12, c.start, c.duration, 80))
        #duration= chord_notes[-1].end

        res= score.copy()
        rythm_alg.model.calculate_metrical_accents()
        #rythm_alg.model.draw_accents('accents.png', score.divisions)
        import random
        cache= {}
        def random_accent(note):
            moment= (note.start%rythm_alg.model.interval_size)/rythm_alg.model.global_gcd
            res= cache.get(moment)
            if res is None:
                res= randint(1, 6)
                cache[moment]= res
            return res
        def dec_accent(note):            
            moment= (note.start%rythm_alg.model.interval_size)/rythm_alg.model.global_gcd
            res= cache.get(moment)
            if res is None:
                res= 7-moment
                cache[moment]= res
            return res
        def inc_accent(note):            
            moment= (note.start%rythm_alg.model.interval_size)/rythm_alg.model.global_gcd
            res= cache.get(moment)
            if res is None:
                res= moment + 1 
                cache[moment]= res
            return res

        # successive reassignments below are experimentation leftovers; only the
        # last one (random_accent) is actually used
        accent_func= rythm_alg.model.get_metrical_accent
        accent_func= inc_accent
        accent_func= dec_accent
        accent_func= random_accent

        #seed(time())
        #max_accent= max(rythm_alg.model.metrical_accents.itervalues())
        #min_accent= max(rythm_alg.model.metrical_accents.itervalues())
        max_accent= 6
        for i, n in enumerate(notes):
            accent= accent_func(n)
            n.volume=  min(100, max(60, (100 * accent)/max_accent))
            if not n.is_silence: n.pitch+=12
            #if rythm_alg.model.get_metrical_accent(n)== min_accent  and random.random() > 0.5:
            #    pass
            #    #notes[i]= Silence(n.start, n.duration)

        print "min volume:", min(n.volume for n in notes if not n.is_silence) 
        print "max volume:", max(n.volume for n in notes if not n.is_silence)

        print "accent mapping"
        for moment, accent in sorted(cache.items(), key=lambda x:x[0]):
            print moment, accent
        for n in res.get_notes(skip_silences=True):
            n.volume= 50

        #piano= res.notes_per_instrument.keys()[0]
        #piano= Instrument()
        instrument= Instrument()
        # successive overrides left over from experimentation; only the last
        # assignment (patch 74) actually takes effect
        instrument.patch= params['melody_patch_to_dump']
        instrument.patch= 26
        instrument.patch= 73
        instrument.patch= 21
        instrument.patch= 74
        res.notes_per_instrument[instrument]= notes
        #res.notes_per_instrument= {instrument: notes, melody_instrument:res.notes_per_instrument[melody_instrument]}
        #res.notes_per_instrument= {instrument: notes}
        #res.notes_per_instrument[piano]= chord_notes

        #rythm_alg.draw_model('rythm.png')

        import electrozart
        from electrozart import base
        from electrozart.algorithms import ExecutionContext
        #from pycana import CodeAnalyzer

        #analyzer= CodeAnalyzer(electrozart)
        #relations= analyzer.analyze(exceptions= [ExecutionContext])
        #analyzer.draw_relations(relations, 'relations.png')


        return res
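The volume loop above maps each note's metrical accent to a velocity with min(100, max(60, (100 * accent)/max_accent)). A standalone sketch of just that mapping, with max_accent fixed at 6 as in the code (Python 2 integer division, so 100*accent/6 truncates):

# Illustration of the accent-to-volume clamp used in the loop above.
max_accent= 6
for accent in range(1, max_accent + 1):
    volume= min(100, max(60, (100 * accent)/max_accent))
    print accent, volume   # accents 1-3 clamp to 60, then 66, 83, 100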
Example #4
    def compose(self, score, **optional):
        params= bind_params(self.params, optional)
        
        piano= score.instruments[0]
        interval_size= measure_interval_size(score, params['n_measures']) 
        
        harmonic_context_alg= YamlHarmonicContext('/home/prakuso/tesis/src/electrozart/electrozart/composers/base34.yaml', score.divisions)
        harmonic_context_alg= PhraseRepetitions(harmonic_context_alg)

        rythm_alg= RythmHMM(interval_size, multipart=False, instrument=piano.patch, channel=piano.channel)
        phrase_rythm_alg= RythmCacheAlgorithm(ListRythm(rythm_alg), 'part_id')
        #phrase_rythm_alg= rythm_alg

        melody_alg= NarmourHMM(instrument=piano.patch, channel=piano.channel)
        #phrase_melody_alg= ListMelody(melody_alg)
        phrase_melody_alg= CacheAlgorithm(ListMelody(melody_alg), 'part_id')
        #phrase_melody_alg= melody_alg

        rythm_alg.train(score)
        harmonic_context_alg.train(score)
        melody_alg.train(score)

        applier= AlgorithmsApplier(harmonic_context_alg, phrase_rythm_alg, phrase_melody_alg)

        duration= harmonic_context_alg.harmonic_context_alg.chordlist[-1].end

        notes= applier.create_melody(duration, params['print_info'])

        chord_notes= []
        for c in harmonic_context_alg.harmonic_context_alg.chordlist:
            for n in c.notes:
                chord_notes.append(PlayedNote(n.pitch+3*12, c.start, c.duration, 80))
        duration= chord_notes[-1].end

        instrument= Instrument()
        instrument.patch= params['melody_instrument']

        res= score.copy()
        rythm_alg.model.calculate_metrical_accents()
        rythm_alg.model.draw_accents('accents.png', score.divisions)
        max_accent= max(rythm_alg.model.metrical_accents.itervalues())
        import random
        for i, n in enumerate(notes):
            accent= rythm_alg.model.get_metrical_accent(n)
            n.volume=  max(62, (100 * accent)/max_accent)
            if not n.is_silence: n.pitch+=24
            if accent == 1: #  and random.random() > 0.5:
                pass
                #notes[i]= Silence(n.start, n.duration)

        for n in res.get_notes(skip_silences=True):
            n.volume= 70

        piano= res.notes_per_instrument.keys()[0]
        piano= Instrument()
        instrument.patch= 74
        instrument.patch= 73
        res.notes_per_instrument= {}
        res.notes_per_instrument[instrument]= notes
        res.notes_per_instrument[piano]= chord_notes

        #rythm_alg.draw_model('rythm.png')

        return res
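The chord_notes loop above flattens every chord in the chordlist into PlayedNote objects transposed up three octaves (3*12 = 36 semitones) at a fixed velocity of 80. A minimal sketch of that flattening with hypothetical stand-in objects (PlayedNote itself is a project class, so plain tuples are used here just to show the shape of the result):

# Hypothetical stand-ins for a chord from the chordlist; only the attributes
# used above (notes, start, duration) are modelled.
class FakeNote(object):
    def __init__(self, pitch): self.pitch= pitch
class FakeChord(object):
    def __init__(self, pitches, start, duration):
        self.notes= [FakeNote(p) for p in pitches]
        self.start= start
        self.duration= duration

chord= FakeChord([36, 40, 43], start=0, duration=480)   # low C major triad
chord_notes= [(n.pitch + 3*12, chord.start, chord.duration, 80) for n in chord.notes]
print chord_notes   # [(72, 0, 480, 80), (76, 0, 480, 80), (79, 0, 480, 80)]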
Example #5
def train2(options, args):
    partition_algorithm= options.partition_algorithm
    patch= options.patch
    channel= options.channel
    level= options.level
    infname= args[0]
    outfname= args[1]

    parser= parserclass()
    score= parser.parse(infname)
    #score= quantize(score)
    orig_score= score.copy()
    if not score: import ipdb;ipdb.set_trace() # why could this return None?

    #########
    # A BUNCH OF PARSING ADJUSTMENTS
    instr= score.notes_per_instrument.keys()[0]
    patch= instr.patch
    channel= instr.channel
    #########
    #########


    if options.partition_algorithm == 'MGRID':
        # NOTE: `notes` is not defined until later in this function, so the
        # MGRID branch would raise a NameError as written
        interval_size= metrical_grid_interval_size(score, notes, level)
    elif options.partition_algorithm == 'MEASURE':
        interval_size= measure_interval_size(score, options.n_measures)

    nintervals= 16
    nintervals= orig_score.get_notes()[-1].end/interval_size
    nphrases= 5
    composition_length= interval_size*nintervals
    alpha= nphrases/log(nintervals,2)

    # so it is copied from the original piece
    chords_notes_alg= ScoreHarmonicContext(orig_score)
    rythm_alg= RythmHMM(interval_size, multipart=False, instrument=patch, channel=channel)
    melody_alg= HarmonyHMM(instrument=patch, channel=channel)
    algorithm= StackAlgorithm(rythm_alg, chords_notes_alg, melody_alg)

    # so it builds the backing (chord base) itself
    #chords_notes_alg= HMMHarmonicContext(3)
    #chords_rythm_alg= RythmHMM(interval_size, multipart=True, instrument=patch, channel=channel)
    #chord_maker= StackAlgorithm(chords_rythm_alg, chords_notes_alg)

    #phrase_maker= PhraseAlgorithm(orig_score.divisions, alpha, chord_maker)

    #rythm_alg= RythmHMM(interval_size, multipart=True, instrument=patch, channel=channel)
    #melody_alg= HarmonyHMM(instrument=patch, channel=channel)

    #algorithm= StackAlgorithm(phrase_maker, rythm_alg, phrase_maker, melody_alg)

    algorithm.train(score)
    applier= AlgorithmsApplier(algorithm)
    notes= applier.create_melody(composition_length, print_info=True)
    
    #for c1, c2 in zip(phrase_maker.ec.chords, phrase_maker.ec.chords[1:]):
    #    if c1.end != c2.start: import ipdb;ipdb.set_trace()

    if options.print_model: print algorithm.model

    drums= Instrument(is_drums=True)
    #drums.patch= int('0x12', 16)
    instrument= Instrument()
    instrument2= Instrument()
    instrument3= Instrument()
    instrument.patch= 33
    instrument2.patch= 21
    instrument3.patch= 0

    for i, ns in orig_score.notes_per_instrument.iteritems():
        for n in ns: n.volume= 75

    # the full_new branch below relies on phrase_maker, which is only defined in
    # the commented-out block above, so enabling it as-is would raise a NameError
    full_new= False
    if full_new:
        chords= []
        duration= 0
        metro= []
        for i in xrange(0, nintervals*interval_size, score.divisions):
            metro.append(PlayedNote(31, i, i+1, 65))


        for chord in phrase_maker.ec.chords:
            for note in chord.notes:
                chords.append(PlayedNote(note.pitch, chord.start, chord.duration, chord.volume))
        orig_score.notes_per_instrument= {instrument3:chords, instrument:notes}#, drums:metro}
        #orig_score.notes_per_instrument= {drums:metro}
        #orig_score.notes_per_instrument= {instrument:notes}
        orig_score.notes_per_instrument= {instrument3:chords}#, drums:metro}
    else:        
        orig_score.notes_per_instrument[instrument]= notes
    writer= writerclass()
    writer.dump(orig_score, outfname)
    print 'done!'
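To make the sizing arithmetic above concrete: nintervals is the end tick of the last note divided by interval_size, composition_length is interval_size*nintervals, and alpha is nphrases/log2(nintervals). A worked example with assumed, purely illustrative numbers (an interval of 1920 ticks and a last note ending at tick 30720):

from math import log

# Illustrative numbers only; they are not taken from the source.
interval_size= 1920
last_note_end= 30720
nphrases= 5

nintervals= last_note_end/interval_size          # 16 intervals
composition_length= interval_size*nintervals     # 30720 ticks
alpha= nphrases/log(nintervals, 2)               # 5/4.0 = 1.25
print nintervals, composition_length, alpha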
Example #6
    def compose(self, score, **optional):
        applier= self.build_models(score, **optional) 
        params= self.params= bind_params(self.params, optional)
        
        #import pickle
        #f=open('r.pickle','w')
        #pickle.dump(rythm_alg.model, f, 2)
        #f.close()
        #1/0
        # XXX
        #self.rythm_alg= rythm_alg#.draw_model('rythm.png', score.divisions)
        #self.melody_alg= melody_alg #.model.draw('melody.png', str)

        duration= score.duration
        #duration= harmonic_context_alg.harmonic_context_alg.chordlist[-1].end
        #duration= harmonic_context_alg.chordlist[-1].end
        
        general_input= AcumulatedInput()
        general_input.min_pitch= params['min_pitch']
        general_input.max_pitch= params['max_pitch']


        notes= applier.create_melody(duration, params['print_info'], general_input=general_input)
        #seed(time())

        #chord_notes= []
        #for c in harmonic_context_alg.chordlist:
        #    for n in c.notes:
        #        chord_notes.append(PlayedNote(n.pitch+3*12, c.start, c.duration, 80))
        #duration= chord_notes[-1].end

        res= score.copy()
        self.algorithms['rythm_alg'].model.calculate_metrical_accents()
        #rythm_alg.model.draw_accents('accents.png', score.divisions)
        import random
        rnd= random.Random(params['seed'])
        cache= {}
        def random_accent(note):
            moment= (note.start%self.algorithms['rythm_alg'].model.interval_size)/self.algorithms['rythm_alg'].model.global_gcd
            res= cache.get(moment)
            if res is None:
                res= rnd.randint(1, 6)
                cache[moment]= res
            return res
        def dec_accent(note):            
            moment= (note.start%self.algorithms['rythm_alg'].model.interval_size)/self.algorithms['rythm_alg'].model.global_gcd
            res= cache.get(moment)
            if res is None:
                res= 7-moment
                cache[moment]= res
            return res
        def inc_accent(note):            
            moment= (note.start%self.algorithms['rythm_alg'].model.interval_size)/self.algorithms['rythm_alg'].model.global_gcd
            res= cache.get(moment)
            if res is None:
                res= moment + 1 
                cache[moment]= res
            return res

        # successive reassignments below are experimentation leftovers; only the
        # last one (random_accent) is actually used
        accent_func= self.algorithms['rythm_alg'].model.get_metrical_accent
        accent_func= inc_accent
        accent_func= dec_accent
        accent_func= random_accent

        #seed(time())
        #max_accent= max(rythm_alg.model.metrical_accents.itervalues())
        #min_accent= max(rythm_alg.model.metrical_accents.itervalues())
        max_accent= 6
        for i, n in enumerate(notes):
            accent= accent_func(n)
            n.volume=  min(100, max(60, (100 * accent)/max_accent))
            if not n.is_silence: n.pitch+=12
            #if rythm_alg.model.get_metrical_accent(n)== min_accent  and random.random() > 0.5:
            #    pass
            #    #notes[i]= Silence(n.start, n.duration)

        min_volume= min(n.volume for n in notes if not n.is_silence) 
        max_volume= max(n.volume for n in notes if not n.is_silence)
        self.params['min volume']= min_volume 
        self.params['max volume']= max_volume 
        print "min volume:", min_volume 
        print "max volume:", max_volume 

        self.params['algorithms params']= applier.algorithms_params()

        self.params['accent mapping']= cache
        print "accent mapping"
        for moment, accent in sorted(cache.items(), key=lambda x:x[0]):
            print moment, accent
        for n in res.get_notes(skip_silences=True):
            n.volume= 50

        #piano= res.notes_per_instrument.keys()[0]
        #piano= Instrument()
        instrument= Instrument()
        # successive overrides left over from experimentation; only the last
        # assignment (patch 25) actually takes effect
        instrument.patch= 73
        instrument.patch= 26
        instrument.patch= 32
        instrument.patch= params['output_patch']
        instrument.patch= 30 # electric guitar
        instrument.patch= 74 # flute
        instrument.patch= 25
        res.notes_per_instrument[instrument]= notes
        #res.notes_per_instrument= {instrument: notes}
        #res.notes_per_instrument[piano]= chord_notes

        #rythm_alg.draw_model('rythm.png')

        import electrozart
        from electrozart import base
        from electrozart.algorithms import ExecutionContext
        #from pycana import CodeAnalyzer

        #analyzer= CodeAnalyzer(electrozart)
        #relations= analyzer.analyze(exceptions= [ExecutionContext])
        #analyzer.draw_relations(relations, 'relations.png')


        #XXX
        return res, instrument
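Unlike Example #3, which calls the module-level randint directly, this version builds rnd= random.Random(params['seed']), so the same seed reproduces the same accent mapping. A minimal illustration of that property (the seed value 42 is arbitrary):

import random

# Two generators with the same seed produce the same accent sequence,
# which is what makes the composer's output reproducible for a fixed seed.
rnd_a= random.Random(42)
rnd_b= random.Random(42)
print [rnd_a.randint(1, 6) for _ in range(4)] == [rnd_b.randint(1, 6) for _ in range(4)]  # True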