def generate13(self):
     # mkg-2008-12-25-b
     gcg = GeneralizedContextualGroup.GeneralizedContextualGroup()
     gcg.thisown = 0
     gcg.setAxiom('S(0,3,7,10,14) R36 WC A')
     gcg.addRule('A', 'K WC Q7 K WC A')
     gcg.setIterationCount(9)
     gcg.debug = True
     gcg.generate()
     random = CsoundAC.Random()
     random.thisown = 0
     random.createDistribution("uniform_real")
     random.setElement(7, 11, 1)
     rescale = CsoundAC.Rescale()
     rescale.thisown = 0
     ### rescale.setRescale( CsoundAC.Event.TIME,       1, 0,  1,     120    )
     rescale.setRescale(CsoundAC.Event.TIME, True, False, 20, 120)
     rescale.setRescale(CsoundAC.Event.DURATION, False, False, 0.25, 1.0)
     rescale.setRescale(CsoundAC.Event.INSTRUMENT, True, False, 1, 0)
     rescale.setRescale(CsoundAC.Event.KEY, True, False, 60, 36)
     rescale.setRescale(CsoundAC.Event.VELOCITY, True, True, 30, 12)
     rescale.setRescale(CsoundAC.Event.PAN, True, True, -0.875, 1.75)
     # random.addChild(lindenmayer)
     rescale.addChild(gcg)
     self.sequence.addChild(rescale)
     print
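All of the generate methods in these examples share the same Rescale idiom. From the call sites, the signature appears to be setRescale(dimension, rescaleMinimum, rescaleRange, minimum, range): the two booleans select whether to move the events' minimum and whether to stretch their spread, followed by the target minimum and spread. A minimal sketch of the pattern, with hypothetical values, using the names from the example above:

rescale = CsoundAC.Rescale()
rescale.thisown = 0  # hand ownership of the SWIG proxy to the C++ side
# Move note onsets to start at 20 and stretch them to span 120 units.
rescale.setRescale(CsoundAC.Event.TIME, True, True, 20, 120)
# Move the lowest pitch to MIDI key 36; False leaves the generated spread alone.
rescale.setRescale(CsoundAC.Event.KEY, True, False, 36, 60)
rescale.addChild(gcg)            # rescale everything the generator produces
self.sequence.addChild(rescale)  # splice the subtree into the piece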
Example #2
 def generate(self):
     print('CREATING MUSIC MODEL...')
     ##sys.path.append('d:/utah/home/mkg/projects/icmc2009-mkg')
     sys.path.append('/home/mkg/Dropbox/studio')
     import GeneralizedContextualGroup
     gcg = GeneralizedContextualGroup.GeneralizedContextualGroup()
     gcg.setAxiom('S(0,3,7,11,14) R60 WV A')
     gcg.addRule(
         'A',
         'V+8 K WV Q7 K D-1.5 WC V+9 WC V-7 WC V+11 A D-2 B D+2 D+1.5 L+ K WV A L- Q7 WV A'
     )
     gcg.addRule('B', 'V+8 WV Q3 V+8 WV B')
     gcg.setIterationCount(4)
     gcg.debug = True
     gcg.generate()
     random = CsoundAC.Random()
     random.thisown = 0
     random.createDistribution("uniform_real")
     random.setElement(7, 11, 1)
     rescale = CsoundAC.Rescale()
     rescale.thisown = 0
     rescale.setRescale(CsoundAC.Event.TIME, True, False, 1, 120)
     rescale.setRescale(CsoundAC.Event.DURATION, False, False, 0.25, 1.0)
     rescale.setRescale(CsoundAC.Event.INSTRUMENT, True, False, 1, 0)
     rescale.setRescale(CsoundAC.Event.KEY, True, False, 36, 60)
     rescale.setRescale(CsoundAC.Event.VELOCITY, True, True, 15, 12)
     rescale.setRescale(CsoundAC.Event.PAN, True, True, -0.875, 1.75)
     # random.addChild(lindenmayer)
     rescale.addChild(gcg)
     self.model.addChild(rescale)
     print
 def generate9(self):
     # mkg-2009-01-10-c
     gcg = GeneralizedContextualGroup.GeneralizedContextualGroup()
     gcg.thisown = 0
     gcg.setAxiom('S(0,3,7,11,14) R48 WV B')
     gcg.addRule(
         'A',
         'V+8 K WV Q7 K D-1.5 WC V+9 WC V-7 WC V+11 A D-3 B D+3 D+1.5 L+ K WV A L- Q7 WV A'
     )
     gcg.addRule(
         'B',
         'V+8 WC V-7 WC V+8 WC V-7 WC V+8 WC V-7 WC B B B K WV V+8 WC V-7 WC B B B Q7 WV V+8 WC V-7 WC B B B'
     )
     gcg.setIterationCount(3)
     gcg.debug = True
     gcg.generate()
     random = CsoundAC.Random()
     random.thisown = 0
     random.createDistribution("uniform_real")
     random.setElement(7, 11, 1)
     rescale = CsoundAC.Rescale()
     rescale.thisown = 0
     # rescale.setRescale( CsoundAC.Event.TIME,       True, False,  1,     120    )
     rescale.setRescale(CsoundAC.Event.TIME, True, False, 20, 120)
     rescale.setRescale(CsoundAC.Event.DURATION, False, False, 0.25, 1.0)
     rescale.setRescale(CsoundAC.Event.INSTRUMENT, True, False, 1, 0)
     rescale.setRescale(CsoundAC.Event.KEY, True, False, 48, 36)
     rescale.setRescale(CsoundAC.Event.VELOCITY, True, True, 15, 12)
     rescale.setRescale(CsoundAC.Event.PAN, True, True, -0.875, 1.75)
     # random.addChild(lindenmayer)
     rescale.addChild(gcg)
     self.sequence.addChild(rescale)
     print
Example #4
def addVoiceleadingTest(sequence, voiceleadingNode, duration):
    print 'ADDING TEST...'
    random = CsoundAC.Random()
    random.thisown = 0
    random.createDistribution("uniform_01")
    random.eventCount = 200
    random.setElement(CsoundAC.Event.INSTRUMENT, 11, 1)
    random.setElement(CsoundAC.Event.TIME, 11, 1)
    random.setElement(CsoundAC.Event.DURATION, 11, 1)
    random.setElement(CsoundAC.Event.KEY, 11, 1)
    random.setElement(CsoundAC.Event.VELOCITY, 11, 1)
    random.setElement(CsoundAC.Event.PAN, 11, 1)
    rescale = CsoundAC.Rescale()
    rescale.setRescale(CsoundAC.Event.INSTRUMENT, 1, 1, 1., 4.)
    rescale.setRescale(CsoundAC.Event.TIME, 1, 1, 1., duration)
    rescale.setRescale(CsoundAC.Event.DURATION, 1, 1, 0.25, 1.)
    rescale.setRescale(CsoundAC.Event.STATUS, 1, 1, 144., 0.)
    rescale.setRescale(CsoundAC.Event.KEY, 1, 1, 36., 60.)
    rescale.setRescale(CsoundAC.Event.VELOCITY, 1, 1, 60., 9.)
    rescale.setRescale(CsoundAC.Event.PAN, 1, 1, -0.25, 1.5)
    rescale.addChild(random)
    rescale.thisown = 0
    voiceleadingNode.normalizeTimes = True
    voiceleadingNode.thisown = 0
    voiceleadingNode.addChild(rescale)
    sequence.addChild(voiceleadingNode)
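A hedged usage sketch for the test helper above; VoiceleadingNode and Sequence are real CsoundAC node types, but how the original script wired them into the model is not shown, so the attachment below is an assumption.

sequence = CsoundAC.Sequence()
sequence.thisown = 0
voiceleadingNode = CsoundAC.VoiceleadingNode()
voiceleadingNode.thisown = 0
# Generate 200 random notes under the voice-leading node, spread over a
# hypothetical 60-second test segment.
addVoiceleadingTest(sequence, voiceleadingNode, 60.0)
model.addChild(sequence)  # hypothetical attachment to the music model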
 def generate18(self):
     # mkg-2009-12-24-a
     gcg = GeneralizedContextualGroup.GeneralizedContextualGroup()
     gcg.thisown = 0
     gcg.setAxiom('S(0,4,7,10) R36 V+ V+8 WV V-6 WV B')
     gcg.addRule(
         'B',
         'B Q2 WV V+2 W Q2 WV V+1 W V+3 V+ WV V-3 Q7 WV D- WV Q5 D+ D+ WV K D- D- B D+'
     )
     gcg.setIterationCount(7)
     gcg.debug = True
     gcg.generate()
     random = CsoundAC.Random()
     random.thisown = 0
     random.createDistribution("uniform_real")
     random.setElement(7, 11, 1)
     rescale = CsoundAC.Rescale()
     rescale.thisown = 0
     ### rescale.setRescale( CsoundAC.Event.TIME,       1, 0,  1,     120    )
     rescale.setRescale(CsoundAC.Event.TIME, True, False, 20, 120)
     rescale.setRescale(CsoundAC.Event.DURATION, False, False, 0.25, 1.0)
     rescale.setRescale(CsoundAC.Event.INSTRUMENT, True, False, 1, 0)
     rescale.setRescale(CsoundAC.Event.KEY, True, False, 42, 36)
     rescale.setRescale(CsoundAC.Event.VELOCITY, True, True, 30, 12)
     rescale.setRescale(CsoundAC.Event.PAN, True, True, -0.875, 1.75)
     # random.addChild(lindenmayer)
     rescale.addChild(gcg)
     self.sequence.addChild(rescale)
     print
Example #6
def toCsoundAcEvent(athencacl_event):
    csoundac_event = CsoundAC.Event()
    # "Note on" event.
    csoundac_event.setStatus(144)
    # Not used: bpm = athencacl_event['bpm']
    # Csound instrument number
    instrument = athencacl_event['inst']
    csoundac_event.setInstrument(instrument)
    # athenaCL's time is apparently in seconds; the 'bpm' field does not
    # seem to reach the Csound .sco file.
    time_ = athencacl_event['time']
    csoundac_event.setTime(time_)
    # Duration, ditto.
    duration = athencacl_event['dur']
    csoundac_event.setDuration(duration)
    # athenaCL "pitch space" is just semitones, with 0 = C4 = MIDI key 60.
    midi_key = athencacl_event['ps'] + 60.
    csoundac_event.setKey(midi_key)
    # athenaCL's amplitude is in [0, 1].
    midi_velocity = athencacl_event['amp'] * 127.
    csoundac_event.setVelocity(midi_velocity)
    # athenaCL's pan is in [0, 1].
    pan = athencacl_event['pan']
    csoundac_event.setPan(pan)
    return csoundac_event
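A usage sketch with a hypothetical athenaCL event dict, to make the field mapping concrete (the keys follow the lookups above; the values are invented):

athencacl_event = {'inst': 1, 'time': 0.0, 'dur': 0.5, 'ps': 0,
                   'amp': 0.7, 'pan': 0.5, 'bpm': 120}
event = toCsoundAcEvent(athencacl_event)
# 'ps' 0 is middle C, so the key is MIDI 60; 'amp' 0.7 becomes velocity 88.9.
print(event.getKey(), event.getVelocity())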
    def generate(self):
        # mkg-2009-09-14-s
        self.sequence = CsoundAC.Sequence()
        self.model.addChild(self.sequence)
        #self.generate18() # Maybe.
        #self.generate17() # Good, too short.
        #self.generate16() # Probably not.
        #self.generate15() # I think so.
        #self.generate14() # Maybe.
        #self.generate13() # Good, too short.
        #self.generate12() # No.
        #self.generate11() # No.
        #self.generate10() # Maybe.
        #self.generate9() # Yes, could be shorter.
        #self.generate8() # Similar to 9.
        #self.generate6() # Yes, this is a whole piece.
        #self.generate5() # Similar to 6.
        #self.generate4() # Maybe, a little different.
        #self.generate3() # Good.
        #self.generate2() # Not quite.
        #self.generate1() # I think so, may be a duplicate.

        self.generate7()  # Yes! Like 60x60 piece.
        self.generate15()  # I think so.
        self.generate6()  # Yes, this is a whole piece.
        #self.generate17() # Good, too short.
        print
    def generate3(self):
        # mkg-2009-09-14-o-1
        gcg = GeneralizedContextualGroup.GeneralizedContextualGroup()
        gcg.thisown = 0
        # Ends on Cm9.
        gcg.setAxiom('pcs1 V+47 WC R45 a3 Q5 R50 a4 R55 a3 R60 a4 R65 a3 ')
        gcg.addRule('pcs1', 'P(0,4,7,11,14)')

        gcg.addRule('a3', 'a3k a3q a3 a3')
        gcg.addRule('a3k', 'K  WV')
        gcg.addRule('a3q', 'Q3 K D/1.25 WV Q3 V+1 D*1.25 WC')

        gcg.addRule('a4', 'L*2 a4k a4q D/1.25 a4 D/1.25  a4 D*1.25 D*1.25 L/2')
        gcg.addRule('a4k', 'K  WV')
        gcg.addRule('a4q', 'Q4 WV Q4 K V+4 WC')

        ### gcg.setIterationCount(2)
        gcg.setIterationCount(6)
        gcg.debug = True
        gcg.generate()
        rescale = CsoundAC.Rescale()
        rescale.thisown = 0
        ### rescale.setRescale( CsoundAC.Event.TIME,       True, False, (1.0 / 40.0), 120    )
        rescale.setRescale(CsoundAC.Event.TIME, True, False, 20, 120)
        rescale.setRescale(CsoundAC.Event.INSTRUMENT, True, True, 1, 3.99)
        rescale.setRescale(CsoundAC.Event.KEY, True, False, 37, 36)
        rescale.setRescale(CsoundAC.Event.VELOCITY, True, True, 43, 17)
        rescale.setRescale(CsoundAC.Event.PAN, True, True, 0.05, 0.9)
        rescale.addChild(gcg)
        self.sequence.addChild(rescale)
        print
Example #9
 def createGlobalObjects(self):
     print 'CREATING GLOBAL OBJECTS...'
     print
     self.model = CsoundAC.MusicModel()
     self.csound = self.model.getCppSound()
     self.csound.setPythonMessageCallback()  
     self.score = self.model.getScore()
def buildTrack(sequence, channel, bass):
    print 'Building track for channel %3d bass %3d...' % (channel, bass)
    cumulativeTime = 1.0
    for i in xrange(1, 16):
        factor = random.choice([4., 1., 2., 3.])
        for j in xrange(2, 6):
            pitches = random.choice([CM, Em, CM, Em, BbM, GM9, Ab9])
            repeatCount = 1 + int(random.random() * 12)
            for k in xrange(repeatCount):
                measure = readMeasure(minuetTable[j][i], pitches)
                duration = measure.getScore().getDuration()
                offset = factor * duration / (22 / 7)
                rescale = CsoundAC.Rescale()
                rescale.setRescale(CsoundAC.Event.TIME, bool(1), bool(0),
                                   cumulativeTime + offset, 0)
                rescale.setRescale(CsoundAC.Event.INSTRUMENT, bool(1), bool(0),
                                   channel, 0)
                rescale.setRescale(CsoundAC.Event.KEY, bool(1), bool(1),
                                   float(bass), 48)
                rescale.thisown = 0
                rescale.addChild(measure)
                print 'Repeat %4d of %4d at %8.3f with %3d notes of duration %7.3f at bass %7.3f...' % (
                    k + 1, repeatCount, cumulativeTime, len(
                        measure.getScore()), duration, bass)
                sequence.addChild(rescale)
                cumulativeTime = cumulativeTime + duration
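The chord names CM, Em, BbM, GM9, and Ab9 are defined elsewhere in the original script; the readMeasure(number, pitches) variant shown later passes them to Event.setPitches, so they are presumably pitch collections. Hypothetical placeholders, for illustration only:

# Hypothetical pitch-class sets; the original definitions are not shown.
CM  = [0, 4, 7]          # C  E  G
Em  = [4, 7, 11]         # E  G  B
BbM = [10, 2, 5]         # Bb D  F
GM9 = [7, 11, 2, 6, 9]   # G  B  D  F# A
Ab9 = [8, 0, 3, 6, 10]   # Ab C  Eb Gb Bb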
    def generate12(self):
        gcg = GeneralizedContextualGroup.GeneralizedContextualGroup()
        gcg.thisown = 0
        gcg.avoidParallelFifths = True
        gcg.setAxiom('pcs1 V+47 WC R48 a3 seq a4 seq a3 seq a4 seq a3 ')
        gcg.addRule('pcs1', 'P(0,4,7,11,14)')

        gcg.addRule('a3', 'a3k a3q a3 a3')
        gcg.addRule('a3k', 'K  WV')
        gcg.addRule('a3q', 'Q2 K D/1.25 WV Q7 D*1.25 WC')

        gcg.addRule('a4', 'L*2 a4k a4q D/1.25 a4 D/1.25 a4 D*1.25 D*1.25 L/2')
        gcg.addRule('a4k', 'K  WV')
        gcg.addRule('a4q', 'Q4 WV Q4 K V+4 WC')

        gcg.addRule('seq', 'L/5 D/2 Q-1 WV Q-1 WV Q-1 WV Q-1 L*5 D*2')

        gcg.setIterationCount(7)
        gcg.debug = True
        gcg.generate()
        rescale = CsoundAC.Rescale()
        rescale.thisown = 0
        ### rescale.setRescale( CsoundAC.Event.TIME,       True, False, (1.0 / 40.0), 120    )
        rescale.setRescale(CsoundAC.Event.TIME, True, False, 20, 120)
        rescale.setRescale(CsoundAC.Event.INSTRUMENT, True, True, 1, 0)
        rescale.setRescale(CsoundAC.Event.KEY, True, False, 42, 36)
        rescale.setRescale(CsoundAC.Event.VELOCITY, True, True, 43, 17)
        rescale.setRescale(CsoundAC.Event.PAN, True, True, 0.05, 0.9)
        rescale.addChild(gcg)
        self.sequence.addChild(rescale)
        print
def pitch_scale(measure, scale):
    score = measure.getScore()
    for i, event in enumeration(score):
        key = event.getKey()
        key = key * scale
        key = CsoundAC.Conversions_temper(key, 12.)
        event.setKey(key)
        event.temper(12.)
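pitch_scale iterates with an enumeration helper that this excerpt does not define. A minimal sketch, assuming it simply yields (index, event) pairs; the score proxy supports len() and indexing, as other examples here show:

def enumeration(score):
    # Like enumerate(), but via explicit indexing into the SWIG score proxy.
    for index in range(len(score)):
        yield index, score[index]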
Example #13
def to_csoundac_event(midi_event):
    csoundac_event = CsoundAC.Event()
    csoundac_event.setStatus(144)
    csoundac_event.setInstrument(1 + midi_event.channel())
    csoundac_event.setTime(midi_event.time)
    csoundac_event.setDuration(midi_event.duration)
    csoundac_event.setKey(midi_event.keynum())
    csoundac_event.setVelocity(midi_event.velocity())
    return csoundac_event
Example #14
def read_measure(number):
    score_node = CsoundAC.ScoreNode()
    score_node.thisown = 0
    filename = 'M' + str(number) + '.mid'
    score_for_measure = score_node.getScore()
    score_for_measure.load(filename)
    # Remove false notes.
    for i, event in reverse_enumeration(score_for_measure):
        if event.getChannel() < 0:
            score_for_measure.remove(i)
    return score_node
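read_measure, and the readMeasure variants below, rely on a reverse_enumeration helper that is also not shown. A sketch under the assumption that it walks the score backwards by index, which keeps the remaining indices valid while events are removed:

def reverse_enumeration(score):
    # Iterate from the last event to the first so that remove(i) is safe.
    for index in reversed(range(len(score))):
        yield index, score[index]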
Example #15
 def __init__(self, author='Composer', rendering='audio', instrumentLibrary=r'D:\utah\home\mkg\projects\csound-mingw-release\examples\CsoundAC.csd', soundfilePlayer=r'D:\utah\opt\audacity\audacity.exe'):
     print 'SETTING RENDERING AND PLAYBACK OPTIONS...'
     print
     print 'Set "rendering" to:      "cd", "preview" (default), "master", or "audio".'
     print 'Set "playback" to:       True (default) or False.'
     print
     self.rendering = rendering
     self.playback = True
     print 'Rendering option:        %s' % self.rendering
     print 'Play after rendering:    %s' % self.playback
     print
     print 'CREATING FILENAMES...'
     print
     self.began = time.clock()
     self.author = author
     print 'Author:                  %s' % self.author
     self.scriptFilename = os.path.realpath(sys.argv[0])
     print 'Full Python script:      %s' % self.scriptFilename
     self.title, ext = os.path.splitext(os.path.basename(self.scriptFilename))
     print 'Base Python script:      %s' % self.title
     self.directory = os.path.dirname(self.scriptFilename)
     if len(self.directory):
         os.chdir(self.directory)
     print 'Working directory:       %s' % self.directory
     self.orcFilename = self.title + '.orc'
     self.instrumentLibrary = instrumentLibrary
     print 'Instrument library:      %s' % self.instrumentLibrary
     print 'Csound orchestra:        %s' % self.orcFilename
     self.scoFilename = self.title + '.sco'
     print 'Csound score:            %s' % self.scoFilename
     self.midiFilename = self.title + '.mid'
     print 'MIDI filename:           %s' % self.midiFilename
     self.soundfileName = self.title + '.wav'
     print 'Soundfile name:          %s' % self.soundfileName
     self.dacName = 'dac'
     print 'Audio output name:       %s' % self.dacName
     self.soundfilePlayer = soundfilePlayer
     print 'Soundfile player:        %s' % self.soundfilePlayer
     commandsForRendering = {
         'audio':    'csound --messagelevel=1  --noheader                  --nodisplays --sample-rate=44100 --control-rate=100   --midi-key=4 --midi-velocity=5                                                                  --output=%s %s %s' % (self.dacName,                                             self.orcFilename, self.scoFilename),
         'preview':  'csound --messagelevel=99 -W -f --rewrite --dither -K --nodisplays --sample-rate=44100 --control-rate=441   --midi-key=4 --midi-velocity=5 -+id_artist=%s -+id_copyright=Copyright_2007_by_%s -+id_title=%s --output=%s %s %s' % (self.author, self.author, self.title, self.soundfileName, self.orcFilename, self.scoFilename),
         'cd':       'csound --messagelevel=99 -W -f --rewrite --dither -K --nodisplays --sample-rate=44100 --control-rate=44100 --midi-key=4 --midi-velocity=5 -+id_artist=%s -+id_copyright=Copyright_2007_by_%s -+id_title=%s --output=%s %s %s' % (self.author, self.author, self.title, self.soundfileName, self.orcFilename, self.scoFilename),
         'master':   'csound --messagelevel=99 -W -f --rewrite --dither -K --nodisplays --sample-rate=88200 --control-rate=88200 --midi-key=4 --midi-velocity=5 -+id_artist=%s -+id_copyright=Copyright_2007_by_%s -+id_title=%s --output=%s %s %s' % (self.author, self.author, self.title, self.soundfileName, self.orcFilename, self.scoFilename)
     }    
     self.csoundCommand = commandsForRendering[self.rendering]
     print 'Csound command line:     %s' % self.csoundCommand
     print
     print 'CREATING GLOBAL OBJECTS...'
     print
     self.model = CsoundAC.MusicModel()
     self.csound = self.model.getCppSound()
     self.csound.setPythonMessageCallback()
     self.score = self.model.getScore()
Example #16
def readMeasure(number):
    scoreNode = CsoundAC.ScoreNode()
    score_ = scoreNode.getScore()
    scoreNode.thisown = 0
    filename = 'M' + str(number) + '.mid'
    print 'Reading "%s"' % (filename)
    score_.load(filename)
    # Remove false notes.
    for i, event in reverse_enumeration(score_):
        if event.getChannel() < 0:
            score_.remove(i)
    print(score_.getCsoundScore())
    return scoreNode
def readMeasure(number, pitches):
    scoreNode = CsoundAC.ScoreNode()
    scoreNode.thisown = 0
    filename = 'M' + str(number) + '.mid'
    score = scoreNode.getScore()
    score.load(filename)
    # Remove false notes.
    for i, event in reverse_enumeration(score):
        event.setPitches(pitches)
        if event.getChannel() < 0:
            score.remove(i)
    # print score.getCsoundScore()
    score.setDuration(random.choice([12, 9, 15]))
    print 'Read "%s" with duration %9.4f.' % (filename, score.getDuration())
    return scoreNode
    def generate2(self):
        # mkg-2010-03-16-e-1
        gcg = GeneralizedContextualGroup.GeneralizedContextualGroup()
        gcg.avoidParallelFifths = True
        gcg.thisown = 0

        gcg.setAxiom(
            'pcs1 R65 WC V+1 WC V-20 WC a3 a3 dd a4 a4 dd L*2 a3 a3 L/2 arp1 dd K L/4 a4 a3 arp2 dd dd'
        )
        gcg.addRule('pcs1', 'P(0,4,7,14)')

        gcg.addRule('a3', 'a3k a3q a3 a3 ')
        gcg.addRule('a3k', 'K  arp WV WC ')
        gcg.addRule(
            'a3q',
            'Q7 WV K D/1.125 WV arp1 Q3 arp1 arp1 V+2 D*1.12 V+5 WC WC ')

        gcg.addRule('a4',
                    'L*2 a4k a4q a4 arp a3 D/1.012 a3k a4 D*1.0125 L/2 WC ')
        gcg.addRule('a4k', 'K  WV ')
        gcg.addRule('a4q', 'Q3 WV K V+1 WC')

        gcg.addRule('arp', 'V-2 WC V-1 WC ')
        gcg.addRule('arp1',
                    'L/2 D/1.0125 Q5 WV Q5 WV Q5 WV Q5 WV D*1.0125 L*2')
        gcg.addRule(
            'arp2',
            'L/2 D/1.125 Q5 WV Q5 WV Q5 WV Q5 WV Q5 WV Q5 WV D*1.125 L*2')
        gcg.addRule('dd', 'WV WV V+3 WV WV V+3')

        gcg.setIterationCount(4)
        gcg.debug = True
        gcg.generate()
        rescale = CsoundAC.Rescale()
        rescale.thisown = 0
        ### rescale.setRescale( CsoundAC.Event.TIME,       True, False, (1.0 / 40.0), 120     )
        rescale.setRescale(CsoundAC.Event.TIME, True, False, 20, 120)
        rescale.setRescale(CsoundAC.Event.INSTRUMENT, True, True, 1, 0)
        rescale.setRescale(CsoundAC.Event.KEY, True, False, 41, 36)
        rescale.setRescale(CsoundAC.Event.VELOCITY, True, True, 25, 10)
        rescale.setRescale(CsoundAC.Event.PAN, True, True, -0.9, 1.8)
        rescale.addChild(gcg)
        self.sequence.addChild(rescale)
        print
Example #19
def buildTrack(voiceleadingNode,
               sequence,
               channel,
               gain,
               timeoffset,
               pan,
               add=0.0):
    global chord
    print 'Building track for channel %3d gain %3d...' % (channel, gain)
    cumulativeTime = timeoffset
    random.shuffle(repetitions)
    m = 0
    for i in xrange(0, 16):
        for j in xrange(2, 6):
            if True:
                transformation = random.choice(
                    [t2, t5, t7, t9, t10, k0, k2, k5, k7,
                     k10])  #, weights=[1, 5, 2, 1, 1, 3, 3, 1, 1, 1])
                #transformation = random.choice([t2, t5, t7, t9, t10, k0])
                chord = transformation(chord)
                voiceleadingNode.chord(chord, cumulativeTime)
            repeatCount = repetitions[m]
            m = m + 1
            for k in xrange(repeatCount):
                measure = readMeasure(minuetTable[j][i + 1])
                duration = measure.getScore().getDuration()
                rescale = CsoundAC.Rescale()
                rescale.setRescale(CsoundAC.Event.TIME, bool(1), bool(0),
                                   cumulativeTime, 0)
                rescale.setRescale(CsoundAC.Event.INSTRUMENT, bool(1), bool(1),
                                   channel, 0)
                rescale.setRescale(CsoundAC.Event.VELOCITY, bool(1), bool(0),
                                   gain, 0)
                rescale.setRescale(CsoundAC.Event.PAN, bool(1), bool(1), pan,
                                   0)
                rescale.setRescale(CsoundAC.Event.KEY, bool(1), bool(0), add,
                                   0)
                rescale.thisown = 0
                rescale.addChild(measure)
                print 'Repeat %4d of %4d at %8.3f with %3d notes of duration %7.3f...' % (
                    k + 1, repeatCount, cumulativeTime, len(
                        measure.getScore()), duration)
                sequence.addChild(rescale)
                cumulativeTime = cumulativeTime + duration
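The transformations t2, t5, t7, t9, t10 and k0, k2, k5, k7, k10 are defined elsewhere in the original script. Judging by their names and by CsoundAC's chord operations, they look like transpositions (T) and contextual inversions (K) of the global CsoundAC.Chord; hedged sketches of two of them:

def t2(chord):
    # Assumed: transpose the chord up 2 semitones (Chord.T).
    return chord.T(2.0)

def k2(chord):
    # Assumed: apply the K inversion (Chord.K), then transpose up 2.
    return chord.K().T(2.0)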
    def generate8(self):
        # mkg-2009-12-30-10
        gcg = GeneralizedContextualGroup.GeneralizedContextualGroup()
        gcg.avoidParallelFifths = True

        gcg.setAxiom(
            'pcs1 R66 WC V+11 WC V-2 WC a3 dd a4 dd L*2 a3 L/2 arp1 dd K D*1.875 L/4 a4 arp2 dd dd'
        )
        gcg.addRule('pcs1', 'P(0,4,7,14)')

        gcg.addRule('a3', 'a3k a3q a3 a3 ')
        gcg.addRule('a3k', 'K  arp WV WC ')
        gcg.addRule('a3q', 'Q7     WV K D/1.245 WV arp1 V+2 D*1.25 V+5 WC WC ')

        gcg.addRule(
            'a4',
            'L*2 a4k a4q D/1.28 a4 arp a3 D/1.25 a3k a4 D*1.25 D*1.25 L/2 WC ')
        gcg.addRule('a4k', 'K  WV ')
        gcg.addRule('a4q', 'Q3 WV K V+1 WC')

        gcg.addRule('arp', 'V+18 WC V-21 WC ')
        gcg.addRule('arp1', 'L/2 D/2.125 Q5 WV Q5 WV Q5 WV Q5 WV D*2.125 L*2')
        gcg.addRule(
            'arp2',
            'L/2 D/2.125 Q5 WV Q5 WV Q5 WV Q5 WV Q5 WV Q5 WV D*2.125 L*2')
        gcg.addRule('dd', 'WV WV ')

        gcg.setIterationCount(3)
        gcg.debug = True
        gcg.generate()
        rescale = CsoundAC.Rescale()
        ### rescale.setRescale( CsoundAC.Event.TIME,       True, False, (1.0 / 40.0), 120     )
        rescale.setRescale(CsoundAC.Event.TIME, True, False, 20, 120)
        rescale.setRescale(CsoundAC.Event.INSTRUMENT, True, True, 1, 0)
        rescale.setRescale(CsoundAC.Event.KEY, True, False, 37, 36)
        rescale.setRescale(CsoundAC.Event.VELOCITY, True, True, 25, 40)
        rescale.setRescale(CsoundAC.Event.PAN, True, True, -0.9, 1.8)
        rescale.addChild(gcg)
        self.sequence.addChild(rescale)
        print
    def generate7(self):
        # mkg-2009-12-30-1
        gcg = GeneralizedContextualGroup.GeneralizedContextualGroup()
        gcg.thisown = 0

        gcg.setAxiom(
            'pcs1 R72 WC V+29 WC V-2 WC a3 V+5 WC V+5 WC Q5 a4 R55 a3 a4 a3 arp WV WV WV WV'
        )
        gcg.addRule('pcs1', 'P(0,3,7,10)')

        gcg.addRule('a3', 'a3k a3q a3 a3 WC ')
        gcg.addRule('a3k', 'K  arp WV')
        gcg.addRule('a3q', 'Q2 WV K D/1.245 WV arp V+2 D*1.25 V+5 WC ')

        gcg.addRule(
            'a4',
            'L*2 a4k a4q D/1.27 a4 arp a3 D/1.25  a3k a4 D*1.25 arp D*1.25 L/2 WC '
        )
        gcg.addRule('a4k', 'K  WV')
        gcg.addRule('a4q', 'Q5 WV K V+4 WC')

        gcg.addRule('arp', 'V+12 WC V+2 VC arp V-13 VC ')

        gcg.setIterationCount(3)
        gcg.debug = True
        gcg.generate()
        rescale = CsoundAC.Rescale()
        rescale.thisown = 0
        ###rescale.setRescale( CsoundAC.Event.TIME,       True, False, (1.0 / 40.0), 120    )
        rescale.setRescale(CsoundAC.Event.TIME, True, False, 2, 120)
        ###rescale.setRescale( CsoundAC.Event.INSTRUMENT, True, False,  0,             3.99 )
        rescale.setRescale(CsoundAC.Event.INSTRUMENT, True, True, 2, 0)
        ###rescale.setRescale( CsoundAC.Event.KEY,     True, False, 37,            36    )
        rescale.setRescale(CsoundAC.Event.KEY, True, False, 36, 36)
        rescale.setRescale(CsoundAC.Event.VELOCITY, True, True, 43, 17)
        rescale.setRescale(CsoundAC.Event.PAN, True, True, 0.05, 0.9)
        rescale.addChild(gcg)
        self.sequence.addChild(rescale)
        print
 def generate(self):
     # mkg-2009-09-14-s
     self.sequence = CsoundAC.Sequence()
     self.model.addChild(self.sequence)
     self.generate18()
     self.generate17()
     self.generate16()
     self.generate15()
     self.generate14()
     self.generate13()
     self.generate12()
     self.generate11()
     self.generate10()
     self.generate9()
     self.generate8()
     self.generate7()
     self.generate6()
     self.generate5()
     self.generate4()
     self.generate3()
     self.generate2()
     self.generate1()
     print
Example #23
def readCatalog(section, voiceleadingNode):
    global scale
    global chord
    print('SECTION', section)
    score = CsoundAC.ScoreNode()
    score.thisown = 0
    # Put the section into a Rescale node to position it in the piece.
    rescale = CsoundAC.Rescale()
    rescale.thisown = 0
    rescale.addChild(score)
    f = open(section[0])
    # Read the file until we run into '--'
    # which marks the last line of non-data.
    while True:
        line = f.readline()
        if line.find('--') >= 0:
            # Read the file until done.
            # The fields we want are:
            # Right ascension   1
            # Declination       2
            # Visual magnitude  8
            # Spectral type     9
            while True:
                line = f.readline()
                if not line:
                    break
                fields = line.split('\t')
                if len(fields) <= 8:
                    break
                # print(fields)
                time = float(fields[1])
                key = float(fields[2])
                velocity = float(fields[8])
                pan = random.uniform(.05, .95)
                if len(fields) > 9:
                    instrument = float(instrumentsForSpectralTypes[fields[9][0]])
                else:
                    instrument = 8
                score.getScore().append(time, velocity * 0.001, 144.0, instrument, key, velocity, 0.0, pan)
                score_ = score.getScore()
                score_[score_.size() -1].setProperty("section", str(section))
            #print(score.getScore().toString())
            scoreTime = section[1]
            scoreDuration = section[2]
            shortest = 4.0
            durationRange = 8.0
            key = section[3]
            range_ = section[4]
            lowest = section[5]
            dynamicRange = section[6]
            leftmost = section[7]
            width = section[8]
            print('time:          ', scoreTime)
            print('duration:      ', scoreDuration)
            print('shortest:      ', shortest)
            print('duration range:', durationRange)
            print('key:           ', key)
            print('range:         ', range_)
            print('lowest:        ', lowest)
            print('dynamic range: ', dynamicRange)
            print('leftmost:      ', leftmost)
            print('width:         ', width)
            print()
            rescale.setRescale(CsoundAC.Event.TIME,      True, True, scoreTime, scoreDuration)
            rescale.setRescale(CsoundAC.Event.DURATION,  True, True, shortest,  durationRange)
            rescale.setRescale(CsoundAC.Event.KEY,       True, True, key,       range_)
            rescale.setRescale(CsoundAC.Event.VELOCITY,  True, True, lowest,    dynamicRange)
            rescale.setRescale(CsoundAC.Event.PHASE,     True, True, leftmost,  width)
            # Now generate the harmony as a function of scoreDuration and add the chords.
            changes = random.randint(6, 15)
            progression = random.choices([-2, -4, -8, -10, -12, 3], [10, 1, 2, 1, 2, 4], k=changes)
            secondsPerChord = scoreDuration / len(progression)
            chordTime = scoreTime
            for steps in progression:
                print("Time: {:9.4f} chord: {} name: {}".format(chordTime, chord.toString(), chord.eOP().name()))
                voiceleadingNode.chord(chord, chordTime)
                chord = scale.transpose_degrees(chord, steps)
                chordTime = chordTime + secondsPerChord
            scale = modulate(scale, chord)
            return rescale
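readCatalog ends by calling modulate(scale, chord), which is not shown. Given the scale.modulations(chord) idiom used in the build_voice example below, a plausible sketch treats the current chord as a pivot and picks one of the keys it belongs to at random:

def modulate(scale, chord):
    # Hypothetical: choose a random scale that contains the pivot chord.
    candidate_scales = scale.modulations(chord)
    if len(candidate_scales) > 1:
        return random.choice(candidate_scales)
    return scale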
Example #24
print('Set dac_name for system.')
dac_name = "dac:plughw:1,0"
print('dac_name:               \"{}\"'.format(dac_name))
print()

import CsoundAC
import os
import random
import signal
import string
import sys
import traceback

random.seed(509382)

CsoundAC.System_setMessageLevel(15)

scriptFilename = sys.argv[0]
print('Full Python script:     \"%s\"' % scriptFilename)
title, ext = os.path.splitext(os.path.basename(scriptFilename))

model = CsoundAC.MusicModel()
model.setTitle("Zodiac-v2")
model.setAuthor("Michael Gogins")
model.setArtist("Michael Gogins")
model.setYear("2020")
print("")
model.generateAllNames()
print("")
CsoundAC.System_setMessageLevel(3)
Example #25
import CsoundAC
import os
import random
# Using the same random seed for each performance makes the performance 
# deterministic, not random.
random.seed(221)
import signal
import string
import sys
import traceback

print('Set "rendering" to:     "soundfile" or "audio".')
print()
rendering = "audio"

model = CsoundAC.MusicModel()
score = model.getScore()

script_filename = sys.argv[0]
print('Full Python script:     %s' % script_filename)
title, exte = os.path.splitext(os.path.basename(script_filename))
model.setTitle(title)
model.setArtist("Michael Gogins")
model.setAuthor("Michael Gogins")
model.setYear("2020")
model.generateAllNames()
soundfile_name = model.getOutputSoundfileFilepath()
print('Soundfile name:         %s' % soundfile_name)
dac_name = 'dac:plughw:1,0'
print('Audio output name:      %s' % dac_name)
print()
Example #26
def build_voice(voiceleading_node, sequence, instrument, bass, time_offset, pan):
    global repetitions_for_measures
    global tempo
    global off_time
    global chord
    global scale
    # Ensure that each voice plays a different sequence of repetitions, as in 
    # "In C". But note that shuffling, rather than resampling, ensures 
    # that each voice actually plays the same number of bars.
    random.shuffle(repetitions_for_measures)
    random.shuffle(forte_measures)
    bars_total = sum(repetitions_for_measures)
    print("Instrument: {:3} measures: {} bars: {} repetitions_for_measures: {}".format(instrument, len(repetitions_for_measures), bars_total, repetitions_for_measures))    
    print("Instrument: {:3} measures: {} bars: {} forte_mearues:            {}".format(instrument, len(repetitions_for_measures), bars_total, forte_measures))    
    print()
    bars_played = 0
    real_time = 1.0
    cumulative_time = real_time + time_offset
    # Make both pitch range and dynamic range get bigger through time.
    bass_at_end = bass - 5
    bass_increment_per_bar = (bass_at_end - bass) / bars_total
    range_ = 48.
    range_at_end = 52.
    range_increment_per_bar = (range_at_end - range_) / bars_total
    piano = 60.
    piano_at_end = 56.
    piano_increment_per_bar = (piano_at_end - piano) / bars_total
    dynamic_range = 20.
    dynamic_range_at_end = 30.
    dynamic_range_increment_per_bar = (dynamic_range_at_end - dynamic_range) / bars_total
    # Mozart's minuet table has columns indexed [1,16] and rows indexed [2,12]. 
    repetitions_for_measure_index = 0
    # Preserve Mozart's indexing.
    for minuet_column in range(1, columns_to_play + 1):
        for minuet_row in range(2, rows_to_play + 2):
            repetitions_for_measure = repetitions_for_measures[repetitions_for_measure_index]
            forte = forte_measures[repetitions_for_measure]
            repetitions_for_measure_index = repetitions_for_measure_index + 1
            scales = scale.modulations(chord)
            scale_count = len(scales)
            count = 0
            # After picking a number of repetitions for a measure, find if the 
            # current chord can be a pivot chord, and if so, choose one of the 
            # possible modulations to perform. Do this in the first voice 
            # only, but it will be applied to all voices.
            if (scale_count > 1 and time_offset == 0):
                random_index = random.randint(0, scale_count -1)
                for s in scales:
                    print("Possible modulation at: {:9.4f} {} {}".format(cumulative_time, s.toString(), s.name()))
                    if count == random_index:
                        scale = s  
                        print("             Chose modulation to: {} {}".format(scale.toString(), scale.name()))
                    count = count + 1
                print()
            for k in range(repetitions_for_measure):
                if time_offset == 0:
                    # Once the scale is chosen, perform root progressions 
                    # within the scale; away from the tonic in multiples of -2
                    # scale degrees, back to the tonic in multiples of 1 scale 
                    # degree with a preference for 3 steps (as used by V to I). 
                    # These root progressions are random but weighted.
                    progression = random.choices([-2, -4, -6, -8, -10, -12, 3, 6], [10, 3, 2, 1, 1, 1, 8, 3], k=1)
                    steps = progression[0]
                    chord = scale.transpose_degrees(chord, steps)
                    voiceleading_node.chord(chord, cumulative_time)
                measure = read_measure(minuet_table[minuet_row][minuet_column])
                score_for_measure = measure.getScore()
                duration = score_for_measure.getDuration() * tempo
                score_for_measure.setDuration(duration)
                rescale = CsoundAC.Rescale() 
                rescale.setRescale(CsoundAC.Event.TIME, bool(1), bool(0), cumulative_time, 0)
                rescale.setRescale(CsoundAC.Event.INSTRUMENT, bool(1), bool(1), instrument, 0)
                bass = bass + bass_increment_per_bar
                range_ = range_ + range_increment_per_bar
                rescale.setRescale(CsoundAC.Event.KEY, bool(1), bool(1), bass, range_)
                piano = piano + piano_increment_per_bar
                dynamic_range = dynamic_range + dynamic_range_increment_per_bar
                rescale.setRescale(CsoundAC.Event.VELOCITY, bool(1), bool(1), piano + (forte * 4), dynamic_range)
                rescale.setRescale(CsoundAC.Event.PAN, bool(1), bool(1), pan, float(0))
                rescale.thisown = 0
                rescale.addChild(measure)
                bars_played = bars_played + 1
                sequence.addChild(rescale)
                cumulative_time = cumulative_time + duration
                real_time = real_time + duration
    print("Bars played for instrument {}: {}".format(instrument, bars_played))
    print()
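A hedged usage sketch for build_voice: one call per voice, with staggered entry times, distinct instruments, and distinct pans. The node wiring is an assumption; only the voiceleading_node.chord calls inside build_voice appear in the original.

voiceleading_node = CsoundAC.VoiceleadingNode()
voiceleading_node.thisown = 0
sequence = CsoundAC.Sequence()
sequence.thisown = 0
model.addChild(voiceleading_node)     # hypothetical wiring
voiceleading_node.addChild(sequence)
# (instrument, bass, time_offset, pan) for each voice.
for voice in [(1, 36., 0.0, 0.25), (2, 43., 8.0, 0.75), (3, 48., 16.0, 0.5)]:
    build_voice(voiceleading_node, sequence, *voice)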
Example #27
import CsoundAC
import os
import random
# Using the same random seed for each performance makes the performance 
# deterministic, not random.
random.seed(221)
import signal
import string
import sys
import traceback

print('Set "rendering" to:     "soundfile" or "audio".')
print()
rendering = "soundfile"

model = CsoundAC.MusicModel()
score = model.getScore()

script_filename = sys.argv[0]
print('Full Python script:     %s' % script_filename)
title, exte = os.path.splitext(os.path.basename(script_filename))
model.setTitle(title)
model.setArtist("Michael Gogins")
model.setAuthor("Michael Gogins")
model.setYear("2020")
model.generateAllNames()
soundfile_name = model.getOutputSoundfileFilepath()
print('Soundfile name:         %s' % soundfile_name)
dac_name = 'dac:plughw:1,0'
print('Audio output name:      %s' % dac_name)
print()
Example #28
            print("command:", line)
            print(interpreter.cmd(line))
    return interpreter


if __name__ == '__main__':

    # Unit test:

    script = '''
    emo cn
    pin a d3,e3,g3,a3,b3,d4,e4,g4,a4,b4,d5,e5,g5,a5,b5
    tmo ha
    # This is a comment.
    tin a 6 27
    tie r pt,(c,16),(ig,(bg,rc,(1,2,3,5,7)),(bg,rc,(3,6,9,12))),(c,1)
    tie a om,(ls,e,9,(ru,.2,1),(ru,.2,1)),(wp,e,23,0,0,1)
    tie d0 c,0
    tie d1 n,100,2,0,14
    tie d2 c,1
    tie d3 c,1
    tie d3 ru,1,4
    '''
    interpreter = interpret(script)
    csoundac_score = CsoundAC.Score()
    toCsoundAcScore(interpreter.ao, csoundac_score)
    print()
    print("Generated CsoundAC Score (in Csound .sco format):")
    print(csoundac_score.getCsoundScore())
    print("Finished.")
Example #29
 def areParallel(self, a, b):
     return CsoundAC.areParallel(a,b)
Example #30
'''
This script tests various Score voice-leading methods.
Most of these methods are tested in separate segments of notes. 
Most methods have more than one test segment.

Run the script, then examine the generated VoiceleadingNodeUnitTests.py.mid MIDI
sequence file in a notation program and verify the correctness of each section.
'''

import CsoundAC
import random
filename = 'VoiceleadingNodeUnitTests.py'
model = CsoundAC.MusicModel()
model.setCppSound(csound)
score = model.getScore()
CsoundAC.System_setMessageLevel(1 + 2 + 4 + 8)


commandsForRendering = {
    'cd':
    'csound -r 44100 -k 44100 -m195 -+msg_color=0 -RWZdfo %s' %
    (soundfileName),
    'preview':
    'csound -r 44100 -k 100   -m195 -+msg_color=0 -RWZdfo %s' %
    (soundfileName),
    'audio':
    'csound -r 44100 -k 100   -m195 -+msg_color=0 -RWZdfo %s' % (dacName),
}
csoundCommand = commandsForRendering[rendering]
print 'Csound command line:    %s' % csoundCommand
print

print 'CREATING GLOBAL OBJECTS...'
print
model = CsoundAC.MusicModel()
score = model.getScore()

print 'CREATING MUSIC MODEL...'
print

minuetTable = {}
# Put some changes in here, perhaps a bitonal differential.
# There needs to be some association between a measure and a harmony.
# This could be in one, or even several, master tracks, or in a separate track.
minuetTable[2] = {
    1: 96,
    2: 22,
    3: 141,
    4: 41,
    5: 105,