def _scoreMain(self):
    """creates score

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('lg')
    >>> ti.tmName == 'LineGroove'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # texture-wide (self.textQ amd) options
    # used for optional parallel voices
    textParallelVoiceList = self.getTextStatic('pml', 'transpositionList')
    textParallelDelayTime = self.getTextStatic('pml', 'timeDelay')
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    # walk each path position; each provides one chord to draw pitches from
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        selectorChordPos = basePmtr.Selector(
            list(range(len(chordCurrent))), textPitchSelectorControl)
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
        # one event per iteration until this set's clock span is exhausted
        while tCurrent < tEndSet:
            # choose pc from chord via the selector (position w/n chord)
            ps = chordCurrent[selectorChordPos()]
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                                             self.temperamentObj, transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if acc == 0 and not self.silenceMode:
                # a rest: advance time only, store nothing
                tCurrent += dur
                continue
            # amp, pan, aux are each drawn once per event, in this order
            amp = self.getAmp(tCurrent) * acc
            pan = self.getPan(tCurrent)
            aux = self.getAux(tCurrent)
            event = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                   amp, psReal, pan, aux)
            self.storeEvent(event)
            # parallel transposition: one delayed event per transposition
            offset = 0
            for parallelVoice in textParallelVoiceList:
                # delay each voice to avoid amp problems; track the total
                # so the clock can be corrected afterwards
                tCurrent += textParallelDelayTime
                offset += textParallelDelayTime
                psText = pitchTools.psTransposer(psReal, parallelVoice)
                event = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psText, pan, aux)
                self.storeEvent(event)
            # move clock forward by dur, undoing the accumulated delay
            tCurrent = (tCurrent + dur) - offset
        self.clockForward()  # advances path positon
    return 1
def _scoreMain(self):
    """creates score

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('IntervalExpansion')
    >>> ti.tmName == 'IntervalExpansion'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # ornament generator shared by all events of this texture
    self.ornamentObj = ornament.Ornament(self.pmtrObjDict,
                                         self.temperamentObj)
    # texture-wide PATH/PITCH elements
    # pitches do not come from here, but from below
    path = self.path.get('scPath')
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # texture-wide TEXTURE (self.textQ amd) options
    textRepeatToggle = self.getTextStatic('lws', 'onOff')
    ornamentSwitch = self.getTextStatic('ols', 'libraryName')
    ornamentMaxDensity = self.getTextStatic('omd', 'percent')
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    # create a randomUniform parameter object to control ornament control
    # values between 0 and 1; if pmtr() <= ornamentMaxDensity
    ruPmtrObj = parameter.factory(('randomUniform', 0, 1))
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        # per-set state: cycle position in chord, mute flag, pitches seen
        chordLength = len(chordCurrent)
        chordIndex = 0
        muteSet = 'off'
        psTest = []  # accumulates pitches sounded; gates muteSet below
        ornamentIndex = 0
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
        while 1:  # PITCH in CHORD
            if tCurrent >= tEndSet:
                break
            # take the next pitch in chord order, wrapping at chord end
            ps = chordCurrent[chordIndex]
            psTest.append(ps)
            chordIndex = chordIndex + 1  # shift to next pitch
            if chordIndex >= chordLength:
                chordIndex = 0
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if muteSet == 'on':  # make all rests
                acc = 0  # everything is a rest after chord inex reset
            if acc == 0 and not self.silenceMode:  # this is a rest
                # NOTE(review): if chordIndex just wrapped to 0 this sets
                # it to -1 (Python indexes from the end) — confirm intended
                chordIndex = chordIndex - 1  # dont count this note if a rest
                psTest = psTest[:-1]  # drop off last addition
                tCurrent = tCurrent + dur
                continue
            # choose AMP, PAN
            amp = self.getAmp(tCurrent) * acc
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent)
            parentEventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus,
                                             acc, amp, psReal, pan, auxiliary)
            refDict = self.getRefDict(tCurrent)
            # "rhythm" not "dur" (dur includes overlap)
            if ornamentSwitch != 'off':
                # check if an ru value is <= ornament density (if 1, always)
                # time value is not important
                if ruPmtrObj(tCurrent) <= ornamentMaxDensity:
                    repretory = self._ornGroupGet(ornamentSwitch)
                    #a, b, c, d = repretory[ornamentIndex] # this will do in order
                    a, b, c, d = random.choice(
                        repretory)  # choose orn at random
                    subEventArray = self.ornamentObj.create(
                        refDict, a, b, c, d)
                    # process sub event array: shift each sub-event's pitch
                    # by the dynamic 'ornamentShift' value at its time
                    for iSub in range(len(subEventArray)):
                        # get time from subEvent
                        subEvent = subEventArray[iSub]
                        val = self.getTextDynamic('ornamentShift',
                                                  subEvent['time'])
                        subEvent['ps'] = subEvent['ps'] + val
                    self.storePolyEvent(parentEventDict, subEventArray, 'orn')
                    ornamentIndex = ornamentIndex + 1  # increment for when ordered
                    if ornamentIndex == len(repretory):
                        ornamentIndex = 0
                else:
                    # density check failed: store the plain parent event
                    self.storeEvent(parentEventDict)
            else:  # ornament == 'off': # dont do ornaments
                self.storeEvent(parentEventDict)
            # turn of further notes if all gotten
            if textRepeatToggle == 'off' and len(psTest) >= chordLength:
                muteSet = 'on'
            # move clocks forward by rhythm unit
            tCurrent = tCurrent + dur
        self.clockForward()  # advances path positon
    return 1
def _scoreMain(self):
    """creates score
    note: octave choose for every note

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('HarmonicShuffle')
    >>> ti.tmName == 'HarmonicShuffle'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()  # needed for preliminary parameter values
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # get static texture values
    textMultisetSelectorControl = self.getTextStatic("msc", "selectionString")
    textPitchSelectorControl = self.getTextStatic("psc", "selectionString")
    textMaxTimeOffset = self.getTextStatic("mto", "time")
    textFieldLevel = self.getTextStatic("lfp", "level")
    textOctaveLevel = self.getTextStatic("lop", "level")
    pLen = self.getPathLen()
    # wrap range in list() for Python 3 (consistent w/ sibling textures)
    selectorMultisetPos = basePmtr.Selector(list(range(pLen)),
                                            textMultisetSelectorControl)
    # random generator for creating offset in vetical attacks
    # same technique used in LiteralVertical, DroneArticulate
    self.gaussPmtrObj = parameter.factory(("randomGauss", 0.5, 0.1, -1, 1))
    while tCurrent < tEnd:
        pathPos = selectorMultisetPos()  # select path position
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        # get number of simultaneities from this multiset
        # count is probabilistic, absolute value; cannot be zero
        multisetCount = abs(drawer.floatToInt(
            self.getTextDynamic("countPerMultiset", tCurrent), "weight"))
        # make zero == 1; alternatively, make zero a skib and continue
        if multisetCount == 0:
            multisetCount = 1
        if textFieldLevel == "set":
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == "set":
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
        # number of times a simultaneity is drawn
        for k in range(multisetCount):
            if tCurrent > tEnd:
                break
            # create a selector to get pitches from chord as index values
            # only need to create one for each chord
            selectorChordPos = basePmtr.Selector(
                list(range(len(chordCurrent))), textPitchSelectorControl)
            # determine how many pitches in this simultaneity
            # abs value, rounded to nearest integer
            simultaneityCount = abs(drawer.floatToInt(
                self.getTextDynamic("countPerSimultaneity", tCurrent),
                "weight"))
            # if zero set to max chord size; clamp to chord size
            if simultaneityCount == 0:
                simultaneityCount = len(chordCurrent)
            elif simultaneityCount > len(chordCurrent):
                simultaneityCount = len(chordCurrent)
            self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                             None, None)
            if textFieldLevel == "event":
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == "event":
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            # rhythm, amp, pan, aux: all chosen once per simultaneity
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if acc == 0 and not self.silenceMode:  # this is a rest
                tCurrent = tCurrent + dur
                continue
            amp = self.getAmp(tCurrent) * acc  # choose amp, pan
            pan = self.getPan(tCurrent)
            tThisChord = copy.deepcopy(tCurrent)
            # get each pitch in the simultaneity
            for i in range(simultaneityCount):  # pitch in chord
                ps = chordCurrent[selectorChordPos()]  # position w/n chord
                self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                                 None, None)
                if textFieldLevel == "voice":
                    transCurrent = self.getField(tCurrent)  # PITCHFIELD
                if textOctaveLevel == "voice":
                    octCurrent = self.getOct(tCurrent)  # choose OCTAVE
                psReal = pitchTools.psToTempered(ps, octCurrent,
                                                 self.temperamentObj,
                                                 transCurrent)
                self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                                 None, psReal)
                # aux values are drawn here once per voice;
                # this is common to TMs: DroneArticulate, DroneSustain
                auxiliary = self.getAux(tCurrent)  # chooose AUX
                # offset value is between -textMaxOffset, 0, +textMaxOffset
                offset = self.gaussPmtrObj(0.0) * textMaxTimeOffset
                tCurrent = tCurrent + offset
                if tCurrent < 0:  # cant start before 0
                    tCurrent = tThisChord  # reset
                eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus,
                                           acc, amp, psReal, pan, auxiliary)
                self.storeEvent(eventDict)
                # restore time to tCurrent before processing offset again
                tCurrent = tThisChord
            # move clocks forward by dur unit
            tCurrent = tCurrent + dur
    return 1
def _scoreMain(self):
    """creates score
    note: octave choose for every note

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('HarmonicAssembly')
    >>> ti.tmName == 'HarmonicAssembly'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()  # needed for preliminary parameter values
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # get field, octave selection method value
    textMaxTimeOffset = self.getTextStatic('mto', 'time')
    textFieldLevel = self.getTextStatic('lfp', 'level')
    textOctaveLevel = self.getTextStatic('lop', 'level')
    pLen = self.getPathLen()
    # random generator for creating offset in vetical attacks
    # same technique used in LiteralVertical, DroneArticulate
    self.gaussPmtrObj = parameter.factory(('randomGauss', .5, .1, -1, 1))
    while tCurrent < tEnd:
        # takes absolute value, and proportionally weight toward nearest int
        # modulus of path length
        pathPos = abs(drawer.floatToInt(
            self.getTextDynamic('multisetPosition', tCurrent),
            'weight')) % pLen
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        # get number of simultaneities from this multiset
        # count is probabilistic, absolute value; cannot be zero
        multisetCount = abs(drawer.floatToInt(
            self.getTextDynamic('countPerMultiset', tCurrent), 'weight'))
        # make zero == 1; alternatively, make zero a skib and continue
        if multisetCount == 0:
            multisetCount = 1
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
        # number of times a simultaneity is drawn
        for k in range(multisetCount):
            if tCurrent > tEnd:
                break
            # determine how many pitches in this simultaneity
            # abs value, rounded to nearest integer
            simultaneityCount = abs(drawer.floatToInt(
                self.getTextDynamic('countPerSimultaneity', tCurrent),
                'weight'))
            # if zero set to max chord size
            if simultaneityCount == 0:
                simultaneityCount = len(chordCurrent)
            elif simultaneityCount > len(chordCurrent):
                simultaneityCount = len(chordCurrent)
            self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            # rhythm, amp, pan, aux: all chosen once per simultaneity
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if acc == 0 and not self.silenceMode:  # this is a rest
                tCurrent = tCurrent + dur
                continue
            amp = self.getAmp(tCurrent) * acc  # choose amp, pan
            pan = self.getPan(tCurrent)
            tThisChord = copy.deepcopy(tCurrent)
            # get each pitch in the simultaneity
            for i in range(simultaneityCount):  # pitch in chord
                # use a generator to get pitches from chord as index values
                # get values from generator for each pitch in simultaneity
                # may want to reset parameter object for each chord above
                # take modulus of chord length; proportional weighting to
                # integer
                chordPos = abs(drawer.floatToInt(
                    self.getTextDynamic('pitchPosition', tCurrent),
                    'weight')) % len(chordCurrent)  # get position w/n chord
                ps = chordCurrent[chordPos]
                self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                                 None, None)
                if textFieldLevel == 'voice':
                    transCurrent = self.getField(tCurrent)  # PITCHFIELD
                if textOctaveLevel == 'voice':
                    octCurrent = self.getOct(tCurrent)  # choose OCTAVE
                psReal = pitchTools.psToTempered(ps, octCurrent,
                                                 self.temperamentObj,
                                                 transCurrent)
                self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                                 None, psReal)
                # aux values are drawn here once per voice;
                # this is common to TMs: DroneArticulate, DroneSustain
                auxiliary = self.getAux(tCurrent)  # chooose AUX, pack in list
                # offset value is between -textMaxOffset, 0, +textMaxOffset
                offset = self.gaussPmtrObj(0.0) * textMaxTimeOffset
                tCurrent = tCurrent + offset
                if tCurrent < 0:  # cant start before 0
                    tCurrent = tThisChord  # reset
                eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus,
                                           acc, amp, psReal, pan, auxiliary)
                self.storeEvent(eventDict)
                # restore time to tCurrent before processing offset again
                tCurrent = tThisChord
            # move clocks forward by dur unit
            tCurrent = tCurrent + dur
    return 1
def _scoreMain(self):
    """creates score
    note: octave choose for every note

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('TimeSegment')
    >>> ti.tmName == 'TimeSegment'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()  # needed for preliminary parameter values
    # tStart, tEnd = self.getTimeRange()
    # tCurrent = tStart
    # get field, octave selection method value
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    # this is level tt event count numbers are applied (not count itself)
    textLevelEventCount = self.getTextStatic('lec', 'level')
    textTotalSegmentCount = self.getTextStatic('tsc', 'count')
    segmentManifest = []  # store [count, segWeight, start, end]
    # process segments first, determine events per segemtn
    tSeg = 0
    if textLevelEventCount == 'segment':  # event count per segement
        for q in range(textTotalSegmentCount):
            eventCount = self.getTextDynamic('eventCountGenerator', tSeg)
            segmentManifest.append([int(round(eventCount))])  # store as list
            tSeg = tSeg + 1
    elif textLevelEventCount == 'count':  # event count is total
        # get one value and divide
        eventCount = self.getTextDynamic('eventCountGenerator', tSeg)
        segEventCount = int(round((eventCount / textTotalSegmentCount)))
        if segEventCount <= 0:
            segEventCount = 1  # force minimum per seg
        for q in range(textTotalSegmentCount):
            segmentManifest.append([segEventCount])  # store as list
    # get total duration
    tStart, tEnd = self.getTimeRange()
    tDurSpan = tEnd - tStart  # not final dur, but time start span
    # get segment proportions
    tSeg = 0  # segment count as event step size
    segmentWidth = []  # store widths before getting scaled size
    for q in range(textTotalSegmentCount):
        # what if segment widht is zero?
        val = self.getTextDynamic('segmentWidthGenerator', tSeg)
        if val <= 0:
            pass  # continue or warn?
        segmentWidth.append(val)
        tSeg = tSeg + 1
    # transfrom segment width into a collection of boundaries
    segmentBounds = unit.unitBoundaryProportion(segmentWidth)
    for q in range(textTotalSegmentCount):
        s, m, e = segmentBounds[q]
        segmentManifest[q].append(s * tDurSpan)
        segmentManifest[q].append(e * tDurSpan)
    # get texture start time as init time
    tCurrent = tStart  # defined abovie
    # if field/oct vals are taken once per set, pre calculate and store
    # in a list; access from this list with pathPos index
    fieldValBuf = []
    if textFieldLevel == 'set':
        for q in range(self.getPathLen()):
            s, e = self.clockPoints(q)  # use path df start time
            fieldValBuf.append(self.getField(s))
    octValBuf = []
    if textOctaveLevel == 'set':
        for q in range(self.getPathLen()):
            s, e = self.clockPoints(q)
            octValBuf.append(self.getOct(s))
    # iterate through segments in order
    for segPos in range(textTotalSegmentCount):
        segEventCount = segmentManifest[segPos][0]  # count is first in list
        tStartSeg = segmentManifest[segPos][1]
        tEndSeg = segmentManifest[segPos][2]
        # create events for thsi segment
        for i in range(segEventCount):
            # get generator value w/n unit interval
            tUnit = unit.limit(self.getTextDynamic('fillGenerator', tCurrent))
            tCurrent = unit.denorm(tUnit, tStartSeg, tEndSeg)
            pathPos = self.clockFindPos(tCurrent)  # pos for current time
            # fixed: use 'is None' and Py3 raise-call syntax (was a
            # Python-2-only 'raise ValueError, ...' statement)
            if pathPos is None:
                raise ValueError('tCurrent out of all time ranges')
            # need to determin path position based on time point of event
            chordCurrent = self.getPitchGroup(pathPos)
            multisetCurrent = self.getMultiset(pathPos)
            # create a generator to get pitches from chord as index values
            # list() wrapper for Py3, consistent w/ sibling textures
            selectorChordPos = basePmtr.Selector(
                list(range(len(chordCurrent))), textPitchSelectorControl)
            # choose pc from chord
            ps = chordCurrent[selectorChordPos()]  # get position w/n chord
            # a if the division of path dfs is w/n a single segment
            # either side of the path may occur more than once.
            # perhaps pre calculate and store in a list?
            if textFieldLevel == 'event':  # every event
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            elif textFieldLevel == 'set':
                transCurrent = fieldValBuf[pathPos]  # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            elif textOctaveLevel == 'set':
                octCurrent = octValBuf[pathPos]  # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if acc == 0 and not self.silenceMode:  # this is a rest
                tCurrent = tCurrent + dur
                continue
            amp = self.getAmp(tCurrent) * acc  # choose amp, pan
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent)  # chooose AUX, pack into list
            eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psReal, pan, auxiliary)
            self.storeEvent(eventDict)
            # tCurrent = tCurrent + dur # move clocks forward by dur unit
    # self.clockForward() # advances path positon
    return 1
def _scoreMain(self):
    """creates score

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('lg')
    >>> ti.tmName == 'LineGroove'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # texture-wide (self.textQ amd) options
    # used for optional parallel voices
    textParallelVoiceList = self.getTextStatic("pml", "transpositionList")
    textParallelDelayTime = self.getTextStatic("pml", "timeDelay")
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic("lfm", "level")
    textOctaveLevel = self.getTextStatic("lom", "level")
    textPitchSelectorControl = self.getTextStatic("psc", "selectionString")
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        # wrap range in list() for Python 3 Selector use (consistent
        # with the other LineGroove implementation in this file)
        selectorChordPos = basePmtr.Selector(
            list(range(len(chordCurrent))), textPitchSelectorControl)
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == "set":
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == "set":
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
        while 1:  # pitch in chord
            if tCurrent >= tEndSet:
                break
            # choose pc from chord
            ps = chordCurrent[selectorChordPos()]  # get position w/n chord
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == "event":
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == "event":
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if acc == 0 and not self.silenceMode:  # this is a rest
                tCurrent = tCurrent + dur
                continue
            amp = self.getAmp(tCurrent) * acc  # choose amp, pan
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent)  # chooose AUX, pack into list
            eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psReal, pan, auxiliary)
            self.storeEvent(eventDict)
            # Parallel transposition
            offset = 0
            for parallelVoice in textParallelVoiceList:
                # offset to avoid amp problems, correct error w/ offset
                tCurrent = tCurrent + textParallelDelayTime
                offset = offset + textParallelDelayTime
                psText = pitchTools.psTransposer(psReal, parallelVoice)
                eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus,
                                           acc, amp, psText, pan, auxiliary)
                self.storeEvent(eventDict)
            # move clocks forward by dur unit
            tCurrent = (tCurrent + dur) - offset
        self.clockForward()  # advances path positon
    return 1
def _scoreMain(self):
    """creates score
    note: octave choose for every note

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('TimeSegment')
    >>> ti.tmName == 'TimeSegment'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()  # needed for preliminary parameter values
    # tStart, tEnd = self.getTimeRange()
    # tCurrent = tStart
    # get field, octave selection method value
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    # this is level tt event count numbers are applied (not count itself)
    textLevelEventCount = self.getTextStatic('lec', 'level')
    textTotalSegmentCount = self.getTextStatic('tsc', 'count')
    segmentManifest = []  # store [count, segWeight, start, end]
    # process segments first, determine events per segemtn
    tSeg = 0
    if textLevelEventCount == 'segment':  # event count per segement
        for q in range(textTotalSegmentCount):
            eventCount = self.getTextDynamic('eventCountGenerator', tSeg)
            segmentManifest.append([int(round(eventCount))])  # store as list
            tSeg = tSeg + 1
    elif textLevelEventCount == 'count':  # event count is total
        # get one value and divide
        eventCount = self.getTextDynamic('eventCountGenerator', tSeg)
        segEventCount = int(round((eventCount / textTotalSegmentCount)))
        if segEventCount <= 0:
            segEventCount = 1  # force minimum per seg
        for q in range(textTotalSegmentCount):
            segmentManifest.append([segEventCount])  # store as list
    # get total duration
    tStart, tEnd = self.getTimeRange()
    tDurSpan = tEnd - tStart  # not final dur, but time start span
    # get segment proportions
    tSeg = 0  # segment count as event step size
    segmentWidth = []  # store widths before getting scaled size
    for q in range(textTotalSegmentCount):
        # what if segment widht is zero?
        val = self.getTextDynamic('segmentWidthGenerator', tSeg)
        if val <= 0:
            pass  # continue or warn?
        segmentWidth.append(val)
        tSeg = tSeg + 1
    # transfrom segment width into a collection of boundaries
    segmentBounds = unit.unitBoundaryProportion(segmentWidth)
    for q in range(textTotalSegmentCount):
        s, m, e = segmentBounds[q]
        segmentManifest[q].append(s * tDurSpan)
        segmentManifest[q].append(e * tDurSpan)
    # get texture start time as init time
    tCurrent = tStart  # defined abovie
    # if field/oct vals are taken once per set, pre calculate and store
    # in a list; access from this list with pathPos index
    fieldValBuf = []
    if textFieldLevel == 'set':
        for q in range(self.getPathLen()):
            s, e = self.clockPoints(q)  # use path df start time
            fieldValBuf.append(self.getField(s))
    octValBuf = []
    if textOctaveLevel == 'set':
        for q in range(self.getPathLen()):
            s, e = self.clockPoints(q)
            octValBuf.append(self.getOct(s))
    # iterate through segments in order
    for segPos in range(textTotalSegmentCount):
        segEventCount = segmentManifest[segPos][0]  # count is first in list
        tStartSeg = segmentManifest[segPos][1]
        tEndSeg = segmentManifest[segPos][2]
        # create events for thsi segment
        for i in range(segEventCount):
            # get generator value w/n unit interval
            tUnit = unit.limit(self.getTextDynamic('fillGenerator', tCurrent))
            tCurrent = unit.denorm(tUnit, tStartSeg, tEndSeg)
            pathPos = self.clockFindPos(tCurrent)  # pos for current time
            # fixed: use 'is None' and Py3 raise-call syntax (was a
            # Python-2-only 'raise ValueError, ...' statement)
            if pathPos is None:
                raise ValueError('tCurrent out of all time ranges')
            # need to determin path position based on time point of event
            chordCurrent = self.getPitchGroup(pathPos)
            multisetCurrent = self.getMultiset(pathPos)
            # create a generator to get pitches from chord as index values
            # list() wrapper for Py3, consistent w/ sibling textures
            selectorChordPos = basePmtr.Selector(
                list(range(len(chordCurrent))), textPitchSelectorControl)
            # choose pc from chord
            ps = chordCurrent[selectorChordPos()]  # get position w/n chord
            # a if the division of path dfs is w/n a single segment
            # either side of the path may occur more than once.
            # perhaps pre calculate and store in a list?
            if textFieldLevel == 'event':  # every event
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            elif textFieldLevel == 'set':
                transCurrent = fieldValBuf[pathPos]  # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            elif textOctaveLevel == 'set':
                octCurrent = octValBuf[pathPos]  # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if acc == 0 and not self.silenceMode:  # this is a rest
                tCurrent = tCurrent + dur
                continue
            amp = self.getAmp(tCurrent) * acc  # choose amp, pan
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent)  # chooose AUX, pack into list
            eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psReal, pan, auxiliary)
            self.storeEvent(eventDict)
            # tCurrent = tCurrent + dur # move clocks forward by dur unit
    # self.clockForward() # advances path positon
    return 1
def _scoreMain(self):
    """creates score

    Fills each path set (or the whole path) with a fixed number of events
    whose onset times come from a normalized 'fillGenerator' parameter, then
    builds interpolation frames between the sorted events.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('InterpolateFill')
    >>> ti.tmName == 'InterpolateFill'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    textEventCount = self.getTextStatic('tec', 'count')
    textEventPartition = self.getTextStatic('lep', 'level')
    textDensityPartition = self.getTextStatic('edp', 'level')
    textInterpolationMethodControl = self.getTextStatic('imc', 'method')
    textLevelFrameDuration = self.getTextStatic('lfd', 'level')
    textParameterInterpolationControl = self.getTextStatic('pic', 'onOff')
    textSnapSustainTime = self.getTextStatic('sst', 'onOff')
    # cannot snap event time in this context
    #textSnapEventTime = self.getTextStatic('set', 'onOff')

    if textDensityPartition == 'set': # get a list of values
        # equal share of the total event count for every set in the path
        pLen = self.getPathLen()
        eventPerSet = [int(round(textEventCount / pLen))] * pLen
    else: # duration fraction
        # distribute events proportionally to each set's duration percent
        scalars = self.getPathDurationPercent()
        eventPerSet = [int(round(x*textEventCount)) for x in scalars]
    eventIndex = 0
    # a list of frame data: tStart, dur, eventFlag, interpMethod, interpExponet
    tFrameArray = []
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        # start and end of this set is in real-time, not local to path
        # if not by set, boundaries here are always tt of entire texture
        if textEventPartition == 'set':
            tStartSet, tEndSet = self.clockPoints() # value relative to start
        else: # its a path based, treat tiem as one set
            tStartSet, tEndSet = self.getTimeRange() # value relative to start
        # create a generator to get pitches from chord as index values
        selectorChordPos = basePmtr.Selector(range(len(chordCurrent)),
                                             textPitchSelectorControl)
        # real set start is always the formal start time here
        tCurrent = copy.deepcopy(tStartSet)
        tStartSetReal = copy.deepcopy(tStartSet)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent) # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent) # choose OCTAVE
        # get event count from list of eventPerSet list by pathPos
        for i in range(eventPerSet[pathPos]): # pitch in chord
            eventIndex = eventIndex + 1 # cumulative count
            # even when rounded, dont exceed maximum; last set may have less
            if eventIndex > textEventCount:
                break
            # tCurrent here is assumed as start of set initiall, although
            # this is not exactly correct
            tUnit = unit.limit(self.getTextDynamic('fillGenerator', tCurrent))
            # map the 0..1 fill value into the real time span of this set
            tCurrent = unit.denorm(tUnit, tStartSet, tEndSet)
            # choose pc from chord
            ps = chordCurrent[selectorChordPos()] # get position w/n chord
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent) # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent) # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                     self.temperamentObj, transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            # silence mode has to be ignored
            amp = self.getAmp(tCurrent) * acc # choose amp, pan
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent) # chooose AUX, pack into list
            eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psReal, pan, auxiliary)
            self.storeEvent(eventDict)
            # tCurrent = tCurrent + dur # move clocks forward by dur unit
            # always store event time in array, w/ interp type and exponent
            tFrameArray.append((tCurrent, dur, 1,
                                textInterpolationMethodControl,
                                self.getTextDynamic('exponent', tCurrent)))
            # tFrame = copy.deepcopy(tCurrent)
        self.clockForward() # advances path positon
    # sort frames and events; should both be the same size and in order
    self.esObj.sort()
    tFrameArray.sort() # sort stored events
    # process frame start times
    # store first event, as well as interp exponet if needed
    # tFrame is set to tCurrent
    tFrameArrayPost = []
    for i in range(len(tFrameArray)-1): # dont do last event
        eventDict = self.esObj[i]
        tCurrent = eventDict['time']
        tFrame = copy.deepcopy(tCurrent)
        # get relative duration to next event
        durRel = self.esObj[i+1]['time'] - eventDict['time']
        # transfer old tFrame to new
        tFrameArrayPost.append(tFrameArray[i])
        if textLevelFrameDuration == 'event': # one frame dur / event
            frameDur = self.getTextDynamic('frameDuration', tCurrent)
            if frameDur < durRel: # can eval in loop b/c frameDur is constant
                while (tFrame + frameDur) < (tCurrent + durRel):
                    tFrame = tFrame + frameDur
                    tFrameArrayPost.append((tFrame, frameDur, 0))
        # frame updates / frame
        elif textLevelFrameDuration == 'frame':
            while 1:
                # must calc frameDur to see if it is over e next event
                frameDur = self.getTextDynamic('frameDuration', tFrame)
                if (tFrame + frameDur) > (tCurrent + durRel):
                    break # cannot fit another frame w/o passing next event
                tFrame = tFrame + frameDur
                tFrameArrayPost.append((tFrame, frameDur, 0))
        # cannot snap event time here; woudl require repositioning
        # next event
    # restore the last tFrame to the new tFrame
    tFrameArrayPost.append(tFrameArray[-1])
    # configure which parameters, in EventSequence object, are interpolated
    if textParameterInterpolationControl == 'on':
        active = ['time', 'acc', 'bpm', 'amp', 'ps', 'pan', 'aux'] #
    elif textParameterInterpolationControl == 'off':
        active = ['time', 'bpm']
    # interpolate events
    self.interpolate(tFrameArrayPost, textSnapSustainTime, active)
    return 1
def _scoreMain(self):
    """creates score

    Articulates each pitch of the current chord as an independent voice,
    each with its own clock, applying a Gaussian-scaled time offset per
    event; rhythm/amp/pan parameters are driven by a cumulative fake time.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('DroneArticulate')
    >>> ti.tmName == 'DroneArticulate'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide PATH/PITCH elements
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    tCumulative = copy.deepcopy(tStart) # store fake time for pmtr gen
    #texture-wide TEXTURE (self.textQ amd)options
    #used for optional parallel voices
    textMaxTimeOffset = self.getTextStatic('mto', 'time')
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    # create range of offsets to dray from
    # scale base by distribution from -1 to
    # this gives a range from -1 to 1
    self.gaussPmtrObj = parameter.factory(('randomGauss', .5, .1, -1, 1))
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        chordLength = len(chordCurrent)
        muteSet = 'off'
        #not sure what this was used for
        #pcTest = 'noNote' # set one note found to filter out rests
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent) # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent) # choose OCTAVE
        # create a clock for each voice
        tVoice = []
        for x in range(0, len(chordCurrent)):
            tVoice.append(copy.deepcopy(tCurrent))
        i = 0 # voice count
        for ps in chordCurrent: # psReal values in chord
            self.stateUpdate(tVoice[i], chordCurrent, ps, multisetCurrent,
                             None, None)
            while tVoice[i] < tEndSet:
                self.stateUpdate(tVoice[i], chordCurrent, ps,
                                 multisetCurrent, None, None)
                # rhythm is read at the cumulative (monotonic) time, not
                # the per-voice clock
                bpm, pulse, dur, sus, acc = self.getRhythm(tCumulative)
                if acc == 0 and not self.silenceMode: # this is a rest
                    tVoice[i] = tVoice[i] + dur
                    tCumulative = tCumulative + dur
                    continue
                if textFieldLevel == 'event':
                    transCurrent = self.getField(tCumulative) # choose PITCHFIELD
                if textOctaveLevel == 'event':
                    octCurrent = self.getOct(tCumulative) # choose OCTAVE
                psReal = pitchTools.psToTempered(ps, octCurrent,
                         self.temperamentObj, transCurrent)
                self.stateUpdate(tVoice[i], chordCurrent, ps,
                                 multisetCurrent, None, psReal)
                amp = self.getAmp(tCumulative) * acc
                pan = self.getPan(tCumulative)
                auxiliary = self.getAux(tCumulative)
                # offset value is b/n -textMaxOffset, 0, and +textMaxOffset
                offset = self.gaussPmtrObj(0.0) * textMaxTimeOffset
                tVoice[i] = tVoice[i] + offset
                tCumulative = tCumulative + offset
                if tVoice[i] < 0: # cant start before 0
                    tVoice[i] = tStartSetReal # reset
                eventDict = self.makeEvent(tVoice[i], bpm, pulse, dur, sus,
                                           acc, amp, psReal, pan, auxiliary)
                self.storeEvent(eventDict)
                tVoice[i] = tVoice[i] + dur
                tCumulative = tCumulative + dur
            i = i + 1 # increment voice count
        # find longest voice
        tMaxVoice = 0
        for i in range(0, len(chordCurrent)):
            if tVoice[i] >= tMaxVoice:
                tMaxVoice = tVoice[i]
        # move clocks forward by dur unit
        tCurrent = tMaxVoice # new current is at max length
        # do not chang tCumulative, is is already expanding
        self.clockForward() # advances path positon
    return 1
def create(self, refDict, presetName='trill', ornPos='release',
           pitchLang='path', microTone=.5):
    """Build the event list for one ornamented note.

    psBase is a pitch space integer, or not, depending on path form.

    refDict supplies 'stateCurrentTime' and 'stateCurrentPitchRaw';
    remaining controls come from the ornament preset library and the
    texture's parameter objects. Returns a list of (incomplete) event
    dicts: the base note plus the ornament notes, ordered by ornPos.
    If the base duration is degenerate (<= .0001 s) the ornament is
    aborted and only the base note is returned.
    """
    self.refDict = refDict
    tCurrent = refDict['stateCurrentTime']
    psBaseRaw = refDict['stateCurrentPitchRaw'] # this is psBaseInt, no temper
    presetDict = self._getOrnLibrary(presetName, ornPos, pitchLang, microTone)
    # load parameters
    contourForm = presetDict['contourForm']
    ornStyle = presetDict['ornStyle']
    ornPos = presetDict['ornPos']
    pitchLang = presetDict['pitchLang']
    ornNotePcent = presetDict['ornNotePcent']
    durOrnGoal = presetDict['durOrnGoal']
    durInstPcentOffset = presetDict['durInstPcentOffset']
    microTone = presetDict['microTone']
    ampScalerMedian = presetDict['ampScalerMedian']
    ampInstPcentOffset = presetDict['ampInstPcentOffset']
    # from parameter objects
    inst = self.pmtrObjDict['inst'].currentValue
    pan = self.pmtrObjDict['panQ'].currentValue
    amp = self.pmtrObjDict['ampQ'].currentValue
    octCurrent = self.pmtrObjDict['octQ'].currentValue
    transCurrent = self.pmtrObjDict['fieldQ'].currentValue
    pulseObj = self.pmtrObjDict['rhythmQ'].currentPulse
    baseRhythmTuple = pulseObj.get('triple')
    currentBeatTime = self.pmtrObjDict['rhythmQ'].bpm
    # duration of base rhyth, first of triple
    rhythmBase = self.pmtrObjDict['rhythmQ'].currentValue[0] # gets time in ms
    # calculat a tempered, transposed position
    # this value may be a float
    psBase = pitchTools.psToTempered(psBaseRaw, octCurrent,
                                     self.temperamentObj, transCurrent)
    # translate contourForm into PCH list of appropriate pitches
    # psBase here needs to an int, in pitch space, that is somewhere on
    # the path. psCountourReference
    psContourReference, refScales = self.psScale(pitchLang, contourForm,
                                                 psBaseRaw, microTone)
    # estimated, this will chang once rhythms meausred
    estOrnDurFraction = rhythmBase * ornNotePcent
    # find number of notes, and actual duartion of all oraments
    totOrnDur, durList = self._getOrnDurStyle(presetDict, estOrnDurFraction)
    # time of ornament, time of base
    # get timings
    posTimes = self._setOrnPos(ornPos, rhythmBase, tCurrent, totOrnDur)
    durBase, tBaseStart, tBaseEnd, tOrnStart, tOrnEnd = posTimes
    # make notes
    tLocalCurrent = copy.deepcopy(tCurrent)
    ampOrnament = amp * ampScalerMedian # get amp base value
    eventList = [] # event list does not store compelete events
    # make base note:
    if durBase > .0001: # if very short, ommit this note (value in seconds)
        abortOrnament = 0
    else:
        abortOrnament = 1
    if not abortOrnament:
        baseNoteEvent = self._makeEventDict(tBaseStart, durBase, amp,
                                            psBase, pan)
    else: # abort ornament, user tempered pitch
        # FIX: was a Python-2-only print statement
        # (`print lang.WARN, "ornamants aborted"`); use the same warning
        # channel as psScale for py3 compatibility and consistency.
        environment.printWarn([lang.WARN, "ornamants aborted"])
        baseNoteEvent = self._makeEventDict(tCurrent, rhythmBase, amp,
                                            psBase, pan)
        eventList.append(baseNoteEvent)
        return eventList
    # build ornament from ornament start time
    tLocalCurrent = tOrnStart # clock may move backwards
    scalePosition = 0 # index ot contourForm and durList
    durPosition = 0
    durBufferSpace = .003 # 3 ms gap to avoid clips
    while 1:
        ampInstance = self._addAmpNoise(ampOrnament, ampInstPcentOffset)
        # indices hold ps, tempered version of of contourForm
        psCurrent = psContourReference[scalePosition]
        durInst = durList[durPosition]
        durFake = durInst - durBufferSpace # create shorter dur for b/n notes
        eventDict = self._makeEventDict(tLocalCurrent, durFake, ampInstance,
                                        psCurrent, pan) # not complete event
        eventList.append(eventDict)
        tLocalCurrent = tLocalCurrent + durInst # append actual
        # iterators
        scalePosition = scalePosition + 1
        if scalePosition == len(psContourReference):
            scalePosition = 0
        durPosition = durPosition + 1
        if durPosition == len(durList):
            durPosition = 0
        # exits
        if ornStyle == 'loop':
            if tLocalCurrent >= tOrnEnd:
                break
            if tLocalCurrent + durList[durPosition] >= tOrnEnd:
                break # dont want trill that spills over
        else: # ornaments that are 'single' or 'scale'
            if durPosition == 0: # its gone through once already
                break # only have enough durs for each ornament
    # resort notes so this looks better
    if ornPos == 'release':
        eventList.insert(0, baseNoteEvent) # insert at beginning
    else: # place base note after ornament
        eventList.append(baseNoteEvent) # add at end
    return eventList
def _scoreMain(self):
    """creates score

    Walks each chord pitch in literal order, one event per pitch; when the
    'lws' repeat toggle is off, mutes (rests) all events after every pitch
    of the chord has sounded once.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('LiteralHorizontal')
    >>> ti.tmName == 'LiteralHorizontal'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide PATH/PITCH elements
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    #texture-wide TEXTURE (self.textQ amd)options
    #used for optional parallel voices
    textRepeatToggle = self.getTextStatic('lws', 'onOff')
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        chordLength = len(chordCurrent)
        chordIndex = 0
        muteSet = 'off'
        pcTest = [] # collects sounded pitches to detect full traversal
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent) # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent) # choose OCTAVE
        while 1: # PITCH in CHORD
            if tCurrent >= tEndSet:
                break
            ps = chordCurrent[chordIndex] # choose PC from CHORD
            pcTest.append(ps)
            chordIndex = chordIndex + 1 # shift to next pitch
            if chordIndex >= chordLength: # wrap to chord start
                chordIndex = 0
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent) # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent) # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                     self.temperamentObj, transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent) # choose RHYTHM
            if muteSet == 'on': # make all rests
                acc = 0 # everything is a rest after chord inex reset
            if acc == 0 and not self.silenceMode: # this is a rest
                chordIndex = chordIndex - 1 # dont count this note if a rest
                pcTest = pcTest[:-1] # drop off last addition
                tCurrent = tCurrent + dur
                continue
            amp = self.getAmp(tCurrent) * acc # choose AMP, PAN
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent) # chooose AUX, pack into list
            eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psReal, pan, auxiliary)
            self.storeEvent(eventDict)
            # turn of further notes if all gotten
            if textRepeatToggle == 'off' and len(pcTest) >= chordLength:
                muteSet = 'on'
            # move clocks forward by dur unit
            tCurrent = tCurrent + dur
        # return value to check for errors
        self.clockForward() # advances path positon
    return 1
def psScale(self, pitchFormat, contourForm, psBase, microTone=.5):
    """translates a scale form in notation [0,-1,0,1] into various pitch
    representations.

    integers in the scale form are interpreted in three ways: as chromatic
    1/2 steps, as diatonic scale pitches (either from the local set or the
    entire path), or as units of some microtonal size

    this returns a list representing the scale steps in relation to psBase
    psBase needs to be found in terms of the path, which may not consist
    only of ints

    returns a psCountourReference, which is alway tempered pitch values

    Raises ValueError for an unrecognized pitchFormat.
    """
    # not sure this needs to be an int
    #assert drawer.isInt(psBase)
    octCurrent = self.pmtrObjDict['octQ'].currentValue
    transCurrent = self.pmtrObjDict['fieldQ'].currentValue
    #currentChord = self.stateCurrentChord
    # not needed for all forms, but myst always get
    if pitchFormat == 'set':
        pitchGroup = self.refDict['stateCurrentChord']
    elif pitchFormat == 'path':
        pitchGroup = self.refDict['statePathList']
    else: # non given, but note used
        pitchGroup = self.refDict['stateCurrentChord']
    refScales, lowerUpper = extractNeighbors(pitchGroup, psBase)
    pcContourDict = mapNeighbors(refScales, psBase, contourForm)
    # get pitch scale
    # this has the mapping with the appropriate pitches
    # N.B: danger here of getting mistransposed values
    # previously was an error and corrected in _splitPch
    psContourRef = []
    if pitchFormat == 'chromatic':
        for entry in contourForm:
            # transpose before getting temperament
            pcSpace = pitchTools.psTransposer(psBase, entry)
            psReal = pitchTools.psToTempered(pcSpace, octCurrent,
                     self.temperamentObj, transCurrent)
            psContourRef.append(psReal) # transpose by half steps
    # sets: derive scale from set
    elif pitchFormat == 'set' or pitchFormat == 'path':
        for entry in contourForm:
            pcSpace = pcContourDict[entry] # scale step is a key, gets pcSpace
            psReal = pitchTools.psToTempered(pcSpace, octCurrent,
                     self.temperamentObj, transCurrent)
            psContourRef.append(psReal) # transpose by half steps
    elif pitchFormat == 'microtone': # microtonal
        for entry in contourForm:
            # treat scale step as microtone scaler
            # must do transposition after converting to PCH
            if entry * microTone > entry * 2:
                environment.printWarn([lang.WARN,
                    'microtone large (%s)' % (entry * microTone)])
            trans = (transCurrent + (entry * microTone))
            psReal = pitchTools.psToTempered(psBase, octCurrent,
                     self.temperamentObj, trans)
            psContourRef.append(psReal) # transpose by half steps
    else:
        # FIX: was py2-only `raise ValueError, '...'`; this call form is
        # valid in both Python 2 and 3.
        raise ValueError('no such pitchFormat')
    # this now returns psReals, not pch values
    return psContourRef, refScales
def _scoreMain(self):
    """creates score

    Sounds every pitch of the chord vertically (simultaneously), sharing
    rhythm/amp/pan per chord while allowing per-voice field/octave and a
    Gaussian-scaled per-voice time offset.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('LiteralVertical')
    >>> ti.tmName == 'LiteralVertical'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide PATH/PITCH elements
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    #texture-wide TEXTURE (self.textQ amd)options
    #used for optional parallel voices
    textRepeatToggle = self.getTextStatic('lws', 'onOff')
    textMaxTimeOffset = self.getTextStatic('mto', 'time')
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfp', 'level')
    textOctaveLevel = self.getTextStatic('lop', 'level')
    # when off, the number of sets in the path completely
    # determines the number of events in the texture
    pathDurationFraction = self.getTextStatic('pdf', 'onOff')
    # create range of offsets to draw from
    # scale base by distribution from -1 to
    # this gives a range from -1 to 1
    self.gaussPmtrObj = parameter.factory(('randomGauss', .5, .1, -1, 1))
    # used below now
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        chordLength = len(chordCurrent)
        muteSet = 'off'
        pcTest = 'noNote' # set one note found to filter out rests
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent) # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent) # choose OCTAVE
        while 1: # dur of each path
            # to do one chord per event this needs to be controlled
            # not by time per set, but simply by if a single chord
            # has been executed
            if pathDurationFraction == 'off':
                if tCurrent > tStartSetReal:
                    break
            else: # sustain entire path over desired dur fraction
                if tCurrent >= tEndSet:
                    break
            # no ps yet found, give as None, get default
            self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent) # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent) # choose OCTAVE
            # choose RHYTHM
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if muteSet == 'on': # make all rests
                acc = 0 # everything is a rest after chord inex reset
            if acc != 0: # this is a note
                pcTest = 'noteFound' #only set first time note found
            if acc == 0 and not self.silenceMode: # this is a rest
                tCurrent = tCurrent + dur
                continue
            # amp and pan per chord, not voice
            amp = self.getAmp(tCurrent) * acc
            pan = self.getPan(tCurrent)
            tThisChord = copy.deepcopy(tCurrent)
            for ps in chordCurrent:
                self.stateUpdate(tCurrent, chordCurrent, ps,
                                 multisetCurrent, None, None)
                if textFieldLevel == 'voice':
                    transCurrent = self.getField(tCurrent) # choose PITCHFIELD
                if textOctaveLevel == 'voice':
                    octCurrent = self.getOct(tCurrent) # choose OCTAVE
                psReal = pitchTools.psToTempered(ps, octCurrent,
                         self.temperamentObj, transCurrent)
                self.stateUpdate(tCurrent, chordCurrent, None,
                                 multisetCurrent, None, psReal)
                # aux per voice, post psReal definition
                auxiliary = self.getAux(tCurrent)
                # offset value is between -textMaxOffset, 0, and +textMaxOffset
                offset = self.gaussPmtrObj(0.0) * textMaxTimeOffset
                tCurrent = tCurrent + offset
                if tCurrent < 0: # cant start before 0
                    tCurrent = tThisChord # reset
                eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus,
                                           acc, amp, psReal, pan, auxiliary)
                self.storeEvent(eventDict)
                # restore time to tCurrent before processing offset again
                tCurrent = tThisChord
            # turn of further notes if all gotten
            if textRepeatToggle == 'off' and pcTest == 'noteFound':
                muteSet = 'on'
            # move clocks forward by dur unit
            tCurrent = tCurrent + dur
        self.clockForward() # advances path positon
    return 1
def _scoreMain(self):
    """creates score

    Generates one event per rhythm step and records an interpolation frame
    per event, plus intermediate frames sized by 'frameDuration'; finally
    hands the frame array to self.interpolate.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('InterpolateLine')
    >>> ti.tmName == 'InterpolateLine'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    textInterpolationMethodControl = self.getTextStatic('imc', 'method')
    textLevelFrameDuration = self.getTextStatic('lfd', 'level')
    textParameterInterpolationControl = self.getTextStatic('pic', 'onOff')
    textSnapSustainTime = self.getTextStatic('sst', 'onOff')
    textSnapEventTime = self.getTextStatic('set', 'onOff')
    # a list of frame data: tStart, dur, eventFlag, interpMethod, interpExponet
    tFrameArray = []
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        selectorChordPos = basePmtr.Selector(range(len(chordCurrent)),
                                             textPitchSelectorControl)
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent) # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent) # choose OCTAVE
        while 1: # pitch in chord
            if tCurrent >= tEndSet:
                break
            # choose pc from chord
            ps = chordCurrent[selectorChordPos()] # get position w/n chord
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent) # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent) # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                     self.temperamentObj, transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            # silence mode has to be ignored
            amp = self.getAmp(tCurrent) * acc # choose amp, pan
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent) # chooose AUX, pack into list
            eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psReal, pan, auxiliary)
            # need to store events in order to do proper post-event processing
            self.storeEvent(eventDict)
            # always store event time in array, w/ interp type and exponent
            tFrameArray.append((tCurrent, dur, 1,
                                textInterpolationMethodControl,
                                self.getTextDynamic('exponent', tCurrent)))
            tFrame = copy.deepcopy(tCurrent)
            # check if this is the last event of entire texture
            # if so, do not calculate frames
            if tCurrent + dur > tEnd:
                tCurrent = tCurrent + dur
                break
            # process frame start times
            # store first event, as well as interp exponet if needed
            # tFrame is set to tCurrent
            else:
                if textLevelFrameDuration == 'event': # one frame dur / event
                    frameDur = self.getTextDynamic('frameDuration', tCurrent)
                    if frameDur < dur: # can eval in loop b/c frameDur is constant
                        while (tFrame + frameDur) < (tCurrent + dur):
                            tFrame = tFrame + frameDur
                            tFrameArray.append((tFrame, frameDur, 0))
                # frame updates / frame
                elif textLevelFrameDuration == 'frame':
                    while 1:
                        # must calc frameDur to see if it is over e next event
                        frameDur = self.getTextDynamic('frameDuration', tFrame)
                        if (tFrame + frameDur) > (tCurrent + dur):
                            break # cannot fit another frame w/o passing next event
                        tFrame = tFrame + frameDur
                        tFrameArray.append((tFrame, frameDur, 0))
            # update current time after fram processing
            if textSnapEventTime == 'on':
                # always use existing frame dur for both event and frame proc
                # add to last set frame time
                tCurrent = tFrame + frameDur
            else:
                tCurrent = tCurrent + dur
        # advances path positon
        self.clockForward()
    # configure which parameters, in EventSequence object, are interpolated
    if textParameterInterpolationControl == 'on':
        active = ['time', 'acc', 'bpm', 'amp', 'ps', 'pan', 'aux'] #
    elif textParameterInterpolationControl == 'off':
        active = ['time', 'bpm']
    # interpolate events
    self.interpolate(tFrameArray, textSnapSustainTime, active)
    return 1
def _scoreMain(self):
    """creates score

    NOTE(review): this method appears essentially identical to the other
    InterpolateLine _scoreMain in this file; presumably each belongs to a
    different texture-module class — confirm before consolidating.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('InterpolateLine')
    >>> ti.tmName == 'InterpolateLine'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    textInterpolationMethodControl = self.getTextStatic('imc', 'method')
    textLevelFrameDuration = self.getTextStatic('lfd', 'level')
    textParameterInterpolationControl = self.getTextStatic('pic', 'onOff')
    textSnapSustainTime = self.getTextStatic('sst', 'onOff')
    textSnapEventTime = self.getTextStatic('set', 'onOff')
    # a list of frame data: tStart, dur, eventFlag, interpMethod, interpExponet
    tFrameArray = []
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        selectorChordPos = basePmtr.Selector(range(len(chordCurrent)),
                                             textPitchSelectorControl)
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent) # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent) # choose OCTAVE
        while 1: # pitch in chord
            if tCurrent >= tEndSet:
                break
            # choose pc from chord
            ps = chordCurrent[selectorChordPos()] # get position w/n chord
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent) # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent) # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                     self.temperamentObj, transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            # silence mode has to be ignored
            amp = self.getAmp(tCurrent) * acc # choose amp, pan
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux( tCurrent) # chooose AUX, pack into list
            eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psReal, pan, auxiliary)
            # need to store events in order to do proper post-event processing
            self.storeEvent(eventDict)
            # always store event time in array, w/ interp type and exponent
            tFrameArray.append( (tCurrent, dur, 1,
                                textInterpolationMethodControl,
                                self.getTextDynamic('exponent', tCurrent)))
            tFrame = copy.deepcopy(tCurrent)
            # check if this is the last event of entire texture
            # if so, do not calculate frames
            if tCurrent + dur > tEnd:
                tCurrent = tCurrent + dur
                break
            # process frame start times
            # store first event, as well as interp exponet if needed
            # tFrame is set to tCurrent
            else:
                if textLevelFrameDuration == 'event': # one frame dur / event
                    frameDur = self.getTextDynamic('frameDuration', tCurrent)
                    if frameDur < dur: # can eval in loop b/c frameDur is constant
                        while (tFrame + frameDur) < (tCurrent + dur):
                            tFrame = tFrame + frameDur
                            tFrameArray.append((tFrame, frameDur, 0))
                # frame updates / frame
                elif textLevelFrameDuration == 'frame':
                    while 1:
                        # must calc frameDur to see if it is over e next event
                        frameDur = self.getTextDynamic( 'frameDuration', tFrame)
                        if (tFrame + frameDur) > (tCurrent + dur):
                            break # cannot fit another frame w/o passing next event
                        tFrame = tFrame + frameDur
                        tFrameArray.append((tFrame, frameDur, 0))
            # update current time after fram processing
            if textSnapEventTime == 'on':
                # always use existing frame dur for both event and frame proc
                # add to last set frame time
                tCurrent = tFrame + frameDur
            else:
                tCurrent = tCurrent + dur
        # advances path positon
        self.clockForward()
    # configure which parameters, in EventSequence object, are interpolated
    if textParameterInterpolationControl == 'on':
        active = ['time', 'acc', 'bpm', 'amp', 'ps', 'pan', 'aux'] #
    elif textParameterInterpolationControl == 'off':
        active = ['time', 'bpm']
    # interpolate events
    self.interpolate(tFrameArray, textSnapSustainTime, active)
    return 1
def _scoreMain(self):
    """creates score

    NOTE(review): this method appears essentially identical to the other
    DroneArticulate _scoreMain in this file; presumably each belongs to a
    different texture-module class — confirm before consolidating.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('DroneArticulate')
    >>> ti.tmName == 'DroneArticulate'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide PATH/PITCH elements
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    tCumulative = copy.deepcopy(tStart) # store fake time for pmtr gen
    #texture-wide TEXTURE (self.textQ amd)options
    #used for optional parallel voices
    textMaxTimeOffset = self.getTextStatic('mto', 'time')
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    # create range of offsets to dray from
    # scale base by distribution from -1 to
    # this gives a range from -1 to 1
    self.gaussPmtrObj = parameter.factory(('randomGauss', .5, .1, -1, 1))
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        chordLength = len(chordCurrent)
        muteSet = 'off'
        #not sure what this was used for
        #pcTest = 'noNote' # set one note found to filter out rests
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent) # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent) # choose OCTAVE
        # create a clock for each voice
        tVoice = []
        for x in range(0,len(chordCurrent)):
            tVoice.append(copy.deepcopy(tCurrent))
        i = 0 # voice count
        for ps in chordCurrent: # psReal values in chord
            self.stateUpdate(tVoice[i], chordCurrent, ps, multisetCurrent,
                             None, None)
            while tVoice[i] < tEndSet:
                self.stateUpdate(tVoice[i], chordCurrent, ps,
                                 multisetCurrent, None, None)
                # rhythm is read at the cumulative (monotonic) time, not
                # the per-voice clock
                bpm, pulse, dur, sus, acc = self.getRhythm(tCumulative)
                if acc == 0 and not self.silenceMode: # this is a rest
                    tVoice[i] = tVoice[i] + dur
                    tCumulative = tCumulative + dur
                    continue
                if textFieldLevel == 'event':
                    transCurrent = self.getField(tCumulative) # choose PITCHFIELD
                if textOctaveLevel == 'event':
                    octCurrent = self.getOct(tCumulative) # choose OCTAVE
                psReal = pitchTools.psToTempered(ps, octCurrent,
                         self.temperamentObj, transCurrent)
                self.stateUpdate(tVoice[i], chordCurrent, ps,
                                 multisetCurrent, None, psReal)
                amp = self.getAmp(tCumulative) * acc
                pan = self.getPan(tCumulative)
                auxiliary = self.getAux(tCumulative)
                # offset value is b/n -textMaxOffset, 0, and +textMaxOffset
                offset = self.gaussPmtrObj(0.0) * textMaxTimeOffset
                tVoice[i] = tVoice[i] + offset
                tCumulative = tCumulative + offset
                if tVoice[i] < 0: # cant start before 0
                    tVoice[i] = tStartSetReal # reset
                eventDict = self.makeEvent(tVoice[i], bpm, pulse, dur, sus,
                                           acc, amp, psReal, pan, auxiliary)
                self.storeEvent(eventDict)
                tVoice[i] = tVoice[i] + dur
                tCumulative = tCumulative + dur
            i = i + 1 # increment voice count
        # find longest voice
        tMaxVoice = 0
        for i in range(0,len(chordCurrent)):
            if tVoice[i] >= tMaxVoice:
                tMaxVoice = tVoice[i]
        # move clocks forward by dur unit
        tCurrent = tMaxVoice # new current is at max length
        # do not chang tCumulative, is is already expanding
        self.clockForward() # advances path positon
    return 1
def _scoreMain(self):
    """creates score

    Events are positioned within each set by a normalized fill generator
    (not by accumulating durations).  After all events are created, a
    frame array is built between adjacent events and the EventSequence is
    interpolated over those frames.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('InterpolateFill')
    >>> ti.tmName == 'InterpolateFill'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    textEventCount = self.getTextStatic('tec', 'count')
    textEventPartition = self.getTextStatic('lep', 'level')
    textDensityPartition = self.getTextStatic('edp', 'level')
    textInterpolationMethodControl = self.getTextStatic('imc', 'method')
    textLevelFrameDuration = self.getTextStatic('lfd', 'level')
    textParameterInterpolationControl = self.getTextStatic('pic', 'onOff')
    textSnapSustainTime = self.getTextStatic('sst', 'onOff')
    # cannot snap event time in this context
    #textSnapEventTime = self.getTextStatic('set', 'onOff')
    if textDensityPartition == 'set':  # get a list of values
        # divide the total event count evenly among path sets
        pLen = self.getPathLen()
        eventPerSet = [int(round(textEventCount / pLen))] * pLen
    else:  # duration fraction
        # weight event count by each set's share of total duration
        scalars = self.getPathDurationPercent()
        eventPerSet = [int(round(x * textEventCount)) for x in scalars]
    eventIndex = 0
    # a list of frame data: tStart, dur, eventFlag, interpMethod, interpExponent
    tFrameArray = []
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        # start and end of this set is in real-time, not local to path
        # if not by set, boundaries here are always tt of entire texture
        if textEventPartition == 'set':
            tStartSet, tEndSet = self.clockPoints()  # value relative to start
        else:  # its path based, treat time as one set
            tStartSet, tEndSet = self.getTimeRange()  # value relative to start
        # create a generator to get pitches from chord as index values
        selectorChordPos = basePmtr.Selector(list(range(len(chordCurrent))),
                                             textPitchSelectorControl)
        # real set start is always the formal start time here
        tCurrent = copy.deepcopy(tStartSet)
        tStartSetReal = copy.deepcopy(tStartSet)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
    # get event count from list of eventPerSet list by pathPos
        for i in range(eventPerSet[pathPos]):  # pitch in chord
            eventIndex = eventIndex + 1  # cumulative count
            # even when rounded, dont exceed maximum; last set may have less
            if eventIndex > textEventCount:
                break
            # tCurrent here is assumed as start of set initially, although
            # this is not exactly correct
            tUnit = unit.limit(
                self.getTextDynamic('fillGenerator', tCurrent))
            # map the normalized fill value into this set's time span
            tCurrent = unit.denorm(tUnit, tStartSet, tEndSet)
            # choose pc from chord
            ps = chordCurrent[selectorChordPos()]  # get position w/n chord
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            # silence mode has to be ignored
            amp = self.getAmp(tCurrent) * acc  # choose amp, pan
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent)  # choose AUX, pack into list
            eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psReal, pan, auxiliary)
            self.storeEvent(eventDict)
            # tCurrent = tCurrent + dur # move clocks forward by dur unit
            # always store event time in array, w/ interp type and exponent
            tFrameArray.append((tCurrent, dur, 1,
                                textInterpolationMethodControl,
                                self.getTextDynamic('exponent', tCurrent)))
        # tFrame = copy.deepcopy(tCurrent)
        self.clockForward()  # advances path position
    # sort frames and events; should both be the same size and in order
    self.esObj.sort()
    tFrameArray.sort()  # sort stored events
    # process frame start times
    # store first event, as well as interp exponent if needed
    # tFrame is set to tCurrent
    tFrameArrayPost = []
    for i in range(len(tFrameArray) - 1):  # dont do last event
        eventDict = self.esObj[i]
        tCurrent = eventDict['time']
        tFrame = copy.deepcopy(tCurrent)
        # get relative duration to next event
        durRel = self.esObj[i + 1]['time'] - eventDict['time']
        # transfer old tFrame to new
        tFrameArrayPost.append(tFrameArray[i])
        if textLevelFrameDuration == 'event':  # one frame dur / event
            frameDur = self.getTextDynamic('frameDuration', tCurrent)
            if frameDur < durRel:  # can eval in loop b/c frameDur is constant
                # emit fixed-size non-event frames up to the next event
                while (tFrame + frameDur) < (tCurrent + durRel):
                    tFrame = tFrame + frameDur
                    tFrameArrayPost.append((tFrame, frameDur, 0))
        # frame updates / frame
        elif textLevelFrameDuration == 'frame':
            while 1:
                # must calc frameDur to see if it is over the next event
                frameDur = self.getTextDynamic('frameDuration', tFrame)
                if (tFrame + frameDur) > (tCurrent + durRel):
                    break  # cannot fit another frame w/o passing next event
                tFrame = tFrame + frameDur
                tFrameArrayPost.append((tFrame, frameDur, 0))
        # cannot snap event time here; would require repositioning
        # next event
    # restore the last tFrame to the new tFrame
    tFrameArrayPost.append(tFrameArray[-1])
    # configure which parameters, in EventSequence object, are interpolated
    if textParameterInterpolationControl == 'on':
        active = ['time', 'acc', 'bpm', 'amp', 'ps', 'pan', 'aux']  #
    elif textParameterInterpolationControl == 'off':
        active = ['time', 'bpm']
    # interpolate events
    self.interpolate(tFrameArrayPost, textSnapSustainTime, active)
    return 1
def _scoreMain(self):
    """creates score

    For each rhythm event, all pitches of the current set are sounded
    together as a cluster; amp and pan are chosen once per cluster,
    aux once per voice.  Optional parallel voices are added per voice,
    each delayed by textParallelDelayTime.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('LineCluster')
    >>> ti.tmName == 'LineCluster'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide PATH/PITCH elements
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # texture-wide TEXTURE (self.textQ) options
    # used for optional parallel voices
    textParallelVoiceList = self.getTextStatic('pml', 'transpositionList')
    textParallelDelayTime = self.getTextStatic('pml', 'timeDelay')
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfp', 'level')
    textOctaveLevel = self.getTextStatic('lop', 'level')
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    #textNonRedundantSwitch = self.getTextStatic('nrs', 'onOff')
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        # if textNonRedundantSwitch == 'on': selectorControl = 'randomPermutate'
        # else: selectorControl = 'randomChoice'
        selectorChordPos = basePmtr.Selector(list(range(len(chordCurrent))),
                                             textPitchSelectorControl)
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
        while 1:  # PITCH in CHORD
            if tCurrent >= tEndSet:
                break
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)  # choose RHYTHM
            if acc == 0 and not self.silenceMode:  # this is a rest
                tCurrent = tCurrent + dur
                continue
            # this ps should be used as ROOT;
            # this is _not_ implemented yet, however
            # choose PC from CHORD
            ps = chordCurrent[selectorChordPos()]
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            # subprocess: psChord is a list of PCH's needed to make chord
            psChord = []
            for pitchSpace in chordCurrent:
                if textFieldLevel == 'voice':
                    transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
                if textOctaveLevel == 'voice':
                    octCurrent = self.getOct(tCurrent)  # choose OCTAVE
                psReal = pitchTools.psToTempered(pitchSpace, octCurrent,
                                                 self.temperamentObj,
                                                 transCurrent)
                psChord.append(psReal)
            # amp and pan done for each chord, not voice
            amp = self.getAmp(tCurrent) * acc
            pan = self.getPan(tCurrent)
            # do this for each PCH in psChord, already transposed
            for psReal in psChord:
                self.stateUpdate(tCurrent, chordCurrent, pitchSpace,
                                 multisetCurrent, None, psReal)
                # choose aux for each voice
                auxiliary = self.getAux(tCurrent)
                eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus,
                                           acc, amp, psReal, pan, auxiliary)
                self.storeEvent(eventDict)
                # parallel transposition
                # NOTE(review): offset is reset per chord voice but tCurrent
                # accumulates delays across voices; only the last voice's
                # offset is subtracted below — confirm intended drift
                offset = 0
                for parallelVoice in textParallelVoiceList:
                    # offset to avoid amp problems, correct error w/ offset
                    tCurrent = tCurrent + textParallelDelayTime
                    offset = offset + textParallelDelayTime
                    psText = pitchTools.psTransposer(psReal, parallelVoice)
                    eventDict = self.makeEvent(tCurrent, bpm, pulse, dur,
                                               sus, acc, amp, psText, pan,
                                               auxiliary)
                    self.storeEvent(eventDict)
            #----------------------------
            # move clocks forward by dur unit
            tCurrent = (tCurrent + dur) - offset
        self.clockForward()  # advances path position
    # return value to check for errors
    return 1
def _scoreMain(self):
    """creates score
    note: octave chosen for every note

    Events are positioned within each set by a normalized fill generator
    rather than by accumulating rhythm durations; the per-set event count
    is partitioned either evenly ('set') or by duration fraction.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('TimeFill')
    >>> ti.tmName == 'TimeFill'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()  # needed for preliminary parameter values
    # tStart, tEnd = self.getTimeRange()
    # tCurrent = tStart
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    textEventCount = self.getTextStatic('tec', 'count')
    textEventPartition = self.getTextStatic('lep', 'level')
    textDensityPartition = self.getTextStatic('edp', 'level')
    if textDensityPartition == 'set':  # get a list of values
        # divide the total event count evenly among path sets
        pLen = self.getPathLen()
        eventPerSet = [int(round(textEventCount / pLen))] * pLen
    else:  # duration fraction
        # weight event count by each set's share of total duration
        scalars = self.getPathDurationPercent()
        eventPerSet = [int(round(x * textEventCount)) for x in scalars]
    eventIndex = 0
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        # start and end of this set is in real-time, not local to path
        # if not by set, boundaries here are always tt of entire texture
        if textEventPartition == 'set':
            tStartSet, tEndSet = self.clockPoints()  # value relative to start
        else:  # its path based, treat time as one set
            tStartSet, tEndSet = self.getTimeRange()  # value relative to path
        # create a generator to get pitches from chord as index values;
        # fix: pass a real list, not a Py3 range object — other
        # _scoreMain methods in this module use list(range(...)) so the
        # Selector can own/permute the collection
        selectorChordPos = basePmtr.Selector(list(range(len(chordCurrent))),
                                             textPitchSelectorControl)
        # real set start is always the formal start time here
        tCurrent = copy.deepcopy(tStartSet)
        tStartSetReal = copy.deepcopy(tStartSet)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
        # get event count from list of eventPerSet list by pathPos
        for i in range(eventPerSet[pathPos]):  # pitch in chord
            eventIndex = eventIndex + 1  # cumulative count
            # even when rounded, dont exceed maximum; last set may have less
            if eventIndex > textEventCount:
                break
            # tCurrent here is assumed as start of set initially, although
            # this is not exactly correct
            tUnit = unit.limit(self.getTextDynamic('fillGenerator', tCurrent))
            # map the normalized fill value into this set's time span
            tCurrent = unit.denorm(tUnit, tStartSet, tEndSet)
            # choose pc from chord
            ps = chordCurrent[selectorChordPos()]  # get position w/n chord
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if acc == 0 and not self.silenceMode:  # this is a rest
                tCurrent = tCurrent + dur
                continue
            amp = self.getAmp(tCurrent) * acc  # choose amp, pan
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent)  # choose AUX, pack into list
            eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psReal, pan, auxiliary)
            self.storeEvent(eventDict)
            # tCurrent = tCurrent + dur # move clocks forward by dur unit
        self.clockForward()  # advances path position
    return 1
def _scoreMain(self):
    """creates score

    Pitches are taken from the current set literally and in order,
    cycling through the chord; when textRepeatToggle is 'off', events
    are muted (forced to rests) after every chord pitch has sounded once.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('LiteralHorizontal')
    >>> ti.tmName == 'LiteralHorizontal'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide PATH/PITCH elements
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # texture-wide TEXTURE (self.textQ) options
    # used for optional parallel voices
    textRepeatToggle = self.getTextStatic('lws', 'onOff')
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        chordLength = len(chordCurrent)
        chordIndex = 0
        muteSet = 'off'
        pcTest = []  # tracks sounded pitches to detect a full cycle
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
        while 1:  # PITCH in CHORD
            if tCurrent >= tEndSet:
                break
            ps = chordCurrent[chordIndex]  # choose PC from CHORD
            pcTest.append(ps)
            chordIndex = chordIndex + 1  # shift to next pitch
            if chordIndex >= chordLength:
                chordIndex = 0
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)  # choose RHYTHM
            if muteSet == 'on':  # make all rests
                acc = 0  # everything is a rest after chord index reset
            if acc == 0 and not self.silenceMode:  # this is a rest
                # NOTE(review): if chordIndex just wrapped to 0 this
                # decrement yields -1 (indexes last pitch next time) —
                # confirm this wrap behavior is intended
                chordIndex = chordIndex - 1  # dont count this note if a rest
                pcTest = pcTest[:-1]  # drop off last addition
                tCurrent = tCurrent + dur
                continue
            amp = self.getAmp(tCurrent) * acc  # choose AMP, PAN
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent)  # choose AUX, pack into list
            eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psReal, pan, auxiliary)
            self.storeEvent(eventDict)
            # turn off further notes if all gotten
            if textRepeatToggle == 'off' and len(pcTest) >= chordLength:
                muteSet = 'on'
            # move clocks forward by dur unit
            tCurrent = tCurrent + dur
        # return value to check for errors
        self.clockForward()  # advances path position
    return 1
def _scoreMain(self):
    """creates score
    note: octave chosen for every note

    Path positions are drawn by a multiset selector (not in path order,
    so no clockForward is used); for each drawn multiset a probabilistic
    number of simultaneities is produced, each voicing a probabilistic
    number of pitches with a small gaussian time offset.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('HarmonicShuffle')
    >>> ti.tmName == 'HarmonicShuffle'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst()  # needed for preliminary parameter values
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # get static texture values
    textMultisetSelectorControl = self.getTextStatic('msc', 'selectionString')
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    textMaxTimeOffset = self.getTextStatic('mto', 'time')
    textFieldLevel = self.getTextStatic('lfp', 'level')
    textOctaveLevel = self.getTextStatic('lop', 'level')
    pLen = self.getPathLen()
    # fix: pass a real list, not a Py3 range object — consistent with the
    # other _scoreMain methods here that use list(range(...)) so the
    # Selector can own/permute the collection
    selectorMultisetPos = basePmtr.Selector(list(range(pLen)),
                                            textMultisetSelectorControl)
    # random generator for creating offset in vertical attacks
    # same technique used in LiteralVertical, DroneArticulate
    self.gaussPmtrObj = parameter.factory(('randomGauss', .5, .1, -1, 1))
    while tCurrent < tEnd:
        pathPos = selectorMultisetPos()  # select path position
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        # get number of simultaneities from this multiset
        # count is probabilistic, absolute value; cannot be zero
        multisetCount = abs(drawer.floatToInt(
            self.getTextDynamic('countPerMultiset', tCurrent), 'weight'))
        # make zero == 1; alternatively, make zero a skip and continue
        if multisetCount == 0:
            multisetCount = 1
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
        # number of times a simultaneity is drawn
        for k in range(multisetCount):
            if tCurrent > tEnd:
                break
            # create a selector to get pitches from chord as index values
            # only need to create one for each chord
            # fix: list(range(...)) as above
            selectorChordPos = basePmtr.Selector(
                list(range(len(chordCurrent))), textPitchSelectorControl)
            # determine how many pitches in this simultaneity
            # abs value, rounded to nearest integer
            simultaneityCount = abs(drawer.floatToInt(
                self.getTextDynamic('countPerSimultaneity', tCurrent),
                'weight'))
            # if zero set to max chord size
            if simultaneityCount == 0:
                simultaneityCount = len(chordCurrent)
            elif simultaneityCount > len(chordCurrent):
                simultaneityCount = len(chordCurrent)
            self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            # rhythm, amp, pan, aux: all chosen once per simultaneity
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if acc == 0 and not self.silenceMode:  # this is a rest
                tCurrent = tCurrent + dur
                continue
            amp = self.getAmp(tCurrent) * acc  # choose amp, pan
            pan = self.getPan(tCurrent)
            tThisChord = copy.deepcopy(tCurrent)
            # get each pitch in the simultaneity
            for i in range(simultaneityCount):  # pitch in chord
                ps = chordCurrent[selectorChordPos()]  # get position w/n chord
                self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                                 None, None)
                if textFieldLevel == 'voice':
                    transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
                if textOctaveLevel == 'voice':
                    octCurrent = self.getOct(tCurrent)  # choose OCTAVE
                psReal = pitchTools.psToTempered(ps, octCurrent,
                                                 self.temperamentObj,
                                                 transCurrent)
                self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                                 None, psReal)
                # aux values are drawn here once per voice;
                # this is common to TMs: DroneArticulate, DroneSustain
                auxiliary = self.getAux(tCurrent)  # choose AUX, pack into list
                # offset value is between -textMaxOffset, 0, +textMaxOffset
                offset = self.gaussPmtrObj(0.0) * textMaxTimeOffset
                tCurrent = tCurrent + offset
                if tCurrent < 0:  # cant start before 0
                    tCurrent = tThisChord  # reset
                eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus,
                                           acc, amp, psReal, pan, auxiliary)
                self.storeEvent(eventDict)
                # restore time to tCurrent before processing offset again
                tCurrent = tThisChord
            # move clocks forward by dur unit
            tCurrent = tCurrent + dur
    return 1
def _scoreMain(self):
    """creates score

    Cluster texture: for each rhythm event all pitches of the current set
    sound together; amp and pan are chosen per cluster, aux per voice.
    Optional parallel voices are added per voice with a time delay.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('LineCluster')
    >>> ti.tmName == 'LineCluster'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide PATH/PITCH elements
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # texture-wide TEXTURE (self.textQ) options
    # used for optional parallel voices
    textParallelVoiceList = self.getTextStatic('pml', 'transpositionList')
    textParallelDelayTime = self.getTextStatic('pml', 'timeDelay')
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfp', 'level')
    textOctaveLevel = self.getTextStatic('lop', 'level')
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    #textNonRedundantSwitch = self.getTextStatic('nrs', 'onOff')
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        # if textNonRedundantSwitch == 'on': selectorControl = 'randomPermutate'
        # else: selectorControl = 'randomChoice'
        # fix: pass a real list, not a Py3 range object — the sibling
        # implementation of this method already uses list(range(...)) so
        # the Selector can own/permute the collection
        selectorChordPos = basePmtr.Selector(list(range(len(chordCurrent))),
                                             textPitchSelectorControl)
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
        while 1:  # PITCH in CHORD
            if tCurrent >= tEndSet:
                break
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)  # choose RHYTHM
            if acc == 0 and not self.silenceMode:  # this is a rest
                tCurrent = tCurrent + dur
                continue
            # this ps should be used as ROOT;
            # this is _not_ implemented yet, however
            # choose PC from CHORD
            ps = chordCurrent[selectorChordPos()]
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            # subprocess: psChord is a list of PCH's needed to make chord
            psChord = []
            for pitchSpace in chordCurrent:
                if textFieldLevel == 'voice':
                    transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
                if textOctaveLevel == 'voice':
                    octCurrent = self.getOct(tCurrent)  # choose OCTAVE
                psReal = pitchTools.psToTempered(pitchSpace, octCurrent,
                                                 self.temperamentObj,
                                                 transCurrent)
                psChord.append(psReal)
            # amp and pan done for each chord, not voice
            amp = self.getAmp(tCurrent) * acc
            pan = self.getPan(tCurrent)
            # do this for each PCH in psChord, already transposed
            for psReal in psChord:
                self.stateUpdate(tCurrent, chordCurrent, pitchSpace,
                                 multisetCurrent, None, psReal)
                # choose aux for each voice
                auxiliary = self.getAux(tCurrent)
                eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus,
                                           acc, amp, psReal, pan, auxiliary)
                self.storeEvent(eventDict)
                # parallel transposition
                offset = 0
                for parallelVoice in textParallelVoiceList:
                    # offset to avoid amp problems, correct error w/ offset
                    tCurrent = tCurrent + textParallelDelayTime
                    offset = offset + textParallelDelayTime
                    psText = pitchTools.psTransposer(psReal, parallelVoice)
                    eventDict = self.makeEvent(tCurrent, bpm, pulse, dur,
                                               sus, acc, amp, psText, pan,
                                               auxiliary)
                    self.storeEvent(eventDict)
            #----------------------------
            # move clocks forward by dur unit
            tCurrent = (tCurrent + dur) - offset
        self.clockForward()  # advances path position
    # return value to check for errors
    return 1
def _scoreMain(self):
    """creates score

    Like LiteralHorizontal, pitches cycle in order through each set;
    additionally, each event may be expanded into an ornament (a poly
    event of sub-events) drawn from the configured ornament library,
    gated by a uniform-random density test.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('IntervalExpansion')
    >>> ti.tmName == 'IntervalExpansion'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    self.ornamentObj = ornament.Ornament(self.pmtrObjDict,
                                         self.temperamentObj)
    # texture-wide PATH/PITCH elements
    # pitches do not come from here, but from below
    path = self.path.get("scPath")
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # texture-wide TEXTURE (self.textQ) options
    textRepeatToggle = self.getTextStatic("lws", "onOff")
    ornamentSwitch = self.getTextStatic("ols", "libraryName")
    ornamentMaxDensity = self.getTextStatic("omd", "percent")
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic("lfm", "level")
    textOctaveLevel = self.getTextStatic("lom", "level")
    # create a randomUniform parameter object to control ornament control
    # values between 0 and 1; if pmtr() <= ornamentMaxDensity
    ruPmtrObj = parameter.factory(("randomUniform", 0, 1))
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        chordLength = len(chordCurrent)
        chordIndex = 0
        muteSet = "off"
        psTest = []  # tracks sounded pitches to detect a full cycle
        ornamentIndex = 0
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == "set":
            transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
        if textOctaveLevel == "set":
            octCurrent = self.getOct(tCurrent)  # choose OCTAVE
        while 1:  # PITCH in CHORD
            if tCurrent >= tEndSet:
                break
            ps = chordCurrent[chordIndex]
            psTest.append(ps)
            chordIndex = chordIndex + 1  # shift to next pitch
            if chordIndex >= chordLength:
                chordIndex = 0
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == "event":
                transCurrent = self.getField(tCurrent)  # choose PITCHFIELD
            if textOctaveLevel == "event":
                octCurrent = self.getOct(tCurrent)  # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if muteSet == "on":  # make all rests
                acc = 0  # everything is a rest after chord index reset
            if acc == 0 and not self.silenceMode:  # this is a rest
                chordIndex = chordIndex - 1  # dont count this note if a rest
                psTest = psTest[:-1]  # drop off last addition
                tCurrent = tCurrent + dur
                continue
            # choose AMP, PAN
            amp = self.getAmp(tCurrent) * acc
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent)
            parentEventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus,
                                             acc, amp, psReal, pan,
                                             auxiliary)
            refDict = self.getRefDict(tCurrent)
            # "rhythm" not "dur" (dur includes overlap)
            if ornamentSwitch != "off":
                # check if an ru value is <= ornament density (if 1, always)
                # time value is not important
                if ruPmtrObj(tCurrent) <= ornamentMaxDensity:
                    repretory = self._ornGroupGet(ornamentSwitch)
                    # a, b, c, d = repretory[ornamentIndex] # this will do in order
                    a, b, c, d = random.choice(repretory)  # choose orn at random
                    subEventArray = self.ornamentObj.create(refDict, a, b,
                                                            c, d)
                    # process sub event array: apply ornamentShift to each
                    # sub-event's pitch
                    for iSub in range(len(subEventArray)):
                        # get time from subEvent
                        subEvent = subEventArray[iSub]
                        val = self.getTextDynamic("ornamentShift",
                                                  subEvent["time"])
                        subEvent["ps"] = subEvent["ps"] + val
                    self.storePolyEvent(parentEventDict, subEventArray,
                                        "orn")
                    ornamentIndex = ornamentIndex + 1  # increment for when ordered
                    if ornamentIndex == len(repretory):
                        ornamentIndex = 0
                else:
                    self.storeEvent(parentEventDict)
            else:  # ornament == 'off': dont do ornaments
                self.storeEvent(parentEventDict)
            # turn off further notes if all gotten
            if textRepeatToggle == "off" and len(psTest) >= chordLength:
                muteSet = "on"
            # move clocks forward by rhythm unit
            tCurrent = tCurrent + dur
        self.clockForward()  # advances path position
    return 1
def psScale(self, pitchFormat, contourForm, psBase, microTone=.5):
    """translates a scale form in notation [0,-1,0,1] into various pitch
    representations. integers in the scale form are interpreted in three
    ways: as chromatic 1/2 steps, as diatonic scale pitches (either from
    the local set or the entire path), or as units of some microtonal size

    this returns a list representing the scale steps in relation to psBase
    psBase needs to be found in terms of the path, which may not consist
    only of ints
    returns a psContourReference, which is always tempered pitch values
    """
    # not sure this needs to be an int
    #assert drawer.isInt(psBase)
    # octave and field transposition come from the current parameter state
    octCurrent = self.pmtrObjDict['octQ'].currentValue
    transCurrent = self.pmtrObjDict['fieldQ'].currentValue
    #currentChord = self.stateCurrentChord
    # not needed for all forms, but must always get
    if pitchFormat == 'set':
        pitchGroup = self.refDict['stateCurrentChord']
    elif pitchFormat == 'path':
        pitchGroup = self.refDict['statePathList']
    else:  # none given, but not used
        pitchGroup = self.refDict['stateCurrentChord']
    # build reference scales around psBase from the chosen pitch group
    refScales, lowerUpper = extractNeighbors(pitchGroup, psBase)
    # map contour steps (keys) to pitch-space values
    pcContourDict = mapNeighbors(refScales, psBase, contourForm)
    #print pcContourDict # get pitch scale
    # this has the mapping with the appropriate pitches
    # N.B: danger here of getting mistransposed values
    # previously was an error and corrected in _splitPch
    psContourRef = []
    if pitchFormat == 'chromatic':
        for entry in contourForm:
            # transpose before getting temperament
            pcSpace = pitchTools.psTransposer(psBase, entry)
            psReal = pitchTools.psToTempered(pcSpace, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            psContourRef.append(psReal)  # transpose by half steps
    # sets: derive scale from set
    elif pitchFormat == 'set' or pitchFormat == 'path':
        for entry in contourForm:
            pcSpace = pcContourDict[entry]  # scale step is a key, gets pcSpace
            psReal = pitchTools.psToTempered(pcSpace, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            psContourRef.append(psReal)  # transpose by half steps
    elif pitchFormat == 'microtone':  # microtonal
        for entry in contourForm:
            # treat scale step as microtone scaler
            # must do transposition after converting to PCH
            if entry * microTone > entry * 2:
                environment.printWarn([
                    lang.WARN,
                    'microtone large (%s)' % (entry * microTone)
                ])
            trans = (transCurrent + (entry * microTone))
            psReal = pitchTools.psToTempered(psBase, octCurrent,
                                             self.temperamentObj, trans)
            psContourRef.append(psReal)  # transpose by half steps
    else:
        raise ValueError('no such pitchFormat')
    # this now returns psReals, not pch values
    return psContourRef, refScales
def _scoreMain(self):
    """creates score

    Walks each path position, and for every set sounds all pitches of the
    current chord simultaneously as one sustained drone spanning the set's
    full duration; each voice's onset is jittered forward by a random
    Gaussian offset bounded by the 'mto' texture option.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('DroneSustain')
    >>> ti.tmName == 'DroneSustain'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide PATH/PITCH elements
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # texture-wide TEXTURE (self.textQ) options
    # maximum random onset offset per voice, in seconds
    textMaxTimeOffset = self.getTextStatic('mto', 'time')
    # get field, octave selection method value ('set', 'event', or 'voice')
    textFieldLevel = self.getTextStatic('lfp', 'level')
    textOctaveLevel = self.getTextStatic('lop', 'level')
    # this gives a range from -1 to 1 (abs() is applied at use site)
    self.gaussPmtrObj = parameter.factory(('randomGauss', .5, .1, -1, 1))
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        chordLength = len(chordCurrent)
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent) # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent) # choose OCTAVE
        while 1: # PITCH in CHORD
            if tCurrent >= tEndSet: break
            # no ps yet found, give as None, get default
            self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                             None, None)
            # choose RHYTHM, no parameter used: the drone fills the set
            dur = tEndSet - tCurrent # total time of set
            sus = dur
            acc = 1 # no rests
            pulse = '(1,1,1)'
            bpm = None
            # NOTE(review): acc is hard-coded to 1 just above, so this rest
            # branch can never fire; kept for structural parity with other
            # _scoreMain implementations
            if acc == 0 and not self.silenceMode: # this is a rest
                tCurrent = (tCurrent + dur)
                continue
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent) # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent) # choose OCTAVE
            # amp and pan per chord, not voice
            amp = self.getAmp(tCurrent) * acc
            pan = self.getPan(tCurrent)
            tThisChord = copy.deepcopy(tCurrent)
            for ps in chordCurrent:
                if textFieldLevel == 'voice':
                    transCurrent = self.getField(tThisChord) # choose PITCHFIELD
                if textOctaveLevel == 'voice':
                    octCurrent = self.getOct(tThisChord) # choose OCTAVE
                psReal = pitchTools.psToTempered(ps, octCurrent,
                                                 self.temperamentObj,
                                                 transCurrent)
                self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                                 None, psReal)
                # calculate for every voice in chord
                auxiliary = self.getAux(tCurrent) # choose AUX, in list
                # offset value is between 0 and +textMaxTimeOffset (abs used)
                offset = abs(self.gaussPmtrObj(0.0)) * textMaxTimeOffset
                tCurrent = tCurrent + offset
                # NOTE(review): offset is always >= 0, so this guard appears
                # unreachable unless tThisChord itself is negative — confirm
                if tCurrent < 0: # cant start before 0
                    tCurrent = tThisChord # reset
                eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus,
                                           acc, amp, psReal, pan, auxiliary)
                self.storeEvent(eventDict)
                # all notes start at the beginning of this chord
                tCurrent = tThisChord
            # move clocks forward by dur unit
            tCurrent = tCurrent + dur
        self.clockForward() # advances path position
    # return value to check for errors
    return 1
def create(self, refDict, presetName='trill', ornPos='release',
           pitchLang='path', microTone=.5):
    """Build the event list for one ornamented note.

    Produces the base note plus a series of short ornament events derived
    from the preset's contour form, positioned before ('attack') or after
    ('release') the base note. Returns a list of incomplete event dicts
    (see _makeEventDict); the base note is placed first for 'release'
    ornaments, last otherwise. If the computed base duration is too short,
    the ornament is aborted and only the plain base note is returned.

    psBase is a pitch space integer, or not, depending on path form.
    """
    self.refDict = refDict
    tCurrent = refDict['stateCurrentTime']
    psBaseRaw = refDict['stateCurrentPitchRaw'] # this is psBaseInt, no temper
    presetDict = self._getOrnLibrary(presetName, ornPos, pitchLang, microTone)
    # load parameters from the preset (preset values override the arguments)
    contourForm = presetDict['contourForm']
    ornStyle = presetDict['ornStyle']
    ornPos = presetDict['ornPos']
    pitchLang = presetDict['pitchLang']
    ornNotePcent = presetDict['ornNotePcent']
    durOrnGoal = presetDict['durOrnGoal']
    durInstPcentOffset = presetDict['durInstPcentOffset']
    microTone = presetDict['microTone']
    ampScalerMedian = presetDict['ampScalerMedian']
    ampInstPcentOffset = presetDict['ampInstPcentOffset']
    # from parameter objects
    inst = self.pmtrObjDict['inst'].currentValue
    pan = self.pmtrObjDict['panQ'].currentValue
    amp = self.pmtrObjDict['ampQ'].currentValue
    octCurrent = self.pmtrObjDict['octQ'].currentValue
    transCurrent = self.pmtrObjDict['fieldQ'].currentValue
    pulseObj = self.pmtrObjDict['rhythmQ'].currentPulse
    baseRhythmTuple = pulseObj.get('triple')
    currentBeatTime = self.pmtrObjDict['rhythmQ'].bpm
    # duration of base rhythm, first of triple
    # NOTE(review): comment in original says "time in ms" but durations below
    # are compared against seconds (.0001 s, 3 ms buffer) — confirm unit
    rhythmBase = self.pmtrObjDict['rhythmQ'].currentValue[0] # gets time
    # calculate a tempered, transposed position; this value may be a float
    psBase = pitchTools.psToTempered(psBaseRaw, octCurrent,
                                     self.temperamentObj, transCurrent)
    # translate contourForm into a list of appropriate tempered pitches
    #print _MOD, pitchLang, contourForm, psBase, microTone
    # psBase here needs to be an int, in pitch space, that is somewhere on
    # the path
    psContourReference, refScales = self.psScale(pitchLang, contourForm,
                                                 psBaseRaw, microTone)
    # estimated; this will change once rhythms are measured
    estOrnDurFraction = rhythmBase * ornNotePcent
    # find number of notes, and actual duration of all ornaments
    totOrnDur, durList = self._getOrnDurStyle(presetDict, estOrnDurFraction)
    # get timings: time of ornament, time of base
    posTimes = self._setOrnPos(ornPos, rhythmBase, tCurrent, totOrnDur)
    durBase, tBaseStart, tBaseEnd, tOrnStart, tOrnEnd = posTimes
    # make notes
    tLocalCurrent = copy.deepcopy(tCurrent)
    ampOrnament = amp * ampScalerMedian # get amp base value
    eventList = [] # event list does not store complete events
    # make base note: if very short, omit the ornament (value in seconds)
    if durBase > .0001:
        abortOrnament = 0
    else:
        abortOrnament = 1
    if not abortOrnament:
        baseNoteEvent = self._makeEventDict(tBaseStart, durBase, amp,
                                            psBase, pan)
    else: # abort ornament, use tempered pitch; return just the base note
        print(lang.WARN, "ornamants aborted")
        baseNoteEvent = self._makeEventDict(tCurrent, rhythmBase, amp,
                                            psBase, pan)
        eventList.append(baseNoteEvent)
        return eventList
    # build ornament from ornament start time
    tLocalCurrent = tOrnStart # clock may move backwards
    scalePosition = 0 # index to contourForm and durList
    durPosition = 0
    durBufferSpace = .003 # 3 ms gap to avoid clips
    while 1:
        #print scalePosition, tLocalCurrent, durList[durPosition], tOrnEnd
        ampInstance = self._addAmpNoise(ampOrnament, ampInstPcentOffset)
        # indices hold ps, tempered version of contourForm
        psCurrent = psContourReference[scalePosition]
        durInst = durList[durPosition]
        durFake = durInst - durBufferSpace # create shorter dur for b/n notes
        eventDict = self._makeEventDict(tLocalCurrent, durFake, ampInstance,
                                        psCurrent, pan) # not complete event
        eventList.append(eventDict)
        tLocalCurrent = tLocalCurrent + durInst # advance by actual dur
        # iterators: both indices wrap around their lists independently
        scalePosition = scalePosition + 1
        if scalePosition == len(psContourReference):
            scalePosition = 0
        durPosition = durPosition + 1
        if durPosition == len(durList):
            durPosition = 0
        # exits
        if ornStyle == 'loop':
            if tLocalCurrent >= tOrnEnd: break
            if tLocalCurrent + durList[durPosition] >= tOrnEnd:
                break # dont want trill that spills over
        else: # ornaments that are 'single' or 'scale'
            if durPosition == 0: # its gone through once already
                break # only have enough durs for each ornament
    # resort notes so this looks better
    if ornPos == 'release':
        eventList.insert(0, baseNoteEvent) # insert at beginning
    else: # place base note after ornament
        eventList.append(baseNoteEvent) # add at end
    return eventList
def _scoreMain(self):
    """creates score

    Distributes a fixed number of events ('tec') over the texture's time
    span, placing each event at a time chosen by the 'fillGenerator'
    dynamic parameter (normalized, then denormalized into the set or
    texture range). Event counts are partitioned per set either evenly
    ('edp' == 'set') or by each set's duration fraction.

    note: octave chosen for every note

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('TimeFill')
    >>> ti.tmName == 'TimeFill'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    # texture-wide time elements
    inst = self.getInst() # needed for preliminary parameter values
    # tStart, tEnd = self.getTimeRange()
    # tCurrent = tStart
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    textPitchSelectorControl = self.getTextStatic('psc', 'selectionString')
    textEventCount = self.getTextStatic('tec', 'count')
    textEventPartition = self.getTextStatic('lep', 'level')
    textDensityPartition = self.getTextStatic('edp', 'level')
    if textDensityPartition == 'set': # divide count evenly among sets
        pLen = self.getPathLen()
        eventPerSet = [int(round(textEventCount / pLen))] * pLen
    else: # partition count by each set's duration fraction
        scalars = self.getPathDurationPercent()
        eventPerSet = [int(round(x * textEventCount)) for x in scalars]
    eventIndex = 0
    # create a list of chords from the appropriate pitch mode
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        # start and end of this set is in real-time, not local to path;
        # if not by set, boundaries are always those of the entire texture
        if textEventPartition == 'set':
            tStartSet, tEndSet = self.clockPoints() # relative to start
        else: # path based; treat time as one set
            tStartSet, tEndSet = self.getTimeRange() # relative to path
        # create a generator to get pitches from chord as index values.
        # fix: wrap range in list() — selection strategies may mutate or
        # shuffle the collection, which fails on an immutable range object;
        # this also matches the sibling _scoreMain implementations
        selectorChordPos = basePmtr.Selector(list(range(len(chordCurrent))),
                                             textPitchSelectorControl)
        # real set start is always the formal start time here
        tCurrent = copy.deepcopy(tStartSet)
        tStartSetReal = copy.deepcopy(tStartSet)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent) # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent) # choose OCTAVE
        # get event count from eventPerSet list by pathPos
        for i in range(eventPerSet[pathPos]): # pitch in chord
            eventIndex = eventIndex + 1 # cumulative count
            # even when rounded, dont exceed maximum; last set may have less
            if eventIndex > textEventCount: break
            # tCurrent here is assumed as start of set initially, although
            # this is not exactly correct
            tUnit = unit.limit(self.getTextDynamic('fillGenerator', tCurrent))
            tCurrent = unit.denorm(tUnit, tStartSet, tEndSet)
            # choose pc from chord
            ps = chordCurrent[selectorChordPos()] # get position w/n chord
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent) # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent) # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if acc == 0 and not self.silenceMode: # this is a rest
                tCurrent = tCurrent + dur
                continue
            amp = self.getAmp(tCurrent) * acc # choose amp, pan
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent) # choose AUX, pack into list
            eventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus, acc,
                                       amp, psReal, pan, auxiliary)
            self.storeEvent(eventDict)
            # tCurrent = tCurrent + dur
        # move clocks forward by dur unit
        self.clockForward() # advances path position
    return 1
def _scoreMain(self):
    """creates score

    Cycles through each chord's pitches in order, optionally attaching an
    ornament (chosen at random from the active ornament library) to each
    event with probability 'omd'. When the loop-while-sustain toggle
    ('lws') is off, the chord is played through once and remaining time in
    the set is filled with rests.

    >>> from athenaCL.libATH.libTM import texture
    >>> ti = texture.factory('MonophonicOrnament')
    >>> ti.tmName == 'MonophonicOrnament'
    True
    >>> ti.loadDefault()
    >>> ti.score() == True
    True
    """
    self.ornamentObj = ornament.Ornament(self.pmtrObjDict,
                                         self.temperamentObj)
    # texture-wide PATH/PITCH elements
    # texture-wide time elements
    inst = self.getInst()
    tStart, tEnd = self.getTimeRange()
    tCurrent = tStart
    # texture-wide TEXTURE (self.textQ) options
    textRepeatToggle = self.getTextStatic('lws', 'onOff')
    # ornament library selection and maximum ornament density
    ornamentSwitch = self.getTextStatic('ols', 'libraryName')
    ornamentMaxDensity = self.getTextStatic('omd', 'percent')
    # get field, octave selection method value
    textFieldLevel = self.getTextStatic('lfm', 'level')
    textOctaveLevel = self.getTextStatic('lom', 'level')
    # create a randomUniform parameter object to gate ornament application:
    # values between 0 and 1; ornament applied if pmtr() <= ornamentMaxDensity
    ruPmtrObj = parameter.factory(('randomUniform', 0, 1))
    for pathPos in self.getPathPos():
        chordCurrent = self.getPitchGroup(pathPos)
        multisetCurrent = self.getMultiset(pathPos)
        tStartSet, tEndSet = self.clockPoints()
        chordLength = len(chordCurrent)
        chordIndex = 0
        muteSet = 'off' # becomes 'on' once the chord has been fully sounded
        psTest = [] # accumulates sounded pitches to detect full traversal
        ornamentIndex = 0
        tStartSetReal = copy.deepcopy(tCurrent)
        self.stateUpdate(tCurrent, chordCurrent, None, multisetCurrent,
                         None, None)
        if textFieldLevel == 'set':
            transCurrent = self.getField(tCurrent) # choose PITCHFIELD
        if textOctaveLevel == 'set':
            octCurrent = self.getOct(tCurrent) # choose OCTAVE
        while 1: # PITCH in CHORD
            if tCurrent >= tEndSet: break
            ps = chordCurrent[chordIndex] # choose PC from CHORD
            psTest.append(ps)
            chordIndex = chordIndex + 1 # shift to next pitch
            if chordIndex >= chordLength:
                chordIndex = 0
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, None)
            if textFieldLevel == 'event':
                transCurrent = self.getField(tCurrent) # choose PITCHFIELD
            if textOctaveLevel == 'event':
                octCurrent = self.getOct(tCurrent) # choose OCTAVE
            psReal = pitchTools.psToTempered(ps, octCurrent,
                                             self.temperamentObj,
                                             transCurrent)
            self.stateUpdate(tCurrent, chordCurrent, ps, multisetCurrent,
                             None, psReal)
            bpm, pulse, dur, sus, acc = self.getRhythm(tCurrent)
            if muteSet == 'on': # make all rests
                acc = 0 # everything is a rest after chord index reset
            if acc == 0 and not self.silenceMode: # this is a rest
                chordIndex = chordIndex - 1 # dont count this note if a rest
                psTest = psTest[:-1] # drop off last addition
                tCurrent = tCurrent + dur
                continue
            # amp, pan, aux chosen per event
            amp = self.getAmp(tCurrent) * acc
            pan = self.getPan(tCurrent)
            auxiliary = self.getAux(tCurrent)
            parentEventDict = self.makeEvent(tCurrent, bpm, pulse, dur, sus,
                                             acc, amp, psReal, pan,
                                             auxiliary)
            refDict = self.getRefDict(tCurrent)
            if ornamentSwitch != 'off':
                # check if an ru value is <= ornament density (if 1, always);
                # time value is not important
                if ruPmtrObj(tCurrent) <= ornamentMaxDensity:
                    repretory = self._ornGroupGet(ornamentSwitch)
                    #a, b, c, d = repretory[ornamentIndex] # this will do in order
                    a, b, c, d = random.choice(repretory) # choose orn at random
                    subEventArray = self.ornamentObj.create(refDict,a,b,c,d)
                    # store the parent event with its ornament sub-events
                    self.storePolyEvent(parentEventDict, subEventArray, 'orn')
                    ornamentIndex = ornamentIndex + 1 # increment for when ordered
                    if ornamentIndex == len(repretory):
                        ornamentIndex = 0
                else:
                    self.storeEvent(parentEventDict)
            else: # ornament == 'off': dont do ornaments
                self.storeEvent(parentEventDict)
            # turn off further notes once all chord pitches have sounded
            if textRepeatToggle == 'off' and len(psTest) >= chordLength:
                muteSet = 'on'
            # move clocks forward by dur unit
            tCurrent = tCurrent + dur
        self.clockForward() # advances path position
    return 1