def findSegmentationFile(cobjname, searchPaths, segmentationExtension, wholeFileBool):
	# ignore directories
	if os.path.isdir(cobjname): return 'corpusdirectory'
	#print("testing", cobjname, searchPaths, segmentationExtension)
	segmentationSearchPaths = searchPaths[:]
	segmentationSearchPaths.insert(0, os.path.split(cobjname)[0])
	segmentationSearchPaths = list(set(segmentationSearchPaths))
	possibilities = []
	foundit = None
	for d in segmentationSearchPaths:
		testpath = os.path.join(d, os.path.split(cobjname)[1] + segmentationExtension)
		possibilities.append(testpath)
		#print("\ttesting path", testpath, os.path.exists(testpath))
		if os.path.exists(testpath):
			foundit = testpath
			break
	# if not found
	if foundit == None and os.path.isdir(cobjname) and not wholeFileBool:
		util.error('segmentation file', "Cannot find any segmentation file for '%s' (tested %s). To specify the use of whole sound files as corpus segments, write this corpus entry as: \n\tcsf('%s', wholeFile=True)\nor\n\tCORPUS_GLOBAL_ATTRIBUTES = {'wholeFile': True}" % (cobjname, possibilities, cobjname))
	elif foundit == None and not wholeFileBool:
		util.error('segmentation file', "Cannot find any segmentation file for '%s' (tested %s)." % (cobjname, possibilities))
	return foundit
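# Hedged usage sketch (not part of the original source; the filenames below are hypothetical).
# Given a corpus file '/corpus/audio/cello.aiff' and an Audacity-style label file
# '/corpus/audio/cello.aiff.txt', the lookup resolves the label file because the corpus
# file's own directory is added to the supplied search paths:
#
#   segpath = findSegmentationFile('/corpus/audio/cello.aiff', ['/corpus/labels'], '.txt', False)
#   # -> '/corpus/audio/cello.aiff.txt' if it exists; util.error() is raised when nothing
#   #    is found and wholeFileBool is False.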
def __init__(self, scoreFromUserOptions, usercorpus, tgtsegs, tgtlength, cpsseglist, hopsizesec, p):
	self.active = scoreFromUserOptions != None and len(scoreFromUserOptions.instrumentobjs) != 0
	if not self.active: return
	#
	# test configuration
	# make sure that instruments listed have a corresponding tag in the corpus; otherwise die
	allCorpusInstrTags = []
	for c in usercorpus:
		if type(c.instrTag) == type(""):
			allCorpusInstrTags.append(c.instrTag)
		elif c.instrTag == None:
			pass
		else:
			allCorpusInstrTags.extend(c.instrTag)
	unusedInstruments = []
	for si in scoreFromUserOptions.instrumentobjs:
		if si.name not in allCorpusInstrTags:
			unusedInstruments.append(si.name)
	if len(unusedInstruments) > 0:
		util.error("INSTRUMENT", "Several instruments defined in score() are not tagged to any of your csfs(). These include %s. To link instruments to csf() resources, set the csf keyword 'instrTag' to the name of the desired instrument." % ', '.join(unusedInstruments), exitcode=1)
	#
	self.setup_internalshit(scoreFromUserOptions, usercorpus, tgtsegs, tgtlength, cpsseglist, hopsizesec, p)
def quantizeTime(outputEvents, method, interval, p):
	if method == None:
		'''Does nothing'''
		return
	p.pprint('Quantizing selected events into slices of %.2f seconds according to %s\n' % (interval, method))
	if method == 'snapToGrid':
		'''Quantizes each note's start time down to the start of the OUTPUTEVENT_QUANTIZE_TIME_INTERVAL slice that contains it'''
		for oe in outputEvents:
			oe.timeInScore = (int(oe.timeInScore / interval)) * interval
	elif method == 'medianAggregate':
		'''Sets each note's start time to the median time of notes found in time slices of OUTPUTEVENT_QUANTIZE_TIME_INTERVAL length in seconds.'''
		lastEvent = outputEvents[-1].timeInScore
		for oe in outputEvents:
			oe.quantizeInx = int(oe.timeInScore / interval)
		for qstep in range(int(lastEvent / interval) + 1):
			found = []
			for oe in outputEvents:
				if oe.quantizeInx == qstep:
					found.append(oe)
			if len(found) == 0:
				# nothing in this slice
				continue
			qstepMedianTime = np.median([oe.timeInScore for oe in found])
			for oe in found:
				oe.timeInScore = qstepMedianTime
	else:
		util.error("QUANTIZATION", "no quantization method called %s" % method)
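# Worked example (added for illustration; the values are hypothetical). With interval=0.5,
# 'snapToGrid' moves an event at timeInScore=1.37 to 1.0, since int(1.37/0.5)*0.5 == 1.0.
# With 'medianAggregate', events at 1.1, 1.2 and 1.4 all land in slice index 2
# (int(t/0.5) == 2), so each is moved to their median start time, 1.2.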
def pitchoverride(cobjlist, config):
	pitchlist = [c.desc['MIDIPitch-seg'].get(None, None) for c in cobjlist]
	minpitch, maxpitch = min(pitchlist), max(pitchlist)
	output_dict = {}
	for c, standardpitch in zip(cobjlist, pitchlist):
		if config == None:
			output_dict[c] = standardpitch
		elif type(config) in [float, int]:
			# pitchoverride=60
			output_dict[c] = config
		elif type(config) != dict:
			util.error("INSTRUMENTS", 'pitchoverride must either be None, a number, or a dictionary.')
		# if we pass this point, we're using the dict format
		elif 'type' in config and config['type'] == 'remap':
			assert 'low' in config and 'high' in config
			standard_zerotoone = (standardpitch-minpitch)/(maxpitch-minpitch)
			output_dict[c] = (standard_zerotoone*(config['high']-config['low']))+config['low']
		# clip
		elif 'type' in config and config['type'] == 'clip':
			assert 'low' in config or 'high' in config
			if 'low' in config and standardpitch < config['low']:
				output_dict[c] = config['low']
			elif 'high' in config and standardpitch > config['high']:
				output_dict[c] = config['high']
			else:
				output_dict[c] = standardpitch
		# filename string match
		elif 'type' in config and config['type'] == 'file_match':
			for k in config:
				if k == 'type': continue
				if util.matchString(c.printName, k, caseSensative=True):
					output_dict[c] = config[k]
			# not found -- fall back to the segment's analyzed pitch
			if c not in output_dict:
				output_dict[c] = standardpitch
		else:
			util.error("INSTRUMENTS", "pitchoverride dictionary 'type' must be 'remap', 'clip', or 'file_match'.")
	return output_dict
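# Hedged examples of pitchoverride configs accepted above (illustrative only; the
# filenames and values are made up):
#   pitchoverride(cobjs, None)                                      # keep each segment's analyzed pitch
#   pitchoverride(cobjs, 60)                                        # force every segment to MIDI 60
#   pitchoverride(cobjs, {'type': 'remap', 'low': 48, 'high': 72})  # rescale analyzed pitches into 48-72
#   pitchoverride(cobjs, {'type': 'clip', 'low': 36})               # raise anything below 36 up to 36
#   pitchoverride(cobjs, {'type': 'file_match', 'viola*': 55})      # pin segments from matching files to 55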
def load_target(self):
	self.p.logsection("TARGET")
	self.tgt = sfsegment.target(self.ops.TARGET, self.AnalInterface)
	self.tgt.initAnal(self.AnalInterface, self.ops, self.p)
	self.tgt.stageSegments(self.AnalInterface, self.ops, self.p)
	if len(self.tgt.segs) == 0:
		util.error("TARGET FILE", "No segments were found. Could your target file %s be digital silence?" % (self.tgt.filename))
	self.p.log("TARGET SEGMENTATION: found %i segments with an average length of %.3f seconds" % (len(self.tgt.segs), np.average(self.tgt.seglengths)))
	descriptors = []
	dnames = []
	self.ops.parseDescriptors()
	for dobj in self.ops._normalizeDescriptors + self.ops._limitDescriptors:
		if dobj.seg or dobj.name in ['power']: continue
		d = np.array(self.tgt.whole.desc.get(dobj.name, copy=True))
		d -= np.min(d)
		d /= np.max(d)
		d = np.around(d, 2)
		descriptors.append(d)
		dnames.append(dobj.name)
	if self.p.html != None:
		self.p.html.jschart_timeseries(
			yarray=np.array([self.AnalInterface.f2s(i) for i in range(self.tgt.whole.lengthInFrames)]),
			xarrays=descriptors,
			ylabel='time in seconds',
			xlabels=dnames)
def testOption(name, value):
	#print(name, value)
	if name.startswith('_'): return # exception to permit user variables that start with an underscore
	if name not in UserVar_types:
		util.error("user variable", "There is no option called %s." % (name))
	outcomes = []
	for tstring in UserVar_types[name]:
		outcomes.append(testVariable(tstring, value))
	if True not in outcomes: # none of the type tests passed
		util.error("user variable", "variable %s must be %s (%s given as %s)" % (name, ' or '.join(UserVar_types[name]), str(value), type(value)))
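# Hedged illustration (an assumption about UserVar_types, not taken from the source):
# UserVar_types is assumed to map an option name to the list of type strings it may
# satisfy, e.g. {'CSOUND_SR': ['int'], 'TARGET': ['str', 'tuple']}, so that
# testOption('CSOUND_SR', 48000) passes while testOption('CSOUND_SR', 'yes') errors.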
def initialize_analysis_interface(self, printversion=True):
	if 'concateMethod' in self.ops.EXPERIMENTAL and self.ops.EXPERIMENTAL['concateMethod'] == 'framebyframe':
		util.error("CONFIG", "Frame by frame concatenation is only possible with the agConcatenateFrames.py script.")
	self.p = userinterface.printer(self.ops.VERBOSITY, os.path.dirname(__file__), self.ops.get_outputfile('HTML_LOG_FILEPATH'))
	if printversion:
		self.p.printProgramInfo(__version__)
	self.AnalInterface = self.ops.createAnalInterface(self.p)
def finalizeSegmentNormList(self):
	for csfs in self.postLimitSegmentList:
		# add the segment since it passed any user-supplied limitations...
		self.data['postLimitSegmentDictVoice'][csfs.voiceID].append(csfs)
		self.data['totalLengthInSeconds'] += csfs.segmentDurationSec
		self.data['postLimitSegmentCount'] += 1
	self.postLimitSegmentNormList = self.postLimitSegmentList
	# test to make sure some samples made it through the user's limitations...
	if self.data['postLimitSegmentCount'] == 0:
		util.error('CORPUS', "No database segments made it into the selection pool. Check limits and pitchfilters...")
def testForInitErrors(self, AnalInterface):
	# test self.filename
	oneframesec = AnalInterface.f2s(1)
	if not os.path.exists(self.filename):
		util.error('sfsegment init', 'file does not exist: \t%s\n' % self.filename)
	if self.soundfileExtension.lower() not in AnalInterface.validSfExtensions:
		util.error('sfsegment init', 'file is not an accepted soundfile type: \t%s\n' % self.filename)
	# test that startSec is sane
	if self.segmentStartSec < 0:
		util.error('sfsegment init', 'startSec is less than 0!')
	if self.segmentStartSec >= self.segmentEndSec:
		util.error('sfsegment init', 'startSec is greater than or equal to endSec!')
	# test whether the requested read is too long
	if self.segmentEndSec > self.soundfileTotalDuration + (oneframesec / 2.):
		print('\n\nWARNING: endSec (%.2f) is longer than the file\'s duration (%.2f)! Truncating to the file length.\n\n' % (self.segmentEndSec, self.soundfileTotalDuration))
		self.segmentEndSec = self.soundfileTotalDuration
def evaluate_midipitches(segmentobjlist, config, notfound=-1):
	if config in ['filename', 'composite', 'centroid-seg', 'f0-seg']:
		output_pitch_list = [MIDIPitchByFileName(obj.printName, config, obj, notfound=notfound) for obj in segmentobjlist]
	# float or int
	elif type(config) in [float, int]:
		output_pitch_list = [config for obj in segmentobjlist]
	elif type(config) != dict:
		util.error("SF SEGMENTS", 'midiPitchMethod must either be a string, a number, or a dictionary.')
	elif 'type' not in config:
		util.error("SF SEGMENTS", "a midiPitchMethod dictionary needs the key 'type'.")
	# remap
	elif config['type'] == 'remap':
		# {'type': 'remap', 'method': 'centroid-seg', 'high': 80, 'low': 76}
		assert 'low' in config and 'high' in config and 'method' in config
		pitchlist = np.array([MIDIPitchByFileName(obj.printName, config['method'], obj, notfound=notfound) for obj in segmentobjlist])
		minpitch, maxpitch = min(pitchlist), max(pitchlist)
		output_pitch_list = (((pitchlist-minpitch)/(maxpitch-minpitch))*(config['high']-config['low']))+config['low']
	# clip
	elif config['type'] == 'clip':
		assert ('low' in config or 'high' in config) and 'method' in config
		pitchlist = np.array([MIDIPitchByFileName(obj.printName, config['method'], obj, notfound=notfound) for obj in segmentobjlist])
		if not 'low' in config: config['low'] = None
		if not 'high' in config: config['high'] = None
		output_pitch_list = np.clip(pitchlist, config['low'], config['high'])
	# filename string match
	elif config['type'] == 'file_match':
		output_pitch_list = [MIDIPitchByFileName(obj.printName, 'composite', obj, notfound=notfound) for obj in segmentobjlist]
		for k in config:
			if k == 'type': continue
			for cidx, c in enumerate(segmentobjlist):
				if util.matchString(c.printName, k, caseSensative=True):
					output_pitch_list[cidx] = config[k]
	else:
		util.error("SF SEGMENTS", "midiPitchMethod dictionary 'type' must be 'remap', 'clip', or 'file_match'.")
	# assignment..
	for o, p in zip(segmentobjlist, output_pitch_list):
		o.midipitch = p
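# Hedged examples of midiPitchMethod configs accepted above (values are illustrative):
#   evaluate_midipitches(segs, 'composite')                                      # use the 'composite' pitch estimate
#   evaluate_midipitches(segs, 62)                                               # every segment becomes MIDI 62
#   evaluate_midipitches(segs, {'type': 'remap', 'method': 'centroid-seg', 'low': 76, 'high': 80})
#   evaluate_midipitches(segs, {'type': 'clip', 'method': 'f0-seg', 'low': 40})  # clip estimates below 40 up to 40
#   evaluate_midipitches(segs, {'type': 'file_match', 'gong*': 33})              # pin matching filenames to 33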
def __init__(self, corpusFromUserOptions, corpusGlobalAttributesFromOptions, restrictCorpusSelectionsByFilenameString, searchPaths, AnalInterface, p):
	self.preloadlist = []
	self.preLimitSegmentList = []
	self.postLimitSegmentNormList = []
	self.selectedSegmentList = []
	self.simSelectRuleByCorpusId = []
	self.len = len(corpusFromUserOptions)
	self.data = {}
	self.data['lastVoice'] = None
	self.data['totalLengthInSeconds'] = 0.
	self.data['vcToCorpusName'] = []
	self.data['postLimitSegmentDictVoice'] = {}
	self.data['postLimitSegmentCount'] = 0
	self.data['selectionTimeByVoice'] = {}
	self.data['cspInfo'] = []
	for cidx in range(len(corpusFromUserOptions)):
		self.data['postLimitSegmentDictVoice'][cidx] = []
		self.data['selectionTimeByVoice'][cidx] = []
	# find any GLOBAL limitations the user has placed on the corpus
	self.globalLimits = []
	self.localLimits = []
	if 'limit' in corpusGlobalAttributesFromOptions:
		for stringy in corpusGlobalAttributesFromOptions['limit']:
			self.globalLimits.append(cpsLimit(stringy, range(len(corpusFromUserOptions)), AnalInterface))
	self.data['numberVoices'] = len(corpusFromUserOptions)
	for cidx, cobj in enumerate(corpusFromUserOptions):
		# add this voice
		vcCnt = 0
		cobj.name = util.verifyPath(cobj.name, AnalInterface.searchPaths)
		cobj.voiceID = cidx
		self.simSelectRuleByCorpusId.append(cobj.superimposeRule)
		self.data['vcToCorpusName'].append(cobj.name)
		for name, val in corpusGlobalAttributesFromOptions.items():
			if name == 'limit': continue
			setattr(cobj, name, val)
		# add local limits
		totalLimitList = []
		for limitstr in cobj.limit:
			limitObj = cpsLimit(limitstr, [cidx], AnalInterface)
			self.localLimits.append(limitObj)
			totalLimitList.append(limitObj)
		# add global limits
		totalLimitList.extend(self.globalLimits) # from CORPUS_GLOBAL_ATTRIBUTES
		# get segments/files list
		timeList = []
		if os.path.isdir(cobj.name): fileType = 'dir'
		if os.path.isfile(cobj.name): fileType = 'file'
		if os.path.islink(cobj.name): fileType = 'link'
		# list of segmentation files?
		if type(cobj.segmentationFile) in [tuple, list]:
			# a list of seg files
			cobj.segmentationFile = [path.test(string, ops.SEARCH_PATHS)[1] for string in cobj.segmentationFile]
		if fileType == 'file': # an audio file
			##################
			## input a FILE ## -- look for audacity-style txt label file
			##################
			times = []
			if cobj.wholeFile:
				times.append([0, None])
			else:
				cobj.segmentationFile = findSegmentationFile(cobj.name, searchPaths[:], cobj.segmentationExtension, cobj.wholeFile)
				if not os.path.isfile(cobj.segmentationFile):
					util.error('segmentation file', "Cannot find segmentation file '%s'" % cobj.segmentationFile)
				sgs = util.readAudacityLabelFile(cobj.segmentationFile)
				times.extend(sgs)
				p.log("Using segments from segmentation file %s (%i segments)" % (cobj.segmentationFile, len(sgs)))
			for timeSeg in times:
				timeList.append([cobj.name] + timeSeg)
			cobj.numbSfFiles = 1
		elif fileType == 'dir': # a directory
			#######################
			## input a DIRECTORY ##
			#######################
			files = util.getDirListOnlyExt(cobj.name, cobj.recursive, AnalInterface.validSfExtensions)
			cobj.segmentationFile = None # don't print it
			for file in files:
				segFileTest = findSegmentationFile(file, searchPaths, cobj.segmentationExtension, cobj.wholeFile)
				if segFileTest != None and os.path.exists(segFileTest):
					times = util.readAudacityLabelFile(segFileTest)
					p.log("Using segments from segmentation file %s (%i segments)" % (segFileTest, len(times)))
				else:
					times = [[0, None]]
				for timeSeg in times:
					timeList.append([file] + timeSeg)
			cobj.numbSfFiles = len(files)
		# reset counters...
		segCount = 0
		windowDist = descriptordata.hannWin(len(timeList) * 2)
		# segment list
		stringMatchingWithFullPaths = True
		for idx in range(len(timeList)):
			startSec = timeList[idx][1]
			endSec = timeList[idx][2]
			if cobj.start != None and startSec < cobj.start: continue # skip it
			if cobj.end != None and startSec > cobj.end: continue # skip it
			# matchString: includeStr/excludeStr
			if stringMatchingWithFullPaths:
				stringMatchPath = os.path.abspath(timeList[idx][0])
			else:
				stringMatchPath = os.path.split(timeList[idx][0])[1]
			if cobj.includeStr != None:
				skip = True
				if type(cobj.includeStr) not in [list, tuple]: cobj.includeStr = [cobj.includeStr]
				for test in cobj.includeStr:
					if util.matchString(stringMatchPath, test, caseSensative=True):
						skip = False
				if skip: continue
			if cobj.excludeStr != None:
				skip = False
				if type(cobj.excludeStr) not in [list, tuple]: cobj.excludeStr = [cobj.excludeStr]
				for test in cobj.excludeStr:
					if util.matchString(stringMatchPath, test, caseSensative=True):
						skip = True
				if skip: continue
			# minTime / maxTime
			# matchTime: includeTimes/excludeTimes
			if len(cobj.includeTimes) > 0:
				skip = True
				for timeTuple in cobj.includeTimes:
					if startSec >= timeTuple[0] and endSec <= timeTuple[1]:
						skip = False
				if skip: continue
			if len(cobj.excludeTimes) > 0:
				skip = False
				if type(cobj.excludeTimes) not in [list, tuple]: cobj.excludeTimes = [cobj.excludeTimes] # force it to be a list
				for timeTuple in cobj.excludeTimes:
					if startSec >= timeTuple[0] and startSec < timeTuple[1]:
						skip = True
				#print stringMatchPath, start, end, skip
				if skip: continue
			# see if there is any extra data from the segmentation file
			if len(timeList[idx]) > 3:
				segmentationfileData = ' '.join(timeList[idx][3:])
			else:
				segmentationfileData = None
			# test if limitDur is set...
			if cobj.limitDur != None:
				if endSec != None and endSec - startSec > cobj.limitDur:
					endSec = startSec + cobj.limitDur
			# see which sf to map sound concatenation onto...
			if cobj.concatFileName == None:
				concatFileName = timeList[idx][0]
			else:
				concatFileName = cobj.concatFileName
			# get any metadata
			metadata = ''
			for mstring, mstart, mstop in cobj.metadata:
				if startSec >= mstart and startSec <= mstop:
					metadata += mstring + ' '
			# see if global RESTRICT_CORPUS_SELECT_PERCENTAGE_BY_STRING applies
			maxPercentTargetSegmentsByString = None
			for restrictStr, restrictVal in restrictCorpusSelectionsByFilenameString.items():
				if util.matchString(timeList[idx][0], restrictStr):
					maxPercentTargetSegmentsByString = restrictVal
			self.preloadlist.append([
				timeList[idx][0], timeList[idx][1], timeList[idx][2],
				cobj.scaleDb, cobj.onsetLen, cobj.offsetLen, cobj.envelopeSlope,
				AnalInterface, concatFileName, cobj.name, cobj.voiceID,
				cobj.midiPitchMethod, totalLimitList, cobj.pitchfilter,
				cobj.scaleDistance, cobj.superimposeRule, cobj.transMethod,
				cobj.transQuantize, cobj.allowRepetition, cobj.restrictInTime,
				cobj.restrictOverlaps, cobj.restrictRepetition,
				cobj.postSelectAmpBool, cobj.postSelectAmpMin, cobj.postSelectAmpMax,
				cobj.postSelectAmpMethod, segmentationfileData, metadata,
				cobj.clipDurationToTarget, cobj.instrTag, cobj.instrParams])
			vcCnt += 1
		self.data['cspInfo'].append({
			'name': cobj.name,
			'filehead': os.path.split(cobj.name)[1],
			'segs': str(vcCnt),
			'fileType': fileType,
			'numbSfFiles': cobj.numbSfFiles,
			'restrictInTime': cobj.restrictInTime,
			'segFile': cobj.segmentationFile,
			'restrictOverlaps': cobj.restrictOverlaps,
			'scaleDb': cobj.scaleDb,
			'maxPercentTargetSegments': cobj.maxPercentTargetSegments,
			'selectedTargetSegments': [],
			'instrTag': cobj.instrTag,
			'instrParams': cobj.instrParams})
	###########################
	## done with CORPUS loop ##
	###########################
	p.startPercentageBar(upperLabel="Evaluating CORPUS...", total=len(self.preloadlist))
	# in a separate loop for printing...
	for cidx, corpusSegParams in enumerate(self.preloadlist):
		start = corpusSegParams[1]
		stop = corpusSegParams[2]
		if start == None: start = 0
		if stop == None: stop = 100
		p.percentageBarNext(lowerLabel="%s@%.2f-%.2f" % (corpusSegParams[0], start, stop))
		# make the obj
		cpsSeg = sfsegment.corpusSegment(*corpusSegParams)
		# add it to the list!
		self.preLimitSegmentList.append(cpsSeg)
	self.evaluatePreConcateLimitations()
	self.evaluateCorpusPitchFilters()
	self.finalizeSegmentNormList()
	p.percentageBarClose(txt="Read %i/%i segments (%.0f%%, %.2f min.)" % (self.data['postLimitSegmentCount'], len(self.preLimitSegmentList), self.data['postLimitSegmentCount'] / float(len(self.preLimitSegmentList)) * 100., self.data['totalLengthInSeconds'] / 60.))
	self.printConcateLimitations(p)
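# Hedged illustration (filenames and times are made up). An Audacity-style label file,
# as read by util.readAudacityLabelFile(), holds "start<TAB>end<TAB>optional text" rows;
# a corpus file 'bassoon.wav' whose label file contains
#   0.00	1.25	staccato
#   1.25	3.80	sustained
# would (with no include/exclude filters) contribute timeList rows of the form
#   ['bassoon.wav', 0.0, 1.25, 'staccato'] and ['bassoon.wav', 1.25, 3.8, 'sustained'],
# and each surviving row becomes one entry in self.preloadlist, i.e. one sfsegment.corpusSegment.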
def __createDescriptorsFile__(self, sffile, analdir, npypath, jsonpath, ircam_bin, ircamd_configfile, debug=False):
	global descriptToFiles
	STAGING_DIRECTORY = os.path.join(analdir, 'tmp')
	if not os.path.exists(STAGING_DIRECTORY):
		os.makedirs(STAGING_DIRECTORY)
	command = [ircam_bin, sffile, ircamd_configfile]
	if self.p != None:
		self.p.log("\tRUNNING COMMAND: '" + ' '.join(command) + "'")
	stdoutReturnDict = {
		('sr', 0): ('sr', 2, int),
		('samples', 0): ('lengthsamples', 2, int),
		('channel(s):', 0): ('channels', 1, int)}
	try:
		p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=STAGING_DIRECTORY)
	except OSError:
		util.error('commandline', 'Command line call failed: \n\n"%s"' % ' '.join(command))
	out, err = p.communicate()
	out = out.decode('utf-8')
	# test for bad exit status
	if err not in [0, b'']:
		util.error('commandline', 'AudioGuide command line call failed: \n"%s"%s%s' % (' '.join(command), '\n--------\n\n', out))
	infodict = {'ircamd_columns': {'power': 0}}
	# parse fields of interest from the command's stdout
	for o in out.split('\n'):
		#print(o)
		o = o.split()
		if len(o) > 1:
			for (matchstring, loc), (key, valloc, valtype) in stdoutReturnDict.items():
				if o[loc] == matchstring:
					infodict[key] = o[valloc]
					if valtype == int: infodict[key] = int(infodict[key])
					if valtype == float: infodict[key] = float(infodict[key])
	# get the number of frames of the short-time centroid
	f = open(os.path.join(STAGING_DIRECTORY, 'SpectralCentroid_ShortTermFeature_space2.info.txt'))
	framelength = int(f.readlines()[2].split()[2])
	f.close()
	infodict['ircamd'] = {'framelength': framelength, 'filehead': os.path.split(npypath)[1]}
	if debug:
		print(sffile + '\n')
		print(infodict)
		print("Array=", (framelength, len(descriptToFiles) + 1))
	# set up the descriptor matrix
	ircamd_array = np.empty((framelength, len(descriptToFiles) + 1)) # plus one column for power, added separately due to its separate framerate
	# get the number of frames of the short-time energy envelope
	f = open(os.path.join(STAGING_DIRECTORY, 'EnergyEnvelope_ShortTermFeature_space1.info.txt'))
	energyframelength = int(f.readlines()[2].split()[2])
	f.close()
	myarray = np.fromfile(os.path.join(STAGING_DIRECTORY, 'EnergyEnvelope_ShortTermFeature_space1.raw'), dtype=np.float, count=-1, sep='')
	newarray = util.interpArray(myarray, framelength)
	newarray = np.reshape(newarray, (framelength, 1))
	ircamd_array[:, 0] = newarray[:, 0] # power is the first column
	infodict['lengthsec'] = infodict['lengthsamples'] / float(infodict['sr'])
	for idx, (agId, source, isAmp, isMixable, rawFilename, matrixSize, matrixLocation) in enumerate(descriptToFiles):
		filename = os.path.join(STAGING_DIRECTORY, rawFilename + '_ShortTermFeature_space2.raw')
		myarray = np.fromfile(filename, dtype=np.float, count=-1, sep='')
		#print(source, rawFilename, matrixSize, len(myarray), framelength, matrixSize, framelength*matrixSize)
		myarray = np.reshape(myarray, (framelength, matrixSize))
		ircamd_array[:, idx + 1] = myarray[:, matrixLocation]
		infodict['ircamd_columns'][agId] = idx + 1
	# write files
	f = open(jsonpath, "w")
	json.dump(infodict, f)
	f.close()
	np.save(npypath, ircamd_array)
	return infodict, ircamd_array
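# Hedged sketch (an assumption, for orientation only) of what the returned infodict may
# look like after a successful run; the exact keys depend on the ircamdescriptor binary's
# stdout and on descriptToFiles:
#   {'ircamd_columns': {'power': 0, ...one column index per agId...},
#    'sr': 44100, 'lengthsamples': 441000, 'channels': 1, 'lengthsec': 10.0,
#    'ircamd': {'framelength': <frames>, 'filehead': 'somefile.npy'}}
# ircamd_array has shape (framelength, len(descriptToFiles) + 1), with power in column 0.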
def makeConcatenationCsdFile(outputCsdPath, outputSoundfilePath, channelRenderMethod, sr, kr, scoreText, cpsLength, listOfSfchannelsInScore, maxOverlaps, instruments, numberClasses, bits=32, useTargetAmplitude=0):
	if channelRenderMethod == "corpusmax":
		nchnls = max(listOfSfchannelsInScore) # use the maximum number of channels found on a corpus item
	elif channelRenderMethod in ["mix", "stereo", "targetoutputmix"]:
		nchnls = 2 # mono or stereo depending on the corpus sf
	elif channelRenderMethod == "oneChannelPerVoice":
		nchnls = cpsLength
	elif channelRenderMethod == "oneChannelPerOverlap":
		nchnls = maxOverlaps + 1
	elif channelRenderMethod == "oneChannelPerInstrument":
		nchnls = len(instruments.instruments)
	elif channelRenderMethod == "oneChannelPerClassification":
		nchnls = numberClasses
	else:
		util.error("csdrenderer", "No known channel render method '%s'\n" % channelRenderMethod)
	if bits == 16:
		bitflag = '-s'
	elif bits == 24:
		bitflag = '-3'
	elif bits == 32:
		bitflag = '-f'
	fh = open(outputCsdPath, 'w')
	fh.write('''<CsoundSynthesizer>
<CsOptions>
-o "%s" --format=%s %s --omacro:channelRenderMethod=0 --omacro:durationStretchMethod=0 --omacro:useTargetAmplitude=%i
</CsOptions>
<CsInstruments>

sr = %i
ksmps = %i
nchnls = %i
giNoteCounter init 1
gkTargetRms init 0.
0dbfs = 1

opcode getTargetDescriptorFromTable, k, i
	iftable xin
	kabstime timek
	kdesc table3 kabstime, iftable, 0
	printks "ktime = %%.5f val = %%.5f\\n", 0.1, kabstime, kdesc
	xout kdesc
endop

opcode pvsbuffer_module, a, akkkiiii
	ain, kspeed, kbuflen, kscale, iFFTsize, ioverlap, iwinsize, iwintype xin
	kPhOffset = 0
	ktrig changed kbuflen
	ibuflen init 1
	kspeed init 1
	kscale init 1
	if ktrig == 1 then
		reinit UPDATE
	endif
UPDATE:
	ibuflen = i(kbuflen)
	iphasor ftgenonce 0, 0, 65536, 7, 0, 65536, 1
	aread osciliktp kspeed/ibuflen, iphasor, kPhOffset
	kread downsamp aread
	kread = kread * ibuflen
	aFB init 0
	f_anal pvsanal ain+aFB, iFFTsize, ioverlap, iwinsize, iwintype
	ibuffer, ktime pvsbuffer f_anal, ibuflen
	rireturn
	khandle init ibuffer
	f_buf pvsbufread kread, khandle
	f_scale pvscale f_buf, kscale
	aresyn pvsynth f_scale
	xout aresyn
endop

instr 1
	iCpsSegDur = p3
	iCpsAmpDb = p4
	SCpsFile strget p5
	iStartRead = p6
	iTransposition = semitone(p7)
	iRmsAmp = p8
	iPeakTime = p9
	iEffDur = p10
	iAttackTime = p11
	iDecayTime = p12
	iEnvSlope = p13
	iCorpusIdx = p14
	iInstrumentIdx = p15
	iSimSelectNumb = p16
	iTgtSegDur = p17
	iTgtSegNumb = p18
	iClassification = p19
	SstretchCode strget p20
	SchannelRenderType strget p21
	print giNoteCounter ; used by audioguide for its printed progress bar

	iStrCmpResult strcmp SstretchCode, "transpose"
	if (iStrCmpResult == 0) then
		; TAPE HEAD TIME STRETCHING (transposition change)
		istretch = iCpsSegDur/iTgtSegDur
		iCpsSegDur = iCpsSegDur * (1/istretch)
		iDecayTime = iDecayTime * (1/istretch)
		iTransposition = istretch ; overwrites any ag-supplied transposition!
		p3 = iTgtSegDur
	endif

	; do envelope
	if (iAttackTime == 0 && iDecayTime == 0) then
		aAmp init 1
	elseif (iAttackTime == 0) then
		aAmp linseg 1, iCpsSegDur-iDecayTime, 1, iDecayTime, 0
	elseif (iDecayTime == 0) then
		aAmp linseg 0, iAttackTime, 1, iCpsSegDur-iAttackTime, 1
	else
		aAmp linseg 0, iAttackTime, 1, iCpsSegDur-iDecayTime-iAttackTime, 1, iDecayTime, 0
	endif
	aAmp pow aAmp, iEnvSlope
	aAmp = aAmp * ampdbfs(iCpsAmpDb)

	asnd1 init 0
	asnd2 init 0
	asnd3 init 0
	asnd4 init 0

	; get the input sound for this corpus segment
	iFileChannels filenchnls SCpsFile
	if (iFileChannels == 1) then
		asnd1 diskin2 SCpsFile, iTransposition, iStartRead
		asnd1 = asnd1 * aAmp
	elseif (iFileChannels == 2) then
		asnd1, asnd2 diskin2 SCpsFile, iTransposition, iStartRead
		asnd1 = asnd1 * aAmp
		asnd2 = asnd2 * aAmp
	elseif (iFileChannels == 4) then
		asnd1, asnd2, asnd3, asnd4 diskin2 SCpsFile, iTransposition, iStartRead
		asnd1 = asnd1 * aAmp
		asnd2 = asnd2 * aAmp
		asnd3 = asnd3 * aAmp
		asnd4 = asnd4 * aAmp
	endif

	iStrCmpResult strcmp SstretchCode, "pv"
	if (iStrCmpResult == 0) then
		; DO PHASE VOCODER TIME STRETCHING
		istretch = iCpsSegDur/iTgtSegDur
		iCpsSegDur = iCpsSegDur * (1/istretch)
		iDecayTime = iDecayTime * (1/istretch)
		p3 = iTgtSegDur
		kbuflen = 1
		if (iFileChannels == 1) then
			asnd1 pvsbuffer_module asnd1, istretch, kbuflen, iTransposition, 1024, 256, 1024, 1
		elseif (iFileChannels == 2) then
			asnd1 pvsbuffer_module asnd1, istretch, kbuflen, iTransposition, 1024, 256, 1024, 1
			asnd2 pvsbuffer_module asnd2, istretch, kbuflen, iTransposition, 1024, 256, 1024, 1
		elseif (iFileChannels == 4) then
			asnd1 pvsbuffer_module asnd1, istretch, kbuflen, iTransposition, 1024, 256, 1024, 1
			asnd2 pvsbuffer_module asnd2, istretch, kbuflen, iTransposition, 1024, 256, 1024, 1
			asnd3 pvsbuffer_module asnd3, istretch, kbuflen, iTransposition, 1024, 256, 1024, 1
			asnd4 pvsbuffer_module asnd4, istretch, kbuflen, iTransposition, 1024, 256, 1024, 1
		endif
	endif

	if ($useTargetAmplitude == 1) then
		krmscorpus rms (asnd1)
		kampscalar = gkTargetRms/krmscorpus
		kampscalar limit kampscalar, 0, 1 ; don't boost over 1.0
		printks "tgt = %%.5f cps = %%.5f scalar = %%.5f\\n", 0.1, gkTargetRms, krmscorpus, kampscalar
		if (iFileChannels == 1) then
			asnd1 = asnd1 * kampscalar
		elseif (iFileChannels == 2) then
			asnd1 = asnd1 * kampscalar
			asnd2 = asnd2 * kampscalar
		elseif (iFileChannels == 4) then
			asnd1 = asnd1 * kampscalar
			asnd2 = asnd2 * kampscalar
			asnd3 = asnd3 * kampscalar
			asnd4 = asnd4 * kampscalar
		endif
	endif

	; write to the output channels
	anull init 0
	iStrCmpResult strcmp SchannelRenderType, "corpusmax"
	if (iStrCmpResult == 0) then
		if (iFileChannels == 1) then ; MONO SOUNDS go into ALL CHANNELS
			if (nchnls == 1) then
				out asnd1
			elseif (nchnls == 2) then
				outs asnd1, asnd1
			elseif (nchnls == 4) then
				outq asnd1, asnd1, asnd1, asnd1
			endif
		elseif (iFileChannels == 2) then ; STEREO SOUNDS
			if (nchnls == 1) then
				out asnd1+asnd2
			elseif (nchnls == 2) then
				outs asnd1, asnd2
			elseif (nchnls == 4) then
				outq asnd1, asnd2, asnd1, asnd2
			endif
		elseif (iFileChannels == 4) then ; QUAD SOUNDS
			if (nchnls == 1) then
				out asnd1+asnd2+asnd3+asnd4
			elseif (nchnls == 2) then
				outs asnd1+asnd2, asnd3+asnd4
			elseif (nchnls == 4) then
				outq asnd1, asnd2, asnd3, asnd4
			endif
		endif
	endif

	iStrCmpResult strcmp SchannelRenderType, "stereo"
	if (iStrCmpResult == 0) then
		if (iFileChannels == 1) then ; a MONO file
			outs asnd1, asnd1
		elseif (iFileChannels == 2) then
			outs asnd1, asnd2
		elseif (iFileChannels == 4) then
			outs asnd1+asnd2, asnd3+asnd4
		endif
	endif

	iStrCmpResult strcmp SchannelRenderType, "oneChannelPerVoice"
	if (iStrCmpResult == 0) then
		if (iFileChannels == 1) then
			outch int(p14+1), asnd1
		elseif (iFileChannels == 2) then
			outch int(p14+1), asnd1+asnd2
		elseif (iFileChannels == 4) then
			outch int(p14+1), asnd1+asnd2+asnd3+asnd4
		endif
	endif

	iStrCmpResult strcmp SchannelRenderType, "oneChannelPerOverlap"
	if (iStrCmpResult == 0) then
		if (iFileChannels == 1) then
			outch int(p16+1), asnd1
		elseif (iFileChannels == 2) then
			outch int(p16+1), asnd1+asnd2
		elseif (iFileChannels == 4) then
			outch int(p16+1), asnd1+asnd2+asnd3+asnd4
		endif
	endif

	iStrCmpResult strcmp SchannelRenderType, "oneChannelPerClassification"
	if (iStrCmpResult == 0) then
		if (iFileChannels == 1) then
			outch int(iClassification+1), asnd1
		elseif (iFileChannels == 2) then
			outch int(iClassification+1), asnd1+asnd2
		elseif (iFileChannels == 4) then
			outch int(iClassification+1), asnd1+asnd2+asnd3+asnd4
		endif
	endif

	iStrCmpResult strcmp SchannelRenderType, "oneChannelPerInstrument"
	if (iStrCmpResult == 0) then
		if (iFileChannels == 1) then
			outch int(p15+1), asnd1
		elseif (iFileChannels == 2) then
			outch int(p15+1), asnd1+asnd2
		elseif (iFileChannels == 4) then
			outch int(p15+1), asnd1+asnd2+asnd3+asnd4
		endif
	endif

	iStrCmpResult strcmp SchannelRenderType, "targetoutputmix"
	if (iStrCmpResult == 0) then
		atmp init 0
		if (iFileChannels == 1) then ; a MONO file
			outs atmp, asnd1
		elseif (iFileChannels == 2) then
			outs atmp, asnd1+asnd2
		elseif (iFileChannels == 4) then
			outs atmp, asnd1+asnd2+asnd3+asnd4
		endif
	endif

	giNoteCounter = giNoteCounter+1 ; increment the note counter
endin

instr 2 ; target sound
	iDur = p3
	iScaleDb = p4
	StgtFile strget p5
	iStartRead = p6
	iPlaySoundChn1Bool = p7
	iFileChannels filenchnls StgtFile
	if (iFileChannels == 2) then ; STEREO
		asnd1, asnd2 diskin2 StgtFile, 1, iStartRead
	elseif (iFileChannels == 1) then ; MONO
		asnd1 diskin2 StgtFile, 1, iStartRead
		asnd2 = asnd1 ; equal balance between L and R
	endif
	gkTargetRms rms asnd1+asnd2
	if (iPlaySoundChn1Bool == 1) then
		atmp init 0
		outs (asnd1+asnd2)*ampdbfs(iScaleDb), atmp
	endif
endin

</CsInstruments>
<CsScore>
%s

%s

e
</CsScore>
</CsoundSynthesizer>''' % (outputSoundfilePath, os.path.splitext(outputSoundfilePath)[1][1:], bitflag, useTargetAmplitude, sr, kr, nchnls, instru2helpstring(), scoreText))
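# Hedged example of a generated score line for instr 1 (the values are hypothetical;
# the p-field order follows the reads at the top of instr 1: p4=amp dB, p5=soundfile,
# p6=skip into file, p7=transposition in semitones, p8-p10=rms/peak time/effective dur,
# p11-p13=attack/decay/envelope slope, p14=corpus id, p15=instrument id, p16=overlap
# index, p17/p18=target segment dur/number, p19=classification, p20=stretch code,
# p21=channel render type):
#   i1 0.25 0.5 -6.0 "/corpus/cello.aiff" 1.2 0 0.1 0.05 0.5 0.01 0.05 1 0 0 0 0.5 3 0 "none" "stereo"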
# import audioguide's submodules
from audioguide import sfsegment, concatenativeclasses, simcalc, userinterface, util, descriptordata, anallinkage
# import other modules
import numpy as np
import json

###########################################
## LOAD OPTIONS AND SETUP SDIF-INTERFACE ##
###########################################
ops = concatenativeclasses.parseOptions(opsfile=opspath, defaults=defaultpath, scriptpath=os.path.dirname(__file__))
if 'concateMethod' not in ops.EXPERIMENTAL or ops.EXPERIMENTAL['concateMethod'] != 'framebyframe':
	util.error("CONFIG", "agConcatenateFrames.py only supports frame by frame concatenation, e.g. examples/07-concatenateframes.py.")
p = userinterface.printer(ops.VERBOSITY, os.path.dirname(__file__), ops.HTML_LOG_FILEPATH)
p.printProgramInfo(audioguide.__version__)
AnalInterface = ops.createAnalInterface(p)
p.middleprint('EXPERIMENTAL FRAME-BASED CONCATENATION')

############
## TARGET ##
############
p.logsection("TARGET")
tgt = sfsegment.target(ops.TARGET, AnalInterface)
tgt.initAnal(AnalInterface, ops, p)
tgt.stageSegments(AnalInterface, ops, p)
def write_concatenate_output_files(self):
	dict_of_files_written = {}
	##########################################
	## sort self.outputEvents by start time ##
	##########################################
	self.outputEvents.sort(key=lambda x: x.timeInScore)
	eventsBool = len(self.outputEvents) > 0
	###########################
	## temporal quantization ##
	###########################
	concatenativeclasses.quantizeTime(self.outputEvents, self.ops.OUTPUTEVENT_QUANTIZE_TIME_METHOD, float(self.ops.OUTPUTEVENT_QUANTIZE_TIME_INTERVAL), self.p)
	##################################
	## CORPUS OUTPUT CLASSIFICATION ##
	##################################
	if eventsBool and self.ops.OUTPUTEVENT_CLASSIFY['numberClasses'] > 1:
		classifications = descriptordata.soundSegmentClassification(self.ops.OUTPUTEVENT_CLASSIFY['descriptors'], [oe.sfseghandle for oe in self.outputEvents], numbClasses=self.ops.OUTPUTEVENT_CLASSIFY['numberClasses'])
		for cidx, classified in enumerate(classifications):
			self.outputEvents[cidx].classification = int(classified)
	#########################################
	## target signal decomposition cleanup ##
	#########################################
	if self.tgt.decompose != {}:
		for oeidx, oe in enumerate(self.outputEvents):
			self.outputEvents[oeidx].decomposedstream = int(oe.timeInScore / self.tgt.decompose['origduration'])
			oe.timeInScore = oe.timeInScore % self.tgt.decompose['origduration']
		self.tgt.filename = self.tgt.decompose['origfilename']
		self.tgt.startSec = 0
		self.tgt.endSec = self.tgt.decompose['origduration']
		self.AnalInterface.rawData[self.tgt.decompose['origfilename']] = {'info': {'channels': 1, 'lengthsec': self.tgt.decompose['origduration']}}
	#########################
	## CREATE OUTPUT FILES ##
	#########################
	self.p.logsection("OUTPUT FILES")
	allusedcpsfiles = list(set([oe.filename for oe in self.outputEvents]))
	#################
	## BACH output ##
	#################
	if self.ops.BACH_FILEPATH != None:
		self.instruments.write(self.ops.get_outputfile('BACH_FILEPATH'), self.tgt.segs, self.cps.data['vcToCorpusName'], self.outputEvents, self.ops.BACH_SLOTS_MAPPING, self.ops.BACH_DB_TO_VELOCITY_BREAKPOINTS, self.ops.BACH_TARGET_STAFF, self.ops.BACH_CORPUS_STAFF, addTarget=self.ops.BACH_INCLUDE_TARGET)
		dict_of_files_written['BACH_FILEPATH'] = self.ops.get_outputfile('BACH_FILEPATH')
	################
	## AAF output ##
	################
	if self.ops.AAF_FILEPATH != None:
		import audioguide.fileoutput.aaf as aaf
		this_aaf = aaf.output(self.ops.get_outputfile('AAF_FILEPATH'))
		# add target?
		if self.ops.AAF_INCLUDE_TARGET:
			this_aaf.addSoundfileResource(self.tgt.filename, self.AnalInterface.rawData[self.tgt.filename]['info'])
			sorted_tgt_tracks = concatenativeclasses.sortTargetSegmentsIntoTracks(self.tgt.segs, "minimum")
			this_aaf.add_tracks(sorted_tgt_tracks)
		# add selected corpus sounds
		for cpsfile in allusedcpsfiles:
			this_aaf.addSoundfileResource(cpsfile, self.AnalInterface.rawData[cpsfile]['info'])
		sorted_cps_tracks = concatenativeclasses.sortOutputEventsIntoTracks(self.outputEvents, self.ops.AAF_CPSTRACK_METHOD, self.cps.data['vcToCorpusName'])
		this_aaf.add_tracks(sorted_cps_tracks)
		this_aaf.done(self.ops.AAF_AUTOLAUNCH)
		dict_of_files_written['AAF_FILEPATH'] = self.ops.get_outputfile('AAF_FILEPATH')
		self.p.log("Wrote aaf file %s\n" % self.ops.get_outputfile('AAF_FILEPATH'))
	################
	## RPP output ##
	################
	if self.ops.RPP_FILEPATH != None:
		import audioguide.fileoutput.reaper as rpp
		this_rpp = rpp.output(self.ops.get_outputfile('RPP_FILEPATH'))
		# add target?
		if self.ops.RPP_INCLUDE_TARGET:
			this_rpp.add_tracks(concatenativeclasses.sortTargetSegmentsIntoTracks(self.tgt.segs, "minimum"))
		# add selected corpus sounds
		this_rpp.add_tracks(concatenativeclasses.sortOutputEventsIntoTracks(self.outputEvents, self.ops.RPP_CPSTRACK_METHOD, self.cps.data['vcToCorpusName']))
		this_rpp.write(self.ops.RPP_AUTOLAUNCH)
		dict_of_files_written['RPP_FILEPATH'] = self.ops.get_outputfile('RPP_FILEPATH')
		self.p.log("Wrote rpp file %s\n" % self.ops.get_outputfile('RPP_FILEPATH'))
	######################
	## dict output file ##
	######################
	if self.ops.DICT_OUTPUT_FILEPATH != None:
		output = {}
		output['opsfilename'] = self.ops.ops_file_path
		output['opsfiledata'] = self.ops.opsfileAsString
		# make the target segment dict list
		self.tgt.segs.sort(key=operator.attrgetter('segmentStartSec'))
		tgtSegDataList = []
		for ts in self.tgt.segs:
			thisSeg = {'startSec': ts.segmentStartSec, 'endSec': ts.segmentEndSec}
			thisSeg['power'] = ts.desc.get('power-seg')
			thisSeg['numberSelectedUnits'] = ts.numberSelectedUnits
			thisSeg['has_been_mixed'] = ts.has_been_mixed
			tgtSegDataList.append(thisSeg)
		# finish up
		output['target'] = {
			'filename': self.tgt.filename,
			'sfSkip': self.tgt.startSec,
			'duration': self.tgt.endSec - self.tgt.startSec,
			'segs': tgtSegDataList,
			'fileduation': self.AnalInterface.rawData[self.tgt.filename]['info']['lengthsec'],
			'chn': self.AnalInterface.rawData[self.tgt.filename]['info']['channels']}
		output['corpus_file_list'] = list(set(allusedcpsfiles))
		output['selectedEvents'] = [oe.makeDictOutput() for oe in self.outputEvents]
		if eventsBool:
			output['outputparse'] = {
				'simultaneousSelections': int(max([d['simultaneousSelectionNumber'] for d in output['selectedEvents']]) + 1),
				'classifications': max(self.ops.OUTPUTEVENT_CLASSIFY['numberClasses'], 1),
				'corpusIds': int(max([d['corpusIdNumber'] for d in output['selectedEvents']]) + 1)}
		fh = open(self.ops.get_outputfile('DICT_OUTPUT_FILEPATH'), 'w')
		json.dump(output, fh)
		fh.close()
		dict_of_files_written['DICT_OUTPUT_FILEPATH'] = self.ops.get_outputfile('DICT_OUTPUT_FILEPATH')
		self.p.log("Wrote JSON dict file %s\n" % self.ops.get_outputfile('DICT_OUTPUT_FILEPATH'))
	####################################
	## maxmsp list output for gilbert ##
	####################################
	if self.ops.MAXMSP_OUTPUT_FILEPATH != None:
		output = {}
		output['target_file'] = [self.tgt.filename, self.tgt.startSec * 1000., self.tgt.endSec * 1000.]
		output['events'] = [oe.makeMaxMspListOutput() for oe in self.outputEvents]
		output['corpus_files'] = allusedcpsfiles
		fh = open(self.ops.get_outputfile('MAXMSP_OUTPUT_FILEPATH'), 'w')
		json.dump(output, fh)
		fh.close()
		dict_of_files_written['MAXMSP_OUTPUT_FILEPATH'] = self.ops.get_outputfile('MAXMSP_OUTPUT_FILEPATH')
		self.p.log("Wrote MAX/MSP JSON lists to file %s\n" % self.ops.get_outputfile('MAXMSP_OUTPUT_FILEPATH'))
	###################################
	## superimpose label output file ##
	###################################
	if self.ops.OUTPUT_LABEL_FILEPATH != None:
		fh = open(self.ops.get_outputfile('OUTPUT_LABEL_FILEPATH'), 'w')
		fh.write(''.join([oe.makeLabelText() for oe in self.outputEvents]))
		fh.close()
		dict_of_files_written['OUTPUT_LABEL_FILEPATH'] = self.ops.get_outputfile('OUTPUT_LABEL_FILEPATH')
		self.p.log("Wrote superimposition label file %s\n" % self.ops.get_outputfile('OUTPUT_LABEL_FILEPATH'))
	#######################################
	## corpus segmented features as json ##
	#######################################
	if self.ops.CORPUS_SEGMENTED_FEATURES_JSON_FILEPATH != None:
		fh = open(self.ops.get_outputfile('CORPUS_SEGMENTED_FEATURES_JSON_FILEPATH'), 'w')
		alldata = {}
		for c in self.cps.postLimitSegmentNormList:
			descs = {}
			for name, obj in c.desc.nameToObjMap.items():
				if name.find('-seg') != -1:
					descs[name] = obj.get(0, c.desc.len)
			alldata[(c.filename + '@' + str(c.segmentStartSec))] = descs
		json.dump(alldata, fh)
		fh.close()
		dict_of_files_written['CORPUS_SEGMENTED_FEATURES_JSON_FILEPATH'] = self.ops.get_outputfile('CORPUS_SEGMENTED_FEATURES_JSON_FILEPATH')
		self.p.log("Wrote corpus segmented features file %s\n" % self.ops.get_outputfile('CORPUS_SEGMENTED_FEATURES_JSON_FILEPATH'))
	######################
	## lisp output file ##
	######################
	if self.ops.LISP_OUTPUT_FILEPATH != None:
		fh = open(self.ops.get_outputfile('LISP_OUTPUT_FILEPATH'), 'w')
		fh.write('(' + ''.join([oe.makeLispText() for oe in self.outputEvents]) + ')')
		fh.close()
		dict_of_files_written['LISP_OUTPUT_FILEPATH'] = self.ops.get_outputfile('LISP_OUTPUT_FILEPATH')
		self.p.log("Wrote lisp output file %s\n" % self.ops.get_outputfile('LISP_OUTPUT_FILEPATH'))
	########################################
	## data from segmentation file output ##
	########################################
	if self.ops.DATA_FROM_SEGMENTATION_FILEPATH != None:
		fh = open(self.ops.get_outputfile('DATA_FROM_SEGMENTATION_FILEPATH'), 'w')
		for line in [oe.makeSegmentationDataText() for oe in self.outputEvents]:
			fh.write(line)
		fh.close()
		dict_of_files_written['DATA_FROM_SEGMENTATION_FILEPATH'] = self.ops.get_outputfile('DATA_FROM_SEGMENTATION_FILEPATH')
		self.p.log("Wrote data from segmentation file to textfile %s\n" % self.ops.get_outputfile('DATA_FROM_SEGMENTATION_FILEPATH'))
	############################
	## csound CSD output file ##
	############################
	if eventsBool and self.ops.CSOUND_CSD_FILEPATH != None and self.ops.CSOUND_RENDER_FILEPATH != None:
		from audioguide.fileoutput import csoundinterface as csd
		maxOverlaps = np.max([oe.simSelects for oe in self.outputEvents])
		csSco = 'i2 0. %f %f "%s" %f %i\n\n' % (self.tgt.endSec - self.tgt.startSec, self.tgt.whole.envDb, self.tgt.filename, self.tgt.startSec, int(self.ops.CSOUND_CHANNEL_RENDER_METHOD == 'targetoutputmix'))
		# just in case there are negative p2 times!
		minTime = min([oe.timeInScore for oe in self.outputEvents])
		if minTime < 0:
			for oe in self.outputEvents:
				oe.timeInScore -= minTime
		csSco += ''.join([oe.makeCsoundOutputText(self.ops.CSOUND_CHANNEL_RENDER_METHOD) for oe in self.outputEvents])
		csd.makeConcatenationCsdFile(
			self.ops.get_outputfile('CSOUND_CSD_FILEPATH', valid_extensions=['.csd']),
			self.ops.get_outputfile('CSOUND_RENDER_FILEPATH', valid_extensions=['.wav', '.aif', '.aiff']),
			self.ops.CSOUND_CHANNEL_RENDER_METHOD, self.ops.CSOUND_SR, self.ops.CSOUND_KSMPS, csSco,
			self.cps.len, set([oe.sfchnls for oe in self.outputEvents]), maxOverlaps,
			self.instruments, self.ops.OUTPUTEVENT_CLASSIFY['numberClasses'], bits=self.ops.CSOUND_BITS)
		dict_of_files_written['CSOUND_CSD_FILEPATH'] = self.ops.get_outputfile('CSOUND_CSD_FILEPATH')
		self.p.log("Wrote csound csd file %s\n" % self.ops.get_outputfile('CSOUND_CSD_FILEPATH'))
		if self.ops.CSOUND_RENDER_FILEPATH != None:
			csd.render(self.ops.get_outputfile('CSOUND_CSD_FILEPATH', valid_extensions=['.csd']), len(self.outputEvents), printerobj=self.p)
			self.p.log("Rendered csound soundfile output %s\n" % self.ops.get_outputfile('CSOUND_RENDER_FILEPATH'))
			dict_of_files_written['CSOUND_RENDER_FILEPATH'] = self.ops.get_outputfile('CSOUND_RENDER_FILEPATH')
			if self.ops.CSOUND_NORMALIZE:
				csd.normalize(self.ops.get_outputfile('CSOUND_RENDER_FILEPATH'), db=self.ops.CSOUND_NORMALIZE_PEAK_DB)
	################################
	## csound simple score output ##
	################################
	if eventsBool and self.ops.CSOUND_SCORE_FILEPATH != None:
		from audioguide.fileoutput import csoundinterface as csd
		csSco = csd.instru2helpstring() + '\n'
		csSco += ''.join([oe.makeCsoundOutputText(self.ops.CSOUND_CHANNEL_RENDER_METHOD) for oe in self.outputEvents])
		fh = open(self.ops.get_outputfile('CSOUND_SCORE_FILEPATH'), 'w')
		fh.write(csSco)
		fh.close()
	#######################
	## copy options file ##
	#######################
	if self.ops.COPY_OPTIONS_FILEPATH != None and self.ops.ops_file_path != None:
		import shutil
		shutil.copy(self.ops.ops_file_path, self.ops.get_outputfile('COPY_OPTIONS_FILEPATH'))
		dict_of_files_written['COPY_OPTIONS_FILEPATH'] = self.ops.get_outputfile('COPY_OPTIONS_FILEPATH')
	####################
	## close log file ##
	####################
	if self.ops.HTML_LOG_FILEPATH != None:
		self.p.writehtmllog(self.ops.get_outputfile('HTML_LOG_FILEPATH', valid_extensions=['.html']))
		dict_of_files_written['HTML_LOG_FILEPATH'] = self.ops.get_outputfile('HTML_LOG_FILEPATH')
	#######################################
	## check for lack of selected events ##
	#######################################
	if not eventsBool and self.ops.HTML_LOG_FILEPATH != None:
		util.error('CONCATENATION', "No segments were selected during concatenation. Check the log file %s for details." % self.ops.HTML_LOG_FILEPATH)
	elif not eventsBool:
		util.error('CONCATENATION', 'No segments were selected during concatenation. Try enabling the LOG file output to help figure out why. Use HTML_LOG_FILEPATH="valid/path"')
	if eventsBool and self.ops.CSOUND_CSD_FILEPATH != None and self.ops.CSOUND_RENDER_FILEPATH != None and self.ops.CSOUND_PLAY_RENDERED_FILE:
		csd.playFile(self.ops.get_outputfile('CSOUND_RENDER_FILEPATH'))
	return dict_of_files_written
sys.path.append(libpath)
# import audioguide's submodules
from audioguide import sfsegment, concatenativeclasses, simcalc, userinterface, util, descriptordata, anallinkage, musicalwriting
# import other modules
import numpy as np
import json

###########################################
## LOAD OPTIONS AND SETUP SDIF-INTERFACE ##
###########################################
ops = concatenativeclasses.parseOptions(opsfile=opspath, defaults=defaultpath, scriptpath=os.path.dirname(__file__))
if 'concateMethod' in ops.EXPERIMENTAL and ops.EXPERIMENTAL['concateMethod'] == 'framebyframe':
	util.error("CONFIG", "Frame by frame concatenation is only possible with the agConcatenateFrames.py script.")
p = userinterface.printer(ops.VERBOSITY, os.path.dirname(__file__), ops.HTML_LOG_FILEPATH)
p.printProgramInfo(audioguide.__version__)
AnalInterface = ops.createAnalInterface(p)
p.middleprint('SOUNDFILE CONCATENATION')

############
## TARGET ##
############
p.logsection("TARGET")
tgt = sfsegment.target(ops.TARGET, AnalInterface)
tgt.initAnal(AnalInterface, ops, p)
tgt.stageSegments(AnalInterface, ops, p)