Example #1
def main(input_filename, output_filename, break_filename, break_parts, measures, mix):
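    # Load both tracks and run them through the Echo Nest analyzer.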
    audiofile = audio.LocalAudioFile(input_filename)
    sample_rate = audiofile.sampleRate
    breakfile = audio.LocalAudioFile(break_filename)
    if breakfile.numChannels == 1:
        breakfile = mono_to_stereo(breakfile)
    num_channels = audiofile.numChannels
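    # Split the drum break into individual hits and work out how many hits fall on each beat of the input.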
    drum_data = split_break(breakfile, break_parts)
    hits_per_beat = int(break_parts / (4 * measures))
    bars = audiofile.analysis.bars
    out_shape = (len(audiofile) + 100000, num_channels)
    out = audio.AudioData(shape=out_shape, sampleRate=sample_rate, numChannels=num_channels)
    if not bars:
        print "Didn't find any bars in this analysis!"
        print "No output."
        sys.exit(-1)
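    # For every beat in every bar except the last, lay the matching slice of the drum break over that beat.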
    for bar in bars[:-1]:
        beats = bar.children()
        for i in range(len(beats)):
            try:
                break_index = ((bar.local_context()[0] % measures) * 4) + (i % 4)
            except ValueError:
                break_index = i % 4
            tats = range((break_index) * hits_per_beat, (break_index + 1) * hits_per_beat)
            drum_samps = sum([len(drum_data[x]) for x in tats])
            beat_samps = len(audiofile[beats[i]])
            beat_shape = (beat_samps, num_channels)
            tat_shape = (float(beat_samps / hits_per_beat), num_channels)
            beat_data = audio.AudioData(shape=beat_shape, sampleRate=sample_rate, numChannels=num_channels)
            for j in tats:
                tat_data = audio.AudioData(shape=tat_shape, sampleRate=sample_rate, numChannels=num_channels)
                if drum_samps > beat_samps / hits_per_beat:
                    # truncate drum hits to fit beat length
                    tat_data.data = drum_data[j].data[: len(tat_data)]
                elif drum_samps < beat_samps / hits_per_beat:
                    # space out drum hits to fit beat length
                    # temp_data = add_fade_out(drum_data[j])
                    tat_data.append(drum_data[j])
                tat_data.endindex = len(tat_data)
                beat_data.append(tat_data)
                del (tat_data)
            # account for rounding errors
            beat_data.endindex = len(beat_data)
            mixed_beat = audio.mix(beat_data, audiofile[beats[i]], mix=mix)
            del (beat_data)
            out.append(mixed_beat)
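    # Grab everything from the start of the last bar to the end of the track, append it, and write the output.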
    finale = bars[-1].start + bars[-1].duration
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start, audiofile.analysis.duration - audiofile.analysis.bars[-1].start
    )
    last_data = audio.getpieces(audiofile, [last])
    out.append(last_data)
    out.encode(output_filename)
Example #2
def main(username):
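  # Collect every file under f/ whose name contains "mix" and send it to the analyzer.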
  auds=[]
  p = re.compile("mix")
  files = os.listdir("f")
  for file in files:
    m = p.findall(file)
    if m:
      print "processing f/"+file
      try:
        auds.append(audio.LocalAudioFile("f/"+file))
      except Exception:
        print "failed to include "+file
  
  auds.sort(key=keysig)
  
  mixed = []
  end = None
  previous = None
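  # Walk the tracks in sorted order: crossfade the fifth-from-last bar of the previous track with the
  # first bar of the current one, then append the current track's bars[1:-5].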
  for aud in auds:
    bars = aud.analysis.bars
    try:
      if end is not None and previous is not None:
        mix = audio.mix(audio.getpieces(previous, [end]), audio.getpieces(aud, [bars[0]]), 0.5)
        mixed.append(mix)
      else:
        mixed.append(audio.getpieces(aud, [bars[0]]))
    except Exception:
      print "failed to create mix bar"

    try:
      mixed.append(audio.getpieces(aud, bars[1:-5]))
      end = bars[-5]
      previous = aud
    except Exception:
      print "unable to append bars"

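  # Stitch all the collected pieces together and write the result to <username>.mp3.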
  out = audio.assemble(mixed, numChannels=2)
  out.encode(username+".mp3")
Example #3
            else:
                silence_shape = (reference_data.endindex, )
            new_segment = audio.AudioData(shape=silence_shape,
                                          sampleRate=out.sampleRate,
                                          numChannels=segment_data.numChannels)
            new_segment.append(segment_data)
            new_segment.endindex = len(new_segment)
            segment_data = new_segment
        elif reference_data.endindex < segment_data.endindex:
            # we need to cut segment2 shorter, because it is longer than segment1
            index = slice(0, int(reference_data.endindex), 1)
            segment_data = audio.AudioData(None,
                                           segment_data.data[index],
                                           sampleRate=segment_data.sampleRate)

        mixed_data = audio.mix(segment_data, reference_data, mix=mix)
        out.append(mixed_data)

out.encode(outputFilename)
"""
RENDER TO FILES:

#this code makes segclusters, which is a list of each cluster of segments. 
segclusters = [[] for _ in range(num_clusters)]  # one separate list per cluster; [[]]*n would alias a single list
for s in range(0, len(idx)):
	segment = song.analysis.segments[s]
	cluster_number = idx[s]
	segclusters[cluster_number].append(song[segment])


# render each out to files
Example #4
	
	# This is the add silence solution:
	if add_silence: 
		if reference_data.endindex > segment_data.endindex:
			# we need to add silence, because segment1 is longer
			if num_channels > 1:
				silence_shape = (reference_data.endindex,num_channels)
			else:
				silence_shape = (reference_data.endindex,)
			new_segment = audio.AudioData(shape=silence_shape,
									sampleRate=out.sampleRate,
									numChannels=segment_data.numChannels)
			new_segment.append(segment_data)
			new_segment.endindex = len(new_segment)
			segment_data = new_segment
		elif reference_data.endindex < segment_data.endindex:
			# we need to cut segment2 shorter, because it is longer than segment1
			index = slice(0, int(reference_data.endindex), 1)
			segment_data = audio.AudioData(None, segment_data.data[index], sampleRate=segment_data.sampleRate)
	else: 		  
		# TODO: stretch samples to fit.
		# haven't written this part yet.
		segment_data = segment_data

	# mix the original and the remix
	mixed_data = audio.mix(segment_data,reference_data,mix=mix)
	out.append(mixed_data)

# render output
out.encode(outputFilename)
Example #5
 def run(self, mix=0.5, envelope=False):
     dur = len(self.input_a.data) + 100000 # another two seconds
     # determine shape of new array
     if len(self.input_a.data.shape) > 1:
         new_shape = (dur, self.input_a.data.shape[1])
         new_channels = self.input_a.data.shape[1]
     else:
         new_shape = (dur,)
         new_channels = 1
     out = audio.AudioData(shape=new_shape,
                         sampleRate=self.input_b.sampleRate,
                         numChannels=new_channels)
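     # For each segment of input A, find the best-matching segment of input B and mix it over the original.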
     for a in self.segs_a:
         seg_index = a.absolute_context()[0]
         # find best match from segs in B
         distance_matrix = self.calculate_distances(a)
         distances = [numpy.sqrt(x[0]+x[1]+x[2]) for x in distance_matrix]
         match = self.segs_b[distances.index(min(distances))]
         segment_data = self.input_b[match]
         reference_data = self.input_a[a]
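         # Match lengths: pad the chosen segment with silence if it is shorter than the reference, or truncate it if it is longer.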
         if segment_data.endindex < reference_data.endindex:
             if new_channels > 1:
                 silence_shape = (reference_data.endindex,new_channels)
             else:
                 silence_shape = (reference_data.endindex,)
             new_segment = audio.AudioData(shape=silence_shape,
                                     sampleRate=out.sampleRate,
                                     numChannels=segment_data.numChannels)
             new_segment.append(segment_data)
             new_segment.endindex = len(new_segment)
             segment_data = new_segment
         elif segment_data.endindex > reference_data.endindex:
             index = slice(0, int(reference_data.endindex), 1)
             segment_data = audio.AudioData(None,segment_data.data[index],
                                     sampleRate=segment_data.sampleRate)
         if envelope:
             # db -> voltage ratio http://www.mogami.com/e/cad/db.html
             linear_max_volume = pow(10.0,a.loudness_max/20.0)
             linear_start_volume = pow(10.0,a.loudness_begin/20.0)
             if(seg_index == len(self.segs_a)-1): # if this is the last segment
                 linear_next_start_volume = 0
             else:
                 linear_next_start_volume = pow(10.0,self.segs_a[seg_index+1].loudness_begin/20.0)
                 pass
             when_max_volume = a.time_loudness_max
             # Count # of ticks I wait doing volume ramp so I can fix up rounding errors later.
             ss = 0
             # Set volume of this segment. Start at the start volume, ramp up to the max volume , then ramp back down to the next start volume.
             cur_vol = float(linear_start_volume)
             # Do the ramp up to max from start
             samps_to_max_loudness_from_here = int(segment_data.sampleRate * when_max_volume)
             if(samps_to_max_loudness_from_here > 0):
                 how_much_volume_to_increase_per_samp = float(linear_max_volume - linear_start_volume)/float(samps_to_max_loudness_from_here)
                 for samps in xrange(samps_to_max_loudness_from_here):
                     try:
                         segment_data.data[ss] *= cur_vol
                     except IndexError:
                         pass
                     cur_vol = cur_vol + how_much_volume_to_increase_per_samp
                     ss = ss + 1
             # Now ramp down from max to start of next seg
             samps_to_next_segment_from_here = int(segment_data.sampleRate * (a.duration-when_max_volume))
             if(samps_to_next_segment_from_here > 0):
                 how_much_volume_to_decrease_per_samp = float(linear_max_volume - linear_next_start_volume)/float(samps_to_next_segment_from_here)
                 for samps in xrange(samps_to_next_segment_from_here):
                     cur_vol = cur_vol - how_much_volume_to_decrease_per_samp
                     try:
                         segment_data.data[ss] *= cur_vol
                     except IndexError:
                         pass
                     ss = ss + 1
         mixed_data = audio.mix(segment_data,reference_data,mix=mix)
         out.append(mixed_data)
     out.encode(self.output_filename)
Example #6
def main(input_filename, output_filename, break_filename, break_parts,
         measures, mix):

    # This takes the input tracks, sends them to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)
    sample_rate = audiofile.sampleRate
    breakfile = audio.LocalAudioFile(break_filename)

    # This converts the break to stereo, if it is mono
    if breakfile.numChannels == 1:
        breakfile = mono_to_stereo(breakfile)

    # This gets the number of channels in the main file
    num_channels = audiofile.numChannels

    # This splits the break into each beat
    drum_data = split_break(breakfile, break_parts)
    hits_per_beat = int(break_parts / (4 * measures))
    # This gets the bars from the input track
    bars = audiofile.analysis.bars

    # This creates the 'shape' of the new array.
    # (Shape is a tuple (x, y) that indicates the length per channel of the audio file)
    out_shape = (len(audiofile) + 100000, num_channels)
    # This creates a new AudioData array to write data to
    out = audio.AudioData(shape=out_shape,
                          sampleRate=sample_rate,
                          numChannels=num_channels)
    if not bars:
        # If the analysis can't find any bars, stop!
        # (This might happen with really ambient music)
        print "Didn't find any bars in this analysis!"
        print "No output."
        sys.exit(-1)

    # This is where the magic happens:
    # For every beat in every bar except the last bar,
    # map the tatums of the break to the tatums of the beat
    for bar in bars[:-1]:
        # This gets the beats in the bar, and loops over them
        beats = bar.children()
        for i in range(len(beats)):
            # This gets the index of matching beat in the break
            try:
                break_index = ((bar.local_context()[0] % measures) * 4) + (i % 4)
            except ValueError:
                break_index = i % 4
            # This gets the tatums from the beat of the break
            tats = range((break_index) * hits_per_beat,
                         (break_index + 1) * hits_per_beat)
            # This gets the number of samples in each tatum
            drum_samps = sum([len(drum_data[x]) for x in tats])

            # This gets the number of samples and the shape of the beat from the original track
            beat_samps = len(audiofile[beats[i]])
            beat_shape = (beat_samps, num_channels)

            # This gets the shape of each tatum
            tat_shape = (float(beat_samps / hits_per_beat), num_channels)

            # This creates the new AudioData that will be filled with chunks of the drum break
            beat_data = audio.AudioData(shape=beat_shape,
                                        sampleRate=sample_rate,
                                        numChannels=num_channels)
            for j in tats:
                # This creates an audioData for each tatum
                tat_data = audio.AudioData(shape=tat_shape,
                                           sampleRate=sample_rate,
                                           numChannels=num_channels)
                # This corrects for length / timing:
                # If the original is shorter than the break, truncate drum hits to fit beat length
                if drum_samps > beat_samps / hits_per_beat:
                    tat_data.data = drum_data[j].data[:len(tat_data)]
                # If the original is longer, space out drum hits to fit beat length
                elif drum_samps < beat_samps / hits_per_beat:
                    tat_data.append(drum_data[j])

                # This adds each new tatum to the new beat.
                tat_data.endindex = len(tat_data)
                beat_data.append(tat_data)
                del (tat_data)

            # This corrects for rounding errors
            beat_data.endindex = len(beat_data)

            # This mixes the new beat data with the input data, and appends it to the final file
            mixed_beat = audio.mix(beat_data, audiofile[beats[i]], mix=mix)
            del (beat_data)
            out.append(mixed_beat)

    # This works out the last beat and appends it to the final file
    finale = bars[-1].start + bars[-1].duration
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start,
        audiofile.analysis.duration - audiofile.analysis.bars[-1].start)
    last_data = audio.getpieces(audiofile, [last])
    out.append(last_data)

    # This writes the newly created audio to the given file.
    out.encode(output_filename)
Example #7
    def compileSection(self, j, section, hats):
        """
            Compiles one "section" of dubstep - that is, one section (verse/chorus) of the original song,
            but appropriately remixed as dubstep.

            Chooses appropriate samples from the section of the original song in three keys (P1, m3, m7)
            then plays them back in order in the generic "dubstep" pattern (all 8th notes):

            |                         |                         :|
            |: 1  1  1  1  1  1  1  1 | m3 m3 m3 m3 m7 m7 m7 m7 :| x2
            |                         |                         :|

            On the first iteration, the dubstep bar is mixed with a "splash" sound - high-passed percussion or whatnot.
            On the second iteration, hats are mixed in on the offbeats and the wubs break on the last beat to let the
            original song's samples shine through for a second, before dropping back down in the next section.

            If samples of one pitch are missing, the searchSamples algorithm tries to find samples
            a fifth up from that pitch that will sound good. (If none exist, it keeps trying, in fifths up the scale.)
            
            If the song is not 4/4, the resulting remix is sped up or slowed down by the appropriate amount.
            (That can get really wonky, but sounds cool sometimes, and fixes a handful of edge cases.)
        """
        onebar = audio.AudioQuantumList()

        s1 = self.searchSamples(j, self.tonic)
        s2 = self.searchSamples(j, (self.tonic + 3) % 12)
        s3 = self.searchSamples(j, (self.tonic + 9) % 12)

        biggest = max([s1, s2, s3])  #for music that's barely tonal
        if not biggest:
            for i in xrange(0, 12):
                biggest = self.searchSamples(j, self.tonic + i)
                if biggest:
                    break

        if not biggest:
            raise Exception('Missing samples in section %s of the song!' % (j + 1))

        if not s1: s1 = biggest
        if not s2: s2 = biggest
        if not s3: s3 = biggest

        if self.template['target'] == "tatums":
            f = 4
            r = 2
        elif self.template['target'] == "beats":
            f = 2
            r = 2
        elif self.template['target'] == "bars":
            f = 1
            r = 1
        for k in xrange(0, r):
            for i in xrange(0, 4 * f):
                onebar.append(s1[i % len(s1)])
            for i in xrange(4 * f, 6 * f):
                onebar.append(s2[i % len(s2)])
            for i in xrange(6 * f, 8 * f):
                onebar.append(s3[i % len(s3)])
        if self.original.analysis.time_signature == 4:
            orig_bar = self.st.shiftTempo(
                audio.getpieces(self.original, onebar),
                self.template['tempo'] / self.tempo)
        else:
            orig_bar = audio.getpieces(self.original, onebar)
            orig_bar = self.st.shiftTempo(
                orig_bar,
                len(orig_bar) /
                ((44100 * 16 * 2 * 60.0) / self.template['tempo']))
        if orig_bar.numChannels == 1:
            orig_bar = self.mono_to_stereo(orig_bar)
        mixfactor = self.mixfactor(onebar)
        a = self.truncatemix(
            audio.mix(
                audio.AudioData(self.sample_path +
                                self.template['wubs'][self.tonic],
                                sampleRate=44100,
                                numChannels=2,
                                verbose=False),
                audio.AudioData(
                    self.sample_path +
                    self.template['splashes'][(j + 1) %
                                              len(self.template['splashes'])],
                    sampleRate=44100,
                    numChannels=2,
                    verbose=False)), orig_bar, mixfactor)
        b = self.truncatemix(
            audio.mix(
                audio.AudioData(self.sample_path +
                                self.template['wub_breaks'][self.tonic],
                                sampleRate=44100,
                                numChannels=2,
                                verbose=False), hats), orig_bar, mixfactor)
        return (a, b)
Example #8
    def compileSection(self, j, section, hats):
        """
            Compiles one "section" of dubstep - that is, one section (verse/chorus) of the original song,
            but appropriately remixed as dubstep.

            Chooses appropriate samples from the section of the original song in three keys (P1, m3, m7)
            then plays them back in order in the generic "dubstep" pattern (all 8th notes):

            |                         |                         :|
            |: 1  1  1  1  1  1  1  1 | m3 m3 m3 m3 m7 m7 m7 m7 :| x2
            |                         |                         :|

            On the first iteration, the dubstep bar is mixed with a "splash" sound - high-passed percussion or whatnot.
            On the second iteration, hats are mixed in on the offbeats and the wubs break on the last beat to let the
            original song's samples shine through for a second, before dropping back down in the next section.

            If samples of one pitch are missing, the searchSamples algorithm tries to find samples
            a fifth up from that pitch that will sound good. (If none exist, it keeps trying, in fifths up the scale.)
            
            If the song is not 4/4, the resulting remix is sped up or slowed down by the appropriate amount.
            (That can get really wonky, but sounds cool sometimes, and fixes a handful of edge cases.)
        """
        onebar = audio.AudioQuantumList()

        s1 = self.searchSamples(j, self.tonic)
        s2 = self.searchSamples(j, (self.tonic + 3) % 12)
        s3 = self.searchSamples(j, (self.tonic + 9) % 12)

        biggest = max([s1, s2, s3]) #for music that's barely tonal
        if not biggest:
            for i in xrange(0, 12):
                biggest = self.searchSamples(j, self.tonic + i)
                if biggest:
                    break

        if not biggest:
            raise Exception('Missing samples in section %s of the song!' % (j + 1))

        if not s1: s1 = biggest
        if not s2: s2 = biggest
        if not s3: s3 = biggest

        if self.template['target'] == "tatums":
            f = 4
            r = 2
        elif self.template['target'] == "beats":
            f = 2
            r = 2
        elif self.template['target'] == "bars":
            f = 1
            r = 1
        for k in xrange(0, r):
            for i in xrange(0, 4*f):
                onebar.append(s1[i % len(s1)])
            for i in xrange(4*f, 6*f):
                onebar.append( s2[i % len(s2)] )
            for i in xrange(6*f, 8*f):
                onebar.append( s3[i % len(s3)] )
        if self.original.analysis.time_signature == 4:
            orig_bar = self.st.shiftTempo(audio.getpieces(self.original, onebar), self.template['tempo']/self.tempo)
        else:
            orig_bar = audio.getpieces(self.original, onebar)
            orig_bar = self.st.shiftTempo(orig_bar, len(orig_bar) / ((44100 * 16 * 2 * 60.0)/self.template['tempo']))
        if orig_bar.numChannels == 1:
            orig_bar = self.mono_to_stereo(orig_bar)
        mixfactor = self.mixfactor(onebar)
        a = self.truncatemix(
                audio.mix(
                    audio.AudioData(
                        self.sample_path + self.template['wubs'][self.tonic], 
                        sampleRate=44100,
                        numChannels=2,
                        verbose=False
                    ),
                    audio.AudioData(
                        self.sample_path + self.template['splashes'][(j+1) % len(self.template['splashes'])],
                        sampleRate=44100,
                        numChannels=2,
                        verbose=False
                    )
                ),
            orig_bar,
            mixfactor
        )
        b = self.truncatemix(
                audio.mix(
                    audio.AudioData(
                        self.sample_path + self.template['wub_breaks'][self.tonic],
                        sampleRate=44100,
                        numChannels=2,
                        verbose=False
                    ),
                    hats
                ),
            orig_bar,
            mixfactor
        )
        return (a, b)
Example #9
    def run(self, mix=0.5, envelope=False):
        # This chunk creates a new array of AudioData to put the resulting resynthesis in:

        # Add two seconds to the length, just in case
        dur = len(self.input_a.data) + 100000 

        # This determines the 'shape' of the new array.
        # (Shape is a tuple (x, y) that indicates the length per channel of the audio file)
        # If we have a stereo shape, copy that shape
        if len(self.input_a.data.shape) > 1:
            new_shape = (dur, self.input_a.data.shape[1])
            new_channels = self.input_a.data.shape[1]
        # If not, make a mono shape
        else:
            new_shape = (dur,)
            new_channels = 1
        # This creates the new AudioData array, based on the new shape
        out = audio.AudioData(shape=new_shape,
                            sampleRate=self.input_b.sampleRate,
                            numChannels=new_channels)

        # Now that we have a properly formed array to put chunks of audio in, 
        # we can start deciding what chunks to put in!

        # This loops over each segment in file A and finds the best matching segment from file B
        for a in self.segs_a:
            seg_index = a.absolute_context()[0]

            # This works out the distances
            distance_matrix = self.calculate_distances(a)
            distances = [numpy.sqrt(x[0]+x[1]+x[2]) for x in distance_matrix]

            # This gets the best match
            match = self.segs_b[distances.index(min(distances))]
            segment_data = self.input_b[match]
            reference_data = self.input_a[a]

            # This corrects for length:  if our new segment is shorter, we add silence
            if segment_data.endindex < reference_data.endindex:
                if new_channels > 1:
                    silence_shape = (reference_data.endindex,new_channels)
                else:
                    silence_shape = (reference_data.endindex,)
                new_segment = audio.AudioData(shape=silence_shape,
                                        sampleRate=out.sampleRate,
                                        numChannels=segment_data.numChannels)
                new_segment.append(segment_data)
                new_segment.endindex = len(new_segment)
                segment_data = new_segment

            # Or, if our new segment is too long, we make it shorter
            elif segment_data.endindex > reference_data.endindex:
                index = slice(0, int(reference_data.endindex), 1)
                segment_data = audio.AudioData(None,segment_data.data[index],
                                        sampleRate=segment_data.sampleRate)
            
            # This applies the volume envelopes from each segment of A to the segment from B.
            if envelope:
                # This gets the maximum volume and starting volume for the segment from A:
                # db -> voltage ratio http://www.mogami.com/e/cad/db.html
                linear_max_volume = pow(10.0,a.loudness_max/20.0)
                linear_start_volume = pow(10.0,a.loudness_begin/20.0)
        
                # This gets the starting volume for the next segment
                if(seg_index == len(self.segs_a)-1): # If this is the last segment, the next volume is zero
                    linear_next_start_volume = 0
                else:
                    linear_next_start_volume = pow(10.0,self.segs_a[seg_index+1].loudness_begin/20.0)
                    pass

                # This gets when the maximum volume occurs in A
                when_max_volume = a.time_loudness_max

                # Count # of ticks I wait doing volume ramp so I can fix up rounding errors later.
                ss = 0
                # This sets the starting volume of this segment.
                cur_vol = float(linear_start_volume)
                # This ramps up to the maximum volume from the start
                samps_to_max_loudness_from_here = int(segment_data.sampleRate * when_max_volume)
                if(samps_to_max_loudness_from_here > 0):
                    how_much_volume_to_increase_per_samp = float(linear_max_volume - linear_start_volume)/float(samps_to_max_loudness_from_here)
                    for samps in xrange(samps_to_max_loudness_from_here):
                        try:
                            # This actually applies the volume modification
                            segment_data.data[ss] *= cur_vol
                        except IndexError:
                            pass
                        cur_vol = cur_vol + how_much_volume_to_increase_per_samp
                        ss = ss + 1
                # This ramps down to the volume at the start of the next segment
                samps_to_next_segment_from_here = int(segment_data.sampleRate * (a.duration-when_max_volume))
                if(samps_to_next_segment_from_here > 0):
                    how_much_volume_to_decrease_per_samp = float(linear_max_volume - linear_next_start_volume)/float(samps_to_next_segment_from_here)
                    for samps in xrange(samps_to_next_segment_from_here):
                        cur_vol = cur_vol - how_much_volume_to_decrease_per_samp
                        try:
                            # This actually applies the volume modification
                            segment_data.data[ss] *= cur_vol
                        except IndexError:
                            pass
                        ss = ss + 1
            
            # This mixes the segment from B with the segment from A, and adds it to the output
            mixed_data = audio.mix(segment_data,reference_data,mix=mix)
            out.append(mixed_data)

        # This writes the newly created audio to the given file.  Phew!
        out.encode(self.output_filename)
Example #10
def dnbify(randombeat):
	print "dnbify"
	
	dnbfile = "mp3/breaks/RC4_Breakbeat_175 (%i).mp3" % randombeat
	dnbloop = audio.LocalAudioFile(dnbfile)
	
	# how many different groups will we cluster our data into?
	num_clusters = 5

	mix = 1.0
		
	dnbouts = []
	for layer in range(0, 2):
		# best_match = 1  # slower, less varied version. Good for b's which are percussion loops
		# best_match = 0 # faster, more varied version, picks a random segment from that cluster. Good for b's which are sample salads. 
		best_match = layer
		print "layer"
		print layer
		
		song1 = dnbloop
		song2 = song
		
		dnbout = audio.AudioData(shape=out_shape, sampleRate=sample_rate,numChannels=num_channels)
		
		# here we just grab the segments that overlap the section
		sectionsegments = song1.analysis.segments
		#for _ in range(3):
		#	sectionsegments.extend(song1.analysis.segments)
		sectionsegments2 = song2.analysis.segments #.that(overlap(section));
		

		# this is just too easy
		# song.analysis.segments.timbre is a list of all 12-valued timbre vectors. 
		# must be converted to a numpy.array() so that kmeans(data, n) is happy
		data = array(sectionsegments.timbre)
		data2 = array(sectionsegments2.timbre)
		
		"""
		# grab timbre data
		# must be converted to a numpy.array() so that kmeans(data, n) is happy
		data = array(song1.analysis.segments.timbre)
		data2 = array(song2.analysis.segments.timbre)
		"""
		
		
		# computing K-Means with k = num_clusters
		centroids,_ = kmeans(data,num_clusters)
		centroids2,_ = kmeans(data2,num_clusters)
		# assign each sample to a cluster
		idx,_ = vq(data,centroids)
		idx2,_ = vq(data2,centroids2)
		
		collection = []
		for c in range(0, num_clusters):
			ccount = 0
			for i in idx:
				if i==c: 
					ccount += 1
			collection.append([ccount, c])
		collection.sort()
		# list of cluster indices from largest to smallest

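		# Pair each of song1's clusters with the nearest unused cluster of song2, by Euclidean distance between centroids.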
		centroid_pairs = []
		for _,c in collection:
			centroid1 = array(centroids[c])
			min_distance = [9999999999,0]
			for ci in range(0,len(centroids2)):
				if ci in [li[1] for li in centroid_pairs]:
					continue
				centroid2 = array(centroids2[ci])
				euclidian_distance = norm(centroid1-centroid2)
				if euclidian_distance < min_distance[0]:
					min_distance = [euclidian_distance, ci]
			centroid_pairs.append([c,min_distance[1]])

		print centroid_pairs
		# now we have a list of paired up cluster indices. Cool.

		# Just so we're clear, we're rebuilding the structure of song1 with segments from song2

		# prepare song2 clusters, 
		segclusters2 = [audio.AudioQuantumList() for _ in centroids2]  # a fresh list per cluster; multiplying would alias one list
		for s2 in range(0,len(idx2)):
			segment2 = song2.analysis.segments[s2]
			cluster2 = idx2[s2]
			segment2.numpytimbre = array(segment2.timbre)
			segclusters2[cluster2].append(segment2)

		
		# for each segment1 in song1, find the timbrely closest segment2 in song2 belonging to the cluster2 with which segment1's cluster1 is paired. 
		for s in range(0,len(idx)):
			segment1 = song1.analysis.segments[s]
			cluster1 = idx[s]
			cluster2 = [li[1] for li in centroid_pairs if li[0]==cluster1][0]

			if(best_match>0):
				# slower, less varied version. Good for b's which are percussion loops
				
				"""
				# there's already a function for this, use that instead: timbre_distance_from
				timbre1 = array(segment1.timbre)
				min_distance = [9999999999999,0]
				for seg in segclusters2[cluster2]:
					timbre2 = seg.numpytimbre
					euclidian_distance = norm(timbre2-timbre1)
					if euclidian_distance < min_distance[0]:
						min_distance = [euclidian_distance, seg]
				bestmatchsegment2 = min_distance[1]
				# we found the segment2 in song2 that best matches segment1
				"""
				
				bestmatches = segclusters2[cluster2].ordered_by(timbre_distance_from(segment1))
				
				if(best_match > 1):
					# if best_match > 1, it randomly grabs from the top best_matches.
					maxmatches = min(best_match, len(bestmatches))
					bestmatchsegment2 = choice(bestmatches[0:maxmatches])
				else:
					# if best_match == 1, it grabs the exact best match
					bestmatchsegment2 = bestmatches[0]
			else:
				# faster, more varied version, picks a random segment from that cluster. Good for sample salads. 
				bestmatchsegment2 = choice(segclusters2[cluster2])
				
			reference_data = song1[segment1]
			segment_data = song2[bestmatchsegment2]
			
			# what to do when segments lengths aren't equal? (almost always)
			# do we add silence? or do we stretch the samples?
			add_silence = True
			
			# This is the add silence solution:
			if add_silence: 
				if reference_data.endindex > segment_data.endindex:
					# we need to add silence, because segment1 is longer
					if num_channels > 1:
						silence_shape = (reference_data.endindex,num_channels)
					else:
						silence_shape = (reference_data.endindex,)
					new_segment = audio.AudioData(shape=silence_shape,
											sampleRate=out.sampleRate,
											numChannels=segment_data.numChannels)
					new_segment.append(segment_data)
					new_segment.endindex = len(new_segment)
					segment_data = new_segment
				elif reference_data.endindex < segment_data.endindex:
					# we need to cut segment2 shorter, because it is longer than segment1
					index = slice(0, int(reference_data.endindex), 1)
					segment_data = audio.AudioData(None, segment_data.data[index], sampleRate=segment_data.sampleRate)
			else: 		  
				# TODO: stretch samples to fit.
				# haven't written this part yet.
				segment_data = segment_data

			# mix the original and the remix
			mixed_data = audio.mix(segment_data,reference_data,mix=mix)
			dnbout.append(mixed_data)
		
		dnbouts.append(dnbout)
	
	print "YEA"
	mixed_dnbouts = audio.AudioData(shape=out_shape, sampleRate=sample_rate,numChannels=num_channels)
	print len(dnbouts[0])
	print len(dnbouts[1])
	#for s in range(0,len(dnbouts[0])):
	#	print s
#		print dnbouts[0]
	#	print dnbouts[1]
	mixed_data = audio.mix(dnbouts[0], dnbouts[1], 0.5)
	mixed_dnbouts.append(mixed_data)
	print "woah"
	
	dnbrepeatout = audio.AudioData(shape=out_shape, sampleRate=sample_rate,numChannels=num_channels)
	for _ in range(4):
		dnbrepeatout.append(mixed_dnbouts)
	print "oh okay"
	return dnbrepeatout
Example #11
    def run(self, mix=0.5, envelope=False):
        # This chunk creates a new array of AudioData to put the resulting resynthesis in:

        # Add two seconds to the length, just in case
        dur = len(self.input_a.data) + 100000

        # This determines the 'shape' of the new array.
        # (Shape is a tuple (x, y) that indicates the length per channel of the audio file)
        # If we have a stereo shape, copy that shape
        if len(self.input_a.data.shape) > 1:
            new_shape = (dur, self.input_a.data.shape[1])
            new_channels = self.input_a.data.shape[1]
        # If not, make a mono shape
        else:
            new_shape = (dur, )
            new_channels = 1
        # This creates the new AudioData array, based on the new shape
        out = audio.AudioData(shape=new_shape,
                              sampleRate=self.input_b.sampleRate,
                              numChannels=new_channels)

        # Now that we have a properly formed array to put chunks of audio in,
        # we can start deciding what chunks to put in!

        # This loops over each segment in file A and finds the best matching segment from file B
        for a in self.segs_a:
            seg_index = a.absolute_context()[0]

            # This works out the distances
            distance_matrix = self.calculate_distances(a)
            distances = [
                numpy.sqrt(x[0] + x[1] + x[2]) for x in distance_matrix
            ]

            # This gets the best match
            match = self.segs_b[distances.index(min(distances))]
            segment_data = self.input_b[match]
            reference_data = self.input_a[a]

            # This corrects for length:  if our new segment is shorter, we add silence
            if segment_data.endindex < reference_data.endindex:
                if new_channels > 1:
                    silence_shape = (reference_data.endindex, new_channels)
                else:
                    silence_shape = (reference_data.endindex, )
                new_segment = audio.AudioData(
                    shape=silence_shape,
                    sampleRate=out.sampleRate,
                    numChannels=segment_data.numChannels)
                new_segment.append(segment_data)
                new_segment.endindex = len(new_segment)
                segment_data = new_segment

            # Or, if our new segment is too long, we make it shorter
            elif segment_data.endindex > reference_data.endindex:
                index = slice(0, int(reference_data.endindex), 1)
                segment_data = audio.AudioData(
                    None,
                    segment_data.data[index],
                    sampleRate=segment_data.sampleRate)

            # This applies the volume envelopes from each segment of A to the segment from B.
            if envelope:
                # This gets the maximum volume and starting volume for the segment from A:
                # db -> voltage ratio http://www.mogami.com/e/cad/db.html
                linear_max_volume = pow(10.0, a.loudness_max / 20.0)
                linear_start_volume = pow(10.0, a.loudness_begin / 20.0)

                # This gets the starting volume for the next segment
                if (seg_index == len(self.segs_a) - 1
                    ):  # If this is the last segment, the next volume is zero
                    linear_next_start_volume = 0
                else:
                    linear_next_start_volume = pow(
                        10.0, self.segs_a[seg_index + 1].loudness_begin / 20.0)
                    pass

                # This gets when the maximum volume occurs in A
                when_max_volume = a.time_loudness_max

                # Count # of ticks I wait doing volume ramp so I can fix up rounding errors later.
                ss = 0
                # This sets the starting volume of this segment.
                cur_vol = float(linear_start_volume)
                # This ramps up to the maximum volume from the start
                samps_to_max_loudness_from_here = int(segment_data.sampleRate *
                                                      when_max_volume)
                if (samps_to_max_loudness_from_here > 0):
                    how_much_volume_to_increase_per_samp = float(
                        linear_max_volume - linear_start_volume) / float(
                            samps_to_max_loudness_from_here)
                    for samps in xrange(samps_to_max_loudness_from_here):
                        try:
                            # This actually applies the volume modification
                            segment_data.data[ss] *= cur_vol
                        except IndexError:
                            pass
                        cur_vol = cur_vol + how_much_volume_to_increase_per_samp
                        ss = ss + 1
                # This ramps down to the volume at the start of the next segment
                samps_to_next_segment_from_here = int(
                    segment_data.sampleRate * (a.duration - when_max_volume))
                if (samps_to_next_segment_from_here > 0):
                    how_much_volume_to_decrease_per_samp = float(
                        linear_max_volume - linear_next_start_volume) / float(
                            samps_to_next_segment_from_here)
                    for samps in xrange(samps_to_next_segment_from_here):
                        cur_vol = cur_vol - how_much_volume_to_decrease_per_samp
                        try:
                            # This actually applies the volume modification
                            segment_data.data[ss] *= cur_vol
                        except IndexError:
                            pass
                        ss = ss + 1

            # This mixes the segment from B with the segment from A, and adds it to the output
            mixed_data = audio.mix(segment_data, reference_data, mix=mix)
            out.append(mixed_data)

        # This writes the newly created audio to the given file.  Phew!
        out.encode(self.output_filename)
Example #12
def main(input_filename, output_filename, forced_key):
    
    sampling_target = "beats"   #could be bars, beats or tatums

    st = modify.Modify()
    nonwub = audio.LocalAudioFile(input_filename)
    if not forced_key:
        tonic = nonwub.analysis.key['value']
    else:
        tonic = forced_key
    tempo = nonwub.analysis.tempo['value'] 

    fade_in = nonwub.analysis.end_of_fade_in
    fade_out = nonwub.analysis.start_of_fade_out

    bars = nonwub.analysis.bars#.that(are_contained_by_range(fade_in, fade_out))
    beats = nonwub.analysis.beats#.that(are_contained_by_range(fade_in, fade_out))  
    sections = nonwub.analysis.sections

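    # For each section, and for each of the 12 pitch classes, collect the quanta (tatums, beats, or bars)
    # that start on segments whose strongest pitch matches that class.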
    for i, v in enumerate(sections):
        samples[i] = {}
        for pitch in range(0, 12):
            sample_list = audio.AudioQuantumList()
            if sampling_target == "tatums":
                beat_list = audio.AudioQuantumList()
                beat_list.extend([b for x in v.children() for b in x.children()])
                sample_list.extend([b for x in beat_list for b in x.children()])
            elif sampling_target == "beats":
                sample_list.extend([b for x in v.children() for b in x.children()])
            elif sampling_target == "bars":
                sample_list.extend(v.children())
            samples[i][pitch] = sample_list.that(overlap_ends_of(nonwub.analysis.segments.that(have_pitch_max(pitch)).that(overlap_starts_of(sample_list))))

    audioout = audio.AudioData(shape= (len(nonwub),2), sampleRate=44100, numChannels=2)
    out = audio.AudioQuantumList()

    """
    We could use bars here, but unfortunately, the beat detection is more reliable than the bar detection.
    Hence, we create our own bars of 4.
    """

    """
        SONG INTRO SECTION
        Plays first four bars of song like usual.
        Then loops:
            first quarter note of first bar         x 4
            first quarter note of second bar        x 4
            first eighth note of third bar          x 8
            first sixteenth note of fourth bar      x 8
            third sixteenth note of fourth bar      x 8
    """


#    for i, s in enumerate(nonwub.analysis.bars):
#        audio.getpieces(nonwub, [s]).encode("bar_%s_%s" % (i, output_filename))

    low        = audio.AudioData('samples/sub_long01.wav', sampleRate=44100, numChannels=2)
    fizzle     = audio.AudioData('samples/fizzle.wav', sampleRate=44100, numChannels=2)
    fizzle_soft= audio.AudioData('samples/fizzle-soft.wav', sampleRate=44100, numChannels=2)
    introeight = audio.AudioData('samples/intro-eight.wav', sampleRate=44100, numChannels=2)
    hats       = audio.AudioData('samples/hats.wav', sampleRate=44100, numChannels=2)
    blank      = audio.AudioData('samples/empty.wav', sampleRate=44100, numChannels=2)

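    # Beat detection is more reliable than bar detection (see the note above), so build four "bars" of four beats each by hand.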
    custom_bars = []

    custom_bars.append(beats[0:4])
    custom_bars.append(beats[4:8])
    custom_bars.append(beats[8:12])
    custom_bars.append(beats[12:16])    

    out.extend([x for bar in custom_bars for x in bar])
    
    out.append(custom_bars[0][0])
    out.append(custom_bars[0][0])
    out.append(custom_bars[0][0])
    out.append(custom_bars[0][0])
    
    out.append(custom_bars[1][0])
    out.append(custom_bars[1][0])
    out.append(custom_bars[1][0])
    out.append(custom_bars[1][0])

    beatone = custom_bars[2][0]
    beattwo = custom_bars[3][0]
    beatthree = custom_bars[3][2]
    
    for x in range(0, 8):
        out.append(audio.AudioQuantum(beatone.start, beatone.duration/2, None, beatone.confidence, beatone.source))
    for x in range(0, 8):
        out.append(audio.AudioQuantum(beattwo.start, beattwo.duration/4, None, beattwo.confidence, beattwo.source))
    for x in range(0, 8):
        out.append(audio.AudioQuantum(beatthree.start, beatthree.duration/4, None, beatthree.confidence, beatthree.source))
    
    nonwub_intro = mono_to_stereo(st.shiftTempo(audio.getpieces(nonwub, out), 140/tempo))
    nonwub_intro = audio.mix(nonwub_intro, low, 0.7)
    nonwub_intro = audio.mix(nonwub_intro, introeight, 0.7)

    audioout.append(nonwub_intro)
 
######
#   BEGIN WUBWUB
######
#   Each "wub" comprises 8 bars = 32 beats,
#   of which the default song format is:
#       1 1 1 1 1 1 1 1     =   8 wubs in tonic
#       4 4 4 4             =   4 wubs in the minor third from the tonic
#       10 10 10 10         =   4 wubs in the minor 7th from the tonic
######

    for section, value in enumerate(sections):
        onebar = audio.AudioQuantumList()
        if sampling_target == "tatums":
            for twice in range(0, 2):
                for i in range(0, 16):
                    s = samples_of_key(section, tonic)
                    onebar.append( s[i % len(s)] )
                for i in range(16, 24):
                    s = samples_of_key(section, (tonic + 3) % 12)
                    onebar.append(  s[i % len(s)]  )
                for i in range(24, 32):
                    s = samples_of_key(section, (tonic + 9) % 12)
                    onebar.append(  s[i % len(s)]  )
        elif sampling_target == "beats":
            for twice in range(0, 2):
                for i in range(0, 8):
                    s = samples_of_key(section, tonic)
                    onebar.append( s[i % len(s)] )
                for i in range(8, 12):
                    s = samples_of_key(section, (tonic + 3) % 12)
                    onebar.append(  s[i % len(s)]  )
                for i in range(12, 16):
                    s = samples_of_key(section, (tonic + 9) % 12)
                    onebar.append(  s[i % len(s)]  )
        elif sampling_target == "bars":
            for i in range(0, 4):
                s = samples_of_key(section, tonic)
                onebar.append( s[i % len(s)] )
            for i in range(4, 6):
                s = samples_of_key(section, (tonic + 3) % 12)
                onebar.append(  s[i % len(s)]  )
            for i in range(6, 8):
                s = samples_of_key(section, (tonic + 9) % 12)
                onebar.append(  s[i % len(s)]  )

        orig_bar = mono_to_stereo( st.shiftTempo( audio.getpieces(nonwub, onebar), 140/tempo ) )

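        # Louder passages of the original get a higher mix factor (more of the original, less wub);
        # it is floored at 0.3 so the original never disappears entirely.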
        loud = loudness(orig_bar)

        basemix = 0.5      # 0 = full wub, 1 = full song

        mixfactor = (-1 * basemix) + loud
        if mixfactor < 0.3:
            mixfactor = 0.3

        audioout.append( audio.mix( audio.mix( wubs[tonic], fizzle ), orig_bar , mixfactor ) )
        audioout.append( audio.mix( audio.mix( wub_breaks[tonic], hats ), orig_bar , mixfactor ) )
    
    audioout.append( fizzle_soft )
    audioout.encode( output_filename )
Example #13
def main(input_filename, output_filename, break_filename, break_parts, measures, mix):

    # This takes the input tracks, sends them to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)
    sample_rate = audiofile.sampleRate
    breakfile = audio.LocalAudioFile(break_filename)

    # This converts the break to stereo, if it is mono
    if breakfile.numChannels == 1:
        breakfile = mono_to_stereo(breakfile)

    # This gets the number of channels in the main file
    num_channels = audiofile.numChannels

    # This splits the break into each beat
    drum_data = split_break(breakfile, break_parts)
    hits_per_beat = int(break_parts / (4 * measures))
    # This gets the bars from the input track
    bars = audiofile.analysis.bars

    # This creates the 'shape' of the new array.
    # (Shape is a tuple (x, y) that indicates the length per channel of the audio file)
    out_shape = (len(audiofile) + 100000, num_channels)
    # This creates a new AudioData array to write data to
    out = audio.AudioData(shape=out_shape, sampleRate=sample_rate, numChannels=num_channels)
    if not bars:
        # If the analysis can't find any bars, stop!
        # (This might happen with really ambient music)
        print "Didn't find any bars in this analysis!"
        print "No output."
        sys.exit(-1)

    # This is where the magic happens:
    # For every beat in every bar except the last bar,
    # map the tatums of the break to the tatums of the beat
    for bar in bars[:-1]:
        # This gets the beats in the bar, and loops over them
        beats = bar.children()
        for i in range(len(beats)):
            # This gets the index of matching beat in the break
            try:
                break_index = ((bar.local_context()[0] % measures) * 4) + (i % 4)
            except ValueError:
                break_index = i % 4
            # This gets the tatums from the beat of the break
            tats = range((break_index) * hits_per_beat, (break_index + 1) * hits_per_beat)
            # This gets the number of samples in each tatum
            drum_samps = sum([len(drum_data[x]) for x in tats])

            # This gets the number of samples and the shape of the beat from the original track
            beat_samps = len(audiofile[beats[i]])
            beat_shape = (beat_samps, num_channels)

            # This gets the shape of each tatum
            tat_shape = (float(beat_samps / hits_per_beat), num_channels)

            # This creates the new AudioData that will be filled with chunks of the drum break
            beat_data = audio.AudioData(shape=beat_shape, sampleRate=sample_rate, numChannels=num_channels)
            for j in tats:
                # This creates an audioData for each tatum
                tat_data = audio.AudioData(shape=tat_shape, sampleRate=sample_rate, numChannels=num_channels)
                # This corrects for length / timing:
                # If the original is shorter than the break, truncate drum hits to fit beat length
                if drum_samps > beat_samps / hits_per_beat:
                    tat_data.data = drum_data[j].data[: len(tat_data)]
                # If the original is longer, space out drum hits to fit beat length
                elif drum_samps < beat_samps / hits_per_beat:
                    tat_data.append(drum_data[j])

                # This adds each new tatum to the new beat.
                tat_data.endindex = len(tat_data)
                beat_data.append(tat_data)
                del (tat_data)

            # This corrects for rounding errors
            beat_data.endindex = len(beat_data)

            # This mixes the new beat data with the input data, and appends it to the final file
            mixed_beat = audio.mix(beat_data, audiofile[beats[i]], mix=mix)
            del (beat_data)
            out.append(mixed_beat)

    # This works out the last beat and appends it to the final file
    finale = bars[-1].start + bars[-1].duration
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start, audiofile.analysis.duration - audiofile.analysis.bars[-1].start
    )
    last_data = audio.getpieces(audiofile, [last])
    out.append(last_data)

    # This writes the newly created audio to the given file.
    out.encode(output_filename)