Example #1
0
 def render(self, chunk_size=None):
     """Yield the crossfaded audio between the two tracks.

     With no chunk_size the whole fade is yielded as one piece;
     otherwise it is emitted in chunk_size-sample slices.
     """
     # Only stereo tracks are supported for now.
     assert self.t1.track.data.ndim == 2
     assert self.t2.track.data.ndim == 2
     if chunk_size is None:
         yield crossfade(self.t1.data, self.t2.data, self.mode)
         return
     # NOTE(review): a 44100 Hz sample rate is assumed here -- confirm.
     first = int(self.s1 * 44100)
     last = int((self.s1 + self.duration) * 44100)
     for begin in xrange(first, last, chunk_size):
         stop = min(last, begin + chunk_size)
         chunk = crossfade(self.t1.track[begin:stop].data,
                           self.t2.track[begin:stop].data, self.mode,
                           self.samples, begin - first)
         yield chunk.astype(numpy.int16)
Example #2
0
 def render(self, chunk_size=None):
     """Generator producing the crossfade, whole or chunk by chunk."""
     #   For now, only support stereo tracks
     assert self.t1.track.data.ndim == 2
     assert self.t2.track.data.ndim == 2
     if chunk_size is None:
         yield crossfade(self.t1.data, self.t2.data, self.mode)
     else:
         # Sample offsets of the fade window (44100 Hz assumed).
         lo = int(self.s1 * 44100)
         hi = int((self.s1 + self.duration) * 44100)
         pos = lo
         while pos < hi:
             cut = min(hi, pos + chunk_size)
             yield (crossfade(self.t1.track[pos:cut].data,
                              self.t2.track[pos:cut].data, self.mode,
                              self.samples, pos - lo).astype(numpy.int16))
             pos += chunk_size
Example #3
0
 def render(self):
     """Render the crossfade of the two (stereo-coerced) tracks."""
     left = make_stereo(self.t1.get())
     right = make_stereo(self.t2.get())
     mixed = crossfade(left.data, right.data, self.mode)
     # Wrap the raw sample array back up as an AudioData object.
     return AudioData(ndarray=mixed, shape=mixed.shape,
                      sampleRate=left.sampleRate,
                      numChannels=mixed.shape[1])
Example #4
0
 def render(self):
     """Render the crossfade of the two (stereo-coerced) tracks.

     Fix: the sample buffers were being wrapped in list() before being
     handed to crossfade(), which turns the arrays into Python lists of
     rows and defeats array-based mixing.  Pass the .data buffers
     directly, as the sibling implementation does.
     """
     t1, t2 = map(make_stereo, (self.t1.get(), self.t2.get()))
     vecout = crossfade(t1.data, t2.data, self.mode)
     audio_out = AudioData(ndarray=vecout, shape=vecout.shape,
                           sampleRate=t1.sampleRate,
                           numChannels=vecout.shape[1])
     return audio_out
Example #5
0
	def render(self, chunk_size=None):
		"""Yield int16 crossfade chunks; warn if samples were left over."""
		stretch1 = self.__limited(self.t1, self.l1, chunk_size)
		stretch2 = self.__limited(self.t2, self.l2, chunk_size)
		rendered = 0
		for idx, (left, right) in enumerate(izip(stretch1, stretch2)):
			# The two stretches may disagree in length; mix the overlap only.
			take = min(len(left), len(right))
			rendered += take
			mixed = crossfade(left[:take], right[:take], '',
							  self.samples, idx * chunk_size)
			yield mixed.astype(numpy.int16)

		shortfall = self.samples - rendered
		if shortfall > 0:
			log.warning("Leftover samples (%d) when crossmatching.", shortfall)
Example #6
0
    def render(self, chunk_size=None):
        """Yield int16 crossfade chunks, warning on any sample shortfall."""
        pairs = izip(self.__limited(self.t1, self.l1, chunk_size),
                     self.__limited(self.t2, self.l2, chunk_size))
        consumed = 0
        for n, (a, b) in enumerate(pairs):
            # Mix only the overlap of the two (possibly unequal) chunks.
            usable = min(len(a), len(b))
            consumed += usable
            out = crossfade(a[:usable], b[:usable], '', self.samples,
                            n * chunk_size)
            yield out.astype(numpy.int16)

        remaining = self.samples - consumed
        if remaining > 0:
            log.warning("Leftover samples (%d) when crossmatching.", remaining)
Example #7
0
	def render(self, chunk_size=5512):
		"""Yield int16 crossfade chunks over the overlap region.

		CJA 20150213: The "chunk_size is None" branch was looking broken,
		so it was removed (cf 030ff0 and 6de38a), but it is used (in one
		place - see top-level render() above).  Now defaulting to 5512
		which seems to be a viable chunk size; it may be necessary to
		reinstate, and then bug-fix, the original notion of "unchunked
		return".
		"""
		#   For now, only support stereo tracks
		assert self.t1.data.ndim == 2
		assert self.t2.data.ndim == 2
		base1 = int(self.s1 * 44100)
		base2 = int(self.s2 * 44100)
		span = int(self.duration * 44100)
		for off in xrange(0, span, chunk_size):
			stop = min(span, off + chunk_size)
			# Note that this may bomb if there isn't enough of t2 to do the
			# xfade.  Since xfade is set administratively, this is simply a
			# matter of "be smart".  If you break stuff, it's your problem.
			piece = crossfade(self.t1[base1 + off:base1 + stop].data,
							  self.t2[base2 + off:base2 + stop].data,
							  self.mode, self.samples, off)
			yield piece.astype(numpy.int16)
Example #8
0
 def render(self, chunk_size=5512):
     """Yield int16 crossfade chunks over the overlap region.

     Fix: the yield was wrapped in an ``if False:`` guard (marked
     "hacky hack"), which made this generator produce nothing at all.
     The guard is removed so the crossfade chunks are actually emitted,
     matching the sibling implementation.
     """
     #   For now, only support stereo tracks
     # CJA 20150213: The "chunk_size is None" branch was looking broken, so I
     # removed it (cf 030ff0 and 6de38a), but it is used (in one place - see
     # top-level render() above). Now defaulting to 5512 which seems to be a
     # viable chunk size; it may be necessary to reinstate, and then bug-fix,
     # the original notion of "unchunked return".
     assert self.t1.data.ndim == 2
     assert self.t2.data.ndim == 2
     s1 = int(self.s1 * 44100)
     s2 = int(self.s2 * 44100)
     end = int(self.duration * 44100)
     for i in range(0, end, chunk_size):
         e = min(end, i + chunk_size)
         # Note that this may bomb if there isn't enough of t2 to do the xfade.
         # Since xfade is set administratively, this is simply a matter of "be
         # smart". If you break stuff, it's your problem.
         yield (crossfade(self.t1[s1 + i:s1 + e].data,
                          self.t2[s2 + i:s2 + e].data, self.mode,
                          self.samples, i).astype(numpy.int16))
Example #9
0
 def render(self, chunk_size=None):
     """Yield int16 crossfade chunks from the two limited stretches.

     NOTE(review): with the default chunk_size=None, ``i * chunk_size``
     would raise TypeError on the first chunk -- callers presumably
     always pass a real chunk size; confirm.
     """
     stretch1 = self.__limited(self.t1, self.l1, chunk_size)
     stretch2 = self.__limited(self.t2, self.l2, chunk_size)
     for n, (a, b) in enumerate(izip(stretch1, stretch2)):
         yield crossfade(a, b, '', self.samples,
                         n * chunk_size).astype(numpy.int16)
Example #10
0
    def update(self, *args):
        """Timer/animation callback: play the next beat, maybe branch.

        Plays the current beat, advances the beat pointer, and with some
        probability jumps ("branches") to a candidate beat -- possibly in
        a different song -- crossfading the two beats at the seam.  Also
        updates the cursor artists handed in via *args.

        args[0] -- main position cursor artist
        args[1] -- branch-candidate cursor artist
        args[2] -- last-branch cursor artist
        """
        from random import random, choice
        print "play", self.curr_md5, "beat", self.curr_beat
        cursor_ = args[0]
        branch_cursor_ = args[1]
        last_branch_cursor_ = args[2]
        # Play the current beat and advance, wrapping at the end of the song.
        self.curr_player.play(self.curr_laf.analysis.beats[self.curr_beat])
        self.curr_beat = (self.curr_beat + 1) % len(
            self.curr_laf.analysis.beats)
        # get candidates
        # Keep only branch edges whose distance is under the threshold.
        candidates = self.all_edges[self.curr_md5].get(self.curr_beat, [])
        candidates = [
            candidates[i] for i in range(len(candidates))
            if candidates[i][0] < self.threshold
        ]
        # restrict to local branches if we just changed songs and are resetting the data structures
        if self.thread is not None:
            if self.thread.ejecting():
                candidates = [
                    candidates[i] for i in range(len(candidates))
                    if candidates[i][1] == self.curr_md5
                ]
        branched = False
        if len(candidates) > 0:
            print len(
                candidates
            ), "branch candidates, prob =", self.curr_branch_probability
            # print candidates
            # flip a coin
            if random() < self.curr_branch_probability:
                #Beat is rendered so the audio data can be obtained for the crossfade function
                first_rendered_beat = self.curr_laf.analysis.beats[
                    self.curr_beat].render()
                print "Branch!!!"
                # A candidate edge is indexed as [distance, md5, beat_index]
                # (see the filters above and the uses of branch[1]/branch[2]).
                branch = choice(candidates)
                changed_song = branch[1] != self.curr_md5
                self.last_branch[0] = [
                    self.curr_beat + self.start_beats[self.curr_md5]
                ]
                # Jump to the target song/beat and reset branch probability.
                self.curr_md5 = branch[1]
                self.curr_beat = branch[2]
                self.curr_player = self.aq_players[self.curr_md5]
                self.curr_laf = self.local_audio[self.curr_md5]
                self.curr_branch_probability = self.min_branch_probability
                self.last_branch[1] = [
                    self.curr_beat + self.start_beats[self.curr_md5]
                ]
                branched = True

                #Next beat is also rendered
                second_rendered_beat = self.curr_laf.analysis.beats[
                    self.curr_beat].render()

                #Which beat is shorter
                min_len = min(first_rendered_beat.data.shape[0],
                              second_rendered_beat.data.shape[0])
                first = first_rendered_beat.data[0:min_len, :]
                second = second_rendered_beat.data[0:min_len, :]

                #Crossfade between two beats of the same length
                third = crossfade(first, second, 'linear')

                #If the first beat is longer...
                if first_rendered_beat.data.shape[
                        0] > second_rendered_beat.data.shape[0]:
                    audio_out = AudioData(
                        ndarray=third,
                        shape=third.shape,
                        sampleRate=first_rendered_beat.sampleRate,
                        numChannels=third.shape[1])
                #If the second beat is longer...
                else:
                    #The crossfade replaces the first part of the second beat
                    second_rendered_beat.data[0:min_len, :] = third
                    audio_out = AudioData(
                        ndarray=second_rendered_beat.data,
                        shape=second_rendered_beat.data.shape,
                        sampleRate=first_rendered_beat.sampleRate,
                        numChannels=second_rendered_beat.data.shape[1])

                self.curr_player.play_audio_data(audio_out)
                self.curr_beat = (self.curr_beat + 1) % len(
                    self.curr_laf.analysis.beats)

                if changed_song:
                    print "********** Changed song **********"
                    # signal that the data loading thread should reset
                    self.last_branch = [self.curr_beat, self.curr_beat]
                    if self.thread is not None:
                        self.thread.eject(self.curr_md5)

            else:
                # No branch this time: ramp the probability upward (capped).
                self.curr_branch_probability = min(
                    self.max_branch_probability, self.curr_branch_probability +
                    self.step_branch_probability)
        #self.curr_player.play(self.curr_laf.analysis.beats[self.curr_beat])
        #self.curr_beat = (self.curr_beat + 1) % len(self.curr_laf.analysis.beats)
        # update cursor
        t0 = self.curr_beat + self.start_beats[self.curr_md5]
        cursor_.set_xdata(t0)
        cursor_.set_ydata(t0)

        if len(candidates) > 0:
            # Draw a closed box between the current position (t0) and each
            # candidate target position (t1) on the branch cursor.
            from numpy import vstack, repeat, array
            t0 = repeat(t0, len(candidates), 0)
            t1 = array([self.start_beats[c[1]] for c in candidates]) + array(
                [c[2] for c in candidates])
            branch_x = vstack((t0, t0, t1, t1, t0)).T.reshape((-1, 1))
            branch_y = vstack((t0, t1, t1, t0, t0)).T.reshape((-1, 1))
            branch_cursor_.set_xdata(branch_x)
            branch_cursor_.set_ydata(branch_y)
            self.ghost = 1
        elif self.ghost >= 4:
            # Candidate markers linger for a few frames before being cleared.
            branch_cursor_.set_xdata([])
            branch_cursor_.set_ydata([])
        else:
            self.ghost += 1

        if branched:
            # Green for a forward jump, red for a backward jump.
            # NOTE(review): last_branch[0]/[1] are one-element lists here,
            # so this is a list-vs-list comparison -- confirm intended.
            if self.last_branch[0] < self.last_branch[1]:
                last_branch_cursor_.set_color('green')
            else:
                last_branch_cursor_.set_color('red')
            last_branch_x = [self.last_branch[i] for i in [0, 1, 1]]
            last_branch_y = [self.last_branch[i] for i in [0, 0, 1]]
            last_branch_cursor_.set_xdata(last_branch_x)
            last_branch_cursor_.set_ydata(last_branch_y)

        args[0].figure.canvas.draw()
Example #11
0
 def render(self, chunk_size=None):
     """Generator of int16 crossfade chunks.

     NOTE(review): the default chunk_size=None would make
     ``i * chunk_size`` fail with TypeError on the first chunk;
     callers appear to always supply a chunk size -- confirm.
     """
     pairs = izip(self.__limited(self.t1, self.l1, chunk_size),
                  self.__limited(self.t2, self.l2, chunk_size))
     for i, (a, b) in enumerate(pairs):
         out = crossfade(a, b, '', self.samples, i * chunk_size)
         yield out.astype(numpy.int16)