def get_rhythm_phrase(self, file_name, genre):
    """Render a genre MIDI file to audio at this phrase's tempo and wrap it as a layer.

    Loads ``file_name``, rescales every MIDI event's delta time from the
    file's original tempo to ``self.tempo``, synthesizes it with the genre's
    SoundFont, and chops the resulting audio into per-buffer frames.

    :param file_name: path to the MIDI file; the genre name must appear in
        the path (it is used to locate the matching ``<genre>.sf2`` SoundFont).
    :param genre: genre/style name used for the SoundFont lookup.
    :return: a ``Queue`` whose ``data`` is a list of audio buffers, trimmed
        to the length of the first recorded layer.
    """
    mid = MidiFile(file_name)
    # Scratch files for the MIDI -> WAV round trip.
    midi_temp = "temp.mid"
    wav_temp = "temp.wav"
    # NB: '%' binds tighter than '+', so only the literal is %-formatted and
    # the genre path pieces are concatenated afterwards — the printed string
    # is the intended SoundFont path either way.
    print("loading sound font from %s" % file_name.split(genre)[0] + genre + '/' + genre + '.sf2')
    fs = FluidSynth(file_name.split(genre)[0] + genre + '/' + genre + '.sf2',
                    sample_rate=self.sample_rate)
    # Convert the file's microseconds-per-beat tempo to BPM.
    # assumes the set_tempo meta message is the 6th event of track 0 — TODO confirm
    midi_original_tempo = 1000000 * 60 / (mid.tracks[0][5].tempo)
    target_tempo = self.tempo
    # Stretch/compress event timing so the rendered audio matches self.tempo.
    for note in mid.tracks[0]:
        note.time = int(note.time * midi_original_tempo / target_tempo)
    mid.save(midi_temp)
    fs.midi_to_audio(midi_temp, wav_temp)
    # os.system("fluidsynth -ni %s %s -F %s -r %s" %(file_name.split(genre)[0] + genre + '/' + genre + '.sf2',midi_temp,wav_temp,self.sample_rate))
    sr, loop_data = wavfile.read(wav_temp)
    # loop_data = np.memmap(wav_temp, np.float32, offset=40)
    print(loop_data.shape)
    # Trim to the first layer's total sample count, then take every other
    # sample before reshaping into per-buffer rows.
    # NOTE(review): [::2] presumably drops one channel of interleaved stereo
    # (or halves the rate) — verify against the FluidSynth output format.
    loop_data = (loop_data[:len(np.array(self.layers[0].data).flatten())])[::2].reshape(-1, self.frames_per_buffer)
    print(loop_data.shape)
    rhythm_data = list(loop_data)
    rhythm_layer = Queue()
    rhythm_layer.data = rhythm_data
    return rhythm_layer
def close_recording_for_loop_over(self):
    """Finish the current overdub: pad it with silence to the phrase length,
    store it as a new layer sliced to the longest existing length, and reset
    the overdub buffer for the next take."""
    print("post-padding the recording with zeros")
    # Longest buffer count among the existing layers and the mixed phrase.
    lengths = [len(layer.data) for layer in self.layers]
    lengths.append(len(self.phrase.data))
    max_size = max(lengths)
    # Append silent buffers so the overdub reaches the phrase's length.
    silence = np.zeros(self.number_of_channels * self.frames_per_buffer)
    missing = len(self.phrase.data) - len(self.overdub.data)
    self.overdub.post_padding(missing * [silence])
    self.layers.append(self.overdub.slice(max_size))
    # Fresh, empty overdub for the next recording pass.
    self.overdub = Queue()
    print("Closed recording")
def breadth_first_search(self, board):
    """BFS over the puzzle state space from ``board`` to ``self.goalBoard``.

    Returns ``(goal_state, nodes_expanded)`` when the goal is reached;
    returns ``None`` implicitly if the frontier is exhausted.
    """
    directions = ['Up', 'Down', 'Left', 'Right']
    frontier = Queue()
    frontier.add(State(board))
    explored = set()
    nodes_expanded = 0
    while not frontier.is_empty():
        current = frontier.remove()
        explored.add(current)
        if current.board == self.goalBoard:
            return current, nodes_expanded
        nodes_expanded += 1
        for direction in directions:
            next_board = current.board.move(direction)
            if next_board is None:
                continue
            successor = State(next_board, direction, current, current.depth + 1)
            # Only enqueue states we haven't seen in either structure.
            if successor in explored or successor in frontier:
                continue
            State.max_depth = max(State.max_depth, successor.depth)
            frontier.add(successor)
def has_backup_path(self):
    """BFS through residual-capacity edges from source to exit.

    Returns True (updating ``self.max_exit_dist``) as soon as the exit node
    is dequeued, False if it is unreachable. Records each node's BFS
    predecessor in ``pre`` and the incoming edge in ``pre_vertex``.
    """
    # Reset per-node bookkeeping. FIX: the search below records the incoming
    # edge in `pre_vertex`, so it must be cleared here too — previously only
    # `pre_door` was reset, leaving stale `pre_vertex` values from earlier
    # calls. `pre_door` is still cleared for any other code relying on it.
    for node in self.nodes:
        self.nodes[node].dist = float("inf")
        self.nodes[node].pre = None
        self.nodes[node].pre_door = None
        self.nodes[node].pre_vertex = None
    self.source.dist = 0
    que = Queue()
    que.enqueue(self.source)
    while que.size():
        first = que.dequeue()
        if first == self.exit_node:
            # Track the deepest exit distance seen across calls.
            if self.max_exit_dist < first.dist:
                self.max_exit_dist = first.dist
            return True
        for key in first.connections:
            neighbour = self.nodes[key]
            vertex = first.room + self.delimiter + key
            # Only traverse edges with residual capacity (capacity - flow > 0).
            if self.vertices[vertex].capacity - self.vertices[vertex].flow:
                if first.dist + 1 < neighbour.dist:
                    neighbour.dist = first.dist + 1
                    neighbour.pre = first
                    neighbour.pre_vertex = self.vertices[vertex]
                    que.enqueue(neighbour)
    return False
def __init__(self, channels, buffer_rate, sample_rate, phrase_id):
    """Initialize an empty phrase with the given audio configuration."""
    # Audio configuration.
    self.number_of_channels = channels
    self.frames_per_buffer = buffer_rate
    self.sample_rate = sample_rate
    # Identity and state flags.
    self.phrase_id = phrase_id
    self.is_overdubbing = False
    self.rhythm_appended = False
    self.phrase_state_index = -1
    # Audio containers: mixed phrase, in-progress overdub, recorded layers.
    self.phrase = Queue()
    self.overdub = Queue()
    self.layers = []
    # Timing defaults until tempo detection runs on real audio.
    self.tempo = 120
    self.beats = []
    self.offset = 0
def bft(self, starting_point):
    """Breadth-first traversal from ``starting_point``.

    :param starting_point: key of the vertex to start from.
    :return: list of visited vertex keys in visit order.
    """
    q = Queue()
    visited = []
    q.enqueue(starting_point)
    # FIX: was `while q.len() is not 0` — identity comparison with an int
    # literal only worked via CPython's small-int caching and raises a
    # SyntaxWarning on modern Python; use a value comparison.
    while q.len() != 0:
        n = q.dequeue()
        if n not in visited:
            visited.append(n)
            # print(visited)
            # Enqueue neighbours (vertex keys are stored as strings).
            for i in self.vertices[f"{n}"]:
                q.enqueue(f"{i}")
    return visited
def bfs_walk(self):
    """Breadth-first walk over the graph, printing each node's name once.

    Marks nodes via their ``mark`` attribute; starts from the first node in
    ``self.nodes``'s iteration order.
    """
    pending = Queue()
    pending.enqueue(next(iter(self.nodes.values())))
    while pending.size() > 0:
        node = pending.dequeue()
        # Already visited: nothing to do.
        if node.mark:
            continue
        print(node.name)
        node.mark = True
        for name in node.connections:
            pending.enqueue(self.nodes[name])
def print_level_order(self):
    """Print the tree level by level via BFS, using "_" as a placeholder
    for missing children, formatted through ``self.format_avl``.
    """
    q = Queue()
    # One bucket per level, keyed from the tree height down to -1.
    tmp = OrderedDict()
    for index in range(self.get_height(), -2, -1):
        tmp[index] = []
    q.enqueue(self.node)
    level = 0
    # NOTE(review): `parent` is only assigned inside the loop; if the very
    # first dequeued node were falsy this would hit an unbound `parent` —
    # presumably the root is always truthy here. Verify against callers.
    while q.size() > 0:
        actual = q.dequeue()
        if actual:
            # Track the level of the node currently being emitted.
            if level != actual.level:
                level = actual.level
            tmp[level].append(str(actual.key))
            if actual.left:
                q.enqueue(actual.left.node)
            if actual.right:
                q.enqueue(actual.right.node)
            parent = actual
        else:
            # Falsy entry: emit a placeholder for the missing child on the
            # level below the last real parent seen.
            if parent:
                if parent.left.node or parent.right.node:
                    tmp[level - 1].append("_")
                if not parent.left.node and not parent.right.node:
                    tmp[level - 1].append("_")
                parent = actual
            else:
                tmp[level - 1].append("_")
    result = self.format_avl(tmp)
    print(result)
def path_exists(g, start, target):
    """Breadth-first reachability check from ``start`` to ``target`` in graph ``g``.

    Marks nodes visited via their ``seen`` attribute (mutates the graph).
    Returns True when ``target`` is reached, False when the frontier is
    exhausted.

    NOTE(review): ``start`` is resolved through ``g[start]`` but ``target``
    is compared against node objects directly — confirm node ``__eq__``
    matches target keys. The start node itself is never compared to target
    (original behavior, preserved).
    """
    start = g[start]
    queue = Queue()
    for child in start:
        queue.enqueue(child)
    for child in queue:
        if child.seen:
            # FIX: was `return` (None), which aborted the entire search the
            # first time an already-visited node was re-encountered; skip it
            # and keep searching instead.
            continue
        if child == target:
            return True
        child.seen = True
        for child_of_child in child:
            queue.enqueue(child_of_child)
    # Frontier exhausted without reaching target.
    return False
        # Search exhausted without reaching the goal: return everything explored.
        # NOTE(review): these two lines are the tail of the enclosing `_search`
        # factory, whose definition starts before this chunk.
        return explored
    return inner


def one(*args):
    """Constant step cost: every move costs 1 (used by BFS/DFS)."""
    return 1


def zero(*args):
    """Constant zero heuristic: reduces A*-style search to uninformed search."""
    return 0


def cost(grid, _, node):
    """Determine cost of traversing given node"""
    i, j = node
    return COSTS[grid[i][j]]


def manhattan(_, goal, node):
    """Manhattan distance from node to goal, for heuristic"""
    i, j = node
    gi, gj = goal
    return abs(gi - i) + abs(gj - j)


# Concrete strategies: the frontier container decides expansion order,
# the (cost, heuristic) pair decides node priority.
bfs = _search(Queue(), one, zero)
dfs = _search(Stack(), one, zero)
ucs = _search(PriorityQueue(), cost, zero)
a_star = _search(PriorityQueue(), cost, manhattan)
# print(lst) # print(lst.nodes) # stack = Stack() # stack.push(4) # stack.push(5) # stack.push(6) # print(stack.pop()) # stack.push(7) # print(stack) # print(stack.count) print('Stack:') stack = Stack([1, 2, 3]) print(stack) print(stack.peek()) stack.pop() print(stack) print(stack.peek()) print('Queue:') queue = Queue([1, 2, 3]) print(queue) print(queue.peek()) queue.dequeue() print(queue) print(queue.peek())
class Phrase(object):
    """A multi-layer audio loop ("phrase") for a live looper.

    Holds the mixed phrase, the in-progress overdub, and the individual
    recorded layers, each as a Queue whose ``data`` is a list of
    per-buffer sample arrays.
    """

    def __init__(self, channels, buffer_rate, sample_rate, phrase_id):
        """Initialize an empty phrase with the given audio configuration."""
        self.is_overdubbing = False
        self.phrase_id = phrase_id
        # Mixed output phrase and the overdub currently being recorded.
        self.phrase = Queue()
        self.overdub = Queue()
        # Individual recorded layers.
        self.layers = []
        # Audio configuration.
        self.number_of_channels = channels
        self.frames_per_buffer = buffer_rate
        self.sample_rate = sample_rate
        # Index into phrase_states(); -1 means nothing selected yet.
        self.phrase_state_index = -1
        self.rhythm_appended = False
        # Timing defaults until set_tempo() analyses real audio.
        self.tempo = 120
        self.beats = []
        self.offset = 0

    def phrase_states(self):
        """Selectable layer combinations: all layers first, then each single layer."""
        n = len(self.layers)
        return [list(range(n))] + [[el] for el in range(n)]

    def set_tempo(self):
        """Estimate tempo and beat positions from the first layer's audio."""
        flattened_array_y = np.array(self.layers[0].data).flatten()
        self.tempo, self.beats = bpm_detector(flattened_array_y, self.sample_rate)
        # self.tempo, self.beats = librosa.beat.beat_track(y=flattened_array_y, sr=self.sample_rate, units="frames")

    def arm_overdubbing_track(self):
        """Pre-pad the overdub with silence up to the phrase's current head."""
        print("Pre-padding records with zeros")
        self.overdub.pre_padding(
            self.phrase.head * [np.zeros(self.number_of_channels * self.frames_per_buffer)])
        self.is_overdubbing = True

    def clear_phrase(self):
        """Remove the currently selected layer set and remix the phrase.

        Resets the whole phrase when no layers remain.
        :return: the number of layers left.
        """
        n = len(self.layers)
        phrase_state = self.phrase_states()[self.phrase_state_index]
        print("found %d layers clearing %s from it" % (n, phrase_state))
        self.layers = [
            self.layers[i] for i in range(n) if i not in phrase_state
        ]
        self.phrase_state_index = 0
        if self.layers:
            # Remix the phrase from whatever layers survived.
            active_layers = [layer.data for layer in self.layers]
            self.phrase.data = list(np.array(active_layers).sum(axis=0))
        else:
            # Nothing left: reset to a pristine phrase.
            self.__init__(self.number_of_channels, self.frames_per_buffer,
                          self.sample_rate, self.phrase_id)
        return len(self.layers)

    def select_phrase(self):
        """Cycle to the next layer selection and remix the phrase from it."""
        self.phrase_state_index = (self.phrase_state_index + 1) % (len(self.layers) + 1)
        phrase_state = self.phrase_states()[self.phrase_state_index]
        print("Current Selected Layer %s" % phrase_state)
        active_layers = [self.layers[i].data for i in phrase_state]
        self.phrase.data = list(np.array(active_layers).sum(axis=0))

    def close_recording_for_loop_over(self):
        """Finish the current overdub: pad to phrase length, store as a layer."""
        print("post-padding the recording with zeros")
        max_size = max([len(layer.data) for layer in
                        self.layers] + [len(self.phrase.data)])
        # postpend zeros to bring all layers to same length
        postpend_length = len(self.phrase.data) - len(self.overdub.data)
        self.overdub.post_padding(
            postpend_length * [np.zeros(self.number_of_channels * self.frames_per_buffer)])
        self.layers.append(self.overdub.slice(max_size))
        self.overdub = Queue()
        print("Closed recording")

    def extend_recording(self):
        """Extend the loop (see Queue.extend_loop) for the phrase and all layers."""
        self.phrase.extend_loop()
        print("extended phrase looping")
        for layer in self.layers:
            print("extended layer looping")
            layer.extend_loop()

    def set_overdubbing_mode(self):
        # set to overdubbing if phrase is not empty else set to first recording
        # FIX: was `~self.phrase.empty()` — bitwise NOT on a Python bool is
        # truthy for both ~True (-2) and ~False (-1), so overdubbing mode was
        # effectively always on. `not` gives the intended logical negation
        # (and is also correct if empty() returns a numpy bool).
        self.is_overdubbing = not self.phrase.empty()

    def get_rhythm_phrase(self, file_name, genre):
        """Render a genre MIDI file to audio at this phrase's tempo.

        Loads ``file_name``, rescales event timing from the file's original
        tempo to ``self.tempo``, synthesizes it with the genre's SoundFont,
        and chops the audio into per-buffer frames.

        :return: a ``Queue`` whose ``data`` is a list of audio buffers,
            trimmed to the length of the first recorded layer.
        """
        mid = MidiFile(file_name)
        midi_temp = "temp.mid"
        wav_temp = "temp.wav"
        print("loading sound font from %s" % file_name.split(genre)[0] + genre + '/' + genre + '.sf2')
        fs = FluidSynth(file_name.split(genre)[0] + genre + '/' + genre + '.sf2',
                        sample_rate=self.sample_rate)
        # Microseconds-per-beat -> BPM.
        # assumes the set_tempo meta message is the 6th event of track 0 — TODO confirm
        midi_original_tempo = 1000000 * 60 / (mid.tracks[0][5].tempo)
        target_tempo = self.tempo
        # Stretch/compress event timing so the rendered audio matches self.tempo.
        for note in mid.tracks[0]:
            note.time = int(note.time * midi_original_tempo / target_tempo)
        mid.save(midi_temp)
        fs.midi_to_audio(midi_temp, wav_temp)
        # os.system("fluidsynth -ni %s %s -F %s -r %s" %(file_name.split(genre)[0] + genre + '/' + genre + '.sf2',midi_temp,wav_temp,self.sample_rate))
        sr, loop_data = wavfile.read(wav_temp)
        # loop_data = np.memmap(wav_temp, np.float32, offset=40)
        print(loop_data.shape)
        # Trim to the first layer's sample count, then take every other sample
        # before reshaping into per-buffer rows.
        # NOTE(review): [::2] presumably drops one channel of interleaved
        # stereo — verify against the FluidSynth output format.
        loop_data = (loop_data[:len(np.array(self.layers[0].data).flatten())])[::2].reshape(-1, self.frames_per_buffer)
        print(loop_data.shape)
        rhythm_data = list(loop_data)
        rhythm_layer = Queue()
        rhythm_layer.data = rhythm_data
        return rhythm_layer
def test_queue():
    """Ad-hoc smoke test for Queue: enqueue, indexof, clear, dequeue."""
    queue = Queue([48, 3])
    for value in (10, 15, 20, 25):
        queue.enqueue(value)
    print(queue.indexof(25))
    print(queue)
    queue.clear()
    # queue.pop()
    # Inspect internal pointers after clearing.
    print(queue._head, queue._tail)
    print(queue)
    queue.enqueue(25)
    queue.dequeue()
    print(queue)