async def test_control_point_set_time(self):
    """Changing an existing control point's time is a valid mutation."""
    track = await self._add_track()

    with self.project.apply_mutations('test'):
        cp = track.create_control_point(audioproc.MusicalTime(1, 4), 0.7)

    with self.project.apply_mutations('test'):
        cp.time = audioproc.MusicalTime(3, 4)
async def test_create_sample(self):
    """A loaded sample can be attached to the track at a musical time."""
    track = await self._add_track()

    sample_path = os.path.join(unittest.TESTDATA_DIR, 'future-thunder1.wav')
    loaded_sample = await track.load_sample(sample_path, self.loop)

    with self.project.apply_mutations('test'):
        track.create_sample(audioproc.MusicalTime(1, 4), loaded_sample)

    self.assertEqual(track.samples[0].time, audioproc.MusicalTime(1, 4))
async def test_add_control_point(self):
    """Creating a control point stores it with the given time and value."""
    track = await self._add_track()

    with self.project.apply_mutations('test'):
        cp = track.create_control_point(audioproc.MusicalTime(1, 4), 0.7)

    self.assertEqual(len(track.points), 1)
    self.assertIs(track.points[0], cp)
    self.assertEqual(cp.time, audioproc.MusicalTime(1, 4))
    self.assertAlmostEqual(cp.value, 0.7)
def __nodeMessage(self, msg: Dict[str, Any]) -> None:
    """Handle a status message from the midi_looper processor.

    The message dict is keyed by LV2 URIDs and may carry any combination of:
    the current playback position, a record-state change, and a recorded
    MIDI event.
    """
    # Playback position update: (numerator, denominator) of a MusicalTime.
    current_position_urid = (
        'http://noisicaa.odahoda.de/lv2/processor_midi_looper#current_position'
    )
    if current_position_urid in msg:
        numerator, denominator = msg[current_position_urid]
        current_position = audioproc.MusicalTime(numerator, denominator)
        self.__pianoroll.setPlaybackPosition(current_position)

    # Record-state change (see RecordState enum).
    record_state_urid = 'http://noisicaa.odahoda.de/lv2/processor_midi_looper#record_state'
    if record_state_urid in msg:
        record_state = RecordState(msg[record_state_urid])
        self.__record.setRecordState(record_state)

        if record_state == RecordState.RECORDING:
            # A new take starts: discard whatever was shown before.
            self.__recorded_events.clear()
            self.__pianoroll.clearEvents()
            self.__pianoroll.setUnfinishedNoteMode(
                pianoroll.UnfinishedNoteMode.ToPlaybackPosition)
        else:
            if record_state == RecordState.OFF:
                # Recording finished: commit the recorded events to the
                # node's patch. The 'events' listener is removed first so
                # the mutation below does not feed back into this widget,
                # and re-added once the pianoroll has been rebuilt.
                del self.__listeners['events']
                with self.project.apply_mutations(
                        '%s: Record patch' % self.__node.name):
                    patch = self.__node.patches[0]
                    patch.set_events(self.__recorded_events)
                self.__recorded_events.clear()

                # Rebuild the pianoroll from the (now committed) patch.
                self.__pianoroll.clearEvents()
                self.__event_map.clear()
                for event in self.__node.patches[0].events:
                    self.__event_map.append(self.__pianoroll.addEvent(event))
                self.__listeners['events'] = (
                    self.__node.patches[0].events_changed.add(self.__eventsChanged))
            self.__pianoroll.setUnfinishedNoteMode(
                pianoroll.UnfinishedNoteMode.ToEnd)

    # A single event played/recorded by the looper:
    # (time numerator, time denominator, raw midi bytes, recorded flag).
    recorded_event_urid = 'http://noisicaa.odahoda.de/lv2/processor_midi_looper#recorded_event'
    if recorded_event_urid in msg:
        time_numerator, time_denominator, midi, recorded = msg[recorded_event_urid]
        time = audioproc.MusicalTime(time_numerator, time_denominator)
        if recorded:
            event = value_types.MidiEvent(time, midi)
            self.__recorded_events.append(event)
            self.__pianoroll.addEvent(event)
        # Mirror note on/off onto the pianoroll keyboard display.
        if midi[0] & 0xf0 == 0x90:
            self.__pianoroll.noteOn(midi[1])
        elif midi[0] & 0xf0 == 0x80:
            self.__pianoroll.noteOff(midi[1])
def __playbackPositionChanged(self, time: audioproc.MusicalTime) -> None:
    """Move the playback-position marker to the measure containing *time*."""
    # Clear the marker from the measure that previously showed it.
    if self.__measure_editor_at_playback_pos is not None:
        self.__measure_editor_at_playback_pos.clearPlaybackPos()
        self.__measure_editor_at_playback_pos = None

    # Walk the measures, tracking each one's start time, until we find the
    # one whose time span contains the playback position.
    t0 = audioproc.MusicalTime()
    for editor in self.measure_editors():
        t1 = t0 + editor.duration
        if t0 <= time < t1:
            editor.setPlaybackPos(audioproc.MusicalTime() + (time - t0))
            self.__measure_editor_at_playback_pos = editor
            break
        t0 = t1
async def test_delete_control_point(self):
    """Deleting the only control point leaves the track empty."""
    track = await self._add_track()

    with self.project.apply_mutations('test'):
        cp = track.create_control_point(audioproc.MusicalTime(1, 4), 0.7)

    with self.project.apply_mutations('test'):
        track.delete_control_point(cp)

    self.assertEqual(len(track.points), 0)
def test_messages_on_mutations(self):
    """Each control-point mutation emits the matching cvgenerator messages."""
    connector = self.track.create_node_connector(
        message_cb=self.message_cb, audioproc_client=None)
    try:
        self.assertEqual(connector.init(), [])

        points = self.track.points

        # Inserting a point emits an "add".
        self.messages.clear()
        points.insert(
            0,
            self.pool.create(
                model.ControlPoint, time=audioproc.MusicalTime(1, 4), value=0.5))
        self.assertEqual(self.messages, ['cvgenerator_add_control_point'])

        self.messages.clear()
        points.insert(
            1,
            self.pool.create(
                model.ControlPoint, time=audioproc.MusicalTime(2, 4), value=0.8))
        self.assertEqual(self.messages, ['cvgenerator_add_control_point'])

        # Deleting a point emits a "remove".
        self.messages.clear()
        del points[0]
        self.assertEqual(self.messages, ['cvgenerator_remove_control_point'])

        # Changing a point is modelled as remove + add.
        self.messages.clear()
        points[0].value = 0.2
        self.assertEqual(
            self.messages,
            ['cvgenerator_remove_control_point', 'cvgenerator_add_control_point'])

        self.messages.clear()
        points[0].time = audioproc.MusicalTime(3, 4)
        self.assertEqual(
            self.messages,
            ['cvgenerator_remove_control_point', 'cvgenerator_add_control_point'])
    finally:
        connector.cleanup()
def __nodeMessage(self, msg: Dict[str, Any]) -> None:
    """Append a decoded MIDI event to the monitor's event table.

    Messages arriving while the pause action is checked are dropped. The
    table is capped at 1000 rows (oldest rows removed first) and kept
    scrolled to the newest entry.
    """
    midi_event_urid = 'http://noisicaa.odahoda.de/lv2/processor_midi_monitor#midi_event'
    if not self.__pause_action.isChecked() and midi_event_urid in msg:
        time_numerator, time_denominator, midi = msg[midi_event_urid]
        time = audioproc.MusicalTime(time_numerator, time_denominator)

        row = self.__events.rowCount()
        self.__events.insertRow(row)

        # Columns: time, type, channel, data1, data2 ('-' = not applicable).
        columns = ['-', '-', '-', '-', '-']

        # A negative numerator marks an event with no meaningful timestamp.
        if time.numerator >= 0:
            columns[0] = '%.3f' % float(time)

        status = midi[0] & 0xf0
        if 0x80 <= status <= 0xe0:
            # Channel voice message: decode type, channel and data bytes.
            status_name = {
                0x80: "Note Off",
                0x90: "Note On",
                0xa0: "Aftertouch",
                0xb0: "Control Change",
                0xc0: "Program Change",
                0xd0: "Channel Pressure",
                0xe0: "Pitch Bend",
            }[status]
            columns[1] = status_name
            columns[2] = '%d' % ((midi[0] & 0x0f) + 1)
            if status in (0x80, 0x90, 0xa0):
                # Note messages: show note name plus velocity/pressure.
                columns[3] = '%s (%d)' % (value_types.MIDI_TO_NOTE[midi[1]], midi[1])
                columns[4] = '%d' % midi[2]
            elif status == 0xe0:
                # Pitch bend: 14-bit value, centered at 0.
                value = ((midi[2] << 7) | midi[1]) - 0x2000
                columns[3] = '%d' % value
            elif status == 0xc0:
                # Program change has a single data byte.
                columns[3] = '%d' % midi[1]
            else:
                # Remaining types (0xb0, 0xd0) show both data bytes.
                # NOTE(review): channel pressure (0xd0) carries only one data
                # byte on the wire — presumably `midi` is padded; verify.
                columns[3] = '%d' % midi[1]
                columns[4] = '%d' % midi[2]
        else:
            # System/realtime messages: just show the raw status byte.
            columns[1] = '0x%02x' % midi[0]

        for col, text in enumerate(columns):
            item = QtWidgets.QTableWidgetItem(text)
            if col in (0, 3, 4):
                # Numeric columns are right-aligned.
                item.setData(Qt.TextAlignmentRole, Qt.AlignRight | Qt.AlignVCenter)
            self.__events.setItem(row, col, item)

        # Bound the table size and keep the latest event visible.
        while self.__events.rowCount() > 1000:
            self.__events.removeRow(0)
        self.__events.scrollToBottom()
def __onMoveTo(self, where: MoveTo) -> None:
    """Jump the transport to a well-known position (start/end/prev/next beat).

    Requires an active player; otherwise the action is logged and ignored.
    """
    if self.__player_id is None:
        logger.warning("Player action without active player.")
        return

    if where == MoveTo.Start:
        self.setCurrentTime(audioproc.MusicalTime())

    elif where == MoveTo.End:
        self.setCurrentTime(self.time_mapper.end_time)

    elif where == MoveTo.PrevBeat:
        # Find the current beat index (quarter notes). The 3/16 offset
        # biases the truncation so a position just before a beat boundary
        # counts as that beat — presumably to make repeated presses feel
        # natural; TODO confirm the intended rounding behavior.
        beat = int(
            (self.__current_time + audioproc.MusicalDuration(3, 16))
            / audioproc.MusicalTime(1, 4))
        new_time = audioproc.MusicalTime(beat - 1, 4)
        # Clamp at the start of the project.
        if new_time < audioproc.MusicalTime(0, 1):
            new_time = audioproc.MusicalTime(0, 1)
        self.setCurrentTime(new_time)

    elif where == MoveTo.NextBeat:
        # Same beat computation as PrevBeat, stepping forward instead.
        beat = int(
            (self.__current_time + audioproc.MusicalDuration(3, 16))
            / audioproc.MusicalTime(1, 4))
        new_time = audioproc.MusicalTime(beat + 1, 4)
        # Clamp at the end of the project.
        if new_time > self.time_mapper.end_time:
            new_time = self.time_mapper.end_time
        self.setCurrentTime(new_time)

    else:
        raise ValueError(where)
def _init_internal(self) -> None:
    """Create editors for all existing measures and subscribe to changes."""
    start_time = audioproc.MusicalTime()
    for mref in self._node.measure_list:
        self.__add_measure(start_time, mref)
        start_time += mref.measure.duration

    self._listeners['measure_list'] = self._node.measure_list_changed.add(
        self.__measure_list_changed)
    self._add_track_listeners()
def snapTime(self, time: audioproc.MusicalTime) -> audioproc.MusicalTime:
    """Snap *time* to the nearest grid line when within 10 pixels of it."""
    step = self.gridStep()
    nearest_grid = (
        audioproc.MusicalTime(0, 1) + step * int(round(float(time / step))))

    # Compare on-screen pixel positions, not musical times.
    pixel_delta = abs(int(time * self.scaleX()) - int(nearest_grid * self.scaleX()))
    return nearest_grid if pixel_delta <= 10 else time
def __init__(self, **kwargs: Any) -> None:
    """Transport state holder plus the player-control QActions.

    Per-project state (current time, loop range, loop flag) is restored
    from — and keyed under — the 'player_state:<project-id>:' session
    prefix.
    """
    super().__init__(**kwargs)

    # Prefix for all session keys owned by this object.
    self.__session_prefix = 'player_state:%s:' % self.project.id
    self.__last_current_time_update = None  # type: float
    self.__time_mode = TimeMode.Follow
    self.__playing = False

    # Restore persisted transport state (with defaults for a fresh project).
    self.__current_time = self.__get_session_value('current_time', audioproc.MusicalTime())
    self.__loop_start_time = self.__get_session_value('loop_start_time', None)
    self.__loop_end_time = self.__get_session_value('loop_end_time', None)
    self.__loop_enabled = self.__get_session_value('loop_enabled', False)

    # Set once a player process is attached; None means "no active player".
    self.__player_id = None  # type: str

    # Transport actions; all shortcuts are application-wide.
    self.__move_to_start_action = QtWidgets.QAction("Move to start", self)
    self.__move_to_start_action.setIcon(QtGui.QIcon(
        os.path.join(constants.DATA_DIR, 'icons', 'media-skip-backward.svg')))
    self.__move_to_start_action.setShortcut(QtGui.QKeySequence('Home'))
    self.__move_to_start_action.setShortcutContext(Qt.ApplicationShortcut)
    self.__move_to_start_action.triggered.connect(lambda: self.__onMoveTo(MoveTo.Start))

    self.__move_to_end_action = QtWidgets.QAction("Move to end", self)
    self.__move_to_end_action.setIcon(QtGui.QIcon(
        os.path.join(constants.DATA_DIR, 'icons', 'media-skip-forward.svg')))
    self.__move_to_end_action.setShortcut(QtGui.QKeySequence('End'))
    self.__move_to_end_action.setShortcutContext(Qt.ApplicationShortcut)
    self.__move_to_end_action.triggered.connect(lambda: self.__onMoveTo(MoveTo.End))

    self.__move_to_prev_action = QtWidgets.QAction("Move to previous beat", self)
    self.__move_to_prev_action.setIcon(QtGui.QIcon(
        os.path.join(constants.DATA_DIR, 'icons', 'media-seek-backward.svg')))
    self.__move_to_prev_action.setShortcut(QtGui.QKeySequence('PgUp'))
    self.__move_to_prev_action.setShortcutContext(Qt.ApplicationShortcut)
    self.__move_to_prev_action.triggered.connect(lambda: self.__onMoveTo(MoveTo.PrevBeat))

    self.__move_to_next_action = QtWidgets.QAction("Move to next beat", self)
    self.__move_to_next_action.setIcon(QtGui.QIcon(
        os.path.join(constants.DATA_DIR, 'icons', 'media-seek-forward.svg')))
    self.__move_to_next_action.setShortcut(QtGui.QKeySequence('PgDown'))
    self.__move_to_next_action.setShortcutContext(Qt.ApplicationShortcut)
    self.__move_to_next_action.triggered.connect(lambda: self.__onMoveTo(MoveTo.NextBeat))

    # Play/pause toggle.
    self.__toggle_action = QtWidgets.QAction("Play", self)
    self.__toggle_action.setIcon(QtGui.QIcon(
        os.path.join(constants.DATA_DIR, 'icons', 'media-playback-start.svg')))
    self.__toggle_action.setShortcut(QtGui.QKeySequence('Space'))
    self.__toggle_action.setShortcutContext(Qt.ApplicationShortcut)
    self.__toggle_action.triggered.connect(self.__onToggle)

    # Loop-playback toggle (checkable, no shortcut).
    self.__loop_action = QtWidgets.QAction("Loop playback", self)
    self.__loop_action.setIcon(QtGui.QIcon(
        os.path.join(constants.DATA_DIR, 'icons', 'media-playlist-repeat.svg')))
    self.__loop_action.setCheckable(True)
    self.__loop_action.toggled.connect(self.__onLoop)
def _update_measure_range(self, begin: int, end: int) -> None:
    """Refresh the editors for measures with index in [begin, end)."""
    t = audioproc.MusicalTime()
    for mref in self._node.measure_list:
        if mref.index >= end:
            break
        if begin <= mref.index:
            self._update_measure(t, mref)
        t += mref.measure.duration
def test_messages_on_mutations(self):
    """Each sample mutation emits the matching sample_script messages."""
    connector = self.track.create_node_connector(
        message_cb=self.message_cb, audioproc_client=None)
    try:
        self.assertEqual(connector.init(), [])

        samples = self.track.samples

        # Inserting a sample ref emits an "add".
        self.messages.clear()
        samples.insert(
            0,
            self.pool.create(
                model.SampleRef, time=audioproc.MusicalTime(1, 4), sample=self.sample1))
        self.assertEqual(self.messages, ['sample_script_add_sample'])

        self.messages.clear()
        samples.insert(
            1,
            self.pool.create(
                model.SampleRef, time=audioproc.MusicalTime(2, 4), sample=self.sample2))
        self.assertEqual(self.messages, ['sample_script_add_sample'])

        # Deleting emits a "remove".
        self.messages.clear()
        del samples[0]
        self.assertEqual(self.messages, ['sample_script_remove_sample'])

        # Changing a ref is modelled as remove + add.
        self.messages.clear()
        samples[0].time = audioproc.MusicalTime(3, 4)
        self.assertEqual(
            self.messages,
            ['sample_script_remove_sample', 'sample_script_add_sample'])

        self.messages.clear()
        samples[0].sample = self.sample1
        self.assertEqual(
            self.messages,
            ['sample_script_remove_sample', 'sample_script_add_sample'])
    finally:
        connector.cleanup()
def test_messages_on_init(self):
    """connector.init() reports one add message per pre-existing point."""
    for idx, (numer, value) in enumerate([(1, 0.5), (2, 0.8)]):
        self.track.points.insert(
            idx,
            self.pool.create(
                model.ControlPoint,
                time=audioproc.MusicalTime(numer, 4),
                value=value))

    connector = self.track.create_node_connector(
        message_cb=self.message_cb, audioproc_client=None)
    try:
        messages = connector.init()
        self.assertEqual(
            [self.WhichOneof(msg) for msg in messages],
            ['cvgenerator_add_control_point', 'cvgenerator_add_control_point'])
    finally:
        connector.cleanup()
def _set_time(self, value: audioproc.MusicalTime) -> None:
    """Store a new time after validating ordering against the siblings.

    Raises ValueError when the new time would reorder the point relative
    to its neighbors, or place the first point before time zero.
    """
    if self.parent is not None:
        if self.is_first:
            # The first point may not move before the start of the track.
            if value < audioproc.MusicalTime(0, 4):
                raise ValueError("Control point out of order.")
        elif value <= cast(ControlPoint, self.prev_sibling).time:
            raise ValueError("Control point out of order.")

        if not self.is_last and value >= cast(ControlPoint, self.next_sibling).time:
            raise ValueError("Control point out of order.")

    super()._set_time(value)
def test_messages_on_init(self):
    """connector.init() reports one add message per pre-existing sample."""
    for idx, sample in enumerate([self.sample1, self.sample2]):
        self.track.samples.insert(
            idx,
            self.pool.create(
                model.SampleRef,
                time=audioproc.MusicalTime(idx + 1, 4),
                sample=sample))

    connector = self.track.create_node_connector(
        message_cb=self.message_cb, audioproc_client=None)
    try:
        messages = connector.init()
        self.assertEqual(
            [self.WhichOneof(msg) for msg in messages],
            ['sample_script_add_sample', 'sample_script_add_sample'])
    finally:
        connector.cleanup()
def __init__(
        self, parent: QtWidgets.QWidget, time_mapper: audioproc.TimeMapper,
        **kwargs: Any) -> None:
    """LCD-style read-out of the current playback time."""
    super().__init__(parent=parent, **kwargs)

    self.__time_mapper = time_mapper
    self.__current_time = audioproc.MusicalTime()

    # Configure the LCD widget's appearance.
    self.setDigitCount(9)
    self.setSegmentStyle(QtWidgets.QLCDNumber.Flat)
    self.setFrameStyle(QtWidgets.QFrame.Panel)
    self.setFrameShadow(QtWidgets.QFrame.Sunken)

    # Re-render whenever the display mode switches.
    self.displayModeChanged.connect(self.__update)
def updateMeasures(self) -> None:
    """Recompute horizontal position and width of every measure editor."""
    if self.__closing:
        return

    t0 = audioproc.MusicalTime()
    x0 = int(t0 * self.scaleX())
    for measure_editor in self.measure_editors():
        t1 = t0 + measure_editor.duration

        # The trailing "appendix" widget has a fixed width instead of a
        # duration-derived one.
        if isinstance(measure_editor, Appendix):
            x1 = x0 + 90
        else:
            x1 = int(t1 * self.scaleX())

        measure_editor.setTopLeft(QtCore.QPoint(x0 + self.leftMargin(), 0))
        measure_editor.setWidth(x1 - x0)

        x0, t0 = x1, t1
def split_segment(
        self, segment_ref: PianoRollSegmentRef,
        split_time: audioproc.MusicalTime) -> None:
    """Split the referenced segment into two at *split_time*.

    The original segment is truncated to end at the split point; a new
    segment is created covering the remainder. Note intervals entirely
    before the split stay put, intervals entirely after it are moved into
    the new segment, and an interval straddling the split is cut into a
    note ending at the split (in segment1) and a note starting at the new
    segment's beginning (in segment2).
    """
    assert segment_ref.parent is self
    start_time = segment_ref.time
    end_time = start_time + segment_ref.segment.duration

    # Truncate the existing segment at the split point.
    segment1 = segment_ref.segment
    segment1.duration = split_time - start_time

    # New segment covering [split_time, end_time).
    segment_ref2 = self.create_segment(split_time, end_time - split_time)
    segment2 = segment_ref2.segment

    # Snapshot the intervals before mutating segment1's event list.
    intervals = list(segment1.intervals())
    # Event times below are relative to their segment's start.
    rel_split_time = split_time.relative_to(start_time)
    for interval in intervals:
        if interval.end_time <= rel_split_time:
            # remains in segment1
            pass
        elif interval.start_time >= rel_split_time:
            # move completely to segment2
            segment2.add_event(
                interval.start_event.midi_event.relative_to(rel_split_time))
            segment1.remove_event(interval.start_event)
            # end_event can be None for a note still open at segment end.
            if interval.end_event is not None:
                segment2.add_event(
                    interval.end_event.midi_event.relative_to(rel_split_time))
                segment1.remove_event(interval.end_event)
        else:
            # split interval
            # The note restarts at the very beginning of segment2 ...
            segment2.add_event(
                value_types.MidiEvent(
                    audioproc.MusicalTime(0, 1),
                    interval.start_event.midi_event.midi))
            if interval.end_event is not None:
                # ... its original end moves into segment2, and segment1
                # gets a replacement end exactly at the split point.
                segment2.add_event(
                    interval.end_event.midi_event.relative_to(rel_split_time))
                segment1.add_event(
                    value_types.MidiEvent(
                        rel_split_time, interval.end_event.midi_event.midi))
                segment1.remove_event(interval.end_event)
def mousePressEvent(self, evt: QtGui.QMouseEvent) -> None:
    """Mouse handling for control-point editing.

    - Plain left click on a highlighted point starts dragging it.
    - Shift+left click on a highlighted point deletes it.
    - Right click while dragging cancels the drag and restores the
      point's original position.
    Unhandled presses are forwarded to the superclass.
    """
    self.track.updateHighlightedPoint()

    if (evt.button() == Qt.LeftButton
            and evt.modifiers() == Qt.NoModifier
            and self.track.highlightedPoint() is not None):
        # Begin moving the highlighted point.
        self.__moving_point = self.track.highlightedPoint()
        self.__moving_point_original_pos = self.__moving_point.pos()
        self.__moving_point_offset = evt.pos() - self.__moving_point.pos()
        self.__move_mode = 'any'

        # Constrain horizontal movement to stay strictly between the
        # neighboring points (or the track start / project end for the
        # first / last point).
        point_index = self.__moving_point.index
        if point_index > 0:
            range_left = self.track.points[point_index - 1].pos().x() + 1
        else:
            range_left = self.track.timeToX(audioproc.MusicalTime(0, 1))
        if point_index < len(self.track.points) - 1:
            range_right = self.track.points[point_index + 1].pos().x() - 1
        else:
            range_right = self.track.timeToX(self.track.projectEndTime())
        self.__move_range = (range_left, range_right)

        evt.accept()
        return

    if (evt.button() == Qt.LeftButton
            and evt.modifiers() == Qt.ShiftModifier
            and self.track.highlightedPoint() is not None):
        # Delete the point via a project mutation (undoable).
        with self.project.apply_mutations(
                '%s: Remove control point' % self.track.track.name):
            self.track.track.delete_control_point(
                self.track.highlightedPoint().point)
        evt.accept()
        return

    if evt.button() == Qt.RightButton and self.__moving_point is not None:
        # Cancel an in-progress drag: snap back to the original position.
        self.track.setPointPos(self.__moving_point, self.__moving_point_original_pos)
        self.__moving_point = None
        evt.accept()
        return

    super().mousePressEvent(evt)
def __measure_list_changed(self, change: model_base.PropertyChange) -> None:
    """React to an insertion or deletion in the node's measure list."""
    if isinstance(change, model_base.PropertyListInsert):
        inserted = change.new_value
        # Walk all measures, accumulating start times: create an editor
        # for the new measure and shift every editor after it.
        time = audioproc.MusicalTime()
        for mref in self._node.measure_list:
            if mref.index == inserted.index:
                assert mref is inserted
                self.__add_measure(time, mref)
            elif mref.index > inserted.index:
                self._update_measure(time, mref)
            time += mref.measure.duration
    elif isinstance(change, model_base.PropertyListDelete):
        self.__remove_measure(change.old_value)
    else:
        raise TypeError("Unsupported change type %s" % type(change))
async def __setup_player(self) -> None:
    """Spin up a dedicated audioproc process and a Player for rendering.

    Creates the 'render' audioproc process with the configured block size
    and sample rate, connects a client to it, selects the 'renderer'
    backend streaming into self.__datastream_address, and creates the
    Player that will drive playback of the project.
    """
    # Events used to synchronize with engine notifications.
    self.__player_state_changed = asyncio.Event(loop=self.__event_loop)
    self.__player_started = asyncio.Event(loop=self.__event_loop)
    self.__player_finished = asyncio.Event(loop=self.__event_loop)
    self.__playing = False
    self.__current_time = audioproc.MusicalTime()
    self.__duration = self.__project.duration

    # Ask the main process to start the audioproc process for rendering.
    create_audioproc_request = editor_main_pb2.CreateAudioProcProcessRequest(
        name='render',
        host_parameters=audioproc.HostParameters(
            block_size=self.__render_settings.block_size,
            sample_rate=self.__render_settings.sample_rate))
    create_audioproc_response = editor_main_pb2.CreateProcessResponse()
    await self.__manager.call(
        'CREATE_AUDIOPROC_PROCESS', create_audioproc_request,
        create_audioproc_response)
    self.__audioproc_address = create_audioproc_response.address

    # Connect a client and subscribe to engine notifications.
    self.__audioproc_client = audioproc.AudioProcClient(
        self.__event_loop, self.__server, self.__urid_mapper)
    self.__audioproc_client.engine_notifications.add(
        self.__handle_engine_notification)
    await self.__audioproc_client.setup()
    await self.__audioproc_client.connect(self.__audioproc_address)

    # Rendering uses the offline 'renderer' backend writing into the
    # data stream set up by the caller.
    await self.__audioproc_client.create_realm(name='root', enable_player=True)
    await self.__audioproc_client.set_backend(
        'renderer',
        audioproc.BackendSettings(datastream_address=self.__datastream_address))

    self.__session_values = session_value_store.SessionValueStore(
        self.__event_loop, 'render')

    self.__player = player.Player(
        project=self.__project,
        event_loop=self.__event_loop,
        audioproc_client=self.__audioproc_client,
        session_values=self.__session_values,
        realm='root')
    await self.__player.setup()
def link_segments(
        self, data: clipboard_pb2.PianoRollSegments,
        time: audioproc.MusicalTime) -> List[PianoRollSegmentRef]:
    """Create refs at *time* pointing at segments already on the heap.

    Unlike paste_segments(), the segment contents are shared, not cloned.
    """
    segments_by_id = {
        segment.id: segment
        for segment in self.segment_heap}  # type: Dict[int, PianoRollSegment]

    refs = []  # type: List[PianoRollSegmentRef]
    for serialized_ref in data.segment_refs:
        assert serialized_ref.segment in segments_by_id
        # Preserve each ref's offset relative to the clipboard origin.
        offset = (
            audioproc.MusicalTime.from_proto(serialized_ref.time)
            - audioproc.MusicalTime(0, 1))
        ref = self._pool.create(
            PianoRollSegmentRef,
            time=time + offset,
            segment=segments_by_id[serialized_ref.segment])
        self.segments.append(ref)
        refs.append(ref)
    return refs
def paste_segments(
        self, data: clipboard_pb2.PianoRollSegments,
        time: audioproc.MusicalTime) -> List[PianoRollSegmentRef]:
    """Clone the clipboard segments and create refs to them at *time*.

    Unlike link_segments(), each segment's contents are deep-copied.
    """
    # Clone every serialized segment onto the heap, remembering the
    # mapping from its old id to the new object.
    segment_map = {}  # type: Dict[int, PianoRollSegment]
    for serialized_segment in data.segments:
        clone = down_cast(PianoRollSegment, self._pool.clone_tree(serialized_segment))
        self.segment_heap.append(clone)
        segment_map[serialized_segment.root] = clone

    refs = []  # type: List[PianoRollSegmentRef]
    for serialized_ref in data.segment_refs:
        # Preserve each ref's offset relative to the clipboard origin.
        offset = (
            audioproc.MusicalTime.from_proto(serialized_ref.time)
            - audioproc.MusicalTime(0, 1))
        ref = self._pool.create(
            PianoRollSegmentRef,
            time=time + offset,
            segment=segment_map[serialized_ref.segment])
        self.segments.append(ref)
        refs.append(ref)
    return refs
class Editor(
        object_list_manager.ObjectListManager[music.Track, TrackContainer],
        time_view_mixin.TimeViewMixin,
        ui_base.ProjectMixin,
        slots.SlotContainer,
        core.AutoCleanupMixin,
        QtWidgets.QWidget):
    """Main track-list editor widget.

    Manages one TrackContainer per visible project track, laying them out
    vertically with separators, and handles vertical scrolling, zooming,
    track resizing, interactive track reordering, and the current
    track/toolbox/tool/cursor chain.
    """

    maximumYOffsetChanged = QtCore.pyqtSignal(int)
    yOffsetChanged = QtCore.pyqtSignal(int)
    pageHeightChanged = QtCore.pyqtSignal(int)
    currentToolBoxChanged = QtCore.pyqtSignal(tools.ToolBox)
    currentTrackChanged = QtCore.pyqtSignal(object)

    # Slot triples: getter, setter, change signal.
    # A playback position of -1/1 means "no position".
    playbackPosition, setPlaybackPosition, playbackPositionChanged = slots.slot(
        audioproc.MusicalTime, 'playbackPosition', default=audioproc.MusicalTime(-1, 1))
    sidebarWidth, setSidebarWidth, sidebarWidthChanged = slots.slot(
        int, 'sidebarWidth', default=12)
    zoom, setZoom, zoomChanged = slots.slot(
        fractions.Fraction, 'zoom', default=fractions.Fraction(1, 1))

    # Zoom bounds, expressed as powers of the per-step zoom factors.
    MIN_ZOOM = fractions.Fraction(2, 3)**12
    MAX_ZOOM = fractions.Fraction(3, 2)**2

    def __init__(self, *, player_state: player_state_lib.PlayerState, **kwargs: Any) -> None:
        self.__player_state = player_state
        self.__current_tool_box = None  # type: tools.ToolBox
        self.__current_tool = None  # type: tools.ToolBase
        self.__y_offset = 0
        super().__init__(**kwargs)

        self.setMouseTracking(True)
        self.setFocusPolicy(Qt.StrongFocus)
        self.setMinimumWidth(50)
        self.setMinimumHeight(0)

        # Track-resize / track-move interaction state.
        self.__in_track_resize = False
        self.__moving_track = None  # type: TrackContainer
        self.__moving_track_pos = None  # type: int
        self.__moving_track_insert_index = None  # type: int

        # Timer-driven auto scrolling while dragging near the edges (50 Hz).
        self.__auto_scroll_dy = 0
        self.__auto_scroll_timer = QtCore.QTimer(self)
        self.__auto_scroll_timer.setInterval(1000 // 50)
        self.__auto_scroll_timer.timeout.connect(self.__autoScrollTick)

        # One wrapper (TrackContainer) per project node that is a Track.
        self.initObjectList(self.project, 'nodes')

        self.__content_height = 0
        self.__updateTracks()
        self.objectListChanged.connect(self.__updateTracks)
        self.sidebarWidthChanged.connect(self.__updateTracks)

        self.__current_track = None  # type: music.Track
        # Select the first track's toolbox initially (only idx == 0 acts).
        for idx, container in enumerate(self.objectWrappers()):
            if idx == 0:
                self.__onCurrentTrackChanged(container.track)
        self.currentTrackChanged.connect(self.__onCurrentTrackChanged)

        self.__player_state.currentTimeChanged.connect(self.setPlaybackPosition)

        # Restore the per-project zoom level and persist future changes.
        self.setZoom(self.get_session_value(
            'tracklist:%s:zoom' % self.project.id, fractions.Fraction(1, 1)))
        self.zoomChanged.connect(functools.partial(
            self.set_session_value, 'tracklist:%s:zoom' % self.project.id))
        self.zoomChanged.connect(lambda _: self.__updateTracks())

        # Horizontal scale shortcuts.
        # NOTE(review): "increase" is bound to factor 2/3 (and ctrl+left),
        # "decrease" to 3/2 — presumably intentional direction naming;
        # verify against the intended UX.
        self.__increase_scale_x_action = QtWidgets.QAction(self)
        self.__increase_scale_x_action.setShortcut("ctrl+left")
        self.__increase_scale_x_action.setShortcutContext(Qt.WindowShortcut)
        self.__increase_scale_x_action.triggered.connect(
            functools.partial(self.__setScaleX, fractions.Fraction(2, 3)))
        self.addAction(self.__increase_scale_x_action)

        self.__decrease_scale_x_action = QtWidgets.QAction(self)
        self.__decrease_scale_x_action.setShortcut("ctrl+right")
        self.__decrease_scale_x_action.setShortcutContext(Qt.WindowShortcut)
        self.__decrease_scale_x_action.triggered.connect(
            functools.partial(self.__setScaleX, fractions.Fraction(3, 2)))
        self.addAction(self.__decrease_scale_x_action)

        # Zoom shortcuts (ctrl +/-, ctrl+0 to reset).
        self.__increase_zoom_action = QtWidgets.QAction(self)
        self.__increase_zoom_action.setShortcut("ctrl++")
        self.__increase_zoom_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
        self.__increase_zoom_action.triggered.connect(
            functools.partial(self.__scaleZoom, fractions.Fraction(3, 2)))
        self.addAction(self.__increase_zoom_action)

        self.__decrease_zoom_action = QtWidgets.QAction(self)
        self.__decrease_zoom_action.setShortcut("ctrl+-")
        self.__decrease_zoom_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
        self.__decrease_zoom_action.triggered.connect(
            functools.partial(self.__scaleZoom, fractions.Fraction(2, 3)))
        self.addAction(self.__decrease_zoom_action)

        self.__reset_zoom_action = QtWidgets.QAction(self)
        self.__reset_zoom_action.setShortcut("ctrl+0")
        self.__reset_zoom_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
        self.__reset_zoom_action.triggered.connect(self.__resetZoom)
        self.addAction(self.__reset_zoom_action)

    def __setScaleX(self, factor: fractions.Fraction) -> None:
        """Scale the horizontal time axis, keeping the view center stable."""
        new_scale_x = self.scaleX() * factor
        new_scale_x = max(fractions.Fraction(5, 1), new_scale_x)
        new_scale_x = min(fractions.Fraction(10000, 1), new_scale_x)

        # Time currently at the center of the viewport.
        center_time = max(
            0, self.width() // 2 - self.leftMargin() + self.xOffset()) / self.scaleX()

        self.setScaleX(new_scale_x)

        # Re-center the viewport on the same time.
        center_x = self.leftMargin() + int(self.scaleX() * center_time)
        self.setXOffset(max(0, center_x - self.width() // 2))

    def __setZoom(self, zoom: fractions.Fraction) -> None:
        """Set the vertical zoom, keeping the view center stable."""
        if zoom == self.zoom():
            return

        center_y = (self.yOffset() + self.height() // 2) / self.zoom()
        self.setZoom(zoom)
        self.setYOffset(
            max(0, min(self.maximumYOffset(),
                       int(center_y * self.zoom()) - self.height() // 2)))

    def __scaleZoom(self, factor: fractions.Fraction) -> None:
        """Zoom both axes by *factor*, clamped to MIN_ZOOM..MAX_ZOOM."""
        new_zoom = self.zoom() * factor
        new_zoom = max(self.MIN_ZOOM, new_zoom)
        new_zoom = min(self.MAX_ZOOM, new_zoom)
        self.__setZoom(new_zoom)
        self.__setScaleX(factor)

    def __resetZoom(self) -> None:
        """Return both zoom and horizontal scale to 1:1."""
        self.__setScaleX(1 / self.zoom())
        self.__setZoom(fractions.Fraction(1, 1))

    def __autoScrollTick(self) -> None:
        # One scroll step per timer tick while a drag is near the edge.
        self.setYOffset(
            max(0, min(self.maximumYOffset(), self.yOffset() + self.__auto_scroll_dy)))

    def setAutoScroll(self, dy: int) -> None:
        """Enable (dy != 0) or disable (dy == 0) continuous vertical scrolling."""
        self.__auto_scroll_dy = dy
        if self.__auto_scroll_dy and self.isVisible():
            self.__auto_scroll_timer.start()
        else:
            self.__auto_scroll_timer.stop()

    def currentTrack(self) -> music.Track:
        return self.__current_track

    def setCurrentTrack(self, track: music.Track) -> None:
        """Change the current track, scrolling it into view if necessary."""
        if track is self.__current_track:
            return

        if self.__current_track is not None:
            container = self.objectWrapperById(self.__current_track.id)
            container.setIsCurrent(False)
            self.__current_track = None

        if track is not None:
            container = self.objectWrapperById(track.id)
            container.setIsCurrent(True)
            self.__current_track = track

            if container.track.visible and self.isVisible():
                # Scroll so that the track editor is fully visible.
                track_y = container.track_editor.y() + self.yOffset()
                yoffset = self.yOffset()
                if track_y + container.track_editor.height() > yoffset + self.height():
                    yoffset = track_y + container.track_editor.height() - self.height()
                if track_y < yoffset:
                    yoffset = track_y
                self.setYOffset(yoffset)

        self.currentTrackChanged.emit(self.__current_track)

    def _filterObject(self, obj: music.ObjectBase) -> bool:
        # Only Track nodes from project.nodes get a wrapper.
        return isinstance(obj, music.Track)

    def _createObjectWrapper(self, track: music.Track) -> TrackContainer:
        container = TrackContainer(
            editor=self, track=track, player_state=self.__player_state,
            context=self.context)
        container.visibilityChanged.connect(lambda _: self.__updateTracks())
        return container

    def _deleteObjectWrapper(self, container: TrackContainer) -> None:
        if container.track is self.__current_track:
            self.setCurrentTrack(None)
        container.cleanup()

    def __updateTracks(self) -> None:
        """Recompute the vertical layout of all visible track containers."""
        separator_height = max(1, min(8, int(self.zoom() * 4)))

        # First pass: collect visible tracks with their zoomed heights and
        # compute the total content height.
        tracks = []  # type: List[Tuple[TrackContainer, int]]
        moving_track_height = 0
        content_height = 0
        for container in self.objectWrappers():
            if not container.track.visible:
                container.hide()
                continue

            if not tracks:
                content_height += separator_height
            track_height = max(5, int(self.zoom() * container.height))
            track_height = min(container.track_editor.maximumHeight(), track_height)
            track_height = max(container.track_editor.minimumHeight(), track_height)
            if container is self.__moving_track:
                moving_track_height = track_height
            tracks.append((container, track_height))
            content_height += track_height + separator_height

        # While resizing, never shrink the content so the drag stays stable.
        if self.__in_track_resize:
            content_height = max(content_height, self.__content_height)

        if content_height != self.__content_height:
            self.__content_height = content_height
            self.maximumYOffsetChanged.emit(
                max(0, self.__content_height - self.height()))

        # Vertically center the tracks when they don't fill the viewport.
        if self.__content_height >= self.height():
            y = -self.yOffset()
        else:
            y = (self.height() - self.__content_height) // 2
        y += separator_height

        # Second pass: position the containers. The track being dragged
        # floats at the pointer position; a gap is left where it would be
        # inserted, and the insertion index is recorded for endTrackMove().
        show_top_sep = True
        moving_track_inserted = False
        for container, track_height in tracks:
            if container is self.__moving_track:
                container.setTrackGeometry(
                    QtCore.QRect(0, self.__moving_track_pos, self.width(), track_height),
                    self.sidebarWidth(), separator_height, True)
                show_top_sep = True
            else:
                if (not moving_track_inserted
                        and self.__moving_track is not None
                        and self.__moving_track_pos < y + track_height // 2):
                    y += moving_track_height + separator_height
                    if container.track.index > self.__moving_track.track.index:
                        self.__moving_track_insert_index = container.track.index - 1
                    else:
                        self.__moving_track_insert_index = container.track.index
                    moving_track_inserted = True

                container.setTrackGeometry(
                    QtCore.QRect(0, y, self.width(), track_height),
                    self.sidebarWidth(), separator_height, show_top_sep)
                show_top_sep = False
                y += track_height + separator_height

        # Dragged past the last track: insert at the end.
        if not moving_track_inserted and self.__moving_track is not None:
            self.__moving_track_insert_index = len(self.project.nodes) - 1

        if self.__moving_track is not None:
            self.__moving_track.raise_()

    def beginTrackResize(self) -> None:
        self.__in_track_resize = True

    def endTrackResize(self) -> None:
        self.__in_track_resize = False
        self.__updateTracks()

    def setTrackHeight(self, container: TrackContainer, height: int) -> None:
        """Set a track's height in screen pixels (stored un-zoomed)."""
        h = fractions.Fraction(height) / self.zoom()
        h = min(container.track_editor.maximumHeight(), h)
        h = max(container.track_editor.minimumHeight(), h)
        if h != container.height:
            container.setHeight(h)
            self.__updateTracks()

    def beginTrackMove(self, container: TrackContainer) -> None:
        """Start interactively reordering *container*."""
        self.__moving_track = container
        self.__moving_track_pos = container.track_editor.pos().y()
        self.__moving_track_insert_index = None
        self.__updateTracks()

    def endTrackMove(self) -> None:
        """Finish a track drag, committing the reorder as a mutation."""
        assert self.__moving_track is not None
        # NOTE(review): when no insert index was computed, __moving_track
        # is not reset here — presumably __updateTracks() always sets one;
        # verify.
        if self.__moving_track_insert_index is not None:
            moving_track = self.__moving_track
            new_index = self.__moving_track_insert_index
            self.__moving_track = None
            self.__moving_track_insert_index = None
            with self.project.apply_mutations(
                    'Move track "%s"' % moving_track.track.name):
                self.project.nodes.move(moving_track.track.index, new_index)
        self.__updateTracks()

    def moveTrack(self, pos: int) -> None:
        """Update the floating position of the track being dragged."""
        self.__moving_track_pos = pos
        self.__updateTracks()

    def __onCurrentTrackChanged(self, track: music.Track) -> None:
        # The current toolbox follows the current track.
        if track is not None:
            container = self.objectWrapperById(track.id)
            self.setCurrentToolBox(container.track_editor.toolBox())
        else:
            self.setCurrentToolBox(None)

    def currentToolBox(self) -> tools.ToolBox:
        return self.__current_tool_box

    def setCurrentToolBox(self, toolbox: tools.ToolBox) -> None:
        """Switch the active toolbox, rewiring the current-tool signal."""
        if self.__current_tool_box is toolbox:
            return
        logger.debug("Switching to tool box %s", type(toolbox).__name__)

        if self.__current_tool_box is not None:
            self.__current_tool_box.currentToolChanged.disconnect(
                self.__onCurrentToolChanged)
            self.__onCurrentToolChanged(None)
            self.__current_tool_box = None

        if toolbox is not None:
            self.__current_tool_box = toolbox
            self.__onCurrentToolChanged(self.__current_tool_box.currentTool())
            self.__current_tool_box.currentToolChanged.connect(
                self.__onCurrentToolChanged)

        self.currentToolBoxChanged.emit(self.__current_tool_box)

    def __onCurrentToolChanged(self, tool: tools.ToolBase) -> None:
        """Switch the active tool, rewiring the cursor-change signal."""
        if tool is self.__current_tool:
            return
        logger.debug("Current tool: %s", tool)

        if self.__current_tool is not None:
            self.__current_tool.cursorChanged.disconnect(self.__onToolCursorChanged)
            self.__onToolCursorChanged(None)
            self.__current_tool = None

        if tool is not None:
            self.__current_tool = tool
            self.__onToolCursorChanged(self.__current_tool.cursor())
            self.__current_tool.cursorChanged.connect(self.__onToolCursorChanged)

    def __onToolCursorChanged(self, cursor: QtGui.QCursor) -> None:
        logger.debug("Cursor changed: %s", cursor)
        if cursor is not None:
            self.setCursor(cursor)
        else:
            self.setCursor(QtGui.QCursor(Qt.ArrowCursor))

    def maximumYOffset(self) -> int:
        return max(0, self.__content_height - self.height())

    def pageHeight(self) -> int:
        return self.height()

    def yOffset(self) -> int:
        return self.__y_offset

    def setYOffset(self, offset: int) -> None:
        if offset == self.__y_offset:
            return
        self.__y_offset = offset
        self.yOffsetChanged.emit(self.__y_offset)
        self.__updateTracks()

    def offset(self) -> QtCore.QPoint:
        return QtCore.QPoint(self.xOffset(), self.__y_offset)

    def resizeEvent(self, evt: QtGui.QResizeEvent) -> None:
        super().resizeEvent(evt)
        self.maximumYOffsetChanged.emit(
            max(0, self.__content_height - self.height()))
        self.pageHeightChanged.emit(self.height())
        self.__updateTracks()

    def wheelEvent(self, evt: QtGui.QWheelEvent) -> None:
        # Shift+wheel scrolls horizontally, Ctrl+wheel vertically;
        # anything else is left to the superclass.
        if evt.modifiers() == Qt.ShiftModifier:
            offset = self.xOffset()
            offset -= 2 * evt.angleDelta().y()
            offset = min(self.maximumXOffset(), offset)
            offset = max(0, offset)
            self.setXOffset(offset)
            evt.accept()
            return
        elif evt.modifiers() == Qt.ControlModifier:
            offset = self.yOffset()
            offset -= evt.angleDelta().y()
            offset = min(self.maximumYOffset(), offset)
            offset = max(0, offset)
            self.setYOffset(offset)
            evt.accept()
            return
        super().wheelEvent(evt)
class BaseTrackEditor(time_view_mixin.TimeViewMixin, ui_base.ProjectMixin,
                      core.AutoCleanupMixin, slots.SlotContainer, QtWidgets.QWidget):
    """Base widget for per-track editors in the timeline.

    Owns a tool box (created by the subclass via createToolBox()) and forwards
    mouse/key/wheel events to it, translating coordinates by the scroll offset.
    Subclasses implement _paint() to draw their content.
    """

    # Emitted with the widget's new size from resizeEvent().
    sizeChanged = QtCore.pyqtSignal(QtCore.QSize)
    # Emitted whenever the tool box's current tool type changes.
    currentToolChanged = QtCore.pyqtSignal(tools.ToolType)
    # Playback cursor position; default (-1, 1) marks "no playback position".
    playbackPosition, setPlaybackPosition, playbackPositionChanged = slots.slot(
        audioproc.MusicalTime, 'playbackPosition', default=audioproc.MusicalTime(-1, 1))
    # Whether this editor is the currently selected one (affects background color).
    isCurrent, setIsCurrent, isCurrentChanged = slots.slot(bool, 'isCurrent', default=False)
    defaultHeight, setDefaultHeight, defaultHeightChanged = slots.slot(
        int, 'defaultHeight', default=200)
    zoom, setZoom, zoomChanged = slots.slot(
        fractions.Fraction, 'zoom', default=fractions.Fraction(1, 1))

    def __init__(
            self, *,
            track: music.Track,
            player_state: player_state_lib.PlayerState,
            editor: 'editor_lib.Editor',
            **kwargs: Any
    ) -> None:
        # Must exist before super().__init__, which may trigger setXOffset().
        self.__auto_scroll = True

        super().__init__(parent=editor, **kwargs)

        self.setMouseTracking(True)
        self.setMinimumHeight(10)
        self.setMaximumHeight(1000)

        self.__track = track
        self.__player_state = player_state
        self.__editor = editor
        # NOTE(review): self.__zoom appears unused here given the 'zoom' slot
        # above — confirm before removing.
        self.__zoom = fractions.Fraction(1, 1)

        self._bg_color = QtGui.QColor(255, 255, 255)
        self.isCurrentChanged.connect(self.__isCurrentChanged)
        # Apply the initial highlight state.
        self.__isCurrentChanged(self.isCurrent())

        # Any change of scale or zoom invalidates size and paint caches.
        self.scaleXChanged.connect(lambda _: self.__scaleChanged())
        self.zoomChanged.connect(lambda _: self.__scaleChanged())
        self.__scaleChanged()

        self.__toolbox = self.createToolBox()
        self.currentToolChanged.emit(self.__toolbox.currentToolType())
        self.__toolbox.toolTypeChanged.connect(self.currentToolChanged.emit)

    @property
    def track(self) -> music.Track:
        """The track this editor displays."""
        return self.__track

    def setAutoScroll(self, auto_scroll: bool) -> None:
        """Enable/disable pixel-scrolling the widget when the x offset changes."""
        self.__auto_scroll = auto_scroll

    def setXOffset(self, offset: int) -> int:
        dx = super().setXOffset(offset)
        if self.__auto_scroll:
            # Scroll the already-rendered pixels instead of a full repaint.
            self.scroll(dx, 0)
        return dx

    def __scaleChanged(self) -> None:
        self.updateSize()
        self.purgePaintCaches()
        self.update()

    def offset(self) -> QtCore.QPoint:
        """Scroll offset; track editors only scroll horizontally."""
        return QtCore.QPoint(self.xOffset(), 0)

    def updateSize(self) -> None:
        """Recompute the editor's size after a scale change; default is a no-op."""
        pass

    def __isCurrentChanged(self, is_current: bool) -> None:
        # Current editor gets a light blue background, others plain white.
        if is_current:
            self._bg_color = QtGui.QColor(240, 240, 255)
        else:
            self._bg_color = QtGui.QColor(255, 255, 255)
        self.update()

    def purgePaintCaches(self) -> None:
        """Drop cached paint artifacts; default is a no-op for subclasses to override."""
        pass

    def createToolBox(self) -> tools.ToolBox:
        """Create this editor's tool box; must be implemented by subclasses."""
        raise NotImplementedError

    def toolBox(self) -> tools.ToolBox:
        return self.__toolbox

    def currentTool(self) -> tools.ToolBase:
        return self.__toolbox.currentTool()

    def currentToolType(self) -> tools.ToolType:
        return self.__toolbox.currentToolType()

    def setCurrentToolType(self, tool: tools.ToolType) -> None:
        self.__toolbox.setCurrentToolType(tool)

    def playerState(self) -> player_state_lib.PlayerState:
        return self.__player_state

    def resizeEvent(self, evt: QtGui.QResizeEvent) -> None:
        self.sizeChanged.emit(evt.size())
        super().resizeEvent(evt)

    def _paint(self, painter: QtGui.QPainter, rect: QtCore.QRect) -> None:
        """Fallback painter: renders a 'not implemented' notice; subclasses override."""
        painter.setRenderHints(
            QtGui.QPainter.Antialiasing | QtGui.QPainter.TextAntialiasing)
        font = QtGui.QFont("Arial")
        font.setPixelSize(14)
        painter.setFont(font)
        pen = QtGui.QPen()
        pen.setColor(Qt.black)
        painter.setPen(pen)
        painter.drawText(
            QtCore.QRect(0, 0, self.width(), self.height()),
            Qt.AlignCenter,
            "%s.paintEvent() not implemented" % type(self).__name__)

    def paintEvent(self, evt: QtGui.QPaintEvent) -> None:
        painter = QtGui.QPainter(self)
        try:
            painter.fillRect(evt.rect(), self._bg_color)
            # Shift into content coordinates; pass _paint the rect in that space.
            painter.translate(-self.xOffset(), 0)
            self._paint(painter, evt.rect().translated(self.xOffset(), 0))
        finally:
            painter.end()

    def _makeMouseEvent(self, evt: QtGui.QMouseEvent) -> QtGui.QMouseEvent:
        # Rebuild the event with the position translated by the scroll offset,
        # so tools work in content coordinates.
        return QtGui.QMouseEvent(
            evt.type(),
            evt.localPos() + self.offset(),
            evt.windowPos(),
            evt.screenPos(),
            evt.button(),
            evt.buttons(),
            evt.modifiers())

    def contextMenuEvent(self, evt: QtGui.QContextMenuEvent) -> None:
        evt = QtGui.QContextMenuEvent(
            evt.reason(),
            evt.pos() + self.offset(),
            evt.globalPos(),
            evt.modifiers())
        self.__toolbox.contextMenuEvent(evt)

    def mouseMoveEvent(self, evt: QtGui.QMouseEvent) -> None:
        self.__toolbox.mouseMoveEvent(self._makeMouseEvent(evt))

    def mousePressEvent(self, evt: QtGui.QMouseEvent) -> None:
        # Clicking an editor makes its track the current one.
        self.__editor.setCurrentTrack(self.track)
        self.__toolbox.mousePressEvent(self._makeMouseEvent(evt))

    def mouseReleaseEvent(self, evt: QtGui.QMouseEvent) -> None:
        self.__toolbox.mouseReleaseEvent(self._makeMouseEvent(evt))

    def mouseDoubleClickEvent(self, evt: QtGui.QMouseEvent) -> None:
        self.__toolbox.mouseDoubleClickEvent(self._makeMouseEvent(evt))

    def wheelEvent(self, evt: QtGui.QWheelEvent) -> None:
        # Rebuild with offset-translated position (same idea as _makeMouseEvent).
        evt = QtGui.QWheelEvent(
            evt.pos() + self.offset(),
            evt.globalPos(),
            evt.pixelDelta(),
            evt.angleDelta(),
            0,
            Qt.Horizontal,
            evt.buttons(),
            evt.modifiers(),
            evt.phase(),
            evt.source())
        self.__toolbox.wheelEvent(evt)

    def keyPressEvent(self, evt: QtGui.QKeyEvent) -> None:
        self.__toolbox.keyPressEvent(evt)

    def keyReleaseEvent(self, evt: QtGui.QKeyEvent) -> None:
        self.__toolbox.keyReleaseEvent(evt)
def xToTime(self, x: int) -> audioproc.MusicalTime: x -= self.leftMargin() + self.additionalXOffset() if x <= 0: return audioproc.MusicalTime(0, 1) return audioproc.MusicalTime(x / self.scaleX())
    def renderTimeGrid(
            self, painter: QtGui.QPainter, rect: QtCore.QRect, *, show_numbers: bool = False
    ) -> None:
        """Paint vertical grid lines for the visible time range.

        Lines are shaded by metric significance (whole notes darkest, finer
        subdivisions progressively lighter). The project start and end are
        drawn as 2px black bars. With show_numbers, a 1-based number is drawn
        at each whole-note line.
        """
        grid_step = self.gridStep()

        # Start at the first grid tick at or before the left edge of rect.
        tick_num = int(self.xToTime(rect.x()) / grid_step)
        tick_time = (grid_step * tick_num).as_time()
        while tick_time < self.projectEndTime():
            x = self.timeToX(tick_time)
            if x > rect.right():
                break

            if tick_num == 0:
                # Project start: thick black bar.
                painter.fillRect(x, rect.y(), 2, rect.height(), Qt.black)
            else:
                # Shade by the coarsest subdivision this tick falls on.
                if tick_time % audioproc.MusicalTime(1, 1) == audioproc.MusicalTime(0, 1):
                    c = QtGui.QColor(0, 0, 0)
                elif tick_time % audioproc.MusicalTime(1, 4) == audioproc.MusicalTime(0, 1):
                    c = QtGui.QColor(160, 160, 160)
                elif tick_time % audioproc.MusicalTime(1, 8) == audioproc.MusicalTime(0, 1):
                    c = QtGui.QColor(185, 185, 185)
                elif tick_time % audioproc.MusicalTime(1, 16) == audioproc.MusicalTime(0, 1):
                    c = QtGui.QColor(210, 210, 210)
                elif tick_time % audioproc.MusicalTime(1, 32) == audioproc.MusicalTime(0, 1):
                    c = QtGui.QColor(225, 225, 225)
                else:
                    c = QtGui.QColor(240, 240, 240)

                painter.fillRect(x, rect.y(), 1, rect.height(), c)

            if (show_numbers
                    and tick_time % audioproc.MusicalTime(1, 1) == audioproc.MusicalTime(0, 1)):
                # Label whole-note lines with a 1-based count of quarter notes.
                beat_num = int(tick_time / audioproc.MusicalTime(1, 4))
                painter.setPen(Qt.black)
                painter.drawText(x + 5, 12, '%d' % (beat_num + 1))

            tick_time += grid_step
            tick_num += 1

        # Project end: thick black bar (drawn even when past rect.right()).
        x = self.timeToX(self.projectEndTime())
        painter.fillRect(x, rect.y(), 2, rect.height(), Qt.black)
def projectEndTime(self) -> audioproc.MusicalTime: return audioproc.MusicalTime() + self.project.duration