class Sequencer: def __init__(self, logger=None): if logger is None: logger = logging.getLogger("Sequencer") self.logger = logger self._params = {"tracks": {}, "buses": {}, "reference_colour": None, "max_colour_distance": 1.0} self._sounds = {} self._tracks = collections.OrderedDict() self._buses = collections.OrderedDict() self._groups = [] self._scheduler = Scheduler() self.control_panels = set() self._setup_colour_receiver() SynthController.kill_potential_engine_from_previous_process() self._synth = SynthController() self._synth.launch_engine() self._synth.connect(self._synth.lang_port) self._setup_websocket_server() def _setup_colour_receiver(self): self._current_colour = None self._colour_receiver = ColourReceiver() self._colour_receiver.received_colour = self.set_current_colour self._colour_receiver.start() def set_current_colour(self, rgb): self._current_colour = rgb if self._params["reference_colour"] is not None: self._estimate_age() self._set_age_dependent_rates() for control_panel in self.control_panels: control_panel.send_params() def _estimate_age(self): distance_to_reference = self._colour_distance( self._current_colour, self._params["reference_colour"]) age = distance_to_reference / self._params["max_colour_distance"] age = min(age, 1.0) self._estimated_age = age # self.log("estimated age: %.2f" % self._estimated_age) def _set_age_dependent_rates(self): for track in self._tracks.values(): params = self._params["tracks"][track["name"]] if params["age_type"] is not None: params["rate"] = self._age_dependent_rate(params["age_type"]) # self.log("rate %.1f for age_type=%s, track %s" % ( # params["rate"], params["age_type"], track["name"])) self._on_track_params_changed(track) def _age_dependent_rate(self, age_type): if age_type == "decay": return RATE_MIN + (RATE_MAX - RATE_MIN) * (1 - self._estimated_age) else: return RATE_MIN + (RATE_MAX - RATE_MIN) * self._estimated_age def _colour_distance(self, colour1, colour2): diffs = [colour1[n] - colour2[n] for 
n in range(3)] return math.sqrt(sum([diff*diff for diff in diffs])) def calibrate_colour(self): self.log("calibrate_colour %s" % self._current_colour) if self._current_colour is not None: self._params["reference_colour"] = self._current_colour def get_tracks(self): return self._tracks def get_buses(self): return self._buses def get_params(self): return self._params def play(self, sound, looped=0): track_name = self._sounds[sound]["track_name"] track = self._tracks[track_name] params = self._params["tracks"][track_name] self._synth.play( sound, params["pan"], params["fade"], params["gain"] + params["gain_adjustment"], params["rate"], looped, params["send"], params["send_gain"] + params["gain_adjustment"], params["comp_threshold"]) def schedule(self, action, delay): self._scheduler.schedule(action, delay) def is_playing(self, sound): return self._synth.is_playing(sound) def load_sounds(self, pattern): for sound in glob.glob(pattern): self.load_sound(sound) def load_sound(self, sound): self._synth.load_sound(sound) self._sounds[sound] = {} def add_track(self, name, pattern, params_overrides): params = copy.copy(DEFAULT_SOUND_PARAMS) params.update(params_overrides) sounds = glob.glob(pattern) track = {"name": name, "sounds": sounds} self._params["tracks"][name] = params for sound in sounds: self._sounds[sound]["track_name"] = name self._tracks[name] = track def add_group(self, pattern, params): group = Group(self, params) for sound in glob.glob(pattern): group.add(sound) self._groups.append(group) def add_bus(self, name): self._synth.add_bus(name) self._buses[name] = {"name": name} self._params["buses"][name] = DEFAULT_BUS_PARAMS def set_bus_params(self, bus, new_params): params = self._params["buses"][bus] params.update(new_params) self._synth.set_bus_params( bus, params["reverb_mix"], params["reverb_room"], params["reverb_damp"]) def try_to_load_params(self): if os.path.exists(PARAMS_FILENAME): self.load_params() def run_main_loop(self): while True: self._process() 
time.sleep(.1) def _process(self): self._synth.process() self._scheduler.run_scheduled_events() self._colour_receiver.serve() for group in self._groups: group.process() def _setup_websocket_server(self): self._server = WebsocketServer(ControlPanelHandler, {"sequencer": self}) server_thread = threading.Thread(target=self._server.start) server_thread.daemon = True server_thread.start() def set_global_param(self, param, value): self._params[param] = value def set_track_param(self, track_name, param, value): track = self._tracks[track_name] params = self._params["tracks"][track_name] params[param] = value self._on_track_params_changed(track) def _on_tracks_params_changed(self): for track in self._tracks.values(): self._on_track_params_changed(track) def _on_track_params_changed(self, track): params = self._params["tracks"][track["name"]] for sound in track["sounds"]: if self.is_playing(sound): self._synth.set_param(sound, "gain", params["gain"] + params["gain_adjustment"]) self._synth.set_param(sound, "send_gain", params["send_gain"] + params["gain_adjustment"]) self._synth.set_param(sound, "rate", params["rate"]) def save_params(self): f = open(PARAMS_FILENAME, "w") cPickle.dump(self._params, f) f.close() def load_params(self): f = open(PARAMS_FILENAME, "r") self._params = cPickle.load(f) self._on_tracks_params_changed() f.close() def log(self, string): print string self.logger.debug(string)
class Visualizer: def __init__(self, args, file_class=File, chunk_class=Chunk, segment_class=Segment, peer_class=Peer): if hasattr(self, "_initialized") and self._initialized: return self.file_class = file_class self.chunk_class = chunk_class self.segment_class = segment_class self.peer_class = peer_class self.args = args self.sync = args.sync self.width = args.width self.height = args.height self.margin = args.margin self.show_fps = args.show_fps self.export = args.export self.capture_message_log = args.capture_message_log self.play_message_log = args.play_message_log self.waveform_gain = args.waveform_gain self._standalone = args.standalone self._target_aspect_ratio = self._get_aspect_ratio_from_args() self.logger = logging.getLogger("visualizer") self.reset() self._frame_count = 0 self.exiting = False self.time_increment = 0 self.stopwatch = Stopwatch() self._synth_instance = None self._synth_port = None self._synced = False self._layers = [] self._warned_about_missing_pan_segment = False self.gl_display_mode = GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH self._accum_enabled = False self._3d_enabled = False self.fovy = 45 self.near = 0.1 self.far = 100.0 self._fullscreen = False self._text_renderer_class = getattr(text_renderer_module, TEXT_RENDERERS[args.text_renderer]) if args.camera_script: self._camera_script = CameraScriptInterpreter(args.camera_script) else: self._camera_script = None if self.show_fps: self.fps_history = collections.deque(maxlen=10) self.previous_shown_fps_time = None if not args.standalone: if args.port: port = args.port else: port = self._get_orchestra_port() self.orchestra_host = args.host self.orchestra_port = port self.setup_osc() self.orchestra.register(self.server.port) self._screen_dumper = Exporter(".", self.margin, self.margin, self.width, self.height) if self.export: self.export_fps = args.export_fps import shutil if args.export_dir: export_dir = args.export_dir elif hasattr(args, "sessiondir"): export_dir = "%s/rendered_%s" % 
(args.sessiondir, self.__class__.__name__) else: export_dir = "export" if os.path.exists(export_dir): shutil.rmtree(export_dir) os.mkdir(export_dir) self.exporter = Exporter(export_dir, self.margin, self.margin, self.width, self.height) if self.play_message_log: self._message_log_reader = MessageLogReader(self.play_message_log) if self.capture_message_log: self._message_log_writer = MessageLogWriter(self.capture_message_log) self._audio_capture_start_time = None self._initialized = True def _get_aspect_ratio_from_args(self): w, h = map(float, self.args.aspect.split(":")) return w / h def _get_orchestra_port(self): if self.args.host == "localhost": return self._read_port_from_disk() else: return self._read_port_from_network_share() def _read_port_from_disk(self): self._read_port_from_file("server_port.txt") def _read_port_from_file(self, filename): f = open(filename, "r") line = f.read() port = int(line) f.close() return port def _read_port_from_network_share(self): if platform.system() == "Linux": return self._read_port_with_unix_smbclient() elif platform.system() == "Windows": return self._read_port_via_windows_samba_access() else: raise Exception("don't know how to handle your OS (%s)" % platform.system()) def _read_port_with_unix_smbclient(self): subprocess.call( 'smbclient -N \\\\\\\\%s\\\\TorrentialForms -c "get server_port.txt server_remote_port.txt"' % self.args.host, shell=True) return self._read_port_from_file("server_remote_port.txt") def _read_port_via_windows_samba_access(self): return self._read_port_from_file( '\\\\%s\\TorrentialForms\\server_port.txt' % self.args.host) def reset(self): self.files = {} self.peers = {} self.peers_by_addr = {} self._segments_by_id = {} self.torrent_length = 0 self.torrent_title = "" self.torrent_download_completion_time = None self.num_segments = 0 self.num_received_segments = 0 self._notified_finished = False def enable_3d(self): self._3d_enabled = True def run(self): self.window_width = self.width + self.margin*2 
self.window_height = self.height + self.margin*2 glutInit(sys.argv) if self.args.left is None: self._left = (glutGet(GLUT_SCREEN_WIDTH) - self.window_width) / 2 else: self._left = self.args.left if self.args.top is None: self._top = (glutGet(GLUT_SCREEN_HEIGHT) - self.window_height) / 2 else: self._top = self.args.top glutInitDisplayMode(self.gl_display_mode) glutInitWindowSize(self.window_width, self.window_height) self._non_fullscreen_window = glutCreateWindow("") glutDisplayFunc(self.DrawGLScene) glutIdleFunc(self.DrawGLScene) glutReshapeFunc(self.ReSizeGLScene) glutKeyboardFunc(self.keyPressed) self.InitGL() glutPositionWindow(self._left, self._top) if self.args.fullscreen: self._open_fullscreen_window() self._fullscreen = True self.ReSizeGLScene(self.window_width, self.window_height) glutMainLoop() def _open_fullscreen_window(self): glutGameModeString("%dx%d:32@75" % (self.window_width, self.window_height)) glutEnterGameMode() glutSetCursor(GLUT_CURSOR_NONE) glutDisplayFunc(self.DrawGLScene) glutIdleFunc(self.DrawGLScene) glutReshapeFunc(self.ReSizeGLScene) glutKeyboardFunc(self.keyPressed) self.InitGL() glutPositionWindow(self._left, self._top) def handle_torrent_message(self, num_files, download_duration, total_size, num_chunks, num_segments, encoded_torrent_title): self.num_files = num_files self.download_duration = download_duration self.total_size = total_size self.num_segments = num_segments self.torrent_title = encoded_torrent_title.decode("unicode_escape") def handle_file_message(self, filenum, offset, length): f = self.files[filenum] = self.file_class(self, filenum, offset, length) self.logger.debug("added file %s" % f) self.torrent_length += length self.added_file(f) if len(self.files) == self.num_files: self.logger.debug("added all files") self.added_all_files() def handle_chunk_message(self, chunk_id, torrent_position, byte_size, filenum, peer_id, t): if filenum in self.files: f = self.files[filenum] peer = self.peers[peer_id] begin = 
torrent_position - f.offset end = begin + byte_size chunk = self.chunk_class( chunk_id, begin, end, byte_size, filenum, f, peer, t, self.current_time(), self) self.files[filenum].add_chunk(chunk) else: print "ignoring chunk from undeclared file %s" % filenum def handle_segment_message(self, segment_id, torrent_position, byte_size, filenum, peer_id, t, duration): if filenum in self.files: f = self.files[filenum] peer = self.peers[peer_id] begin = torrent_position - f.offset end = begin + byte_size segment = self.segment_class( segment_id, begin, end, byte_size, filenum, f, peer, t, duration, self.current_time(), self) self._segments_by_id[segment_id] = segment self.add_segment(segment) else: print "ignoring segment from undeclared file %s" % filenum def handle_peer_message(self, peer_id, addr, bearing, pan, location): peer = self.peer_class(self, addr, bearing, pan, location) self.peers[peer_id] = peer self.peers_by_addr[addr] = peer def add_segment(self, segment): f = self.files[segment.filenum] segment.f = f segment.pan = 0.5 f.add_segment(segment) self.pan_segment(segment) segment.peer.add_segment(segment) self.num_received_segments += 1 def added_file(self, f): pass def added_all_files(self): pass def pan_segment(self, segment): if not self._warned_about_missing_pan_segment: print "WARNING: pan_segment undefined in visualizer. Orchestra and synth now control panning." 
self._warned_about_missing_pan_segment = True def handle_shutdown(self): self.exiting = True def handle_reset(self): self.reset() def handle_amp_message(self, segment_id, amp): try: segment = self._segments_by_id[segment_id] except KeyError: print "WARNING: amp message for unknown segment ID %s" % segment_id return self.handle_segment_amplitude(segment, amp) def handle_segment_amplitude(self, segment, amp): pass def handle_waveform_message(self, segment_id, value): try: segment = self._segments_by_id[segment_id] except KeyError: print "WARNING: waveform message for unknown segment ID %s" % segment_id return self.handle_segment_waveform_value(segment, value * self.waveform_gain) def handle_segment_waveform_value(self, segment, value): pass def handle_synth_address(self, port): self._synth_instance = None self._synth_port = port self.synth_address_received() def handle_audio_captured_started(self, start_time): self._audio_capture_start_time = float(start_time) def synth_address_received(self): pass def setup_osc(self): self.orchestra = OrchestraController(self.orchestra_host, self.orchestra_port) self.server = simple_osc_receiver.OscReceiver( listen=self.args.listen, name="Visualizer") self.server.add_method("/torrent", "ifiiis", self._handle_osc_message, "handle_torrent_message") self.server.add_method("/file", "iii", self._handle_osc_message, "handle_file_message") self.server.add_method("/chunk", "iiiiif", self._handle_osc_message, "handle_chunk_message") self.server.add_method("/segment", "iiiiiff", self._handle_osc_message, "handle_segment_message") self.server.add_method("/peer", "isffs", self._handle_osc_message, "handle_peer_message") self.server.add_method("/reset", "", self._handle_osc_message, "handle_reset") self.server.add_method("/shutdown", "", self._handle_osc_message, "handle_shutdown") self.server.add_method("/synth_address", "i", self._handle_osc_message, "handle_synth_address") self.server.add_method("/audio_captured_started", "s", 
self._handle_osc_message, "handle_audio_captured_started") self.server.start() self.waveform_server = None def setup_waveform_server(self): if not self._standalone: import osc_receiver self.waveform_server = osc_receiver.OscReceiver(proto=osc.UDP) self.waveform_server.add_method("/amp", "if", self._handle_osc_message, "handle_amp_message") self.waveform_server.add_method("/waveform", "if", self._handle_osc_message, "handle_waveform_message") self.waveform_server.start() def _handle_osc_message(self, path, args, types, src, handler_name): if self.capture_message_log: received_time = time.time() self._call_handler(handler_name, args) if self.capture_message_log: if self._audio_capture_start_time is None: capture_time = 0.0 print "WARNING: received OSC before audio capture started: %s" % path else: capture_time = received_time - self._audio_capture_start_time self._message_log_writer.write( capture_time, handler_name, args) def _call_handler(self, handler_name, args): handler = getattr(self, handler_name) handler(*args) def InitGL(self): glClearColor(1.0, 1.0, 1.0, 0.0) glClearAccum(0.0, 0.0, 0.0, 0.0) glClearDepth(1.0) glShadeModel(GL_SMOOTH) glutMouseFunc(self._mouse_clicked) glutMotionFunc(self._mouse_moved) glutSpecialFunc(self._special_key_pressed) def ReSizeGLScene(self, window_width, window_height): self.window_width = window_width self.window_height = window_height if window_height == 0: window_height = 1 glViewport(0, 0, window_width, window_height) self.width = window_width - 2*self.margin self.height = window_height - 2*self.margin self._aspect_ratio = float(window_width) / window_height self.min_dimension = min(self.width, self.height) self._refresh_layers() if not self._3d_enabled: self.configure_2d_projection() self.resized_window() def resized_window(self): pass def configure_2d_projection(self): glMatrixMode(GL_PROJECTION) glLoadIdentity() glOrtho(0.0, self.window_width, self.window_height, 0.0, -1.0, 1.0) glMatrixMode(GL_MODELVIEW) def 
_refresh_layers(self): for layer in self._layers: layer.refresh() def DrawGLScene(self): if self.exiting: self.logger.debug("total number of rendered frames: %s" % self._frame_count) if self.stopwatch.get_elapsed_time() > 0: self.logger.debug("total FPS: %s" % (float(self._frame_count) / self.stopwatch.get_elapsed_time())) if self.args.profile: import yappi yappi.print_stats(sys.stdout, yappi.SORTTYPE_TTOT) glutDestroyWindow(glutGetWindow()) return try: self._draw_gl_scene_error_handled() except Exception as error: traceback_printer.print_traceback() self.exiting = True raise error def _draw_gl_scene_error_handled(self): if self._camera_script: self._move_camera_by_script() glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) glLoadIdentity() if self.export: self.current_export_time = float(self._frame_count) / self.export_fps self.now = self.current_time() is_waiting_for_synth = (self.sync and not self._synth() and not self._synced) is_waiting_for_audio_capture_to_start = ( self.capture_message_log and self._audio_capture_start_time is None) if self._frame_count == 0 and \ not is_waiting_for_synth and \ not is_waiting_for_audio_capture_to_start: self.stopwatch.start() if self.sync: self._synced = True else: if self._frame_count == 0: self.time_increment = 0 else: self.time_increment = self.now - self.previous_frame_time self.handle_incoming_messages() self.update() if not self.capture_message_log: glTranslatef(self.margin, self.margin, 0) if self.args.border: self.draw_border() if self._3d_enabled and not self._accum_enabled: self.set_perspective( 0, 0, -self._camera_position.x, -self._camera_position.y, self._camera_position.z) self.render() if self.show_fps and self._frame_count > 0: self.update_fps_history() self.show_fps_if_timely() if self.export: self.exporter.export_frame() glutSwapBuffers() self.previous_frame_time = self.now finished = self.finished() if (self.export or self.args.exit_when_finished) and finished: self.exiting = True if not self._standalone: 
if finished and not self._notified_finished: self.orchestra.notify_finished() self._notified_finished = True if not is_waiting_for_synth: self._frame_count += 1 def finished(self): return False def handle_incoming_messages(self): if self.args.standalone: if self.play_message_log: self._process_message_log_until(self.now) else: self.server.serve() if self.waveform_server: self.waveform_server.serve() def _process_message_log_until(self, t): messages = self._message_log_reader.read_until(t) for _t, handler_name, args in messages: self._call_handler(handler_name, args) def update_fps_history(self): fps = 1.0 / self.time_increment self.fps_history.append(fps) def show_fps_if_timely(self): if self.previous_shown_fps_time: if (self.now - self.previous_shown_fps_time) > 1.0: self.calculate_and_show_fps() else: self.calculate_and_show_fps() def calculate_and_show_fps(self): print sum(self.fps_history) / len(self.fps_history) self.previous_shown_fps_time = self.now def draw_border(self): x1 = y1 = -1 x2 = self.width y2 = self.height glDisable(GL_LINE_SMOOTH) glLineWidth(1) glColor3f(BORDER_OPACITY, BORDER_OPACITY, BORDER_OPACITY) glBegin(GL_LINE_LOOP) glVertex2i(x1, y2) glVertex2i(x2, y2) glVertex2i(x2, y1) glVertex2i(x1, y1) glEnd() def keyPressed(self, key, x, y): if key == ESCAPE: # stop_all disabled as it also deletes ~reverb # self._synth().stop_all() self.exiting = True elif key == 's': self._dump_screen() elif key == 'f': if self._fullscreen: glutSetCursor(GLUT_CURSOR_INHERIT) glutLeaveGameMode() glutSetWindow(self._non_fullscreen_window) self._fullscreen = False else: self._open_fullscreen_window() self._fullscreen = True def _dump_screen(self): self._screen_dumper.export_frame() def playing_segment(self, segment): if not self._standalone: self.orchestra.visualizing_segment(segment.id) segment.playing = True def current_time(self): if self.export: return self.current_export_time else: return self.stopwatch.get_elapsed_time() def set_color(self, color_vector, 
alpha=1.0): glColor4f(color_vector[0], color_vector[1], color_vector[2], alpha) def set_listener_position(self, x, y): self.orchestra.set_listener_position(x, y) def set_listener_orientation(self, orientation): self.orchestra.set_listener_orientation(-orientation) def place_segment(self, segment_id, x, y, duration): self.orchestra.place_segment(segment_id, -x, y, duration) def _mouse_clicked(self, button, state, x, y): if self._3d_enabled: if button == GLUT_LEFT_BUTTON: self._dragging_orientation = (state == GLUT_DOWN) else: self._dragging_orientation = False if button == GLUT_RIGHT_BUTTON: self._dragging_y_position = (state == GLUT_DOWN) if state == GLUT_DOWN: self._drag_x_previous = x self._drag_y_previous = y def _mouse_moved(self, x, y): if self._3d_enabled: if self._dragging_orientation: self._disable_camera_script() self._set_camera_orientation( self._camera_y_orientation + x - self._drag_x_previous, self._camera_x_orientation - y + self._drag_y_previous) self._print_camera_settings() elif self._dragging_y_position: self._disable_camera_script() self._camera_position.y += CAMERA_Y_SPEED * (y - self._drag_y_previous) self._print_camera_settings() self._drag_x_previous = x self._drag_y_previous = y def _disable_camera_script(self): self._camera_script = None def _special_key_pressed(self, key, x, y): if self._3d_enabled: r = math.radians(self._camera_y_orientation) new_position = self._camera_position if key == GLUT_KEY_LEFT: new_position.x += CAMERA_KEY_SPEED * math.cos(r) new_position.z += CAMERA_KEY_SPEED * math.sin(r) elif key == GLUT_KEY_RIGHT: new_position.x -= CAMERA_KEY_SPEED * math.cos(r) new_position.z -= CAMERA_KEY_SPEED * math.sin(r) elif key == GLUT_KEY_UP: new_position.x += CAMERA_KEY_SPEED * math.cos(r + math.pi/2) new_position.z += CAMERA_KEY_SPEED * math.sin(r + math.pi/2) elif key == GLUT_KEY_DOWN: new_position.x -= CAMERA_KEY_SPEED * math.cos(r + math.pi/2) new_position.z -= CAMERA_KEY_SPEED * math.sin(r + math.pi/2) 
self._set_camera_position(new_position) self._print_camera_settings() def _print_camera_settings(self): print print "%s, %s, %s" % ( self._camera_position.v, self._camera_y_orientation, self._camera_x_orientation) def _set_camera_position(self, position): self._camera_position = position if not self._standalone: self.set_listener_position(position.z, position.x) def _set_camera_orientation(self, y_orientation, x_orientation): self._camera_y_orientation = y_orientation self._camera_x_orientation = x_orientation if not self._standalone: self.set_listener_orientation(y_orientation) def set_perspective(self, pixdx, pixdy, eyedx, eyedy, eyedz): assert self._3d_enabled fov2 = ((self.fovy*math.pi) / 180.0) / 2.0 top = self.near * math.tan(fov2) bottom = -top right = top * self._aspect_ratio left = -right xwsize = right - left ywsize = top - bottom # dx = -(pixdx*xwsize/self.width + eyedx*self.near/focus) # dy = -(pixdy*ywsize/self.height + eyedy*self.near/focus) # I don't understand why this modification solved the problem (focus was 1.0) dx = -(pixdx*xwsize/self.width) dy = -(pixdy*ywsize/self.height) glMatrixMode(GL_PROJECTION) glLoadIdentity() glFrustum (left + dx, right + dx, bottom + dy, top + dy, self.near, self.far) glMatrixMode(GL_MODELVIEW) glLoadIdentity() glRotatef(self._camera_x_orientation, 1.0, 0.0, 0.0) glRotatef(self._camera_y_orientation, 0.0, 1.0, 0.0) glTranslatef(self._camera_position.x, self._camera_position.y, self._camera_position.z) def enable_accum(self): self.gl_display_mode |= GLUT_ACCUM self._accum_enabled = True def accum(self, render_method): glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_ACCUM_BUFFER_BIT) for jitter in range(NUM_ACCUM_SAMPLES): glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) self.set_perspective(ACCUM_JITTER[jitter][0], ACCUM_JITTER[jitter][1], -self._camera_position.x, -self._camera_position.y, self._camera_position.z) render_method() glAccum(GL_ACCUM, 1.0/NUM_ACCUM_SAMPLES) glAccum(GL_RETURN, 1.0) def 
subscribe_to_amp(self): if not self.waveform_server: self.setup_waveform_server() self._synth().subscribe_to_amp(self.waveform_server.port) def subscribe_to_waveform(self): if not self._standalone: if not self.waveform_server: self.setup_waveform_server() self._synth().subscribe_to_waveform(self.waveform_server.port) def _move_camera_by_script(self): position, orientation = self._camera_script.position_and_orientation( self.current_time()) self._set_camera_position(position) self._set_camera_orientation(orientation.y, orientation.x) def new_layer(self, rendering_function): layer = Layer(rendering_function, self.new_display_list_id()) self._layers.append(layer) return layer def new_display_list_id(self): return glGenLists(1) def _synth(self): if self._synth_instance is None and self._synth_port: from synth_controller import SynthController self._synth_instance = SynthController(self.logger) self._synth_instance.connect(self._synth_port) return self._synth_instance def draw_text(self, text, size, x, y, font=None, spacing=None, v_align="left", h_align="top"): if font is None: font = self.args.font self.text_renderer(text, size, font).render(x, y, v_align, h_align) def text_renderer(self, text, size, font=None): if font is None: font = self.args.font return self._text_renderer_class(self, text, size, font, aspect_ratio=self._target_aspect_ratio) def download_completed(self): if self.torrent_download_completion_time: return True else: if self.num_segments > 0 and self.num_received_segments == self.num_segments and not self.active(): self.torrent_download_completion_time = self.current_time() return True def active(self): return False def update(self): pass @staticmethod def add_parser_arguments(parser): parser.add_argument("-host", type=str, default="localhost") parser.add_argument('-port', type=int) parser.add_argument("-listen", type=str) parser.add_argument('-sync', action='store_true') try: parser.add_argument('-width', dest='width', type=int, default=1024) 
parser.add_argument('-height', dest='height', type=int, default=768) except argparse.ArgumentError: pass parser.add_argument("-left", type=int) parser.add_argument("-top", type=int) parser.add_argument('-margin', dest='margin', type=int, default=0) parser.add_argument('-show-fps', dest='show_fps', action='store_true') parser.add_argument('-capture-message-log', dest='capture_message_log') parser.add_argument('-play-message-log', dest='play_message_log') parser.add_argument('-export', dest='export', action='store_true') parser.add_argument('-export-fps', dest='export_fps', default=25.0, type=float) parser.add_argument('-export-dir') parser.add_argument("-waveform", dest="waveform", action='store_true') parser.add_argument("-waveform-gain", dest="waveform_gain", default=1, type=float) parser.add_argument("-camera-script", dest="camera_script", type=str) parser.add_argument("-border", action="store_true") parser.add_argument("-fullscreen", action="store_true") parser.add_argument("-standalone", action="store_true") parser.add_argument("-profile", action="store_true") parser.add_argument("-check-opengl-errors", action="store_true") parser.add_argument("-exit-when-finished", action="store_true") parser.add_argument("--text-renderer", choices=TEXT_RENDERERS.keys(), default="glut") parser.add_argument("--font", type=str) parser.add_argument("-aspect", type=str, default="1:1", help="Target aspect ratio (e.g. 16:9)") @staticmethod def add_margin_argument(parser, name): parser.add_argument(name, type=str, default="0,0,0,0", help="top,right,bottom,left in relative units") def parse_margin_argument(self, argument_string): return MarginAttributes.from_argument(argument_string, self)
#!/usr/bin/env python CONFIG_FILENAME = "sc/parameters.config" import pygtk import gtk import cPickle import os import sys sys.path.append(os.path.dirname(__file__) + "/..") from synth_controller import SynthController synth = SynthController() synth.connect(SynthController.DEFAULT_PORT) parameters = ["mix", "room", "damp"] def send_values_to_sc(): global values for parameter, value in values.iteritems(): synth._send("/set_reverb_%s" % parameter, value) def value_changed(adj, parameter): global values values[parameter] = adj.value/100 send_values_to_sc() save_values() def destroy(widget, data=None): gtk.main_quit()
class Orchestra:
    """Plays back a torrent transfer log as sound.

    Chunks and segments from the transfer log (tr_log) are scheduled in
    (scaled) log time and rendered either through a SuperCollider synth
    (JACK output) or through the SoundScape Renderer (SSR), while peers
    are represented by Player objects with spatial positions.  Visualizer
    clients are kept informed via OSC-style messages through the server.
    """

    SAMPLE_RATE = 44100
    BYTES_PER_SAMPLE = 2 # mpg123, used by predecode, outputs 16-bit PCM mono
    PLAYABLE_FORMATS = ['mp3', 'flac', 'wav', 'm4b']
    # Output backends:
    JACK = "jack"
    SSR = "ssr"

    @staticmethod
    def add_parser_arguments(parser):
        """Register all Orchestra-related command-line options on parser."""
        parser.add_argument("--rt", action="store_true", dest="realtime")
        parser.add_argument("-t", "--torrent", dest="torrentname", default="")
        parser.add_argument("-z", "--timefactor", dest="timefactor", type=float, default=1)
        parser.add_argument("--start", dest="start_time", type=float, default=0)
        parser.add_argument("-q", "--quiet", action="store_true", dest="quiet")
        parser.add_argument("--pretend-sequential", action="store_true", dest="pretend_sequential")
        parser.add_argument("--gui", action="store_true", dest="gui_enabled")
        parser.add_argument("--fast-forward", action="store_true", dest="ff")
        parser.add_argument("--fast-forward-to-start", action="store_true", dest="ff_to_start")
        parser.add_argument("--quit-at-end", action="store_true", dest="quit_at_end")
        parser.add_argument("--loop", dest="loop", action="store_true")
        parser.add_argument("--max-pause-within-segment", type=float)
        parser.add_argument("--max-segment-duration", type=float)
        parser.add_argument("--looped-duration", dest="looped_duration", type=float)
        parser.add_argument("-o", "--output", dest="output", type=str, default=Orchestra.JACK)
        parser.add_argument("--include-non-playable", action="store_true")
        parser.add_argument("-f", "--file", dest="selected_files", type=int, nargs="+")
        parser.add_argument("--title", type=str, default="")
        parser.add_argument("--pretend-audio", dest="pretend_audio_filename")
        parser.add_argument("--capture-audio")
        parser.add_argument("--leading-pause", type=float, default=0)

    # Matches a trailing filename extension, e.g. ".mp3".
    _extension_re = re.compile('\.(\w+)$')

    def __init__(self, server, sessiondir, tr_log, options):
        """Set up playback state from a transfer log and parsed options.

        server     -- owning server object (provides options, ip_locator,
                      visualizer messaging)
        sessiondir -- session directory path
        tr_log     -- transfer log with files, chunks and peers
        options    -- parsed argparse namespace (see add_parser_arguments)
        """
        self.server = server
        self.options = options
        self.sessiondir = sessiondir
        self.tr_log = tr_log
        self.realtime = options.realtime
        self.timefactor = options.timefactor
        self.quiet = options.quiet
        self._loop = options.loop
        self.looped_duration = options.looped_duration
        self.output = options.output
        self.include_non_playable = options.include_non_playable
        self._leading_pause = options.leading_pause

        if server.options.locate_peers:
            # Geo-locate each peer address once, up front.
            self._peer_location = {}
            for peeraddr in tr_log.peers:
                self._peer_location[peeraddr] = server.ip_locator.locate(peeraddr)
            self._peers_center_location_x = self._get_peers_center_location_x()

        if options.pretend_audio_filename:
            # Substitute a single local audio file for the torrent's files.
            self._pretended_file = self._fileinfo_for_pretended_audio_file()
            self._pretended_file["duration"] = self._get_file_duration(self._pretended_file)
            self._pretended_files = [self._pretended_file]
            self._files_to_play = self._pretended_files
        else:
            self._files_to_play = self.tr_log.files

        self.predecode = server.options.predecode
        if self.predecode:
            predecoder = Predecoder(
                tr_log.files, sample_rate=self.SAMPLE_RATE, location=tr_log.file_location)
            predecoder.decode(server.options.force_predecode)
            if options.pretend_audio_filename:
                predecoder = Predecoder(
                    self._pretended_files, sample_rate=self.SAMPLE_RATE)
                predecoder.decode(server.options.force_predecode)

        if options.selected_files:
            tr_log.select_files(options.selected_files)

        self.playback_enabled = True
        self.fast_forwarding = False
        self.gui = None
        self._check_which_files_are_audio()
        self._player_class = WavPlayer
        self.players = []
        self._player_for_peer = dict()
        self._prepare_playable_files()
        self.stopwatch = Stopwatch()
        self.playable_chunks = self._filter_playable_chunks(tr_log, tr_log.chunks)
        if self.include_non_playable:
            self.chunks = tr_log.chunks
            self._num_selected_files = len(self.tr_log.files)
        else:
            self.chunks = self.playable_chunks
            self._num_selected_files = self._num_playable_files
        logger.debug("total num chunks: %s" % len(tr_log.chunks))
        logger.debug("num playable chunks: %s" % len(self.playable_chunks))
        logger.debug("num selected chunks: %s" % len(self.chunks))
        self.score = self._interpret_chunks_to_score(tr_log, self.playable_chunks, options)
        self.estimated_duration = self._estimated_playback_duration(self.score, options)
        print "playback duration: %s" % datetime.timedelta(seconds=self.estimated_duration)
        self._chunks_by_id = {}
        self.segments_by_id = {}
        self._playing = False
        self._quitting = False
        self._was_stopped = False
        self.space = Space()
        # Events are queued here and moved onto the sched scheduler by a
        # dedicated daemon thread (see _run_scheduler_thread).
        self._scheduler_queue = Queue.Queue()
        self.scheduler = sched.scheduler(time.time, time.sleep)
        self._run_scheduler_thread()
        if self.output == self.SSR:
            self.ssr = SsrControl()
            self._warned_about_max_sources = False

    def init_playback(self):
        """Start the synth engine (unless disabled), load sounds and position
        the time cursor according to the start/fast-forward options."""
        if self.server.options.no_synth:
            self.synth = None
        else:
            from synth_controller import SynthController
            self.synth = SynthController(logger)
            self.synth.launch_engine(self.server.options.sc_mode)
            self.synth.connect(self.synth.lang_port)
            self.synth.subscribe_to_info()
            if self.options.capture_audio:
                # Sounds must be loaded before capture starts.
                self._load_sounds()
                self._start_capture_audio()
            self._tell_visualizers("/synth_address", self.synth.lang_port)
        if self.output == self.SSR:
            self.ssr.run()
        if not self.options.capture_audio:
            self._load_sounds()
        self._log_time_for_last_handled_event = 0
        if self.options.ff_to_start:
            # Fast-forward silently from 0 up to the requested start time.
            self._ff_to_time = self.options.start_time
            self.set_time_cursor(0)
        else:
            self._ff_to_time = None
            self.set_time_cursor(self.options.start_time)

    def _start_capture_audio(self):
        """Spawn jack_capture recording SuperCollider's outputs to a file."""
        self._audio_capture_filename = self.options.capture_audio
        if os.path.exists(self._audio_capture_filename):
            os.remove(self._audio_capture_filename)
        self._audio_capture_process = subprocess.Popen(
            ["./jack_capture/jack_capture",
             "-f", self._audio_capture_filename,
             "-d", "-1",
             "-B", "65536",
             "SuperCollider:out_1", "SuperCollider:out_2"],
            shell=False, stdout=subprocess.PIPE)
        self._wait_until_audio_capture_started()

    def _wait_until_audio_capture_started(self):
        """Block until jack_capture reports its start time on stdout, then
        forward that timestamp to the visualizers."""
        print "waiting for audio capture to start"
        while True:
            line = self._audio_capture_process.stdout.readline().rstrip("\r\n")
            m = re.match('^audio capture started at (.*)$', line)
            if m:
                audio_capture_start_time = float(m.group(1))
                self._tell_visualizers("/audio_captured_started", str(audio_capture_start_time))
                print "audio capture started"
                return

    @classmethod
    def _estimated_playback_duration(cls, score, options):
        """Estimate total playback time: last onset (time-scaled) + duration."""
        last_segment = score[-1]
        return last_segment["onset"] / options.timefactor + last_segment["duration"]

    @classmethod
    def _interpret_chunks_to_score(cls, tr_log, chunks, options):
        """Convert raw chunks into a score of segments, scaling durations by
        the time factor (onsets are scaled at playback time instead)."""
        score = Interpreter(options.max_pause_within_segment,
                            options.max_segment_duration).interpret(
            chunks, tr_log.files)
        for segment in score:
            segment["duration"] /= options.timefactor
        return score

    @classmethod
    def _filter_playable_chunks(cls, tr_log, chunks):
        """Return only the chunks belonging to playable (decoded) files."""
        return filter(lambda chunk: (cls._chunk_is_playable(tr_log, chunk)),
                      chunks)

    @classmethod
    def _chunk_is_playable(cls, tr_log, chunk):
        # playable_file_index == -1 marks a non-playable file
        # (see _get_wav_files_info).
        file_info = tr_log.files[chunk["filenum"]]
        return file_info["playable_file_index"] != -1

    def _run_scheduler_thread(self):
        """Start the daemon thread that drains the scheduler queue."""
        self._scheduler_thread = threading.Thread(target=self._process_scheduled_events)
        self._scheduler_thread.daemon = True
        self._scheduler_thread.start()

    def _process_scheduled_events(self):
        """Thread body: move queued (delay, priority, action, args) entries
        into the sched scheduler and run it, until _quit() is called."""
        while not self._quitting:
            while True:
                try:
                    # Short timeout keeps the loop responsive to _quitting.
                    delay, priority, action, arguments = self._scheduler_queue.get(True, 0.01)
                except Queue.Empty:
                    break
                self.scheduler.enter(delay, priority, action, arguments)
            self.scheduler.run()

    def _handle_visualizing_message(self, path, args, types, src, data):
        """OSC handler: a visualizer shows a segment, so start its audio."""
        segment_id = args[0]
        segment = self.segments_by_id[segment_id]
        logger.debug("visualizing segment %s" % segment)
        player = self.get_player_for_segment(segment)
        self._ask_synth_to_play_segment(segment, channel=0, pan=player.spatial_position.pan)

    def _ask_synth_to_play_segment(self, segment, channel, pan):
        """Trigger playback of a segment on the synth and schedule its
        stopped_playing callback after playback_duration."""
        if self.synth:
            logger.debug("asking synth to play %s" % segment)
            file_info = self.tr_log.files[segment["filenum"]]
            if self.output == self.SSR:
                segment["sound_source_id"] = self.ssr.allocate_source()
                # NOTE(review): once the warning has fired,
                # _warned_about_max_sources stays True and this condition
                # skips playback even when allocate_source succeeds —
                # looks unintended; confirm against SSR usage.
                if segment["sound_source_id"] and not self._warned_about_max_sources:
                    # SSR sources are 1-based; synth channels 0-based.
                    channel = segment["sound_source_id"] - 1
                    pan = None
                else:
                    print "WARNING: max sources exceeded, skipping segment playback (this warning will not be repeated)"
                    self._warned_about_max_sources = True
                    return
            self.synth.play_segment(
                segment["id"],
                segment["audio_filenum"],
                segment["relative_start_time_in_file"],
                segment["relative_end_time_in_file"],
                segment["duration"],
                self.looped_duration,
                channel,
                pan)
            self._scheduler_queue.put(
                (segment["playback_duration"], 1, self.stopped_playing, [segment]))

    def _check_which_files_are_audio(self):
        """Tag every file in the log with an is_audio flag (by extension)."""
        for file_info in self.tr_log.files:
            file_info["is_audio"] = self._has_audio_extension(file_info["name"])

    @staticmethod
    def _has_audio_extension(filename):
        """True if the filename's extension is one we can play."""
        return Orchestra._extension(filename) in Orchestra.PLAYABLE_FORMATS

    @staticmethod
    def _extension(filename):
        """Return the lower-cased extension of filename, or None."""
        m = Orchestra._extension_re.search(filename)
        if m:
            return m.group(1).lower()

    def _prepare_playable_files(self):
        # Predecoding to wav is the only supported playback path.
        if self.predecode:
            self._num_playable_files = self._get_wav_files_info(
                self.tr_log, self.include_non_playable)
        else:
            raise Exception("playing wav without precoding is not supported")

    def _load_sounds(self):
        """Load every playable decoded file into the synth engine."""
        if self.synth:
            print "loading sounds"
            for filenum in range(len(self._files_to_play)):
                file_info = self._files_to_play[filenum]
                if file_info["playable_file_index"] != -1:
                    logger.info("load_sound(%s)" % file_info["decoded_name"])
                    result = self._load_sound_stubbornly(filenum, file_info["decoded_name"])
                    logger.info("load_sound result: %s" % result)
            print "OK"

    def _load_sound_stubbornly(self, filenum, filename):
        """Retry load_sound until the synth reports success (result > 0)."""
        while True:
            result = self.synth.load_sound(filenum, filename)
            if result > 0:
                return result
            else:
                warn(logger, "synth returned %s - retrying soon" % result)
                time.sleep(1.0)

    @classmethod
    def _get_wav_files_info(cls, tr_log, include_non_playable=False):
        """Annotate each file with duration, playable_file_index and index;
        return the number of playable files."""
        playable_file_index = 0
        for filenum in range(len(tr_log.files)):
            file_info = tr_log.files[filenum]
            file_info["playable_file_index"] = -1
            if "decoded_name" in file_info:
                file_info["duration"] = cls._get_file_duration(file_info)
                if file_info["duration"] > 0:
                    file_info["playable_file_index"] = playable_file_index
                    logger.debug("duration for %r: %r\n" % (
                        file_info["name"], file_info["duration"]))
                    playable_file_index += 1
            if include_non_playable:
                file_info["index"] = filenum
            else:
                file_info["index"] = file_info["playable_file_index"]
        return playable_file_index

    @classmethod
    def _get_file_duration(cls, file_info):
        """Duration in seconds of a decoded wav, derived from its file size
        (16-bit mono PCM assumed; see BYTES_PER_SAMPLE)."""
        if "decoded_name" in file_info:
            statinfo = os.stat(file_info["decoded_name"])
            wav_header_size = 44
            return float((statinfo.st_size - wav_header_size) / cls.BYTES_PER_SAMPLE) / cls.SAMPLE_RATE

    def get_current_log_time(self):
        """Current position in (unscaled) log time; while fast-forwarding it
        is pinned to the last handled event."""
        if self.fast_forwarding:
            return self._log_time_for_last_handled_event
        else:
            return self.log_time_played_from + self.stopwatch.get_elapsed_time() * self.timefactor

    def play_non_realtime(self, quit_on_end=False):
        """Play the whole log to the end, optionally looping forever and
        optionally quitting when done."""
        logger.info("entering play_non_realtime")
        self._was_stopped = False
        self._num_finished_visualizers = 0
        if self._loop:
            while True:
                self._play_until_end()
                if not self._was_stopped:
                    self._wait_for_visualizers_to_finish()
                self.set_time_cursor(0)
        else:
            self._play_until_end()
            if not self._was_stopped:
                self._wait_for_visualizers_to_finish()
            if quit_on_end:
                self._quit()
        logger.info("leaving play_non_realtime")

    def _quit(self):
        """Stop audio capture (if any) and signal the scheduler thread."""
        if self.options.capture_audio:
            self._audio_capture_process.kill()
        self._quitting = True

    def _play_until_end(self):
        """Handle events in order until stopped or no events remain."""
        logger.info("entering _play_until_end")
        self._playing = True
        self.stopwatch.start()
        time.sleep(self._leading_pause)
        no_more_events = False
        while self._playing and not no_more_events:
            event = self._get_next_chunk_or_segment()
            if event:
                self._handle_event(event)
            else:
                no_more_events = True
        logger.info("leaving _play_until_end")

    def _get_next_chunk_or_segment(self):
        """Return the next event dict ({"type": ..., ...}) or None at end."""
        logger.debug("chunk index = %d, segment index = %d" % (
            self.current_chunk_index, self.current_segment_index))
        chunk = self._get_next_chunk()
        segment = self._get_next_segment()
        logger.debug("next chunk: %s" % chunk)
        logger.debug("next segment: %s" % segment)
        if chunk and segment:
            return self._choose_nearest_chunk_or_segment(chunk, segment)
        elif chunk:
            return {"type": "chunk",
                    "chunk": chunk}
        elif segment:
            return {"type": "segment",
                    "segment": segment}
        else:
            return None

    def _get_next_chunk(self):
        # Returns None implicitly when the cursor is past the last chunk.
        if self.current_chunk_index < len(self.chunks):
            return self.chunks[self.current_chunk_index]

    def _get_next_segment(self):
        if len(self.score) == 0:
            return None
        elif self.current_segment_index < len(self.score):
            return self.score[self.current_segment_index]

    def _handle_event(self, event):
        """Dispatch one event and advance the corresponding cursor."""
        if event["type"] == "chunk":
            self.handle_chunk(event["chunk"])
            self.current_chunk_index += 1
        elif event["type"] == "segment":
            self.handle_segment(event["segment"])
            self.current_segment_index += 1
        else:
            raise Exception("unexpected event %s" % event)

    def _choose_nearest_chunk_or_segment(self, chunk, segment):
        """Pick whichever event occurs earlier in log time (ties: segment)."""
        if chunk["t"] < segment["onset"]:
            return {"type": "chunk",
                    "chunk": chunk}
        else:
            return {"type": "segment",
                    "segment": segment}

    def stop(self):
        """Pause playback, remembering the log time to resume from."""
        # stop_all disabled as it also deletes ~reverb
        # if self.synth:
        #     self.synth.stop_all()
        self._was_stopped = True
        self._playing = False
        self.log_time_played_from = self.get_current_log_time()
        self.stopwatch.stop()

    def handle_segment(self, segment):
        """Play one segment at its onset time (sleeping until then in
        non-realtime mode), or just advance while fast-forwarding."""
        logger.debug("handling segment %s" % segment)
        player = self.get_player_for_segment(segment)
        if not player:
            logger.debug("get_player_for_segment returned None - skipping playback")
        if self.fast_forwarding:
            self._stop_ff_if_necessary()
        else:
            now = self.get_current_log_time()
            time_margin = segment["onset"] - now
            logger.debug("time_margin=%f-%f=%f" % (segment["onset"], now, time_margin))
            if not self.realtime and time_margin > 0:
                sleep_time = time_margin
                logger.debug("sleeping %f" % sleep_time)
                time.sleep(sleep_time)
            if player:
                logger.debug("player.enabled=%s" % player.enabled)
            if player and player.enabled:
                player.play(segment, pan=0.5)
        self._log_time_for_last_handled_event = segment["onset"]

    def handle_chunk(self, chunk):
        """Visualize one chunk at its log time (mirrors handle_segment)."""
        logger.debug("handling chunk %s" % chunk)
        player = self.get_player_for_chunk(chunk)
        logger.debug("get_player_for_chunk returned %s" % player)
        if self.fast_forwarding:
            self._stop_ff_if_necessary()
        else:
            now = self.get_current_log_time()
            time_margin = chunk["t"] - now
            logger.debug("time_margin=%f-%f=%f" % (chunk["t"], now, time_margin))
            if not self.realtime and time_margin > 0:
                sleep_time = time_margin
                logger.debug("sleeping %f" % sleep_time)
                time.sleep(sleep_time)
            if player:
                logger.debug("player.enabled=%s" % player.enabled)
            if player and player.enabled:
                player.visualize(chunk)
        self._log_time_for_last_handled_event = chunk["t"]

    def _stop_ff_if_necessary(self):
        """Leave fast-forward mode once the target log time is reached."""
        if self._ff_to_time is not None and \
                self._log_time_for_last_handled_event >= self._ff_to_time:
            self._ff_to_time = None
            self.fast_forwarding = False
            self.set_time_cursor(self.log_time_played_from)

    def highlight_segment(self, segment):
        if self.gui:
            self.gui.highlight_segment(segment)

    def visualize_chunk(self, chunk, player):
        """Forward a chunk to connected visualizers.

        NOTE(review): self.visualizers is not assigned in this class within
        this excerpt — presumably set/shared by the server; verify.
        """
        if len(self.visualizers) > 0:
            self._inform_visualizers_about_peer(player)
            file_info = self.tr_log.files[chunk["filenum"]]
            self._chunks_by_id[chunk["id"]] = chunk
            self._tell_visualizers(
                "/chunk",
                chunk["id"],
                chunk["begin"],
                chunk["end"] - chunk["begin"],
                file_info["index"],
                player.id,
                chunk["t"])

    def visualize_segment(self, segment, player):
        """Forward a segment to visualizers, or play it directly through the
        synth when no visualizer is connected."""
        if len(self.visualizers) > 0:
            self._inform_visualizers_about_peer(player)
            file_info = self.tr_log.files[segment["filenum"]]
            self.segments_by_id[segment["id"]] = segment
            self._tell_visualizers(
                "/segment",
                segment["id"],
                segment["begin"],
                segment["end"] - segment["begin"],
                file_info["index"],
                player.id,
                segment["t"],
                segment["playback_duration"])
        else:
            self._ask_synth_to_play_segment(segment, channel=0, pan=0.5)

    def stopped_playing(self, segment):
        """Scheduled callback fired when a segment's playback time is over."""
        logger.debug("stopped segment %s" % segment)
        if self.gui:
            self.gui.unhighlight_segment(segment)
        if self.output == self.SSR and segment["sound_source_id"]:
            self.ssr.free_source(segment["sound_source_id"])

    def play_segment(self, segment, player):
        """Register a segment, compute its playback duration and render it."""
        self.segments_by_id[segment["id"]] = segment
        if self.looped_duration:
            segment["playback_duration"] = self.looped_duration
        else:
            segment["playback_duration"] = segment["duration"]
        self.visualize_segment(segment, player)

    def _send_torrent_info_to_uninformed_visualizers(self):
        for visualizer in self.visualizers:
            if not visualizer.informed_about_torrent:
                self._send_torrent_info_to_visualizer(visualizer)

    def _inform_visualizers_about_peer(self, player):
        """Tell each visualizer about this peer once (tracked per player id).
        Aborts the marking when send() reports failure so it can be retried."""
        for visualizer in self.visualizers:
            if player.id not in visualizer.informed_about_peer:
                if visualizer.send(
                        "/peer",
                        player.id,
                        player.addr,
                        player.spatial_position.bearing,
                        player.spatial_position.pan,
                        player.location_str):
                    visualizer.informed_about_peer[player.id] = True

    def _send_torrent_info_to_visualizer(self, visualizer):
        """Send torrent summary and per-file info; only mark the visualizer
        as informed when every send succeeded."""
        if not visualizer.send(
                "/torrent",
                self._num_selected_files,
                self.tr_log.lastchunktime(),
                self.tr_log.total_file_size(),
                len(self.chunks),
                len(self.score),
                self.options.title):
            return
        for filenum in range(len(self.tr_log.files)):
            file_info = self.tr_log.files[filenum]
            if self.include_non_playable or file_info["playable_file_index"] != -1:
                if not visualizer.send(
                        "/file",
                        file_info["index"],
                        file_info["offset"],
                        file_info["length"]):
                    return
        visualizer.informed_about_torrent = True

    def get_player_for_chunk(self, chunk):
        """Return (caching on the chunk) the player for the chunk's peer."""
        try:
            return chunk["player"]
        except KeyError:
            peer_player = self.get_player_for_peer(chunk["peeraddr"])
            chunk["player"] = peer_player
            return peer_player

    def get_player_for_segment(self, segment):
        """Return (caching on the segment) the player for the segment's peer."""
        try:
            return segment["player"]
        except KeyError:
            peer_player = self.get_player_for_peer(segment["peeraddr"])
            segment["player"] = peer_player
            return peer_player

    def get_player_for_peer(self, peeraddr):
        """Return the player for a peer address, creating it on first use."""
        peer_player = None
        try:
            peer_player = self._player_for_peer[peeraddr]
        except KeyError:
            peer_player = self._create_player(peeraddr)
            self.players.append(peer_player)
            self._player_for_peer[peeraddr] = peer_player
        return peer_player

    def _create_player(self, addr):
        """Construct a player for a peer; when geo-location is enabled, set
        its location string and hard-pan it left/right of the peers' center."""
        count = len(self.players)
        logger.debug("creating player number %d" % count)
        player = self._player_class(self, count)
        player.addr = addr
        if self.server.options.locate_peers and self._peer_location[addr] is not None:
            x, y, place_name = self._peer_location[addr]
            if place_name:
                place_name = place_name.encode("unicode_escape")
            else:
                place_name = ""
            player.location_str = "%s,%s,%s" % (x, y, place_name)
            if x < self._peers_center_location_x:
                player.spatial_position.pan = -1.0
            else:
                player.spatial_position.pan = 1.0
        else:
            player.location_str = ""
        return player

    def set_time_cursor(self, log_time):
        """Jump playback to log_time and re-derive both event cursors."""
        assert not self.realtime
        logger.debug("setting time cursor at %f" % log_time)
        self.log_time_played_from = log_time
        if self._playing:
            self.stopwatch.restart()
        self.current_chunk_index = self._get_current_chunk_index()
        self.current_segment_index = self._get_current_segment_index()

    def _get_current_chunk_index(self):
        """Index of the last chunk at or before log_time_played_from
        (linear scan)."""
        index = 0
        next_to_last_index = len(self.chunks) - 2
        while index < next_to_last_index:
            if self.chunks[index+1]["t"] >= self.log_time_played_from:
                return index
            index += 1
        return len(self.chunks) - 1

    def _get_current_segment_index(self):
        """Segment counterpart of _get_current_chunk_index."""
        index = 0
        next_to_last_index = len(self.score) - 2
        while index < next_to_last_index:
            if self.score[index+1]["onset"] >= self.log_time_played_from:
                return index
            index += 1
        return len(self.score) - 1

    def _handle_set_listener_position(self, path, args, types, src, data):
        """OSC handler: move the SSR listener."""
        if self.output == self.SSR:
            x, y = args
            self.ssr.set_listener_position(x, y)

    def _handle_set_listener_orientation(self, path, args, types, src, data):
        """OSC handler: rotate the SSR listener."""
        if self.output == self.SSR:
            orientation = args[0]
            self.ssr.set_listener_orientation(orientation)

    def _handle_place_segment(self, path, args, types, src, data):
        """OSC handler: position a playing segment in space (SSR) or remap
        its position to a stereo pan (synth)."""
        segment_id, x, y, duration = args
        if self.output == self.SSR:
            segment = self.segments_by_id[segment_id]
            sound_source_id = segment["sound_source_id"]
            if sound_source_id is not None:
                self.ssr.place_source(sound_source_id, x, y, duration)
        else:
            pan = self._spatial_position_to_stereo_pan(x, y)
            if self.synth:
                self.synth.pan(segment_id, pan)

    def _handle_enable_smooth_movement(self, path, args, types, src, data):
        pass # OBSOLETE after smooth movement made default

    def _handle_start_segment_movement_from_peer(self, path, args, types, src, data):
        """OSC handler: animate an SSR source along the peer's trajectory."""
        segment_id, duration = args
        if self.output == self.SSR:
            segment = self.segments_by_id[segment_id]
            sound_source_id = segment["sound_source_id"]
            if sound_source_id is not None:
                player = self.get_player_for_segment(segment)
                self.ssr.start_source_movement(
                    sound_source_id, player.trajectory, duration)

    def _spatial_position_to_stereo_pan(self, x, y):
        # compare rectangular_visualizer.Visualizer.pan_segment
        # NOTE: assumes default listener position and orientation!
        return float(x) / 5 + 0.5

    def reset(self):
        """Stop the synth and reset visualizer informed-state for a rerun."""
        if self.synth:
            self.synth.stop_engine()
        self._tell_visualizers("/reset")
        for visualizer in self.visualizers:
            visualizer.informed_about_torrent = False
            visualizer.informed_about_peer = {}

    def _tell_visualizers(self, *args):
        # Lazily (re)send torrent info before any other message.
        self._send_torrent_info_to_uninformed_visualizers()
        self.server._tell_visualizers(*args)

    def _fileinfo_for_pretended_audio_file(self):
        """Build a file_info dict describing the --pretend-audio file."""
        return {"offset": 0,
                "length": os.stat(self.options.pretend_audio_filename).st_size,
                "name": self.options.pretend_audio_filename,
                "playable_file_index": 0}

    def _handle_finished(self, path, args, types, src, data):
        """OSC handler: count a visualizer as done with the current run."""
        self._num_finished_visualizers += 1

    def _wait_for_visualizers_to_finish(self):
        while self._num_finished_visualizers < len(self.visualizers):
            time.sleep(0.1)

    def _get_peers_center_location_x(self):
        """Median-ish x coordinate of the located peers (0 when <= 1 peer),
        used to split peers into left/right pan groups."""
        if len(self._peer_location) <= 1:
            return 0
        else:
            sorted_xs = sorted([x for x,y,location_str in self._peer_location.values()])
            center_index = int((len(self._peer_location)-1) / 2)
            return float(sorted_xs[center_index] + sorted_xs[center_index+1]) / 2