def init(params):
    """Build a connected graph plus a Player from *params* and fire its
    configured triggers.

    Recognised keys: "sounds", "speed", "target" (required);
    "bidirectional", "flipped", "burn", "mouse", "nummap", "seizure"
    (optional).  Returns the primed player.Player instance.
    """
    bidirectional = params.get("bidirectional", False)
    g = graph.connected_directed_graph(version=params["sounds"],
                                       bd=bidirectional)
    p = player.Player(g,
                      speed=params["speed"],
                      target_nnodes=params["target"],
                      flipped=params.get("flipped", False),
                      burnbridges=params.get("burn", False),
                      bidirectional=bidirectional)

    # Mouse triggers: fire the graph node nearest each recorded point.
    for pt in params.get('mouse', []):
        p.trigger(g.nearest(pt[0], pt[1]))

    # Numeric-group triggers: fire every node filed under each number.
    for num in params.get('nummap', []):
        for node in g.grpnodes.get(num, []):
            p.trigger(node)

    # "seizure" mode: fire every node in the graph at once.
    if params.get('seizure', False):
        for node in g.get_all_nodes():
            p.trigger(node)

    return p
# NOTE(review): this chunk arrived with all statements collapsed onto one
# physical line; the layout below is reconstructed.  The first four
# statements are the tail of a function whose `def` header lies outside
# this chunk -- almost certainly the `run()` invoked from the __main__
# block below -- so they are indented as a function body.  Confirm against
# the full file before relying on this.
    # Hand the audio callback to numm's Qt runner, stream audio on a
    # background thread, then block inside the Qt event loop.
    numm.ui.NummRun(a_out).run()
    audiothread = threading.Thread(target=audio)
    audiothread.start()
    app.exec_()


if __name__=='__main__':
    # Entry point: build the Qt app, load the sound graph (optional
    # version name as argv[1]), show the player view, and run.
    app = QtGui.QApplication(sys.argv)
    app.setApplicationName('qplayer')
    print 'load graph'
    # g = graph.load_graph(sys.argv[1]=='left')
    # print 'connect to samples'
    # graph.connect_to_samples(g, sys.argv[2:])
    sounds = None
    if len(sys.argv) > 1:
        sounds = sys.argv[1]
    g = graph.connected_directed_graph(sounds)
    print 'qview'
    p = Player(g)
    view = QView(p)
    view.show()
    run()
    # import cProfile
    # cProfile.run('run()', 'proofile')
def render(params, outdir): curdir = os.getcwd() if os.path.exists(outdir): print 'skipping', outdir return os.makedirs(outdir) g = graph.connected_directed_graph(version=params["sounds"], bd=params.get("bidirectional", False), files=params.get("files", None)) p = player.Player(g, speed=params["speed"], target_nnodes=params["target"], flipped=params.get("flipped", False), burnbridges=params.get("burn", False)) # shutil.copy(sys.argv[1], os.path.join(outdir, 'orig.params.json')) # os.chdir(outdir) p.toggle_recording() # trigger for pt in params.get('mouse', []): node = g.nearest(pt[0], pt[1]) p.trigger(node) print 'trigger', node.pt, pt, node.payload for num in params.get('nummap',[]): for node in g.grpnodes.get(num,[]): p.trigger(node) if params.get('seizure', False): for node in g.get_all_nodes(): p.trigger(node) cur_t = 0 rec_video = None while cur_t < MAX_DUR: p.next(buffer_size=WINDOW) im = p.draw() # RESIZE im = cv2.resize(im, (800, 800)) if rec_video is None: rec_video = cv2.VideoWriter() rec_video.open(os.path.join(outdir, 'out.avi'), cv2.cv.CV_FOURCC(*'DIVX'), 30, (im.shape[1], im.shape[0]), True) rec_video.write(im) cur_t += WINDOW / float(R) if len(p._state_edges) + len(p._state_nodes) == 0: print 'early finish', cur_t break os.chdir(outdir) p.toggle_recording() # Actually, replace audio. Whatever. # print 're-render audio' # rerender.render('out.samples.txt') # join a/v & delete v cmd = ['ffmpeg', '-i', 'out.avi', '-i', 'noclip.wav', '-acodec', 'copy', '-vcodec', 'copy', 'merge.avi'] p = subprocess.Popen(cmd) p.wait() os.unlink('out.avi') os.chdir(curdir)
# Interactive scratch script (cells separated by "#--", apparently pasted
# from a REPL/ipython session): wires up the connectomusic library from a
# hard-coded checkout path and builds a graph + player for hand-driven use.
libpath = '/home/rmo/src/connectomusic'
import sys
sys.path.append(libpath)
import os
# chdir into the checkout -- presumably so the library's relative
# data/sample paths resolve; TODO confirm.
os.chdir(libpath)
import graph
import player
g = graph.connected_directed_graph()
p = player.Player(g)
#--
# set resolution
# Rebuild the player at a fixed on-screen scale (800 px for a 1500-unit
# graph) with thicker strokes.
p = player.Player(g, scale=800/1500.0, thick=2)
#--
p.thick=1
# NOTE(review): looks like this clears a cached base frame so it gets
# redrawn at the new thickness -- verify against player.Player.
p._baseframe = None
#--
# selections
# Hand-picked screen coordinates used as trigger points.
CENTER = [[602, 833]]
CORNERS = [[48, 1234], [1194, 1239], [1196, 47], [41, 73]]