def make_video(out_fname, ims, fps, tmp_ext='.ppm'):
    """Encode the image sequence `ims` into an H.264 video at `out_fname`.

    Frames are written in parallel as temporary image files (one per frame)
    via make_video_helper, assembled with ffmpeg, and the temporary files
    are removed afterwards.

    out_fname: output video path (ffmpeg -y, so an existing file is overwritten).
    ims: sequence of frames; each is handed to make_video_helper for saving
        (presumably that helper also pads frames to even dimensions, as the
        yuv420p pixel format requires -- TODO confirm).
    fps: frame rate of the encoded video.
    tmp_ext: extension (with leading dot) for the temporary frame files.
    """
    assert tmp_ext.startswith('.')
    in_dir = ut.make_temp_dir_big()
    # Save all frames to in_dir in parallel.
    ut.parmap(make_video_helper,
              [(i, x, in_dir, tmp_ext) for i, x in enumerate(ims)])
    cmd = ('ffmpeg -loglevel warning -f image2 -r %f '
           '-i %s/ao-video-frame%%05d%s -pix_fmt yuv420p -vcodec h264 '
           '-acodec aac -strict 2 -y %s') % (fps, in_dir, tmp_ext, out_fname)
    print(cmd)
    os.system(cmd)
    # Clean up the temporary frame files and their directory.
    for x in glob.glob(ut.pjoin(in_dir, 'ao-video-frame*%s' % tmp_ext)):
        os.remove(x)
    os.rmdir(in_dir)
def run(d_entity, d_routine):
    """Emit per-routine man pages, a sorted tags index and the
    irpf90_entities summary file.

    d_entity: name -> entity object; d_routine: name -> routine object.
    """
    # Print every entity (side effect of do_print).
    for entity in d_entity.values():
        do_print(entity, d_entity)

    # One "<name>.l" man page per routine, written in parallel.
    pages = [(os.path.join(mandir, routine.name) + ".l",
              do_print_subroutines(routine))
             for routine in d_routine.values()]

    def write_one(pair):
        path, body = pair
        lazy_write_file(path, body)

    parmap(write_one, pages)

    # Build a ctags-style index over entities and routines.
    # NOTE(review): entity prototypes use line.filename[0] while routine
    # prototypes use line.filename -- presumably entity prototypes carry a
    # list of filenames; confirm upstream before "fixing" this asymmetry.
    tags = []
    for name in d_entity.keys():
        proto = d_entity[name].prototype
        tags.append('%s\t%s\t%d\n' % (name, proto.filename[0], proto.i))
    for name in d_routine.keys():
        proto = d_routine[name].prototype
        tags.append('%s\t%s\t%d\n' % (name, proto.filename, proto.i))
    lazy_write_file("tags", ''.join(sorted(tags)))

    # Short per-entity summaries, ordered by entity name.
    summary_lines = [s for _, entity in sorted(d_entity.items())
                     for s in do_print_short(entity)]
    lazy_write_file(filename="irpf90_entities",
                    text='%s\n' % '\n'.join(summary_lines))
def make_temp_animated_gif_cmdline(seq, duration=0.5, dir=None, tmp_ext='.ppm'):
    """Build an animated GIF from the image sequence `seq` using ImageMagick.

    seq: sequence of images; each is saved via save_helper.
    duration: forwarded to `convert -delay` (NOTE(review): convert interprets
        -delay in ticks of 1/100 s, not seconds -- confirm callers pass the
        intended unit).
    dir: directory for the temporary GIF (forwarded to ut.make_temp).
    tmp_ext: extension (with leading dot) for the temporary frame files.
    Returns the path of the generated GIF.
    """
    seq_fname = ut.make_temp('.gif', dir=dir)
    # Delete the just-created placeholder so the conversion tool does not
    # prompt about overwriting.  os.remove replaces the previous
    # os.system('rm %s' % ...) call, which interpolated an unquoted path
    # into a shell command.
    if os.path.exists(seq_fname):
        os.remove(seq_fname)
    base = ut.make_temp('')
    fnames = ['%s_%04d%s' % (base, i, tmp_ext) for i in xrange(len(seq))]
    # Save the frames in parallel.
    ut.parmap(save_helper, zip(fnames, seq))
    ut.prn_sys('convert -layers OptimizePlus -delay %f -loop 0 %s_*%s %s'
               % (duration, base, tmp_ext, seq_fname))
    return seq_fname
def make_video(out_fname, ims, fps, tmp_ext = '.ppm'):
    # NOTE(review): duplicate of the earlier make_video definition in this
    # file, and itself shadowed by the later sound-aware make_video -- at
    # import time the last definition wins, so this body is likely dead code.
    """Encode the frame sequence `ims` as an H.264 video at `out_fname`.

    Frames are saved to temp files in parallel (make_video_helper), encoded
    with ffmpeg at `fps` frames/sec, and the temp files are removed.
    """
    assert tmp_ext.startswith('.')
    in_dir = ut.make_temp_dir_big()
    # for i, x in enumerate(ims):
    #   # pad to be even
    #   if x.shape[0] % 2 == 1 or x.shape[1] % 2 == 1:
    #     x = ig.pad_corner(x, x.shape[1] % 2, x.shape[0] % 2)
    #   ig.save(ut.pjoin(in_dir, 'ao-video-frame%05d%s' % (i, tmp_ext)), x)
    # Write all frames to temp files in parallel.
    ut.parmap(make_video_helper, [(i, x, in_dir, tmp_ext) for i, x in enumerate(ims)])
    cmd = 'ffmpeg -loglevel warning -f image2 -r %f -i %s/ao-video-frame%%05d%s -pix_fmt yuv420p -vcodec h264 -acodec aac -strict 2 -y %s' % (
        fps, in_dir, tmp_ext, out_fname)
    # try to make it work well in keynote (hack)
    #cmd = 'ffmpeg -r %f -i %s/ao-video-frame%%05d%s -y -qscale:v 0 %s' % (fps, in_dir, tmp_ext, out_fname)
    print cmd
    #print cmd
    os.system(cmd)
    #os.system('ffmpeg -f image2 -r %f -i %s/ao-video-frame%%05d%s -pix_fmt yuv420p -vcodec h264 -acodec aac -strict 2 -y %s' % (fps, in_dir, tmp_ext, out_fname))
    #os.system('ffmpeg -f image2 -r %f -i %s/ao-video-frame%%05d%s -pix_fmt yuv420p -vcodec h264 -acodec aac -strict 2 -y %s' % (fps, in_dir, tmp_ext, out_fname))
    # `if 0:` permanently disables the debug branch that would keep the
    # frames on disk; the else branch (cleanup) always runs.
    if 0:
        print 'HACK'
    else:
        # Remove the temporary frame files and their directory.
        for x in glob.glob(ut.pjoin(in_dir, 'ao-video-frame*%s' % tmp_ext)):
            os.remove(x)
        os.rmdir(in_dir)
def make_video(out_fname, ims, fps, tmp_ext='.ppm', sound=None):
    """Encode `ims` as an H.264 video, optionally muxing in a soundtrack.

    out_fname: output video path (ffmpeg -y, so it is overwritten).
    ims: frames handed to make_video_helper for saving; when the frames are
        given as strings (compressed image data), PNG temp files are used
        regardless of `tmp_ext`.
    fps: frame rate of the encoded video.
    tmp_ext: extension (with leading dot) for the temporary frame files.
    sound: None for silent output, a filename string, or an object with a
        .save(wav_path) method (saved to a temporary wav first).
    Raises RuntimeError when ffmpeg exits nonzero; in that case the
    temporary frames are deliberately left on disk to aid debugging.
    """
    if isinstance(ims[0], str):
        # Compressed input frames: write them out as PNG.
        tmp_ext = '.png'
    assert tmp_ext.startswith('.')
    in_dir = ut.make_temp_dir_big()
    ut.parmap(make_video_helper,
              [(i, x, in_dir, tmp_ext) for i, x in enumerate(ims)])
    with ut.temp_file('.wav') as tmp_wav:
        if sound is None:
            sound_flags = ''
        else:
            if not isinstance(sound, str):
                # Sound object rather than a path: serialize to a temp wav.
                sound.save(tmp_wav)
                sound = tmp_wav
            sound_flags = '-i "%s" -shortest' % sound
        # No aac flag here, for compatibility with some wav files.
        cmd = ('ffmpeg -loglevel warning -f image2 -r %f -i %s/ao-video-frame%%05d%s %s '
               '-pix_fmt yuv420p -vcodec h264 -strict -2 -y %s') % (
                   fps, in_dir, tmp_ext, sound_flags, out_fname)
        print(cmd)
        if os.system(cmd) != 0:
            # Include the failing command so the error is actionable.
            raise RuntimeError('ffmpeg failed: %s' % cmd)
        # Success: remove the temporary frames and their directory.
        for x in glob.glob(ut.pjoin(in_dir, 'ao-video-frame*%s' % tmp_ext)):
            os.remove(x)
        os.rmdir(in_dir)
def t_filename_preprocessed_text(self):
    """Pairs (filename, preprocessed_text), one per ordered irpf90 file,
    produced in parallel."""
    from preprocessed_text import Preprocess_text

    def preprocess_one(fname):
        # Preprocess a single file and tag the result with its name.
        return (fname, Preprocess_text(fname).preprocessed_text)

    return parmap(preprocess_one, self.irpf90_files_ordered)
def lookup_bilinear1d(xs, i, order = 4, mode = 'constant', cval = 0.0, par = 0):
    """Spline-interpolate `xs` at (possibly fractional) coordinates `i`.

    1-D `xs`: a single scipy.ndimage.map_coordinates call.
    2-D `xs`: each column is interpolated independently and the per-column
    results are concatenated along axis 1; a nonzero `par` farms the columns
    out through ut.parmap/map_helper.  Note that `cval` is only forwarded on
    the 1-D path.
    """
    if np.ndim(xs) == 1:
        return scipy.ndimage.map_coordinates(xs, i, order = order,
                                             mode = mode, cval = cval)
    ncols = xs.shape[1]
    if par:
        jobs = [(xs[:, col], i, order, mode) for col in range(ncols)]
        return np.concatenate(ut.parmap(map_helper, jobs), axis = 1)
    sampled_cols = []
    for col in range(ncols):
        sampled = scipy.ndimage.map_coordinates(
            xs[:, col], np.array([i]), order = order, mode = mode)
        sampled_cols.append(sampled[:, np.newaxis])
    return np.concatenate(sampled_cols, axis = 1)
def make_temp_animated_gif_cmdline(seq, duration = 0.5, dir = None, tmp_ext = '.ppm'): seq_fname = ut.make_temp('.gif', dir = dir) # avoid ffmpeg prompt os.system('rm %s' % seq_fname) base = ut.make_temp('') #fnames = ['%s_%d.jpg' % (base, i) for i in xrange(len(seq))] fnames = ['%s_%04d%s' % (base, i, tmp_ext) for i in xrange(len(seq))] # for fname, x in zip(fnames, seq): # ig.save(fname, x) # def f((fname, x)): # ig.save(fname, x) ut.parmap(save_helper, zip(fnames, seq)) ut.prn_sys('convert -layers OptimizePlus -delay %f -loop 0 %s_*%s %s' % (duration, base, tmp_ext, seq_fname)) #fps = 1./duration # ut.prn_sys('ffmpeg -r %f -i %s_%%d.jpg %s.avi' % (fps, base, base)) # ut.prn_sys('ffmpeg -i %s.avi -pix_fmt rgb24 %s' % (base, seq_fname)) return seq_fname
def concat_sounds(snds, par = 1):
    """Concatenate Sounds end to end, resampling first if their rates differ.

    par: nonzero -> resample in parallel via ut.parmap.
    Returns a new Sound; on mixed input rates the result uses the maximum
    rate among the inputs.
    """
    if ut.ndistinct(ut.mapattr(snds).rate) == 1:
        # All rates agree: just stack the sample arrays.
        stacked = np.concatenate(ut.mapattr(snds).samples, axis = 0)
        return Sound(None, snds[0].rate, stacked)
    # Mixed rates: bring every sound up to the highest rate, then recurse
    # (the recursive call takes the fast path above).
    sr = max(ut.mapattr(snds).rate)
    jobs = [(snd, sr) for snd in snds]
    if par:
        resampled = ut.parmap(resample_snd, jobs)
    else:
        resampled = map(resample_snd, jobs)
    assert ut.ndistinct(ut.mapattr(resampled).rate) == 1
    return concat_sounds(resampled)
def t_filename_parsed_text(self):
    '''(filename,parsed_text)

    Parse every preprocessed file and run the fixed sequence of
    transformation passes over the result.  The passes mutate and rebuild
    the parsed-text structures in a specific order; do not reorder them.
    '''
    d_entity = self.d_entity
    d_routine = self.d_routine
    import parsed_text
    # (name, same_as, regexp) triple for every entity, consumed by the parser.
    vtuple = [(v, s.same_as, s.regexp) for v, s in d_entity.iteritems()]
    def worker_parsed(filename_text):
        filename, text = filename_text
        return parsed_text.get_parsed_text(filename, text, d_entity, d_routine, vtuple)
    # Parse all preprocessed files in parallel.
    parsed_text_0 = parmap(worker_parsed, self.t_filename_preprocessed_text)
    from irpf90_t import NoDep, Declaration, Implicit, Use, Cont_provider
    def moved_to_top_l(ptext):
        # Hoist these statement kinds to the top of each parsed unit,
        # in place (move_to_top_list mutates `text`).
        l = [NoDep, Declaration, Implicit, Use, Cont_provider]
        for _, text in ptext:
            parsed_text.move_to_top_list(text, l)
    #Touch routine
    parsed_text.build_sub_needs(parsed_text_0, d_routine)
    moved_to_top_l(parsed_text_0)
    parsed_text_1 = parsed_text.add_subroutine_needs(parsed_text_0, d_routine)
    parsed_text_1 = parsed_text.move_variables(parsed_text_1)
    # Hoisting must be redone after move_variables rebuilt the text.
    moved_to_top_l(parsed_text_1)
    parsed_text.check_opt(parsed_text_1)
    parsed_text_1 = parsed_text.perform_loop_substitutions(parsed_text_1)
    #touch entity
    # (name, regexp) for every routine that is a function.
    stuple = [(s, v.regexp) for s, v in d_routine.iteritems() if v.is_function]
    parsed_text.build_needs(parsed_text_1, d_routine, stuple, d_entity)
    return parsed_text_1