Example #1
 def titleSequence(self, titlefile, frames=450):
     import shutil
     assert os.path.exists(titlefile)
     # framelimit is a module-level cap on how many frames get rendered
     if framelimit is not None:
         frames = min(frames, framelimit)
     # Convert the title image to YUV once, then copy it for the remaining frames
     first_yuv = self.yuv_name()
     jobqueue.do('convert %s %s %s %s' %
                 (titlefile, border.border(), video.exactGeometry(), first_yuv))
     self.frame += 1
     for i in range(1, frames):
         shutil.copy(first_yuv, self.yuv_name())
         self.frame += 1
Example #2
 def textBottom(self, jpgfmt, start, incr, frames, avg, divider, rgb, titleImage):
     """You've got some text near the bottom of the screen and you
     want to set a dividing height, and take the lower portion of
     the screen, where the text is, and fade it toward light blue
     so you have good contrast for the text. Above the division you
     have normal chrominance for the sequence.
     """
     # avg is how many subframes are averaged to produce each frame
     # ratio is the ratio of subframes to frames
     if framelimit is not None:
         frames = min(frames, framelimit)
     tmpimage = "/tmp/foo.jpg"
     tmpimage2 = "/tmp/foo2.jpg"
     averages = []
     averages2 = []
     # Stick the averages in a bunch of GIF files
     assert jpgfmt[-4:] == ".jpg"
     srcdir, fil = os.path.split(jpgfmt)
     jpgfmt = fil
     dstdir = srcdir
     avgfmt = jpgfmt[:-4] + ".gif"
     if avg > 1:
         # parallelize the averaging because it's the slowest operation
         # each job will be five frames
         q = jobqueue.JobQueue()
         i = 0
         while i < frames:
             frames_this_job = min(5, frames - i)
             ifiles = []
             ofiles = []
             for j in range(frames_this_job):
                 P = start + (i + j) * incr
                 for k in range(avg):
                     ifiles.append(jpgfmt % (P + k))
                 ofile = avgfmt % P
                 averages.append(ofile)
                 ofiles.append(ofile)
             job = MotionBlurJob(srcdir, dstdir, ifiles, ofiles)
             q.append(job)
             i += frames_this_job
         q.start()
         q.wait()
     else:
         averages = [jpgfmt % (start + i * incr) for i in range(frames)]
     for i in range(frames):
         fnum = start + incr * i
         yuv = (self.yuv_format() % self.frame) + ".yuv"
         jobqueue.do("convert %s %s %s" % (os.path.join(dstdir, averages[i]), clipped.exactGeometry(), tmpimage))
         # tmpimage is now in clipped dimensions
         if titleImage is not None:
             w, h = clipped.width, clipped.height
             jobqueue.do(
                 ('mogrify -region +0+%d -fill "rgb(%d,%d,%d)" -colorize 75 %s')
                 % (divider, rgb[0], rgb[1], rgb[2], tmpimage)
             )
             jobqueue.do("composite %s %s %s %s" % (titleImage, clipped.exactGeometry(), tmpimage, tmpimage))
         jobqueue.do("convert %s %s %s %s" % (tmpimage, border.border(), video.exactGeometry(), yuv))
         self.frame += 1
     return start + incr * frames
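Below is a hedged usage sketch for textBottom. It assumes, as the later script excerpt suggests, that textBottom is a method of animate.MpegSequence; the frame-name pattern, counts, divider height, RGB tint, and title overlay are illustrative assumptions, not values from the original project. The return value is the next source-frame index, so successive sequences can be chained.

import animate

# Hypothetical call -- every name and number here is illustrative only.
m = animate.MpegSequence()
next_start = m.textBottom('slowjpeg/frame.%06d.jpg',  # printf-style pattern for the source JPEGs
                          start=0, incr=1, frames=300,
                          avg=4,                       # average 4 subframes per output frame
                          divider=380,                 # y offset where the light-blue fade begins
                          rgb=(200, 220, 255),         # tint for the lower region
                          titleImage='credits.png')    # overlay composited after the tint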
Example #3
def drawOneFrame(t, filename):
    def move(t, start, finish):
        return start + t * (finish - start)
    # t varies from 0.0 to 1.0 over the course of the sequence
    preamble_xpos = 80
    preamble_ypos = move(t, -90, 180)
    nanorex_xpos = 50
    nanorex_ypos = move(t, 460, 140)
    cmd = (('composite -geometry +%d+%d ' % (nanorex_xpos, nanorex_ypos)) +
           'Nanorex_logos/nanorex_logo_text_outline_medium.png black.jpg /tmp/foo.jpg')
    jobqueue.do(cmd)
    # Fonts are in /usr/lib/openoffice/share/psprint/fontmetric/
    preamble = "Presented by"
    cmd = ('convert -fill white -font Helvetica-Bold -pointsize 48 -draw' +
           ' "text %d,%d \'%s\'" /tmp/foo.jpg %s' %
           (preamble_xpos, preamble_ypos, preamble, filename))
    jobqueue.do(cmd)
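Since t is meant to run from 0.0 to 1.0 over the sequence, the caller steps it once per frame. Here is a minimal driver sketch; the frame count and output filename pattern are assumptions for illustration.

# Hypothetical driver loop -- frame count and filename pattern are made up.
frames = 90
for i in range(frames):
    t = i / float(frames - 1)   # 0.0 on the first frame, 1.0 on the last
    drawOneFrame(t, '/tmp/title.%06d.jpg' % i)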
Example #4
 def encode(self):
     parfil = mpeg_dir + "/foo.par"
     outf = open(parfil, "w")
     outf.write(params % {'sourcefileformat': self.yuv_format(),
                          'frames': len(self),
                          'height': video.height,
                          'width': video.width,
                          'bitrate': bitrate})
     outf.close()
     # encoding is an inexpensive operation, do it even if not for real
     jobqueue.do('mpeg2encode %s/foo.par %s/foo.mpeg' % (mpeg_dir, mpeg_dir))
     jobqueue.do('rm -f %s/foo.mp4' % mpeg_dir)
     jobqueue.do('ffmpeg -i %s/foo.mpeg -sameq %s/foo.mp4' % (mpeg_dir, mpeg_dir))
Example #5
    ]

rendering = False

# Comment out stuff that's already done

for arg in sys.argv[1:]:
    if arg.startswith('debug='):
        jobqueue.DEBUG = string.atoi(arg[6:])
    elif arg == 'ugly':
        animate.povray_pretty = False
        animate.setScale(0.25)
    elif arg == 'clean':
        dirs = ('slowjpeg', 'slow_cpk_jpeg', 'fastjpeg', 'fast_cpk_jpeg')
        for d in dirs:
            jobqueue.do('rm -rf ' + os.path.join(animate.mpeg_dir, d))
            jobqueue.do('mkdir -p ' + os.path.join(animate.mpeg_dir, d))
    elif arg == 'rendering':
        rendering = True
    elif arg.startswith('framelimit='):
        animate.framelimit = string.atoi(arg[11:])

#################################

struct_name = 'wwrot'
# struct_name = 'simp'

animate.remove_old_yuvs()

m = animate.MpegSequence()
Example #6
    def motionBlur(self,
                   jpgfmt,
                   start,
                   incr,
                   frames,
                   avg,
                   textlist=None,
                   fadeTo=None,
                   titleImage=None):
        # avg is how many subframes are averaged to produce each frame
        # ratio is the ratio of subframes to frames
        if framelimit is not None: frames = min(frames, framelimit)
        tmpimage = '/tmp/foo.jpg'
        tmpimage2 = '/tmp/foo2.jpg'
        averages = []
        averages2 = []
        # Stick the averages in a bunch of GIF files
        assert jpgfmt[-4:] == '.jpg'
        srcdir, fil = os.path.split(jpgfmt)
        jpgfmt = fil
        dstdir = srcdir
        avgfmt = jpgfmt[:-4] + '.gif'
        if fadeTo is not None:
            assert fadeTo[-4:] == '.jpg'
            fadesrcdir, fadeTo = os.path.split(fadeTo)
            avgfadefmt = fadeTo[:-4] + '.gif'
            fadedstdir = fadesrcdir
        if avg > 1:
            # parallelize the averaging because it's the slowest operation
            # each job will be five frames
            q = jobqueue.JobQueue()
            i = 0
            while i < frames:
                frames_this_job = min(5, frames - i)
                ifiles = []
                ofiles = []
                for j in range(frames_this_job):
                    P = start + (i + j) * incr
                    for k in range(avg):
                        ifiles.append(jpgfmt % (P + k))
                    ofile = avgfmt % P
                    averages.append(ofile)
                    ofiles.append(ofile)
                job = MotionBlurJob(srcdir, dstdir, ifiles, ofiles)
                q.append(job)
                i += frames_this_job
            if fadeTo is not None:
                i = 0
                while i < frames:
                    frames_this_job = min(5, frames - i)
                    ifiles = []
                    ofiles = []
                    for j in range(frames_this_job):
                        P = start + (i + j) * incr
                        for k in range(avg):
                            ifiles.append(fadeTo % (P + k))
                        ofile = avgfadefmt % P
                        averages2.append(ofile)
                        ofiles.append(ofile)
                    job = MotionBlurJob(fadesrcdir, fadedstdir, ifiles, ofiles)
                    q.append(job)
                    i += frames_this_job
            q.start()
            q.wait()
        else:
            averages = [jpgfmt % (start + i * incr) for i in range(frames)]
            if fadeTo is not None:
                averages2 = [fadeTo % (start + i * incr) for i in range(frames)]

        for i in range(frames):
            fnum = start + incr * i
            yuv = (self.yuv_format() % self.frame) + '.yuv'
            jobqueue.do('convert %s %s %s' % (os.path.join(
                dstdir, averages[i]), clipped.exactGeometry(), tmpimage))
            # tmpimage is now in clipped dimensions
            if fadeTo is not None:
                jobqueue.do('convert %s %s %s' %
                            (os.path.join(fadedstdir, averages2[i]),
                             clipped.exactGeometry(), tmpimage2))
                # perform a cross-fade
                inputs = ''
                for j in range(frames):
                    if j < i:
                        inputs += ' ' + tmpimage2
                    else:
                        inputs += ' ' + tmpimage
                jobqueue.do('convert -average %s %s' % (inputs, tmpimage))
            if titleImage is not None:
                jobqueue.do(
                    'convert %s -average lightblue.jpg lightblue.jpg lightblue.jpg %s %s'
                    % (clipped.exactGeometry(), tmpimage, tmpimage))
                jobqueue.do(
                    'composite %s %s %s %s' %
                    (titleImage, clipped.exactGeometry(), tmpimage, tmpimage))
            elif textlist is not None:
                texts = textlist(i)
                cmd = 'convert ' + tmpimage + ' -font Courier-Bold -pointsize 30 '
                for j in range(len(texts)):
                    cmd += ' -annotate +10+%d "%s"' % (30 * (j + 1), texts[j])
                cmd += ' ' + tmpimage
                jobqueue.do(cmd)
            jobqueue.do(
                'convert %s %s %s %s' %
                (tmpimage, border.border(), video.exactGeometry(), yuv))
            self.frame += 1
        return start + incr * frames
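A hedged usage sketch for motionBlur follows, with m an animate.MpegSequence instance as in the script excerpt above. With avg=4 and incr=4, each output frame would be the average of four consecutive rendered subframes. Every path, count, and the caption callback here is an illustrative assumption, not taken from the original project.

# Hypothetical caption callback -- textlist is called with the output-frame
# index and must return the list of text lines to annotate onto that frame.
def captions(i):
    return ['wwrot demo', 'frame %d' % i]

next_start = m.motionBlur('fastjpeg/frame.%06d.jpg',  # printf-style pattern for the rendered subframes
                          start=0, incr=4, frames=250,
                          avg=4,                       # average 4 subframes per output frame
                          textlist=captions)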
Example #7
def remove_old_yuvs():
    # you don't always want to do this
    jobqueue.do("rm -rf " + mpeg_dir + "/yuvs")
    jobqueue.do("mkdir -p " + mpeg_dir + "/yuvs")