Example #1
def image_encode(src,f,frame_height, mode) :
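    """
    Encode a PIL RGBA image `src` into the open binary file `f` as a
    skip/blit run-length stream (format inferred from the code below).

    The image is reduced to a 16-color adaptive palette; each line becomes
    alternating runs of transparent pixels (skips) and visible pixels
    (blits). Every blit is written as a one-byte header packing the skip
    count, the blit length and an end-of-line flag, followed by pixel data
    as packed palette indices ('p4') or raw bytes ('u8'). `line16` stores
    an offset every 16 lines for random access; `frame_height` is the
    height of one animation frame.

    Assumes module-level helpers/constants defined elsewhere in the
    project: reduce, p4_encode, add_record, TRANSP, MAXSKIP, MAXBLIT,
    plus PIL's Image, struct and itertools.groupby.
    """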
    # Get the alpha band
    alpha = src.split()[-1]
    r,g,b= src.convert('RGB').convert('P', palette=Image.ADAPTIVE, colors=16).convert('RGB').split()
    src=Image.merge("RGBA",(r,g,b,alpha))

    data = [reduce(c) for c in src.getdata()] # keep image in RAM as bytes / None
    w,h=src.size
    palette=sorted(x for x in set(data) if x is not None)

    print '//',len(palette),'colors '

    s_blits = [] # stringified blits for all image

    start_file = f.tell()
    line16=[] # offsets from the start of the data, one u16 entry every 16 lines

    for y in range(h) :
        if y%16==0 :
            ofs = sum(len(x) for x in s_blits)
            line16.append(ofs)

        skipped=0
        blits=[]

        line=data[y*w:(y+1)*w] # byte/none data
        for c,g in groupby(line, lambda x:x!=TRANSP) :
            t = tuple(g)
            if not c :
                skipped = len(t)
                # if skip too big, split !
                while (skipped>MAXSKIP) :
                    blits.append([MAXSKIP,(),False])
                    skipped -= MAXSKIP
            else :
                # likewise, split runs longer than MAXBLIT into several blits
                while t :
                    blits.append([skipped,t[:MAXBLIT],False])
                    skipped=0
                    t=t[MAXBLIT:]

        # drop trailing empty blits (pure skip runs at end of line)
        while blits and blits[-1][1]==() :
            del blits[-1]

        # set EOL
        if blits :
            blits[-1][2]=True
        else :
            blits.append([0,[],True])


        # now encode line : (header + blit) x n
        for skip, blit, eol in blits :
            header=(skip<<4) | (len(blit) << 1) | (1 if eol else 0)
            s = struct.pack('B', header)
            if mode=='p4' :
                s+= p4_encode(blit,palette=palette)
            elif mode=='u8': # keep native
                s+= struct.pack('%dB'%len(blit),*blit)
            else :
                raise ValueError("bad mode")
            s_blits.append(s)

    data = ''.join(s_blits)
    data+= '\0'*((-len(data))%4) # pad the blit stream to a multiple of 4 bytes

    # -- save to file

    # save header
    add_record(f,'header',struct.pack("<2I",w,frame_height))

    # save palette
    if mode in ('p4',) :
        add_record(f,'palette',struct.pack("<%dB"%len(palette),*palette))

    # write data (the padded blit stream)
    add_record(f,mode,data)

    # line16 record
    add_record(f,'line16',struct.pack("%dH"%len(line16),*line16))

    # finish file
    add_record(f,'end','')

    size=f.tell()
    print '// %d bytes (%d/1M), reduction by %.1f'%(size,1024*1024/size,2*float(w)*h/size)
Example #2
def couples_encode(img,f,frame_height, mode, out_file):
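    """
    Encode `img` as horizontal pixel pairs ("couples"); format inferred
    from the code below. Each line is cut into 2-pixel couples, all
    couples are quantized to a shared palette (255 or 64 entries depending
    on image size), and each line is stored as skip/blit runs of couple
    indices behind a 16-bit header. A preview image showing the
    quantization result is written to `out_file`.

    Assumes module-level helpers/constants defined elsewhere: ALPHA_T,
    DEBUG, quantize_couples, reduce_couple, add_record, plus PIL's Image,
    struct, itertools.groupby and itertools.chain.
    """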
    w,h=img.size
    line16=[]

    # get all couples
    # threshold alpha: pixels above ALPHA_T become fully opaque, the rest fully transparent
    # (could be faster with numpy)
    d=list((r,g,b,255) if a>ALPHA_T else (0,0,0,0) for (r,g,b,a) in img.getdata())

    # ok now encode couples
    couples = []
    all_line_couples = []

    for y in range(h) : 
        line = d[y*w: y*w+w]
        if len(line)%2 : line.append(line[-1])
        line_couples = [line[i]+line[i+1] for i in range(0,len(line),2)]
        all_line_couples.append(line_couples)
        couples += line_couples

    print '  ',len(couples),"couples,",len(set(couples)),"distinct"

    nbcol = 255 if w*h > 1024 else 64
    pal,invpal = quantize_couples(couples, nbcol)
    intpal = [reduce_couple(p) for p in pal] # reduced palette
    if DEBUG : 
        for n,p in enumerate(pal) : print n,p, "%08x"%reduce_couple(p)

    s_blits=[] # all lines
    for y,line_couples in enumerate(all_line_couples) : 
        if y%16==0 : 
            ofs = sum(len(x) for x in s_blits)
            line16.append(ofs/4) # XXX use /4 but need to align 

        blits = []
        skipped=0
        for skip,g in groupby(line_couples, lambda x:x==(0,0,0,0,0,0,0,0)) : 
            t = tuple(g)
            if skip : 
                skipped = len(t)
                # if skip too big, split !
                while (skipped>127) : 
                    blits.append([127,(),False])
                    skipped -= 127
            else :
                # likewise, split blits longer than 127 couples
                while t :
                    blits.append([skipped,t[:127],False])
                    skipped=0
                    t=t[127:]
        
        # set EOL 
        if blits : 
            blits[-1][2]=True
        else : 
            blits.append([0,[],True])
            
        # now encode line : (header + blit) x n
        for skip, blit, eol in blits :      
            header=(skip<<9) | (len(blit) << 1) | (1 if eol else 0)
            sdata = [invpal[c] for c in blit]
            sdata+=(0,)*((2-len(blit))%4) # pad so the 2-byte header + data total a multiple of 4 bytes
            if DEBUG : print 'skip',skip,'blt',len(blit),'eol',eol,sdata
            s_blits.append(struct.pack('<H%dB'%len(sdata), header,*sdata))
            


    add_record(f,'header',struct.pack("<2I",w,frame_height)) # 1 frame for now
    add_record(f,'palette_couple',struct.pack("<%dL"%len(intpal),*intpal))
    add_record(f,mode,''.join(s_blits))
    add_record(f,'line16',struct.pack("%dH"%len(line16),*line16))
    add_record(f,'end','')

    size=f.tell()
    print '// %d bytes (%d/1M), reduction by %.1f'%(size,1024*1024/size,2*float(w)*h/size)

    # output the quantized result as a preview image

    # project each component 
    tr1 = dict((k,tuple(pal[invpal[k]][:4])) for k in invpal)
    tr2 = dict((k,tuple(pal[invpal[k]][4:])) for k in invpal)

    # replace each couple by its transform
    newdata = []
    for l in all_line_couples : 
        newdata += list(chain(*[(tr1[c],tr2[c]) for c in l]))

    img = Image.new("RGBA",(len(all_line_couples[0])*2,h))
    img.putdata(newdata)
    # out file
    img.info['transparency']=None # FIXME: handle alpha in the preview output
    img.save(out_file)
Example #3
def image_encode(src,f,frame_height, quality) : 
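    """
    Lossy RLE encoder (format inferred from the code below): consecutive
    pixels whose quantized value (see err) is identical are merged into a
    single run, stored as a 16-bit header (run length and end-of-line
    flag) followed by the most frequent original 16-bit color of the run.
    `quality` controls how coarsely colors are bucketed before runs are
    compared; 0 keeps exact colors.

    Assumes module-level helpers defined elsewhere: reduce, rgba,
    add_record, plus struct, itertools.groupby and collections.Counter.
    """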
    def err(i) :
        "quantization key: bucket each RGB component according to quality (0 keeps exact colors)"
        q = 1/(1.+quality*254) # local name chosen to avoid shadowing the output file f
        t=rgba(i)
        c2=(int(int(t[0]*q)/q),int(int(t[1]*q)/q),int(int(t[2]*q)/q),t[3])
        return reduce(c2)

    data = [reduce(c) for c in src.getdata()] # keep the image in RAM as reduced 16-bit color values
    w,h=src.size

    s_blits = [] # stringified blits for all image
    
    start_file = f.tell()
    line16=[] # offsets from the start of the data, as u16 word indices, one entry every 16 lines

    for y in range(h) :
        if y%16==0 : 
            ofs = sum(len(x) for x in s_blits)
            line16.append(ofs/4) # XXX use /4 but need to align 

        blits=[]

        line=data[y*w:(y+1)*w] # 16 bit data
        for c,g in groupby(line, key=err) : 
            t = tuple(g)
            n = len(t)
            # take the most frequent real color of the run as its representative
            cnt = Counter(t)
            col = cnt.most_common(1)[0][0]

            blits.append([n,col,False])

        # set EOL 
        if blits : 
            blits[-1][2]=True
        else : 
            blits.append([0,[],True])


        # now encode line : (header + blit) x n
        for num, color, eol in blits :          
            header=(num<<3) | (1 if eol else 0)
            s = struct.pack('<HH', header, color)
            s_blits.append(s)

    # save header
    add_record(f,'header',struct.pack("<2I",w,frame_height)) # 1 frame for now

    # write data
    add_record(f,'rle',''.join(s_blits))

    # line16 record
    add_record(f,'line16',struct.pack("%dH"%len(line16),*line16))

    # finish file
    add_record(f,'end','')

    size=f.tell()

    print '// %d bytes (%d/1M), reduction by %.1f'%(size,1024*1024/size,2*float(w)*h/size)
Example #4
def couples_encode(img,f,frame_height, mode, micro, out_file=None):
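    """
    Variant of the couples encoder above: couples are quantized to a
    shared palette, then each line is compressed with a PackBits-style RLE
    (the packbits helper, assumed to be defined elsewhere, yields either a
    list of literal values or a (count,value) tuple for a run). When
    `micro` is set, an 8-bit-per-couple palette (reduce_couple8) is
    written instead of the 32-bit one.

    Other assumed module-level helpers: ALPHA_T, quantize_couples,
    reduce_couple, add_record, plus PIL's Image, struct and itertools.
    """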
    w,h=img.size
    line16=[]

    # get all couples
    # threshold alpha: pixels above ALPHA_T become fully opaque, the rest fully transparent
    # (could be faster with numpy)
    d=list((r,g,b,255) if a>ALPHA_T else (0,0,0,0) for (r,g,b,a) in img.getdata())

    # ok now encode couples
    couples = []
    all_line_couples = []

    for y in range(h) :
        line = d[y*w: y*w+w]
        if len(line)%2 : line.append(line[-1])
        line_couples = [line[i]+line[i+1] for i in range(0,len(line),2)]
        all_line_couples.append(line_couples)
        couples += line_couples

    print '  ',len(couples),"couples,",len(set(couples)),"distinct"

    nbcol = 255 if w*h > 1024 else 64 # small images should not have a big couples palette -> XXX use less bits per couple ?
    pal,invpal = quantize_couples(couples, nbcol)
    intpal = [reduce_couple(p) for p in pal] # reduced palette
    intpal8 = [reduce_couple8(p) for p in pal] # idem but 8bit

    s_blits=[] # all lines
    for y,line_couples in enumerate(all_line_couples) :
        # now encode line : (header + blit) x n
        if y%16==0 :
            ofs = sum(len(x) for x in s_blits)
            line16.append(ofs)

        # packbits yields either a list of literal couple indices or a (count,couple) tuple for a run
        #print len(line_couples), [invpal[x] for x in line_couples]

        for bl in packbits(invpal[x] for x in line_couples) :
            if isinstance(bl,tuple) : # run: encoded as negative count followed by the couple index
                s_blits.append(struct.pack('bB',-bl[0],bl[1]))
            else : # literal sequence: encoded as positive count followed by the couple indices
                s_blits.append(struct.pack('b%dB'%len(bl),len(bl),*bl))

    # spr write
    add_record(f,'header',struct.pack("<2I",w,frame_height)) # 1 frame for now
    if micro : 
        add_record(f,'palette_couple8',struct.pack("<%dH"%len(intpal8),*intpal8))
        print "palette:",intpal8
    else : 
        add_record(f,'palette_couple',struct.pack("<%dL"%len(intpal),*intpal))
    add_record(f,mode,''.join(s_blits))
    add_record(f,'line16',struct.pack("%dH"%len(line16),*line16))
    add_record(f,'end','')

    """
    # no spr write
    f.write(struct.pack("<4H",w,frame_height,img.size[1]/frame_height,len(intpal))) # u32 w,frame_h,nbframes,nb pal
    f.write(struct.pack("<%dL"%len(intpal),*intpal)) # len pal x u32 couple
    f.write(struct.pack("H%dH"%len(line16),len(line16),*line16)) # len(line16), line16
    f.write(''.join(s_blits))
    """

    size=f.tell()
    print '// %d bytes (%d/1M), reduction by %.1f'%(size,1024*1024/size,2*float(w)*h/size)

    # output the quantized result as a preview image

    # project each component
    tr1 = dict((k,tuple(pal[invpal[k]][:4])) for k in invpal)
    tr2 = dict((k,tuple(pal[invpal[k]][4:])) for k in invpal)

    # replace each couple by its transform
    newdata = []
    for l in all_line_couples :
        newdata += list(chain(*[(tr1[c],tr2[c]) for c in l]))

    if out_file :
        img = Image.new("RGBA",(len(all_line_couples[0])*2,h))
        img.putdata(newdata)
        # out file
        img.info['transparency']=None # FIXME: handle alpha in the preview output
        img.save(out_file)
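
A minimal usage sketch for the image_encode of Example #1 (hypothetical: the file names and the assumption that these functions live in one module together with the helpers they reference are illustrative, not part of the original examples):

from PIL import Image

# hypothetical input/output names; frame_height set to the full image height (single frame)
src = Image.open('sprite.png').convert('RGBA')
with open('sprite.spr', 'wb') as f:
    image_encode(src, f, frame_height=src.size[1], mode='p4')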