Exemplo n.º 1
0
class Mem(object):
    """Word-addressable memory backed by a bitstring ``BitStream``.

    Positions passed to :meth:`jump`, :meth:`get` and :meth:`set` are byte
    offsets; sizes are in bits (the machine word here is 16 bits).
    """

    def __init__(self):
        # 600 words of 16 bits each, zero-initialised.
        self.real = BitStream(600 * 16)
        # Number of jump() calls performed, e.g. for statistics.
        self.jumps = 0

    def load(self, file):
        """Replace the memory image with the contents of the file *file*."""
        self.real = BitStream(filename=file)

    def save(self, file):
        """Write the raw memory image to the open binary file object *file*."""
        self.real.tofile(file)

    def jump(self, pos):
        """Move the read cursor to byte offset *pos* and count the jump."""
        self.jumps += 1
        self.real.bytepos = pos

    def read(self, size=16):
        """Read *size* bits from the current cursor position.

        Bug fix: the original ignored *size* and always read 16 bits.
        """
        return self.real.read(size)

    def get(self, pos, size=16):
        """Return *size* bits starting at byte offset *pos* (cursor unchanged)."""
        realpos = pos * 8
        return self.real[realpos:realpos + size]

    def set(self, pos, bits):
        """Overwrite memory at byte offset *pos* with the bits in *bits*."""
        realpos = pos * 8
        self.real[realpos:realpos + len(bits)] = bits

    @property
    def pos(self):
        """Current cursor position, as a byte offset."""
        return self.real.bytepos
Exemplo n.º 2
0
class Mem(object):
    """16-bit-word memory stored in a bitstring ``BitStream``.

    ``pos`` arguments are byte offsets; ``size`` arguments are bit counts
    (one word = 16 bits).
    """

    def __init__(self):
        # Allocate 600 zeroed 16-bit words.
        self.real = BitStream(600*16)
        # Counter of cursor jumps (profiling aid).
        self.jumps = 0

    def load(self, file):
        """Load the whole memory image from the file *file*."""
        self.real = BitStream(filename=file)

    def save(self, file):
        """Dump the memory image to the open binary file object *file*."""
        self.real.tofile(file)

    def jump(self, pos):
        """Set the read cursor to byte offset *pos*."""
        self.jumps += 1
        self.real.bytepos = pos

    def read(self, size=16):
        """Read *size* bits at the cursor and advance it.

        Bug fix: previously the *size* parameter was ignored (hard-coded 16).
        """
        return self.real.read(size)

    def get(self, pos, size=16):
        """Fetch *size* bits at byte offset *pos* without moving the cursor."""
        realpos = pos * 8
        return self.real[realpos:realpos+size]

    def set(self, pos, bits):
        """Store *bits* at byte offset *pos*."""
        realpos = pos * 8
        self.real[realpos:realpos+len(bits)] = bits

    @property
    def pos(self):
        """Cursor position as a byte offset."""
        return self.real.bytepos
 def save(self, filename, font_type = FONT_TYPES.font01, game = GAMES.dr):
   """Serialize this font to an SPFT file.

   :param filename:  path the SPFT data is written to
   :param font_type: font variant selecting the UNKNOWN1 header block
   :param game:      game selecting the UNKNOWN1 header block
   """
   data = BitStream(SPFT_MAGIC)

   data += BitStream(uintle = len(self.data), length = 32)

   mapping_table_len = self.find_max_char() + 1 # zero-indexed so +1 for the size.
   mapping_table_start = 0x20
   # Each mapping entry is two bytes; font table follows immediately.
   font_table_start = mapping_table_len * 2 + mapping_table_start

   data += BitStream(uintle = font_table_start, length = 32)
   data += BitStream(uintle = mapping_table_len, length = 32)
   data += BitStream(uintle = mapping_table_start, length = 32)
   data += UNKNOWN1[game][font_type] + UNKNOWN2

   data += self.gen_mapping_table(mapping_table_len)

   data += self.gen_font_table()

   # Pad to a 16-byte boundary. Bug fix: use // so the byte count stays an
   # int — on Python 3, data.len / 8 is a float and repeating a BitStream
   # by a float raises TypeError.
   # NOTE(review): when already aligned this still appends a full 16 bytes
   # of padding (original behaviour, preserved) — confirm the format wants that.
   padding = BitStream(hex = '0x00') * (16 - ((data.len // 8) % 16))

   data += padding

   # Context manager closes the handle even if tofile() raises.
   with open(filename, "wb") as f:
     data.tofile(f)
    def save(self, filename, font_type=FONT_TYPES.font01, game=GAMES.dr):
        """Write the font out as an SPFT file.

        :param filename:  destination path
        :param font_type: font variant used to pick the UNKNOWN1 header block
        :param game:      game used to pick the UNKNOWN1 header block
        """
        data = BitStream(SPFT_MAGIC)

        data += BitStream(uintle=len(self.data), length=32)

        mapping_table_len = self.find_max_char(
        ) + 1  # zero-indexed so +1 for the size.
        mapping_table_start = 0x20
        # Two bytes per mapping entry; the font table starts right after.
        font_table_start = mapping_table_len * 2 + mapping_table_start

        data += BitStream(uintle=font_table_start, length=32)
        data += BitStream(uintle=mapping_table_len, length=32)
        data += BitStream(uintle=mapping_table_start, length=32)
        data += UNKNOWN1[game][font_type] + UNKNOWN2

        data += self.gen_mapping_table(mapping_table_len)

        data += self.gen_font_table()

        # Pad to a 16-byte boundary. Bug fix: // keeps the byte count an
        # int — on Python 3, data.len / 8 is a float, and multiplying a
        # BitStream by a float raises TypeError.
        # NOTE(review): a fully aligned stream still gets 16 padding bytes
        # (original behaviour preserved) — confirm the format expects that.
        padding = BitStream(hex='0x00') * (16 - ((data.len // 8) % 16))

        data += padding

        # with-block guarantees the file is closed on error.
        with open(filename, "wb") as f:
            data.tofile(f)
Exemplo n.º 5
0
    def decode(self, in_stream, out_stream):
        """Translate a stream of dictionary words back into binary data.

        Each word is mapped through ``self.word_dict`` to an integer code.
        Codes are buffered in a deque: once three are present, all but the
        last two are emitted as fixed-width ints (``self.int_type``).  The
        final two entries form the tail protocol — the last element is the
        bit length of the second-to-last value.

        :param in_stream:  file-like object the words are read from
        :param out_stream: binary file-like object the bits are written to
        """
        bs = BitStream()
        dq = deque()
        at_least_three = False
        for word in self.words_from_file(in_stream):
            # Ignore empty tokens and words not in the dictionary.
            if not word or word not in self.word_dict:
                continue
            #print >> sys.stderr, 'word:"', word, '"'
            dq.append(self.word_dict[word])
            # Always hold back the last two codes; they are handled below.
            if at_least_three or len(dq) == 3:
                bs.append(pack(self.int_type, dq.popleft()))
                at_least_three = True
                # Flush buffer-sized chunks to keep memory bounded.
                if bs.len > self.bit_buffer:
                    cut = 0
                    for byte in bs.cut(self.bit_buffer):
                        cut += 1
                        byte.tofile(out_stream)
                    # Drop exactly the bits that were written out.
                    # NOTE(review): assumes bit_buffer is a multiple of 8 so
                    # tofile() never pads a chunk — confirm against callers.
                    del bs[:cut * self.bit_buffer]

        # dq has to have exactly 2 elements here, the last is the bit length of the first, unless it's 0
        #print >> sys.stderr, 'dq:', dq
        extra_bits = dq.pop()
        bs.append(pack('uint:' + str(extra_bits), dq.popleft()))

        bs.tofile(out_stream)
Exemplo n.º 6
0
    def decode(self, in_stream, out_stream):
        """Decode a word stream (via ``self.word_dict``) into binary output.

        Buffers integer codes in a deque and writes all but the final two
        as fixed-width ints (``self.int_type``).  Tail protocol: the very
        last code is the bit length used to pack the one before it.

        :param in_stream:  file-like source of whitespace-separated words
        :param out_stream: binary file-like destination
        """
        bs = BitStream()
        dq = deque()
        at_least_three = False
        for word in self.words_from_file(in_stream):
            # Skip empties and unknown words.
            if not word or word not in self.word_dict:
                continue
            #print >> sys.stderr, 'word:"', word, '"'
            dq.append(self.word_dict[word])
            # Keep two codes in reserve for the tail handling below.
            if at_least_three or len(dq) == 3:
                bs.append(pack(self.int_type, dq.popleft()))
                at_least_three = True
                # Periodically flush whole chunks so bs stays small.
                if bs.len > self.bit_buffer:
                    cut = 0
                    for byte in bs.cut(self.bit_buffer):
                        cut += 1
                        byte.tofile(out_stream)
                    # Remove the flushed prefix.
                    # NOTE(review): relies on bit_buffer % 8 == 0 so chunks
                    # are byte-aligned and tofile() adds no padding — verify.
                    del bs[:cut * self.bit_buffer]

        # dq has to have exactly 2 elements here, the last is the bit length of the first, unless it's 0
        #print >> sys.stderr, 'dq:', dq
        extra_bits = dq.pop()
        bs.append(pack('uint:' + str(extra_bits), dq.popleft()))

        bs.tofile(out_stream)
 def save(self, filename):
   """Serialize the magic header, line count and all lines to *filename*.

   Layout: magic bytes, then a 16-bit little-endian line count, then the
   packed data of every line in order.
   """
   payload = BitStream(self.magic)
   payload += BitStream(uintle = len(self.lines), length = 16)

   for entry in self.lines:
     payload += entry.to_data()

   with open(filename, "wb") as out_file:
     payload.tofile(out_file)
Exemplo n.º 8
0
    def save(self, filename):
        """Write this object to *filename*: magic, 16-bit LE line count,
        then each line's packed data in order."""
        header = BitStream(self.magic)
        count = BitStream(uintle=len(self.lines), length=16)
        body = header + count

        for record in self.lines:
            body += record.to_data()

        with open(filename, "wb") as handle:
            body.tofile(handle)
Exemplo n.º 9
0
def decrypt_com2us_png():
    """Decrypt Com2uS-obfuscated PNG images in place under the images tree.

    An 'encrypted' file is detected by its 8th byte being 0x0B instead of
    the PNG signature byte 0x0A.  Decryption is a byte-wise substitution
    through the 256-entry table below, applied to the whole file.
    """
    # Substitution table: table[encrypted_byte] -> decrypted_byte.
    com2us_decrypt_values = [
        0x2f, 0x7c, 0x47, 0x55, 0x32, 0x77, 0x9f, 0xfb, 0x5b, 0x86, 0xfe, 0xb6,
        0x3e, 0x06, 0xf4, 0xc4, 0x2e, 0x08, 0x49, 0x11, 0x0e, 0xce, 0x84, 0xd3,
        0x7b, 0x18, 0xa6, 0x5c, 0x71, 0x56, 0xe2, 0x3b, 0xfd, 0xb3, 0x2b, 0x97,
        0x9d, 0xfc, 0xca, 0xba, 0x8e, 0x7e, 0x6f, 0x0f, 0xe8, 0xbb, 0xc7, 0xc2,
        0xd9, 0xa4, 0xd2, 0xe0, 0xa5, 0x95, 0xee, 0xab, 0xf3, 0xe4, 0xcb, 0x63,
        0x25, 0x70, 0x4e, 0x8d, 0x21, 0x37, 0x9a, 0xb0, 0xbc, 0xc6, 0x48, 0x3f,
        0x23, 0x80, 0x20, 0x01, 0xd7, 0xf9, 0x5e, 0xec, 0x16, 0xd6, 0xd4, 0x1f,
        0x51, 0x42, 0x6c, 0x10, 0x14, 0xb7, 0xcc, 0x82, 0x7f, 0x13, 0x02, 0x00,
        0x72, 0xed, 0x90, 0x57, 0xc1, 0x2c, 0x5d, 0x28, 0x81, 0x1d, 0x38, 0x1a,
        0xac, 0xad, 0x35, 0x78, 0xdc, 0x68, 0xb9, 0x8b, 0x6a, 0xe1, 0xc3, 0xe3,
        0xdb, 0x6d, 0x04, 0x27, 0x9c, 0x64, 0x5a, 0x8f, 0x83, 0x0c, 0xd8, 0xa8,
        0x1c, 0x89, 0xd5, 0x43, 0x74, 0x73, 0x4d, 0xae, 0xea, 0x31, 0x6e, 0x1e,
        0x91, 0x1b, 0x59, 0xc9, 0xbd, 0xf7, 0x07, 0xe7, 0x8a, 0x05, 0x8c, 0x4c,
        0xbe, 0xc5, 0xdf, 0xe5, 0xf5, 0x2d, 0x4b, 0x76, 0x66, 0xf2, 0x50, 0xd0,
        0xb4, 0x85, 0xef, 0xb5, 0x3c, 0x7d, 0x3d, 0xe6, 0x9b, 0x03, 0x0d, 0x61,
        0x33, 0xf1, 0x92, 0x53, 0xff, 0x96, 0x09, 0x67, 0x69, 0x44, 0xa3, 0x4a,
        0xaf, 0x41, 0xda, 0x54, 0x46, 0xd1, 0xfa, 0xcd, 0x24, 0xaa, 0x88, 0xa7,
        0x19, 0xde, 0x40, 0xeb, 0x94, 0x5f, 0x45, 0x65, 0xf0, 0xb8, 0x34, 0xdd,
        0x0b, 0xb1, 0x29, 0xe9, 0x2a, 0x75, 0x87, 0x39, 0xcf, 0x79, 0x93, 0xa1,
        0xb2, 0x30, 0x15, 0x7a, 0x52, 0x12, 0x62, 0x36, 0xbf, 0x22, 0x4f, 0xc0,
        0xa2, 0x17, 0xc8, 0x99, 0x3a, 0x60, 0xa9, 0xa0, 0x58, 0xf6, 0x0a, 0x9e,
        0xf8, 0x6b, 0x26, 0x98
    ]

    for im_path in iglob('herders/static/herders/images/**/*.png',
                         recursive=True):
        encrypted = BitStream(filename=im_path)

        # Check if it is encrypted. 8th byte is 0x0B instead of the correct signature 0x0A
        encrypted.pos = 0x07 * 8
        signature = encrypted.peek('uint:8')
        if signature == 0x0B:
            print('Decrypting {}'.format(im_path))
            # Correct the PNG signature
            encrypted.overwrite('0x0A', encrypted.pos)

            # Replace bits with magic decrypted values
            # NOTE(review): overwrite() appears to advance the bit cursor
            # past the written byte, so this walks the file byte by byte
            # until peek() raises ReadError at EOF — confirm with the
            # bitstring docs for the installed version.
            try:
                while True:
                    pos = encrypted.pos
                    val = encrypted.peek('uint:8')
                    encrypted.overwrite(
                        Bits(uint=com2us_decrypt_values[val], length=8), pos)
            except ReadError:
                # EOF
                pass

            # Write it back to the file
            with open(im_path, 'wb') as f:
                encrypted.tofile(f)
Exemplo n.º 10
0
def decrypt_images(**kwargs):
    """Decrypt Com2uS-obfuscated PNGs and convert Joker-container JPEGs.

    Scans ``path`` (keyword argument; defaults to the herders images tree)
    recursively for ``*.png`` files and fixes each one in place.

    Keyword Args:
        path: root directory to scan for images.
    """
    path = kwargs.pop('path', 'herders/static/herders/images')
    for im_path in iglob(f'{path}/**/*.png', recursive=True):
        encrypted = BitStream(filename=im_path)

        # Check if it is 'encrypted'. 8th byte is 0x0B instead of the correct signature 0x0A
        encrypted.pos = 0x07 * 8
        signature = encrypted.peek('uint:8')
        if signature == 0x0B:
            print(f'Decrypting {im_path}')
            # Correct the PNG signature
            encrypted.overwrite('0x0A', encrypted.pos)

            # Replace bits with magic decrypted values
            # (com2us_decrypt_values is a module-level 256-entry byte
            # substitution table defined elsewhere in this module.)
            # NOTE(review): relies on overwrite() advancing the cursor so
            # the loop steps one byte per iteration until ReadError — verify.
            try:
                while True:
                    pos = encrypted.pos
                    val = encrypted.peek('uint:8')
                    encrypted.overwrite(
                        Bits(uint=com2us_decrypt_values[val], length=8), pos)
            except ReadError:
                # EOF
                pass

            # Write it back to the file
            with open(im_path, 'wb') as f:
                encrypted.tofile(f)

            continue

        # Check for weird jpeg format with extra header junk. Convert to png.
        encrypted.pos = 0
        if encrypted.peek('bytes:5') == b'Joker':
            print(f'Converting Joker container JPEG to PNG {im_path}')
            with open(im_path, 'rb') as f:
                img = JokerContainerFile(f)

            # Open it as a jpg and resave to disk
            try:
                new_imfile = Image.open(io.BytesIO(img.data.tobytes()))
                new_imfile.save(im_path)
            except IOError:
                print(f'Unable to open {im_path}')
Exemplo n.º 11
0
class NaluStreamer():
    """Incrementally build a NAL unit bit stream and export it to a file."""

    def __init__(self, nalu_type):
        '''
        Initialise the NAL unit.

        :param nalu_type: something like NAL_UNIT_TYPE_CODED_SLICE_IDR in nalutypes.py
        '''
        if (nalu_type != nalutypes.NAL_UNIT_TYPE_UNSPECIFIED):
            # For specific nalu types: start code + one-byte NAL header
            # (forbidden_zero_bit, nal_ref_idc, nal_unit_type).
            self.forbidden_zero_bit = '0b0'
            self.nal_ref_idc = '0b11'
            self.nal_unit_type = "0b" + "{0:05b}".format(nalu_type)

            self.stream = BitStream(START_CODE_PREFIX)
            self.stream.append(self.forbidden_zero_bit)
            self.stream.append(self.nal_ref_idc)
            self.stream.append(self.nal_unit_type)
        else:
            # For raw slice_data: no header, caller appends payload bits.
            self.stream = BitStream()

    def rbsp_trailing_bits(self):
        '''
        According to the RBSP trailing bits syntax on page 35, and the
        explanation on page 49: append the stop bit, then zero bits only
        until the stream is byte aligned.
        '''
        rbsp_stop_one_bit = '0b1'
        rbsp_alignment_zero_bit = '0b0'
        self.stream.append(rbsp_stop_one_bit)

        # Bug fix: the original computed 8 - (length % 8), which appends a
        # full byte of zeros when the stream is already byte-aligned after
        # the stop bit; the spec requires zero alignment bits in that case.
        plus = (8 - (self.stream.length % 8)) % 8

        for x in range(0, plus):
            self.stream.append(rbsp_alignment_zero_bit)

    def export(self, bitstream_output_handler):
        """
        Write the accumulated binary data to the given open binary file.
        """
        self.stream.tofile(bitstream_output_handler)
Exemplo n.º 12
0
def dump_to_file(p_scale_vars, seeds, bits_per_block, out_file):
    """Write the p_scale values and the seeds to file in a bit-efficient way.

    Each p_scale value is stored as a float of P_SCALE_LENGTH bits; each
    seed follows as an unsigned integer of *bits_per_block* bits.

    Parameters
    ---------

    p_scale_vars: list
    seeds: list
    bits_per_block: int
    out_file: str
    """
    # Accumulate everything into one bitstream, p_scale values first.
    bits = BitStream()
    for value in p_scale_vars:
        bits += BitStream(float=value, length=P_SCALE_LENGTH)
    # Then append every seed at the configured block width.
    for seed in seeds:
        bits += BitStream(uint=seed, length=bits_per_block)

    # Flush the whole stream to disk.
    with open(out_file, mode='wb+') as handle:
        bits.tofile(handle)
def decode_image(file_location="lsb_RGB.png"):
    """Decodes the hidden message in an image.

    Reads the least significant bit of every pixel's red, green and blue
    values (x outer, y inner; channels in R, G, B order) and writes the
    recovered bit stream to ``output.bin``.

    file_location: the location of the image file to decode. By default is
        the provided encoded image in the images folder
    """
    encoded_image = Image.open(file_location)
    red_channel = encoded_image.split()[0]
    green_channel = encoded_image.split()[1]
    blue_channel = encoded_image.split()[2]

    b = BitStream()

    x_size = encoded_image.size[0]
    y_size = encoded_image.size[1]

    # (Removed unused decoded_image/pixels locals from the original —
    # nothing was ever drawn into them.)
    for i in range(x_size):
        for j in range(y_size):
            # Append the LSB of each channel, always in R, G, B order.
            for channel in (red_channel, green_channel, blue_channel):
                if channel.getpixel((i, j)) & 1 == 1:
                    b.append('0b1')
                else:
                    b.append('0b0')

    print('save file')
    # Context manager closes the output even if tofile() raises.
    with open('output.bin', 'wb') as f:
        b.tofile(f)
Exemplo n.º 14
0
def main():
    """Insert HDR metadata into an HEVC (H.265) Annex-B bitstream.

    Optionally builds an SEI prefix NAL carrying MaxCLL/MaxFALL and/or
    mastering-display-colour-volume data, parses and re-encodes the SPS
    NAL, then streams the rest of the input file through unchanged.

    Relies on module-level state prepared elsewhere: ``args`` (``infile``,
    ``outfile``, ``maxcll``), ``md_arg_str``, ``maxcll`` and ``chunk``,
    plus the helpers ``profile_tier_level``, ``vui_parameters`` and
    ``rbsp_trailing_bits``.
    """

    if args.infile == args.outfile :
        print ('Error! Source and Destination can not be the same file!')
        sys.exit()

    if not os.path.exists(args.infile) :
        print ('Error! Given input file name not found! Please check path given in CMD or set in script code!')
        sys.exit()
    if md_arg_str :
        # Raw string avoids the invalid-escape warning for '\d' on Python 3.
        md = re.findall(r'\d+',md_arg_str)
        if len(md) != 10 :
            print ('Specified wrong "-masterdisplay" parameter! Please check!\n Example: G(13250,34500)B(7500,3000)R(34000,16000)WP(15635,16450)L(10000000,1) or do not specify')
            sys.exit()

    if maxcll :
        mcll = re.findall(r'\d+',maxcll)
    sei_ok = 0

    # NOTE(review): these handles stay open across the whole function and
    # are closed explicitly at the end; restructuring into with-blocks
    # would require reworking the chunked copy loop below.
    F = open (args.infile,'r+b')
    o = open (args.outfile,'wb')

    print ('Parsing the infile:')
    print ('')
    print ('==========================')
    print ('')
    print ('Prepending SEI data')
    s = BitStream(F.read(chunk))

    # Locate NAL start codes; 0x42 / 0x4e after the start code mark SPS
    # and prefix-SEI NAL headers respectively.
    nals = list(s.findall('0x000001', bytealigned=True))
    sps_nals = list(s.findall('0x00000142', bytealigned=True))
    sei_pref_nals = list (s.findall('0x0000014e', bytealigned=True))
    size = [y - x for x,y in zip(nals,nals[1:])]
    sps_pos = list(set(nals).intersection(sps_nals))
    sei_pref_nals_pos = list(set(nals).intersection(sei_pref_nals))
    sps_size = size[nals.index(sps_nals[0])]
    if sei_pref_nals :
        sei_pref_nal_size = ( size[nals.index(sei_pref_nals[0])])
### MAXCLL & MAXFALL ###

    if args.maxcll or md_arg_str :
        # Build the SEI NAL header (nal_unit_type 39 = PREFIX_SEI).
        sei_forbidden_zero_bit  = 0
        sei_nal_unit_type = 39
        sei_nuh_layer_id = 0
        sei_nuh_temporal_id_plus1 = 1
        new_sei_string = pack ('uint:1,2*uint:6,uint:3',sei_forbidden_zero_bit,sei_nal_unit_type,sei_nuh_layer_id,sei_nuh_temporal_id_plus1)
        print ('Starting new SEI NALu...')

        if maxcll :
            # Payload type 144: content_light_level_info.
            sei_last_payload_type_byte = 144
            sei_last_payload_size_byte = 4
            sei_max_content_light_level = int(mcll[0])
            sei_max_pic_average_light_level = int(mcll[1])
            new_sei_string += pack ('2*uint:8,2*uint:16',sei_last_payload_type_byte,sei_last_payload_size_byte,sei_max_content_light_level,sei_max_pic_average_light_level)
            print ('SEI message with MaxCLL=',sei_max_content_light_level,' and MaxFall=',sei_max_pic_average_light_level,' created in SEI NAL')

        if md_arg_str :
            # Payload type 137: mastering_display_colour_volume.
            md_sei_last_payload_type_byte = 137
            md_sei_last_payload_size_byte = 24
            #MD string ref
            #G(13250,34500)B(7500,3000)R(34000,16000)WP(15635,16450)L(10000000,1)
            new_sei_string += pack ('2*uint:8',md_sei_last_payload_type_byte,md_sei_last_payload_size_byte)
            for i in range (len(md)-2) :
                new_sei_string += pack ('uint:16',int(md[i]))

            new_sei_string += pack ('uint:32',int(md[8]))
            new_sei_string += pack ('uint:32',int(md[9]))

            # Emulation prevention: 0x0000 -> 0x000003.
            new_sei_string.replace ('0x0000','0x000003',bytealigned=True)
            print ('SEI message Mastering Display Data',md_arg_str,'created in SEI NAL')

        new_sei_string = '0x00000001' + new_sei_string + '0x00'
        sei_ok = True



### ------------------ ###

    print ('Looking for SPS.........', sps_pos)
    print ('SPS_Nals_addresses', sps_pos)
    print ('SPS NAL Size', sps_size)
    print ('Starting reading SPS NAL contents')


    s.pos = sps_pos[0]
    t = s.peek(sps_size)

    # Skip the 24-bit start code prefix.
    t.pos = t.pos + 24

    forbidden_zero_bit  = t.read('uint:1')
    nal_unit_type = t.read('uint:6')
    nuh_layer_id = t.read('uint:6')
    nuh_temporal_id_plus1 = t.read('uint:3')
    nal_t = t[:]

# 7.3.1.1
    # Convert NAL data (Annex B format) to RBSP data
    # NOTE(review): tobytes() return value is discarded; the replace()
    # below performs the actual emulation-prevention-byte removal.
    t.tobytes()
    t.replace ('0x000003','0x0000')


# SPS parse block


    sps_video_parameter_set_id = t.read('uint:4')
    sps_max_sub_layers_minus1 = t.read('uint:3')
    sps_temporal_id_nesting_flag = t.read('uint:1')
    ptl = profile_tier_level(t, sps_max_sub_layers_minus1)
    sps_seq_parameter_set_id = t.read('ue')
    chroma_format_idc = t.read('ue')
    if chroma_format_idc == 3:
        separate_colour_plane_flag = t.read('uint:1')
    pic_width_in_luma_samples = t.read ('ue')
    pic_height_in_luma_samples = t.read ('ue')
    conformance_window_flag = t.read ('uint:1')
    if (conformance_window_flag) :
        conf_win_left_offset = t.read('ue')
        conf_win_right_offset = t.read('ue')
        conf_win_top_offset = t.read('ue')
        conf_win_bottom_offset = t.read('ue')
    bit_depth_luma_minus8 = t.read ('ue')
    bit_depth_chroma_minus8 = t.read ('ue')
    log2_max_pic_order_cnt_lsb_minus4 = t.read('ue')
    sps_sub_layer_ordering_info_present_flag = t.read('uint:1')
#   for (i = (sps_sub_layer_ordering_info_present_flag ? 0 : sps.max_sub_layers_minus1); i <= sps.max_sub_layers_minus1; i++)
    if sps_sub_layer_ordering_info_present_flag :
            sps_max_dec_pic_buffering_minus1 = t.read('ue')
            sps_max_num_reorder_pics = t.read('ue')
            sps_max_latency_increase_plus1 = t.read('ue')

    log2_min_luma_coding_block_size_minus3 = t.read ('ue')
    log2_diff_max_min_luma_coding_block_size = t.read ('ue')
    log2_min_luma_transform_block_size_minus2 = t.read ('ue')
    log2_diff_max_min_luma_transform_block_size = t.read ('ue')
    max_transform_hierarchy_depth_inter = t.read ('ue')
    max_transform_hierarchy_depth_intra = t.read ('ue')
    scaling_list_enabled_flag = t.read ('uint:1')
    """
    if( scaling_list_enabled_flag ) {
    sps_scaling_list_data_present_flag u(1)
    if( sps_scaling_list_data_present_flag )
    scaling_list_data( )
    }
    """
    amp_enabled_flag = t.read ('uint:1')
    sample_adaptive_offset_enabled_flag = t.read ('uint:1')
    pcm_enabled_flag = t.read ('uint:1')
    if pcm_enabled_flag :
        pcm_sample_bit_depth_luma_minus1 = t.read ('uint:4')
        pcm_sample_bit_depth_chroma_minus1 = t.read ('uint:4')
        log2_min_pcm_luma_coding_block_size_minus3  = t.read ('ue')
        log2_diff_max_min_pcm_luma_coding_block_size = t.read ('ue')
        pcm_loop_filter_disabled_flag = t.read ('uint:1')
    num_short_term_ref_pic_sets = t.read ('ue')
    if num_short_term_ref_pic_sets :
        for i in range (num_short_term_ref_pic_sets):
            if i != 0 :
                inter_ref_pic_set_prediction_flag = t.read ('uint:1')

            if not 'inter_ref_pic_set_prediction_flag' in globals() :
                """

                if i == num_short_term_ref_pic_sets :
                    delta_idx_minus1 = t.read ('ue')
                if not 'delta_idx_minus1' in globals():
                    delta_idx_minus1 = 0
                delta_rps_sign = t.read ('uint:1')
                abs_delta_rps_minus1 = t.read ('ue')
                for j in range (NumDeltaPoc) :
                    used_by_curr_pic_flag[j] = t.read ('uint:1')
                if used_by_curr_pic_flag[j] :
                    use_delta_flag[j] = t.read ('uint:1')

             else:
                """

                num_negative_pics = t.read ('ue')
                num_positive_pics = t.read ('ue')
                delta_poc_s0_minus1 = [t.read ('ue') for _ in range (num_negative_pics)]
                used_by_curr_pic_s0_flag = [ t.read ('uint:1') for _ in range (num_negative_pics)]
                delta_poc_s1_minus1 = [t.read ('ue') for _ in range(num_positive_pics)]
                used_by_curr_pic_s1_flag = [t.read ('uint:1') for _ in range(num_positive_pics)]


    long_term_ref_pics_present_flag = t.read ('uint:1')
    if long_term_ref_pics_present_flag :
        num_long_term_ref_pics_sps = t.read ('ue')

        # Bug fix: the original wrote `for i in range < (...)` — a
        # comparison of the range type with an int (TypeError at runtime)
        # — and assigned into lists that were never created.
        lt_ref_pic_poc_lsb_sps = []
        used_by_curr_pic_lt_sps_flag = []
        for i in range (num_long_term_ref_pics_sps):
            lt_ref_pic_poc_lsb_sps.append (t.read ('ue'))
            used_by_curr_pic_lt_sps_flag.append (t.read ('uint:1'))

    sps_temporal_mvp_enabled_flag = t.read ('uint:1')
    strong_intra_smoothing_enabled_flag = t.read ('uint:1')
    vui_parameters_present_flag = t.read ('uint:1')
    if vui_parameters_present_flag :
       vp = vui_parameters(t)
    sps_extension_present_flag = t.read ('uint:1')
    if sps_extension_present_flag :
        sps_range_extension_flag = t.read ('uint:1')
        sps_multilayer_extension_flag = t.read ('uint:1')
        sps_3d_extension_flag = t.read ('uint:1')
        sps_extension_5bits = t.read ('uint:1')
    tb = rbsp_trailing_bits(t,len(t))
    print ('Reading of SPS NAL finished. Read ',len(t),' of SPS NALu data.\n')

# print block
    """
    print ('sps_video_parameter_set_id', sps_video_parameter_set_id)
    print ('sps_max_sub_layers_minus1', sps_max_sub_layers_minus1)
    print ('sps_temporal_id_nesting_flag', sps_temporal_id_nesting_flag)
    ptl.show()
    print ('sps_seq_parameter_set_id', sps_seq_parameter_set_id)
    print ('chroma_format_idc', chroma_format_idc)
    if chroma_format_idc == 3:
        print ('separate_colour_plane_flag', separate_colour_plane_flag)
    print ('pic_width_in_luma_samples ', pic_width_in_luma_samples) #produces wrong number
    print ('pic_height_in_luma_samples', pic_height_in_luma_samples) #produces wrong number
    print ('conformance_window_flag', conformance_window_flag)
    print ('bit_depth_luma_minus8', bit_depth_luma_minus8)
    print ('bit_depth_chroma_minus8', bit_depth_chroma_minus8)
    print ('log2_max_pic_order_cnt_lsb_minus4', log2_max_pic_order_cnt_lsb_minus4)
    print ('sps_sub_layer_ordering_info_present_flag', sps_sub_layer_ordering_info_present_flag)

    if sps_sub_layer_ordering_info_present_flag :
       print ('sps_max_dec_pic_buffering_minus1', sps_max_dec_pic_buffering_minus1)
       print ('sps_max_num_reorder_pics', sps_max_num_reorder_pics)
       print ('sps_max_latency_increase_plus1', sps_max_latency_increase_plus1)

    print ('log2_min_luma_coding_block_size_minus3',log2_min_luma_coding_block_size_minus3)
    print ('log2_diff_max_min_luma_coding_block_size',log2_diff_max_min_luma_coding_block_size)
    print ('log2_min_luma_transform_block_size_minus2',log2_min_luma_transform_block_size_minus2)
    print ('log2_diff_max_min_luma_transform_block_size', log2_diff_max_min_luma_transform_block_size)
    print ('max_transform_hierarchy_depth_inter', max_transform_hierarchy_depth_inter)
    print ('max_transform_hierarchy_depth_intra', max_transform_hierarchy_depth_intra)
    print ('scaling_list_enabled_flag',scaling_list_enabled_flag)
    print ('amp_enabled_flag',amp_enabled_flag)
    print ('sample_adaptive_offset_enabled_flag',sample_adaptive_offset_enabled_flag)
    print ('pcm_enabled_flag',pcm_enabled_flag)
    if pcm_enabled_flag :
        print ('pcm_sample_bit_depth_luma_minus1',pcm_sample_bit_depth_luma_minus1)
        print ('pcm_sample_bit_depth_chroma_minus1',pcm_sample_bit_depth_chroma_minus1)
        print ('log2_min_pcm_luma_coding_block_size_minus3',log2_min_pcm_luma_coding_block_size_minus3)
        print ('log2_diff_max_min_pcm_luma_coding_block_size',log2_diff_max_min_pcm_luma_coding_block_size)
        print ('pcm_loop_filter_disabled_flag',pcm_loop_filter_disabled_flag)
    print ('num_short_term_ref_pic_sets',num_short_term_ref_pic_sets)
    print ('long_term_ref_pics_present_flag',long_term_ref_pics_present_flag)
    print ('sps_temporal_mvp_enabled_flag',sps_temporal_mvp_enabled_flag)
    print ('strong_intra_smoothing_enabled_flag',strong_intra_smoothing_enabled_flag)
    print ('vui_parameters_present_flag',vui_parameters_present_flag)
    if vui_parameters_present_flag :
        vp.show()
    print ('sps_extension_present_flag',sps_extension_present_flag)
    """
# New BS write Block
    print ('Making modified SPS NALu...')
    new_bs = BitStream()
    new_bs += pack('uint:4,uint:3,uint:1',sps_video_parameter_set_id,sps_max_sub_layers_minus1,sps_temporal_id_nesting_flag)
    new_bs += pack ('uint:2,uint:1,uint:5',ptl.general_profile_space, ptl.general_tier_flag,ptl.general_profile_idc)
    for i in range (32) :
        new_bs += pack('uint:1',int(ptl.general_profile_compatibility_flag[i]))
    new_bs += pack ('uint:1',ptl.general_progressive_source_flag)
    new_bs += pack ('uint:1',ptl.general_interlaced_source_flag)
    new_bs += pack ('uint:1',ptl.general_non_packed_constraint_flag)
    new_bs += pack ('uint:1',ptl.general_frame_only_constraint_flag)
    new_bs += pack ('uint:44',ptl.general_reserved_zero_44bits)
    new_bs += pack ('uint:8',ptl.general_level_idc)
    new_bs += pack ('ue',sps_seq_parameter_set_id)
    new_bs += pack ('ue',chroma_format_idc)
    if chroma_format_idc == 3:
        new_bs += pack ('uint:1',separate_colour_plane_flag)
    new_bs += pack ('ue',pic_width_in_luma_samples)
    new_bs += pack ('ue',pic_height_in_luma_samples)
    new_bs += pack ('uint:1',conformance_window_flag)
    if (conformance_window_flag) :
        new_bs += pack ('ue',conf_win_left_offset)
        new_bs += pack ('ue',conf_win_right_offset)
        new_bs += pack ('ue',conf_win_top_offset)
        new_bs += pack ('ue',conf_win_bottom_offset)
    new_bs += pack ('ue',bit_depth_luma_minus8)
    new_bs += pack ('ue',bit_depth_chroma_minus8)
    new_bs += pack ('ue',log2_max_pic_order_cnt_lsb_minus4)
    new_bs += pack ('uint:1',sps_sub_layer_ordering_info_present_flag)
#   for (i = (sps_sub_layer_ordering_info_present_flag ? 0 : sps.max_sub_layers_minus1); i <= sps.max_sub_layers_minus1; i++)
    if sps_sub_layer_ordering_info_present_flag :
            new_bs += pack ('ue',sps_max_dec_pic_buffering_minus1)
            new_bs += pack ('ue',sps_max_num_reorder_pics)
            new_bs += pack ('ue',sps_max_latency_increase_plus1)
    new_bs += pack ('ue',log2_min_luma_coding_block_size_minus3)
    new_bs += pack ('ue',log2_diff_max_min_luma_coding_block_size)
    new_bs += pack ('ue',log2_min_luma_transform_block_size_minus2)
    new_bs += pack ('ue',log2_diff_max_min_luma_transform_block_size)
    new_bs += pack ('ue',max_transform_hierarchy_depth_inter)
    new_bs += pack ('ue',max_transform_hierarchy_depth_intra)
    new_bs += pack ('uint:1',scaling_list_enabled_flag)
    #
    new_bs += pack ('uint:1',amp_enabled_flag)
    new_bs += pack ('uint:1',sample_adaptive_offset_enabled_flag)
    new_bs += pack ('uint:1',pcm_enabled_flag)
    if pcm_enabled_flag :
        new_bs += pack ('uint:4',pcm_sample_bit_depth_luma_minus1)
        new_bs += pack ('uint:4',pcm_sample_bit_depth_chroma_minus1)
        new_bs += pack ('ue',log2_min_pcm_luma_coding_block_size_minus3)
        new_bs += pack ('ue',log2_diff_max_min_pcm_luma_coding_block_size)
        new_bs += pack ('uint:1',pcm_loop_filter_disabled_flag)
    new_bs += pack ('ue',num_short_term_ref_pic_sets)


    if num_short_term_ref_pic_sets :
        for i in range (num_short_term_ref_pic_sets) :
            if i != 0 :
                new_bs += pack ('uint:1',inter_ref_pic_set_prediction_flag)



            if  not 'inter_ref_pic_set_prediction_flag' in globals() :
                """
                if i == num_short_term_ref_pic_sets :
                    new_bs += pack ('ue',delta_idx_minus1)
                new_bs += pack ('uint:1', delta_rps_sign)
                new_bs += pack ('ue',abs_delta_rps_minus1)
                for j in range (NumDeltaPocs[i - (delta_idx_minus1 +1)]) :
                    new_bs += pack ('uint:1', used_by_curr_pic_flag[j])
                    if used_by_curr_pic_flag[j] :
                        new_bs += pack ('uint:1',use_delta_flag[j])

            else :
                """
                new_bs += pack ('ue',num_negative_pics)
                new_bs += pack ('ue',num_positive_pics)
                new_bs += [pack ('ue',delta_poc_s0_minus1[_]) for _ in range (num_negative_pics)]
                new_bs += [pack ('uint:1',used_by_curr_pic_s0_flag[_]) for _ in range (num_negative_pics)]
                new_bs += [pack ('ue',delta_poc_s1_minus1[_]) for _ in range(num_positive_pics)]
                new_bs += [pack ('uint:1',used_by_curr_pic_s1_flag[_]) for _ in range(num_positive_pics)]


    new_bs += pack ('uint:1',long_term_ref_pics_present_flag)
    if long_term_ref_pics_present_flag :
        new_bs += pack ('ue',num_long_term_ref_pics_sps)
        # NOTE(review): the per-entry lt_ref_pic_poc_lsb_sps /
        # used_by_curr_pic_lt_sps_flag values read above are NOT re-written
        # here — streams with long-term reference pictures would be
        # corrupted; confirm whether such inputs are expected.
    new_bs += pack ('uint:1',sps_temporal_mvp_enabled_flag)
    new_bs += pack ('uint:1',strong_intra_smoothing_enabled_flag)
    new_bs += pack ('uint:1',vui_parameters_present_flag)
# VUI VP pack Section
    if vui_parameters_present_flag :
       new_bs += pack ('uint:1',vp.aspect_ratio_info_present_flag)
       if vp.aspect_ratio_info_present_flag :
            new_bs += pack ('uint:8',vp.aspect_ratio_idc)
            if vp.aspect_ratio_idc == 255 :
                new_bs += pack ('uint:16',vp.sar_width)
                new_bs += pack ('uint:16',vp.sar_height)
       new_bs += pack ('uint:1',vp.overscan_info_present_flag)
       if vp.overscan_info_present_flag :
           new_bs += pack ('uint:1',vp.overscan_appropriate_flag)
       new_bs += pack ('uint:1',vp.video_signal_type_present_flag)
       if vp.video_signal_type_present_flag :
           new_bs += pack ('uint:3',vp.video_format)
           new_bs += pack ('uint:1',vp.video_full_range_flag)
           new_bs += pack ('uint:1',vp.colour_description_present_flag)
           if vp.colour_description_present_flag :
               new_bs += pack ('uint:8',vp.colour_primaries)
               new_bs += pack ('uint:8',vp.transfer_characteristics)
               new_bs += pack ('uint:8',vp.matrix_coeffs)
       new_bs += pack ('uint:1',vp.chroma_loc_info_present_flag)
       if vp.chroma_loc_info_present_flag :
           new_bs += pack ('ue',vp.chroma_sample_loc_type_top_field)
           new_bs += pack ('ue',vp.chroma_sample_loc_type_bottom_field)
       new_bs += pack ('uint:1',vp.neutral_chroma_indication_flag)
       new_bs += pack ('uint:1',vp.field_seq_flag)
       new_bs += pack ('uint:1',vp.frame_field_info_present_flag)
       new_bs += pack ('uint:1',vp.default_display_window_flag)
       if vp.default_display_window_flag :
           new_bs += pack ('ue',vp.def_disp_win_left_offset)
           new_bs += pack ('ue',vp.def_disp_win_right_offset)
           new_bs += pack ('ue',vp.def_disp_win_top_offset)
           new_bs += pack ('ue',vp.def_disp_win_bottom_offset)
       new_bs += pack ('uint:1',vp.vui_timing_info_present_flag)
       if vp.vui_timing_info_present_flag :
           new_bs += pack ('uint:32',vp.vui_num_units_in_tick)
           new_bs += pack ('uint:32',vp.vui_time_scale)
           new_bs += pack ('uint:1',vp.vui_poc_proportional_to_timing_flag)
           if vp.vui_poc_proportional_to_timing_flag :
               new_bs += pack ('ue',vp.vui_num_ticks_poc_diff_one_minus1)
           new_bs += pack ('uint:1',vp.vui_hrd_parameters_present_flag)
           """
           if( vui_hrd_parameters_present_flag )
           hrd_parameters( 1, sps_max_sub_layers_minus1 )
           """
       new_bs += pack ('uint:1',vp.bitstream_restriction_flag)
       if vp. bitstream_restriction_flag :
           new_bs += pack ('uint:1',vp.tiles_fixed_structure_flag)
           new_bs += pack ('uint:1',vp.motion_vectors_over_pic_boundaries_flag)
           new_bs += pack ('uint:1',vp.restricted_ref_pic_lists_flag)
           new_bs += pack ('ue',vp.min_spatial_segmentation_idc)
           new_bs += pack ('ue',vp.max_bytes_per_pic_denom)
           new_bs += pack ('ue',vp.max_bits_per_min_cu_denom)
           new_bs += pack ('ue',vp.log2_max_mv_length_horizontal)
           new_bs += pack ('ue',vp.log2_max_mv_length_vertical)

    new_bs += pack ('uint:1',sps_extension_present_flag)
    if sps_extension_present_flag :
        new_bs += pack ('uint:1',sps_range_extension_flag)
        new_bs += pack ('uint:1',sps_multilayer_extension_flag)
        new_bs += pack ('uint:1',sps_3d_extension_flag)
        new_bs += pack ('uint:1',sps_extension_5bits)

    new_bs += pack ('uint:1',tb.rbsp_stop_one_bit)
    while len(new_bs) < t.pos :
        new_bs += pack ('uint:1',tb.rbsp_alignment_zero_bit)

#    self.sub_layer_profile_present_flag = []
#    self.sub_layer_level_present_flag = []
#    for i in range(maxNumSubLayersMinus1):
#        self.sub_layer_profile_present_flag.append(t.read('uint:1'))
#        self.sub_layer_level_present_flag.append(t.read('uint:1'))

    # Rebuild the NAL header, reinsert emulation prevention bytes, and
    # splice the new SPS over the old one.
    pre_new_bs = pack ('uint:1,2*uint:6,uint:3', forbidden_zero_bit,nal_unit_type,nuh_layer_id,nuh_temporal_id_plus1)
    new_bs.replace ('0x0000','0x000003',bytealigned=True)
    new_bs = pre_new_bs + new_bs + '0x00'
    nal_t_rep = nal_t[24:]
    repl = s.replace (nal_t_rep,new_bs, bytealigned=True)
    print ('Made modified SPS NALu - OK')
    if sei_ok :
        s.prepend (new_sei_string)
        print ('New SEI prepended')
    print ('Writing new stream...')
    s.tofile(o)
    # Copy the remainder of the input file through unchanged, chunk by chunk.
    progr = chunk
    while True:
        s = F.read(chunk)
        o.write(s)
        if progr < os.path.getsize(args.infile):
            print ('\rProgress ',int(round((progr/os.path.getsize(args.infile))*100)),'%',end='')
        progr = progr + chunk
        if not s:
            break
    o.close()
    F.close()
    print ('\rProgress: 100 %')
    print ('=====================')
    print ('Done!')
    print ('')
    print ('File ',args.outfile,' created.')
    sys.exit()
Exemplo n.º 15
0
def decrypt_images(**kwargs):
    """Walk every PNG under ``path`` and normalize Com2uS game image assets in place.

    Two kinds of files are handled:

    * "Encrypted" PNGs, detected by byte 7 being 0x0B instead of the standard
      PNG signature byte 0x0A: the signature byte is repaired and every byte of
      the file is mapped through the ``com2us_decrypt_values`` lookup table.
    * "Joker" container files (JPEG data wrapped in extra header junk, possibly
      holding a color image plus a grayscale alpha mask): unpacked with
      ``JokerContainerFile`` and re-saved as a real PNG via PIL.

    Keyword args:
        path: root directory to scan recursively
            (default ``'herders/static/herders/images'``).
    """
    path = kwargs.pop('path', 'herders/static/herders/images')
    for im_path in iglob(f'{path}/**/*.png', recursive=True):
        encrypted = BitStream(filename=im_path)

        # Check if it is 'encrypted'. 8th byte is 0x0B instead of the correct signature 0x0A
        encrypted.pos = 0x07 * 8
        signature = encrypted.peek('uint:8')
        if signature == 0x0B:
            print(f'Decrypting {im_path}')
            # Correct the PNG signature
            encrypted.overwrite('0x0A', encrypted.pos)

            # Replace bits with magic decrypted values
            try:
                # peek() does not advance the stream, so overwrite() (which
                # does advance past what it writes) moves us byte by byte.
                while True:
                    pos = encrypted.pos
                    val = encrypted.peek('uint:8')
                    encrypted.overwrite(
                        Bits(uint=com2us_decrypt_values[val], length=8), pos)
            except ReadError:
                # EOF
                pass

            # Write it back to the file
            with open(im_path, 'wb') as f:
                encrypted.tofile(f)

            continue

        # Check for weird jpeg format with extra header junk. Convert to png.
        encrypted.pos = 0
        if encrypted.peek('bytes:5') == b'Joker':
            print(f'Converting Joker container JPEG to PNG {im_path}')
            with open(im_path, 'rb') as f:
                bts = f.read()
                # Two JFIF markers means an image + alpha-mask pair packed in
                # one container; split at the second marker.
                first_img = bts.find(b'JFIF')
                second_img = bts.rfind(b'JFIF')
                imgs = []
                if second_img > -1 and first_img != second_img:
                    imgs = [bts[:second_img], bts[second_img:]]
                    # Prepend the Joker magic + header to imitate a standalone file.
                    imgs[1] = imgs[0][imgs[0].find(b'Joker'
                                                   ):first_img] + imgs[1]
                    imgs = [
                        JokerContainerFile(img, read=False) for img in imgs
                    ]
                else:
                    img = JokerContainerFile(bts, read=False)

            # Open it as a jpg and resave to disk
            try:
                if len(imgs) > 1:
                    new_imfile = Image.open(io.BytesIO(imgs[0].data.tobytes()))
                    # Second JFIF payload is the alpha channel for the first.
                    new_mask = Image.open(io.BytesIO(
                        imgs[1].data.tobytes())).convert('L')
                    new_imfile.putalpha(new_mask)
                else:
                    new_imfile = Image.open(io.BytesIO(img.data.tobytes()))
                new_imfile.save(im_path)
            except IOError:
                print(f'Unable to open {im_path}')
    def create_archives(self):
        """Build the data00/data01 CPK archives and save the patched EBOOT.

        For each enabled archive, read its CSV manifest, pack any directory
        entries into single files (skipping entries whose cached copy is up
        to date), and hand the rewritten manifest to the CPK packer.
        Finally the patched EBOOT.BIN is written back to the ISO directory.
        """

        # Remember the parent window's geometry for dialog placement;
        # fall back to a 1080p layout when there is no usable parent.
        try:
            self.width = self.parent.width()
            self.height = self.parent.height()
            self.x = self.parent.x()
            self.y = self.parent.y()
        except Exception:
            self.width = 1920
            self.height = 1080
            self.x = 0
            self.y = 0

        self.progress = QProgressDialog("Reading...", QtCore.QString(), 0,
                                        7600, self.parent)
        self.progress.setWindowModality(Qt.Qt.WindowModal)
        self.progress.setValue(0)
        self.progress.setAutoClose(False)
        self.progress.setMinimumDuration(0)

        USRDIR = os.path.join(common.editor_config.iso_dir, "PSP_GAME",
                              "USRDIR")
        eboot_path = os.path.join(common.editor_config.iso_dir, "PSP_GAME",
                                  "SYSDIR", "EBOOT.BIN")

        # Apply the configured binary hacks before we touch the archives.
        eboot = BitStream(filename=eboot_path)
        eboot = eboot_patch.apply_eboot_patches(eboot)

        # So we can loop. :)
        ARCHIVE_INFO = [
            {
                "dir":
                common.editor_config.data00_dir,
                "cpk":
                os.path.join(USRDIR, "data00.cpk"),
                "csv":
                os.path.join(
                    "data",
                    "data00.csv" if not common.editor_config.quick_build else
                    "data00-quick.csv"),
                "name":
                "data00.cpk",
                "pack":
                common.editor_config.pack_data00,
            },
            {
                "dir":
                common.editor_config.data01_dir,
                "cpk":
                os.path.join(USRDIR, "data01.cpk"),
                "csv":
                os.path.join(
                    "data",
                    "data01.csv" if not common.editor_config.quick_build else
                    "data01-quick.csv"),
                "name":
                "data01.cpk",
                "pack":
                common.editor_config.pack_data01,
            },
        ]

        # temp_dir = tempfile.mkdtemp(prefix = "sdse-")
        temp_dir = common.editor_config.build_cache

        for archive in ARCHIVE_INFO:

            if not archive["pack"]:
                continue

            self.progress.setWindowTitle("Building " + archive["name"])

            csv_template_f = open(archive["csv"], "rb")
            csv_template = csv.reader(csv_template_f)

            csv_out_path = os.path.join(temp_dir, "cpk.csv")
            csv_out_f = open(csv_out_path, "wb")
            csv_out = csv.writer(csv_out_f)

            # try/finally so neither CSV handle leaks if packing raises.
            try:
                for row in csv_template:
                    if len(row) < 4:
                        continue

                    base_path = row[0]

                    real_path = os.path.join(archive["dir"], base_path)
                    out_path = os.path.join(temp_dir, archive["name"],
                                            base_path)

                    self.progress.setValue(self.progress.value() + 1)
                    self.progress.setLabelText("Reading...\n%s" % real_path)

                    # All items in the CPK list should be files.
                    # Therefore, if we have a directory, then it needs to be packed.
                    if os.path.isdir(real_path):
                        if self.__cache_outdated(real_path, out_path):
                            out_dir = os.path.dirname(out_path)
                            try:
                                os.makedirs(out_dir)
                            except OSError:
                                # Directory already exists.
                                pass

                            data = pack_dir(real_path)
                            with open(out_path, "wb") as out_file:
                                data.tofile(out_file)
                            # Free the packed data before the next archive entry.
                            del data

                    elif os.path.isfile(real_path):
                        # If it's a file, though, we can just use it directly.
                        out_path = real_path

                    row[0] = out_path
                    csv_out.writerow(row)
            finally:
                csv_template_f.close()
                csv_out_f.close()

            self.__pack_cpk(csv_out_path, archive["cpk"])

        self.progress.setWindowTitle("Building...")
        self.progress.setLabelText("Saving EBOOT.BIN...")
        self.progress.setValue(self.progress.maximum())

        with open(eboot_path, "wb") as f:
            eboot.tofile(f)

        # self.progress.setLabelText("Deleting temporary files...")
        # shutil.rmtree(temp_dir)
        self.progress.close()
 def create_archives(self):
   """Build the CPK archives, patch their tables of contents into the EBOOT,
   apply the configured text replacements, and write EBOOT.BIN to the ISO dir.

   NOTE(review): this variant reads archive["toc"], iterates a
   table_of_contents name, and uses eboot_offset and self.file_count, none of
   which are defined by the code visible here -- presumably supplied by other
   parts of the project (e.g. the CPK packer and the EBOOT patcher's return
   offset).  Verify before reuse.
   """
   
   # Remember the parent window's geometry for dialog placement; fall back
   # to a 1080p layout when there is no usable parent.
   # NOTE(review): bare except -- narrows to Exception at minimum if edited.
   try:
     self.width = self.parent.width()
     self.height = self.parent.height()
     self.x = self.parent.x()
     self.y = self.parent.y()
   except:
     self.width = 1920
     self.height = 1080
     self.x = 0
     self.y = 0
   
   self.progress = QProgressDialog("Reading...", QtCore.QString(), 0, 7600, self.parent)
   self.progress.setWindowModality(Qt.Qt.WindowModal)
   self.progress.setValue(0)
   self.progress.setAutoClose(False)
   self.progress.setMinimumDuration(0)
   
   USRDIR     = os.path.join(common.editor_config.iso_dir, "PSP_GAME", "USRDIR")
   eboot_path = os.path.join(common.editor_config.iso_dir, "PSP_GAME", "SYSDIR", "EBOOT.BIN")
   
   # Apply the configured binary hacks before saving.
   eboot = BitStream(filename = eboot_path)
   eboot = eboot_patch.apply_eboot_patches(eboot)
   
   # So we can loop. :)
   ARCHIVE_INFO = [
     {
       "dir":  common.editor_config.data00_dir,
       "cpk":  os.path.join(USRDIR, "data00.cpk"),
       "csv":  os.path.join("data", "data00.csv" if not common.editor_config.quick_build else "data00-quick.csv"),
       "name": "data00.cpk",
       "pack": common.editor_config.pack_data00,
     },
     {
       "dir":  common.editor_config.data01_dir,
       "cpk":  os.path.join(USRDIR, "data01.cpk"),
       "csv":  os.path.join("data", "data01.csv" if not common.editor_config.quick_build else "data01-quick.csv"),
       "name": "data01.cpk",
       "pack": common.editor_config.pack_data01,
     },
   ]
   
   # temp_dir = tempfile.mkdtemp(prefix = "sdse-")
   temp_dir = common.editor_config.build_cache
   
   for archive in ARCHIVE_INFO:
     
     if not archive["pack"]:
       continue
     
     self.progress.setWindowTitle("Building " + archive["name"])
     
     toc_info = {}
     file_list = None
     
     # NOTE(review): the ARCHIVE_INFO dicts above have no "toc" key, so this
     # raises KeyError as written -- the key must come from a fuller config.
     if archive["toc"]:
       file_list = []
       
       toc = get_toc(eboot, archive["toc"])
       
       # Map each filename to the EBOOT positions where its CPK offset and
       # length fields live, so they can be rewritten after packing.
       for entry in toc:
         filename  = entry["filename"]
         pos_pos   = entry["file_pos_pos"]
         len_pos   = entry["file_len_pos"]
         
         toc_info[filename] = [pos_pos, len_pos]
         file_list.append(filename)
     
     # Causes memory issues if I use the original order, for whatever reason.
     file_list = None
     
     csv_template_f  = open(archive["csv"], "rb")
     csv_template    = csv.reader(csv_template_f)
     
     csv_out_path    = os.path.join(temp_dir, "cpk.csv")
     csv_out_f       = open(csv_out_path, "wb")
     csv_out         = csv.writer(csv_out_f)
     
     for row in csv_template:
       # Manifest rows with fewer than four columns are not file entries.
       if len(row) < 4:
         continue
       
       base_path = row[0]
       
       real_path = os.path.join(archive["dir"], base_path)
       out_path  = os.path.join(temp_dir, archive["name"], base_path)
       
       self.progress.setValue(self.progress.value() + 1)
       self.progress.setLabelText("Reading...\n%s" % real_path)
       
       # All items in the CPK list should be files.
       # Therefore, if we have a directory, then it needs to be packed.
       if os.path.isdir(real_path):
         if self.__cache_outdated(real_path, out_path):
           out_dir = os.path.dirname(out_path)
           try:
             os.makedirs(out_dir)
           except:
             pass
           
           data = pack_dir(real_path)
           with open(out_path, "wb") as out_file:
             data.tofile(out_file)
           del data
           
       elif os.path.isfile(real_path):
       # If it's a file, though, we can just use it directly.
         out_path = real_path
         
       row[0] = out_path
       csv_out.writerow(row)
     
     csv_template_f.close()
     csv_out_f.close()
     
     self.__pack_cpk(csv_out_path, archive["cpk"])
     
     # We're playing fast and loose with the file count anyway, so why not?
     self.file_count += 1
     self.progress.setValue(self.file_count)
     self.progress.setLabelText("Saving " + archive["name"] + "...")
     
     # Patch the freshly packed file positions/sizes into the EBOOT's TOC.
     # NOTE(review): table_of_contents is never assigned in the visible code;
     # presumably produced by self.__pack_cpk in the full project -- verify.
     if archive["toc"]:
       for entry in table_of_contents:
         if not entry in toc_info:
           _LOGGER.warning("%s missing from %s table of contents." % (entry, archive["name"]))
           continue
         
         file_pos  = table_of_contents[entry]["pos"]
         file_size = table_of_contents[entry]["size"]
         
         eboot.overwrite(BitStream(uintle = file_pos, length = 32),  toc_info[entry][0] * 8)
         eboot.overwrite(BitStream(uintle = file_size, length = 32), toc_info[entry][1] * 8)
     
     del table_of_contents
   
   self.progress.setWindowTitle("Building...")
   self.progress.setLabelText("Saving EBOOT.BIN...")
   self.progress.setValue(self.progress.maximum())
   
   with open(eboot_path, "wb") as f:
     eboot.tofile(f)
     
   # Text replacement
   to_replace = eboot_text.get_eboot_text()
   for replacement in to_replace:
   
     orig = bytearray(replacement.orig, encoding = replacement.enc)
     
     # If they left something blank, write the original text back.
     if len(replacement.text) == 0:
       data = orig
     else:
       data = bytearray(replacement.text, encoding = replacement.enc)
     
     # NOTE(review): eboot_offset is not defined in the visible code --
     # presumably the header-extension offset from the EBOOT patcher.
     pos  = replacement.pos.int + eboot_offset
     
     padding = len(orig) - len(data)
     if padding > 0:
       # Null bytes to fill the rest of the space the original took.
       data.extend(bytearray(padding))
     
     data = ConstBitStream(bytes = data)
     eboot.overwrite(data, pos * 8)
   
   eboot_out = os.path.join(common.editor_config.iso_dir, "PSP_GAME", "SYSDIR", "EBOOT.BIN")
   
   with open(eboot_out, "wb") as f:
     eboot.tofile(f)
   
   self.progress.close()
class GmoFile():
  """Wrapper around a GMO 3D model file, exposing the GIM texture images
  embedded inside it.

  At most one of data/filename should be provided; offset is the byte
  position of the GMO header within data.
  """
  def __init__(self, data = None, offset = 0, filename = None):
    self.data = None
    # List of (byte offset, byte size) pairs, one per embedded GIM.
    self.__gim_files = []
    
    self.gimconv = GimConverter()
    
    if data is not None:
      self.load_data(data, offset)
    elif filename is not None:
      self.load_file(filename)
  
  def load_file(self, filename):
    """Load a GMO model from a file on disk."""
    data = BitStream(filename = filename)
    self.load_data(data)
  
  def load_data(self, data, offset = 0):
    """Load a GMO model from a BitStream, starting at byte <offset>.
    
    Logs an error and leaves the object empty if the magic is missing."""
    if not data[offset * 8 : offset * 8 + GMO_MAGIC.len] == GMO_MAGIC:
      _LOGGER.error("GMO header not found at 0x%04X." % offset)
      return
    
    # The stored size field excludes part of the header; adjust by the diff.
    data.bytepos = offset + GMO_SIZE_OFFSET
    gmo_size = data.read("uintle:32") + GMO_SIZE_DIFF
    
    self.data = BitStream(data[offset * 8 : (offset + gmo_size) * 8])
    
    self.__find_gims()
  
  def save(self, filename):
    """Write the (possibly modified) GMO data back out to a file."""
    with open(filename, "wb") as f:
      self.data.tofile(f)
  
  def __find_gims(self):
    """Scan the model data and index every embedded GIM's offset and size."""
    if self.data is None:
      return
    
    self.__gim_files = []
    
    for gim_start in self.data.findall(GIM_MAGIC, bytealigned = True):
      gim_size_pos  = gim_start + (GIM_SIZE_OFFSET * 8) # Bit pos.
      gim_size      = self.data[gim_size_pos : gim_size_pos + 32].uintle + GIM_SIZE_DIFF
      
      # And turn it into a byte position.
      # Floor division: plain /= would produce a float index under Python 3.
      gim_start //= 8
      self.__gim_files.append((gim_start, gim_size))
  
  def gim_count(self):
    """Number of GIM images found in the model."""
    return len(self.__gim_files)
  
  def get_gim(self, gim_id):
    """Return a copy of the GIM at index <gim_id> as a BitStream.
    
    Raises GimIndexError for an out-of-range ID."""
    if gim_id >= self.gim_count():
      raise GimIndexError("Invalid GIM ID.")
    
    gim_start, gim_size = self.__gim_files[gim_id]
    gim_data = self.data[gim_start * 8 : (gim_start + gim_size) * 8]
    
    return gim_data
  
  def replace_png_file(self, gim_id, filename, quantize_to_fit = True):
    """Convert a PNG to GIM and insert it over the GIM at <gim_id>.
    
    If quantize_to_fit is set, progressively heavier palette quantization
    is tried until the converted GIM fits in the original's space."""
    if quantize_to_fit:
      quantize_order = [QuantizeType.auto, QuantizeType.index8, QuantizeType.index4]
    else:
      quantize_order = [QuantizeType.auto]
    quantize_id = 0
    
    (fd, temp_gim) = tempfile.mkstemp(suffix = ".gim", prefix = "sdse-")
    os.close(fd) # Don't need the open file handle.
    
    while True:
      self.gimconv.png_to_gim(filename, temp_gim, quantize_order[quantize_id])
      
      try:
        self.replace_gim_file(gim_id, temp_gim)
      except GimSizeError:
        quantize_id += 1
      except GimIndexError:
        os.remove(temp_gim)
        raise
      else:
        # If we didn't except, that means we succeeded, so we can leave.
        _LOGGER.debug("Quantized PNG to %s" % quantize_order[quantize_id])
        break
      
      # >= rather than >: with > we'd index past the end of quantize_order
      # on the next iteration and crash instead of logging the failure.
      if quantize_id >= len(quantize_order):
        _LOGGER.error("Unable to convert %s into a GIM small enough to insert." % filename)
        break
    
    os.remove(temp_gim)
  
  def replace_gim_file(self, gim_id, filename):
    """Replace the GIM at <gim_id> with the contents of a .gim file."""
    gim_data = BitStream(filename = filename)
    self.replace_gim(gim_id, gim_data)
  
  def replace_gim(self, gim_id, gim_data):
    """Overwrite the GIM at <gim_id> in place with <gim_data>.
    
    Raises GimIndexError for a bad ID and GimSizeError if the replacement
    is larger than the space the original GIM occupies."""
    if gim_id >= self.gim_count():
      raise GimIndexError("Invalid GIM ID.")
    
    gim_start, gim_size = self.__gim_files[gim_id]
    
    if gim_data.len / 8 > gim_size:
      raise GimSizeError("GIM too large. %d bytes > %d bytes" % (gim_data.len / 8, gim_size))
      # return
    
    self.data.overwrite(gim_data, gim_start * 8)
    
    # Leave the length alone, though, because we know we have that much space
    # to work with from the original GIM file that was there, and there's no
    # point in shrinking that down if someone happens to want to re-replace
    # this GIM file without reloading the whole thing.
  
  def extract(self, directory, to_png = False):
    """Dump every GIM to <directory> as 0000.gim, 0001.gim, ...; optionally
    convert each to PNG (removing the intermediate .gim)."""
    if not os.path.isdir(directory):
      os.makedirs(directory)
    
    for id in range(self.gim_count()):
      gim = self.get_gim(id)
      
      out_gim = os.path.join(directory, "%04d.gim" % id)
      out_png = os.path.join(directory, "%04d.png" % id)
      
      with open(out_gim, "wb") as f:
        gim.tofile(f)
      
      if to_png:
        self.gimconv.gim_to_png(out_gim, out_png)
        os.remove(out_gim)
  
  eboot = extend_eboot(eboot)
  
  for patch in EBOOT_PATCHES:
  
    enabled = patch[ENABLED]
    if patch[CFG_ID] and patch[CFG_ID] in common.editor_config.hacks:
      enabled = common.editor_config.hacks[patch[CFG_ID]]
    
    # So we can undo patches if they've already been applied.
    key = PATCH if enabled else ORIG
    
    for item in patch[DATA]:
      eboot.overwrite(item[key], item[POS] * 8)
  
  eboot = apply_sys_lang(eboot)
  eboot = apply_clt_patch(eboot)
  
  return eboot

if __name__ == "__main__":
  # Manual smoke test: read a decrypted EBOOT, apply the patches, and write
  # the result to a second file for comparison.  Paths are machine-specific.
  src = "Y:\\Danganronpa\\Danganronpa2\\EBOOT-DEC.BIN"
  dst = "Y:\\Danganronpa\\Danganronpa2\\EBOOT-TEST.BIN"
  
  test = BitStream(filename = src)
  test = apply_eboot_patches(test)
  
  with open(dst, "wb") as f:
    test.tofile(f)

### EOF ###
 def create_archives(self):
   """Build the data00/data01 CPK archives from the workspace files and
   write the patched EBOOT.BIN back into the ISO directory.

   For each enabled archive: read its CSV manifest, pack directory entries
   into single files (using the build cache to skip unchanged ones), and
   hand the rewritten manifest to the CPK packer.
   """
   
   # Remember the parent window's geometry for dialog placement; fall back
   # to a 1080p layout when there is no usable parent.
   # NOTE(review): bare except -- narrow to Exception if this is ever edited.
   try:
     self.width = self.parent.width()
     self.height = self.parent.height()
     self.x = self.parent.x()
     self.y = self.parent.y()
   except:
     self.width = 1920
     self.height = 1080
     self.x = 0
     self.y = 0
   
   self.progress = QProgressDialog("Reading...", QtCore.QString(), 0, 7600, self.parent)
   self.progress.setWindowModality(Qt.Qt.WindowModal)
   self.progress.setValue(0)
   self.progress.setAutoClose(False)
   self.progress.setMinimumDuration(0)
   
   USRDIR     = os.path.join(common.editor_config.iso_dir, "PSP_GAME", "USRDIR")
   eboot_path = os.path.join(common.editor_config.iso_dir, "PSP_GAME", "SYSDIR", "EBOOT.BIN")
   
   # Apply the configured binary hacks before saving.
   eboot = BitStream(filename = eboot_path)
   eboot = eboot_patch.apply_eboot_patches(eboot)
   
   # So we can loop. :)
   ARCHIVE_INFO = [
     {
       "dir":  common.editor_config.data00_dir,
       "cpk":  os.path.join(USRDIR, "data00.cpk"),
       "csv":  os.path.join("data", "data00.csv" if not common.editor_config.quick_build else "data00-quick.csv"),
       "name": "data00.cpk",
       "pack": common.editor_config.pack_data00,
     },
     {
       "dir":  common.editor_config.data01_dir,
       "cpk":  os.path.join(USRDIR, "data01.cpk"),
       "csv":  os.path.join("data", "data01.csv" if not common.editor_config.quick_build else "data01-quick.csv"),
       "name": "data01.cpk",
       "pack": common.editor_config.pack_data01,
     },
   ]
   
   # temp_dir = tempfile.mkdtemp(prefix = "sdse-")
   temp_dir = common.editor_config.build_cache
   
   for archive in ARCHIVE_INFO:
     
     if not archive["pack"]:
       continue
     
     self.progress.setWindowTitle("Building " + archive["name"])
     
     csv_template_f  = open(archive["csv"], "rb")
     csv_template    = csv.reader(csv_template_f)
     
     csv_out_path    = os.path.join(temp_dir, "cpk.csv")
     csv_out_f       = open(csv_out_path, "wb")
     csv_out         = csv.writer(csv_out_f)
     
     for row in csv_template:
       # Manifest rows with fewer than four columns are not file entries.
       if len(row) < 4:
         continue
       
       base_path = row[0]
       
       real_path = os.path.join(archive["dir"], base_path)
       out_path  = os.path.join(temp_dir, archive["name"], base_path)
       
       self.progress.setValue(self.progress.value() + 1)
       self.progress.setLabelText("Reading...\n%s" % real_path)
       
       # All items in the CPK list should be files.
       # Therefore, if we have a directory, then it needs to be packed.
       if os.path.isdir(real_path):
         if self.__cache_outdated(real_path, out_path):
           out_dir = os.path.dirname(out_path)
           # NOTE(review): bare except; presumably swallowing "directory
           # already exists" from os.makedirs.
           try:
             os.makedirs(out_dir)
           except:
             pass
           
           data = pack_dir(real_path)
           with open(out_path, "wb") as out_file:
             data.tofile(out_file)
           del data
           
       elif os.path.isfile(real_path):
         # If it's a file, though, we can just use it directly.
         out_path = real_path
         
       row[0] = out_path
       csv_out.writerow(row)
     
     csv_template_f.close()
     csv_out_f.close()
     
     self.__pack_cpk(csv_out_path, archive["cpk"])
   
   self.progress.setWindowTitle("Building...")
   self.progress.setLabelText("Saving EBOOT.BIN...")
   self.progress.setValue(self.progress.maximum())
   
   with open(eboot_path, "wb") as f:
     eboot.tofile(f)
   
   # self.progress.setLabelText("Deleting temporary files...")
   # shutil.rmtree(temp_dir)
   self.progress.close()
    def setup_workspace(self):
        """One-time workspace setup: extract data00.cpk, mirror the ISO tree
        for edited files, blank unneeded update files, patch the EBOOT, and
        unpack the bundled editor data -- all behind a progress dialog.

        NOTE(review): Python 2 only as written (cStringIO, Thread.isAlive,
        QtCore.QString).
        """
        data0 = os.path.join(self.iso_dir, DATA0_CPK)

        self.generate_directories()

        progress = QProgressDialog("", QtCore.QString(), 0, 11000, self)
        progress.setWindowTitle("Setting up workspace...")
        progress.setWindowModality(Qt.Qt.WindowModal)
        progress.setMinimumDuration(0)
        progress.setValue(0)
        progress.setAutoClose(False)
        progress.setAutoReset(False)

        progress.setLabelText("Creating directories...")

        # Do the easy stuff first.
        if not os.path.isdir(self.changes_dir):
            os.makedirs(self.changes_dir)
        progress.setValue(progress.value() + 1)

        if not os.path.isdir(self.backup_dir):
            os.makedirs(self.backup_dir)
        progress.setValue(progress.value() + 1)

        thread_fns = [{"target": extract_cpk, "kwargs": {"filename": data0, "out_dir": self.data0_dir}}]

        # Going to capture stdout because I don't feel like
        # rewriting the extract functions to play nice with GUI.
        stdout = sys.stdout
        sys.stdout = cStringIO.StringIO()

        for thread_fn in thread_fns:
            thread = threading.Thread(**thread_fn)
            thread.start()

            while thread.isAlive():
                thread.join(THREAD_TIMEOUT)

                # Each line the extractor printed is one extracted file, so
                # advance the progress bar by the number of captured lines.
                output = [line for line in sys.stdout.getvalue().split("\n") if len(line) > 0]
                progress.setValue(progress.value() + len(output))
                if len(output) > 0:
                    progress.setLabelText("Extracting %s..." % output[-1])

                sys.stdout = cStringIO.StringIO()

        sys.stdout = stdout

        # Give us an ISO directory for the editor to place modified files in.
        progress.setLabelText("Copying ISO files...")

        # ISO directory needs to not exist for copytree.
        if os.path.isdir(self.edited_iso_dir):
            shutil.rmtree(self.edited_iso_dir)

        # One more thing we want threaded so it doesn't lock up the GUI.
        thread = threading.Thread(target=shutil.copytree, kwargs={"src": self.iso_dir, "dst": self.edited_iso_dir})
        thread.start()

        while thread.isAlive():
            thread.join(THREAD_TIMEOUT)
            progress.setLabelText("Copying ISO files...")
            # It has to increase by some amount or it won't update and the UI will lock up.
            progress.setValue(progress.value() + 1)

        # shutil.copytree(self.iso_dir, self.edited_iso_dir)
        progress.setValue(progress.value() + 1)

        # Files we want to make blank, because they're unnecessary.
        blank_files = [
            os.path.join(self.edited_iso_dir, "PSP_GAME", "SYSDIR", "UPDATE", "DATA.BIN"),
            os.path.join(self.edited_iso_dir, "PSP_GAME", "SYSDIR", "UPDATE", "EBOOT.BIN"),
            os.path.join(self.edited_iso_dir, "PSP_GAME", "SYSDIR", "UPDATE", "PARAM.SFO"),
        ]

        # Opening in "wb" and writing nothing truncates each file to zero bytes.
        for blank in blank_files:
            with open(blank, "wb") as f:
                pass

        # Copy the decrypted EBOOT into the ISO folder and apply our hacks to it.
        progress.setLabelText("Hacking EBOOT...")
        progress.setValue(progress.value() + 1)

        hacked_eboot = BitStream(filename=self.eboot_path)
        hacked_eboot = apply_eboot_patches(hacked_eboot)
        with open(os.path.join(self.edited_iso_dir, "PSP_GAME", "SYSDIR", "EBOOT.BIN"), "wb") as f:
            hacked_eboot.tofile(f)
        # shutil.copy(self.eboot_path, os.path.join(self.edited_iso_dir, "PSP_GAME", "SYSDIR", "EBOOT.BIN"))

        progress.setLabelText("Extracting editor data...")
        progress.setValue(progress.value() + 1)

        # Extract the editor data.
        editor_data = zipfile.ZipFile("data/editor_data.zip", "r")
        editor_data.extractall(self.editor_data_dir)
        editor_data.close()

        progress.setValue(progress.maximum())
        progress.close()

        self.ui.grpStep4.setEnabled(False)
        self.ui.grpStep5.setEnabled(True)
Exemplo n.º 22
0
class GmoFile():
    """Wrapper around a GMO 3D model file, exposing the GIM texture images
    embedded inside it.

    At most one of data/filename should be provided; offset is the byte
    position of the GMO header within data.
    """
    def __init__(self, data=None, offset=0, filename=None):
        self.data = None
        # List of (byte offset, byte size) pairs, one per embedded GIM.
        self.__gim_files = []

        self.gimconv = GimConverter()

        if data is not None:
            self.load_data(data, offset)
        elif filename is not None:
            self.load_file(filename)

    def load_file(self, filename):
        """Load a GMO model from a file on disk."""
        data = BitStream(filename=filename)
        self.load_data(data)

    def load_data(self, data, offset=0):
        """Load a GMO model from a BitStream, starting at byte <offset>.

        Logs an error and leaves the object empty if the magic is missing.
        """
        if not data[offset * 8:offset * 8 + GMO_MAGIC.len] == GMO_MAGIC:
            _LOGGER.error("GMO header not found at 0x%04X." % offset)
            return

        # The stored size field excludes part of the header; adjust by the diff.
        data.bytepos = offset + GMO_SIZE_OFFSET
        gmo_size = data.read("uintle:32") + GMO_SIZE_DIFF

        self.data = BitStream(data[offset * 8:(offset + gmo_size) * 8])

        self.__find_gims()

    def save(self, filename):
        """Write the (possibly modified) GMO data back out to a file."""
        with open(filename, "wb") as f:
            self.data.tofile(f)

    def __find_gims(self):
        """Scan the model data and index every embedded GIM's offset/size."""
        if self.data is None:
            return

        self.__gim_files = []

        for gim_start in self.data.findall(GIM_MAGIC, bytealigned=True):
            gim_size_pos = gim_start + (GIM_SIZE_OFFSET * 8)  # Bit pos.
            gim_size = self.data[gim_size_pos:gim_size_pos +
                                 32].uintle + GIM_SIZE_DIFF

            # And turn it into a byte position.
            # Floor division: plain /= would give a float index under Python 3.
            gim_start //= 8
            self.__gim_files.append((gim_start, gim_size))

    def gim_count(self):
        """Number of GIM images found in the model."""
        return len(self.__gim_files)

    def get_gim(self, gim_id):
        """Return a copy of the GIM at index <gim_id> as a BitStream.

        Raises GimIndexError for an out-of-range ID.
        """
        if gim_id >= self.gim_count():
            raise GimIndexError("Invalid GIM ID.")

        gim_start, gim_size = self.__gim_files[gim_id]
        gim_data = self.data[gim_start * 8:(gim_start + gim_size) * 8]

        return gim_data

    def replace_png_file(self, gim_id, filename, quantize_to_fit=True):
        """Convert a PNG to GIM and insert it over the GIM at <gim_id>.

        If quantize_to_fit is set, progressively heavier palette
        quantization is tried until the converted GIM fits in the
        original's space.
        """
        if quantize_to_fit:
            quantize_order = [
                QuantizeType.auto, QuantizeType.index8, QuantizeType.index4
            ]
        else:
            quantize_order = [QuantizeType.auto]
        quantize_id = 0

        (fd, temp_gim) = tempfile.mkstemp(suffix=".gim", prefix="sdse-")
        os.close(fd)  # Don't need the open file handle.

        while True:
            self.gimconv.png_to_gim(filename, temp_gim,
                                    quantize_order[quantize_id])

            try:
                self.replace_gim_file(gim_id, temp_gim)
            except GimSizeError:
                quantize_id += 1
            except GimIndexError:
                os.remove(temp_gim)
                raise
            else:
                # If we didn't except, that means we succeeded, so we can leave.
                _LOGGER.debug("Quantized PNG to %s" %
                              quantize_order[quantize_id])
                break

            # >= rather than >: with > we'd index past the end of
            # quantize_order on the next iteration and crash instead of
            # logging the failure.
            if quantize_id >= len(quantize_order):
                _LOGGER.error(
                    "Unable to convert %s into a GIM small enough to insert." %
                    filename)
                break

        os.remove(temp_gim)

    def replace_gim_file(self, gim_id, filename):
        """Replace the GIM at <gim_id> with the contents of a .gim file."""
        gim_data = BitStream(filename=filename)
        self.replace_gim(gim_id, gim_data)

    def replace_gim(self, gim_id, gim_data):
        """Overwrite the GIM at <gim_id> in place with <gim_data>.

        Raises GimIndexError for a bad ID and GimSizeError if the
        replacement is larger than the space the original GIM occupies.
        """
        if gim_id >= self.gim_count():
            raise GimIndexError("Invalid GIM ID.")

        gim_start, gim_size = self.__gim_files[gim_id]

        if gim_data.len / 8 > gim_size:
            raise GimSizeError("GIM too large. %d bytes > %d bytes" %
                               (gim_data.len / 8, gim_size))
            # return

        self.data.overwrite(gim_data, gim_start * 8)

        # Leave the length alone, though, because we know we have that much space
        # to work with from the original GIM file that was there, and there's no
        # point in shrinking that down if someone happens to want to re-replace
        # this GIM file without reloading the whole thing.

    def extract(self, directory, to_png=False):
        """Dump every GIM to <directory> as 0000.gim, 0001.gim, ...;
        optionally convert each to PNG (removing the intermediate .gim)."""
        if not os.path.isdir(directory):
            os.makedirs(directory)

        for id in range(self.gim_count()):
            gim = self.get_gim(id)

            out_gim = os.path.join(directory, "%04d.gim" % id)
            out_png = os.path.join(directory, "%04d.png" % id)

            with open(out_gim, "wb") as f:
                gim.tofile(f)

            if to_png:
                self.gimconv.gim_to_png(out_gim, out_png)
                os.remove(out_gim)
Exemplo n.º 23
0
  
  return eboot, HEADER_EXTEND_SIZE

def apply_eboot_patches(eboot):
  """Apply every configured binary patch to the EBOOT image.

  Extends the EBOOT first, then for each patch writes either the patched or
  the original bytes (writing originals back undoes a previously applied
  patch), and finally applies the system-language hack.

  Returns a (patched_eboot, offset) tuple, where offset is how far the
  EBOOT was extended.
  """
  eboot, offset = extend_eboot(eboot)

  for patch in EBOOT_PATCHES:
    # The editor config can override a patch's default enabled state.
    cfg_id = patch[CFG_ID]
    if cfg_id and cfg_id in common.editor_config.hacks:
      enabled = common.editor_config.hacks[cfg_id]
    else:
      enabled = patch[ENABLED]

    # So we can undo patches if they've already been applied.
    data_key = PATCH if enabled else ORIG

    for item in patch[DATA]:
      eboot.overwrite(item[data_key], item[POS] * 8)

  return apply_sys_lang(eboot), offset

if __name__ == "__main__":
  # Manual smoke test: patch a decrypted EBOOT and write the result out.
  src = "X:\\Danganronpa\\Danganronpa_BEST\\EBOOT-ORIG.BIN"
  dst = "X:\\Danganronpa\\Danganronpa_BEST\\EBOOT-TEST.BIN"
  patched, offset = apply_eboot_patches(BitStream(filename = src))
  with open(dst, "wb") as out_file:
    patched.tofile(out_file)

### EOF ###
    def create_archives(self):
        """Repack the configured archive directories and patch the EBOOT.

        For every archive marked for packing, packs its directory back into
        the corresponding .dat/.pak file. Archives with a table of contents
        stored in the EBOOT also get their file positions and sizes rewritten
        there. Afterwards the EBOOT text replacements are applied and the
        patched EBOOT is written back into the ISO tree. Progress is shown
        through a modal QProgressDialog.
        """

        # Use the parent widget's geometry if available so dialogs line up
        # with it; fall back to sane defaults otherwise.
        try:
            self.width = self.parent.width()
            self.height = self.parent.height()
            self.x = self.parent.x()
            self.y = self.parent.y()
        except Exception:  # was a bare except; don't swallow SystemExit/KeyboardInterrupt
            self.width = 1920
            self.height = 1080
            self.x = 0
            self.y = 0

        self.file_count = 0

        # 72000 is a rough upper bound on the number of progress steps.
        self.progress = QProgressDialog("Reading...", QtCore.QString(), 0,
                                        72000, self.parent)
        self.progress.setWindowModality(Qt.Qt.WindowModal)
        self.progress.setValue(0)
        self.progress.setAutoClose(False)
        self.progress.setMinimumDuration(0)

        # with open(common.editor_config.eboot_orig, "rb") as f:
        with open(
                os.path.join(common.editor_config.iso_dir, "PSP_GAME",
                             "SYSDIR", "EBOOT.BIN"), "rb") as f:
            eboot = BitStream(bytes=f.read())

        eboot, eboot_offset = eboot_patch.apply_eboot_patches(eboot)

        USRDIR = os.path.join(common.editor_config.iso_dir, "PSP_GAME",
                              "USRDIR")

        # So we can loop. :)
        ARCHIVE_INFO = [
            {
                "toc": UMDIMAGES.umdimage,
                "dir": common.editor_config.umdimage_dir,
                "dat": os.path.join(USRDIR, "umdimage.dat"),
                "name": "umdimage.dat",
                "pack": common.editor_config.pack_umdimage,
                "eof": False,
            },
            {
                "toc": UMDIMAGES.umdimage2,
                "dir": common.editor_config.umdimage2_dir,
                "dat": os.path.join(USRDIR, "umdimage2.dat"),
                "name": "umdimage2.dat",
                "pack": common.editor_config.pack_umdimage2,
                "eof": False,
            },
            {
                "toc": None,
                "dir": common.editor_config.voice_dir,
                "dat": os.path.join(USRDIR, "voice.pak"),
                "name": "voice.pak",
                "pack": common.editor_config.pack_voice,
                "eof": True,
            },
            {
                "toc": None,
                "dir": common.editor_config.bgm_dir,
                "dat": os.path.join(USRDIR, "bgm.pak"),
                "name": "bgm.pak",
                "pack": common.editor_config.pack_bgm,
                "eof": True,
            },
        ]

        for archive in ARCHIVE_INFO:

            if not archive["pack"]:
                continue

            self.progress.setWindowTitle("Building " + archive["name"])

            # Map each filename in the EBOOT's table of contents to the
            # EBOOT offsets where its position/length fields live.
            toc_info = {}
            file_list = None

            if archive["toc"]:
                file_list = []

                toc = get_toc(eboot, archive["toc"])

                for entry in toc:
                    filename = entry["filename"]
                    pos_pos = entry["file_pos_pos"]
                    len_pos = entry["file_len_pos"]

                    toc_info[filename] = [pos_pos, len_pos]
                    file_list.append(filename)

            # Causes memory issues if I use the original order, for whatever reason.
            file_list = None

            with io.FileIO(archive["dat"], "w") as handler:
                table_of_contents = self.pack_dir(archive["dir"],
                                                  handler,
                                                  file_list=file_list,
                                                  eof=archive["eof"])

            # We're playing fast and loose with the file count anyway, so why not?
            self.file_count += 1
            self.progress.setValue(self.file_count)
            self.progress.setLabelText("Saving " + archive["name"] + "...")

            if archive["toc"]:
                # Patch the new positions/sizes into the EBOOT's TOC.
                for entry in table_of_contents:
                    if entry not in toc_info:
                        _LOGGER.warning(
                            "%s missing from %s table of contents." %
                            (entry, archive["name"]))
                        continue

                    file_pos = table_of_contents[entry]["pos"]
                    file_size = table_of_contents[entry]["size"]

                    eboot.overwrite(BitStream(uintle=file_pos, length=32),
                                    toc_info[entry][0] * 8)
                    eboot.overwrite(BitStream(uintle=file_size, length=32),
                                    toc_info[entry][1] * 8)

            del table_of_contents

        self.progress.setLabelText("Saving EBOOT.BIN...")
        self.progress.setValue(self.progress.maximum())

        # Text replacement
        to_replace = eboot_text.get_eboot_text()
        for replacement in to_replace:

            orig = bytearray(replacement.orig, encoding=replacement.enc)

            # If they left something blank, write the original text back.
            if len(replacement.text) == 0:
                data = orig
            else:
                data = bytearray(replacement.text, encoding=replacement.enc)

            # Positions are relative to the unextended EBOOT.
            pos = replacement.pos.int + eboot_offset

            padding = len(orig) - len(data)
            if padding > 0:
                # Null bytes to fill the rest of the space the original took.
                data.extend(bytearray(padding))

            data = ConstBitStream(bytes=data)
            eboot.overwrite(data, pos * 8)

        eboot_out = os.path.join(common.editor_config.iso_dir, "PSP_GAME",
                                 "SYSDIR", "EBOOT.BIN")

        with open(eboot_out, "wb") as f:
            eboot.tofile(f)

        self.progress.close()
class ModelPak():
  """A PAK archive containing GMO model files.

  Wraps the raw archive data and exposes the contained GMO models by index
  or by name, allowing in-place replacement as long as the new model fits
  within the original model's slot.
  """

  def __init__(self, filename = None):
    # Raw archive data (BitStream); filled in by load_file/load_data.
    self.__data = None
    # List of dicts: {_NAME, _START (byte offset), _SIZE (bytes), _DATA (GmoFile)}.
    self.__gmo_files = []
    
    if filename:
      self.load_file(filename)
  
  def load_file(self, filename):
    """Load a model PAK from disk."""
    data = BitStream(filename = filename)
    self.load_data(data)
  
  def load_data(self, data):
    """Parse raw model-PAK data and index the GMO files inside it."""
    files = [entry_data for (entry_name, entry_data) in get_pak_files(data)]
    
    # There are always at least four files in a model pak.
    # The first three I don't know a lot about, and then
    # the GMO files come after that.
    if len(files) < 4:
      _LOGGER.error("Invalid model PAK. %d files found, but at least 4 needed." % len(files))
      return
    
    # The name pak contains a list of null-terminated names for
    # each of the models, stored in our standard pak format.
    name_pak = files[0]
    names    = [entry_data.bytes.strip('\0') for (entry_name, entry_data) in get_pak_files(name_pak)]
    
    # Most of the model paks in SDR2 have a fourth unknown file before the models
    # start, so we'll just take everything from the back end and call it a day.
    models = files[-len(names):]
    
    # Now, we don't get file positions from the unpacker, so let's find those
    # and start filling out our internal list of GMO files.
    file_starts, file_ends = parse_pak_toc(data)
    model_starts = file_starts[-len(names):]
    
    for i, model in enumerate(models):
      # First of all, not all of the "models" present are actually GMO files.
      # It's rare, but there is the occasional other unknown format.
      # So let's make sure we have a GMO file.
      if model[:GMO_MAGIC.len] != GMO_MAGIC:
        continue
      
      name  = names[i]
      gmo   = GmoFile(data = model)
      size  = model.len / 8
      start = model_starts[i]
      
      self.__gmo_files.append({
        _NAME:   name,
        _START:  start,
        _SIZE:   size,
        _DATA:   gmo,
      })
    
    self.__data = BitStream(data)
  
  def save(self, filename):
    """Write the archive (with any replaced models) back out to disk."""
    self.__update_data()
    with open(filename, "wb") as f:
      self.__data.tofile(f)
  
  def __update_data(self):
    """Flush every in-memory GMO back into its slot in the raw archive."""
    for gmo in self.__gmo_files:
      start = gmo[_START] * 8
      data  = gmo[_DATA].data
      
      self.__data.overwrite(data, start)
  
  def get_data(self):
    """Return the raw archive data, with current GMO contents flushed in."""
    self.__update_data()
    return self.__data
  
  def gmo_count(self):
    """Number of GMO models found in this archive."""
    return len(self.__gmo_files)
    
  def get_gmo(self, index):
    """Return the GmoFile at index, or None (and log an error) if invalid."""
    # Check None first: the original compared None against gmo_count() before
    # the None test, which is meaningless (and breaks on Python 3). Use %s in
    # the log message so a None index can actually be formatted.
    if index is None or index >= self.gmo_count():
      _LOGGER.error("Invalid GMO ID %s." % index)
      return None
    
    return self.__gmo_files[index][_DATA]
  
  def get_gmos(self):
    """All GmoFile objects, in archive order."""
    return [gmo[_DATA] for gmo in self.__gmo_files]
  
  def get_name(self, index):
    """Return the model name at index, or None (and log an error) if invalid."""
    if index >= self.gmo_count():
      _LOGGER.error("Invalid GMO ID %d." % index)
      return None
    
    return self.__gmo_files[index][_NAME]
  
  def get_names(self):
    """All model names, in archive order."""
    return [gmo[_NAME] for gmo in self.__gmo_files]
  
  def id_from_name(self, name):
    """Return the index of the model with the given name, or None."""
    for i in range(self.gmo_count()):
      if self.__gmo_files[i][_NAME] == name:
        return i
    
    return None
  
  def gmo_from_name(self, name):
    """Return the GmoFile with the given name, or None if not present."""
    id = self.id_from_name(name)
    
    # Must compare against None explicitly: `if id:` treated a valid index
    # of 0 as "not found", making the first model unreachable by name.
    if id is not None:
      return self.get_gmo(id)
    else:
      return None
  
  def replace_gmo_file(self, index, filename):
    """Load a GMO from disk and insert it over the model at index."""
    gmo = GmoFile(filename = filename)
    self.replace_gmo(index, gmo)
    
  def replace_gmo(self, index, new_gmo):
    """Replace the model at index with new_gmo, if it fits in the old slot."""
    if index >= self.gmo_count():
      _LOGGER.error("Invalid GMO ID %d." % index)
      return None
    
    gmo = self.__gmo_files[index]
    
    if new_gmo.data.len / 8 > gmo[_SIZE]:
      _LOGGER.error("GMO too large to insert. %d bytes > %d bytes" % (new_gmo.data.len / 8, gmo[_SIZE]))
      return
    
    self.__gmo_files[index][_DATA] = new_gmo
    
    # Leave the length alone, though, because we know we have that much space
    # to work with from the original GMO file that was there, and there's no
    # point in shrinking that down if someone happens to want to re-replace
    # this GMO file without reloading the whole thing.
  
  def extract(self, directory, to_png = False):
    """Extract every GMO's images into per-model subdirectories of directory."""
    if not os.path.isdir(directory):
      os.makedirs(directory)
    
    for id in range(self.gmo_count()):
      gmo  = self.get_gmo(id)
      name = self.get_name(id)
      
      # Some GMOs carry no images; nothing to extract for those.
      if gmo.gim_count() == 0:
        continue
      
      out_dir = os.path.join(directory, name)
      gmo.extract(out_dir, to_png)
Exemplo n.º 26
0
class Page:
    """A fixed-size page held in memory as a BitStream.

    Alongside the raw buffer, the page tracks a list of (key, rowid) entry
    objects and how many such entries it can hold, as computed by the
    capacity function supplied at construction time.
    """

    def __init__(self, size, capacityFunction, entrySize=15):
        """
        @param capacityFunction: function to find the capacity of the page
        """
        self.pSize = size
        # Raw backing buffer: pSize bytes' worth of bits.
        self.memPage = BitStream(self.pSize * BYTESIZE)

        # In-memory entry objects, serialized on demand by writeEntriesToPage.
        self.entries = []
        # Size of one (key, rowid) entry in bytes; defaults to 15.
        self.entrySize = entrySize
        self.entriesCapacity = capacityFunction(self.pSize, self.entrySize)
        self.spaceRemaining = self.entriesCapacity

    def write(self, byteContents, bytePos):
        """Overwrite the page at byte offset bytePos with byteContents.

        bytePos is relative to the start of this page; as with
        BitStream.overwrite, the stream position ends up just past the
        written data.
        """
        self.memPage.overwrite(byteContents, bytePos * BYTESIZE)

    def writeToFile(self, filePointer):
        """Serialize the raw page out to an open file."""
        self.memPage.tofile(filePointer)

    def read(self, bytePos, formatString):
        """Seek to byte offset bytePos and read a value per formatString."""
        self.memPage.bytepos = bytePos
        return self.memPage.read(formatString)

    def tell(self):
        """Current byte position within the page."""
        return self.memPage.bytepos

    def seek(self, absBytePos, relative=0):
        """Move the page cursor: absolute by default, relative if requested."""
        self.memPage.bytepos = (self.memPage.bytepos + absBytePos) if relative else absBytePos

    def rawPage(self):
        """The page's raw contents as a bytes object."""
        return self.memPage.bytes

    def writeEntriesToPage(self, offset=0):
        """Serialize every entry in self.entries into the raw page.

        Writing starts at `offset` and proceeds sequentially; an empty
        entry list simply writes nothing.
        """
        self.seek(offset)
        for pageEntry in self.entries:
            self.write(pageEntry.parseToBytes(), self.tell())

    def insertEntry(self, entry):
        """Append an entry if capacity allows; return 1 on success, 0 if full.

        @param dirEntry: a directory entry object
        """
        if self.spaceRemaining <= 0:
            return 0

        self.entries.append(entry)
        self.spaceRemaining -= 1
        return 1

    def replaceEntries(self, entries):
        """Replace the entry list wholesale with `entries`.

        The new list may be shorter or longer than the old one, but must
        fit within entriesCapacity; spaceRemaining is recomputed to match.
        """
        size = len(entries)

        assert (size <= self.entriesCapacity)
        self.spaceRemaining = self.entriesCapacity - size

        assert (isinstance(entries, list))
        self.entries = entries

    def getSpaceRemaining(self):
        """Entry slots still unused."""
        return self.spaceRemaining

    def getEntriesCapacity(self):
        """Maximum number of entries this page can hold."""
        return self.entriesCapacity

    def getEntrySize(self):
        """Size in bytes of a single entry."""
        return self.entrySize

    def allEntries(self):
        """The current entry list."""
        return self.entries

    def hasEntries(self):
        """True once at least one slot has been consumed."""
        return self.getSpaceRemaining() < self.getEntriesCapacity()

    def numRecords(self):
        """Number of entries currently stored."""
        return len(self.entries)
 def setup_workspace(self):
   """One-time workspace setup: extract the game archives, clone the ISO
   tree for editing, patch the EBOOT, and unpack the editor data.

   Long-running steps are run on worker threads (with their stdout
   captured) so the modal progress dialog can keep updating.
   """
   # Source archives inside the (unmodified) ISO directory.
   umdimage  = os.path.join(self.iso_dir, UMDIMAGE_DAT)
   umdimage2 = os.path.join(self.iso_dir, UMDIMAGE2_DAT)
   voice     = os.path.join(self.iso_dir, VOICE_PAK)
   
   self.generate_directories()
   
   # 11000 is a rough upper bound on the number of progress steps; the
   # true total isn't known in advance.
   progress = QProgressDialog("", QtCore.QString(), 0, 11000, self)
   progress.setWindowTitle("Setting up workspace...")
   progress.setWindowModality(Qt.Qt.WindowModal)
   progress.setMinimumDuration(0)
   progress.setValue(0)
   progress.setAutoClose(False)
   progress.setAutoReset(False)
   
   progress.setLabelText("Creating directories...")
   
   # Do the easy stuff first.
   if not os.path.isdir(self.changes_dir):
     os.makedirs(self.changes_dir)
   progress.setValue(progress.value() + 1)
   
   if not os.path.isdir(self.backup_dir):
     os.makedirs(self.backup_dir)
   progress.setValue(progress.value() + 1)
   
   if not os.path.isdir(self.editor_data_dir):
     os.makedirs(self.editor_data_dir)
   progress.setValue(progress.value() + 1)
   
   # Each extraction runs on its own thread so the GUI keeps pumping
   # progress updates while it works.
   thread_fns = [
     {"target": extract_umdimage, "kwargs": {"filename": umdimage,  "out_dir": self.umdimage_dir,  "eboot": self.eboot_path, "umdimage": UMDIMAGES.umdimage}},
     {"target": extract_umdimage, "kwargs": {"filename": umdimage2, "out_dir": self.umdimage2_dir, "eboot": self.eboot_path, "umdimage": UMDIMAGES.umdimage2}},
     {"target": extract_pak,      "kwargs": {"filename": voice,     "out_dir": self.voice_dir}},
   ]
   
   # Going to capture stdout because I don't feel like
   # rewriting the extract functions to play nice with GUI.
   stdout      = sys.stdout
   sys.stdout  = cStringIO.StringIO()
   
   for thread_fn in thread_fns:
     thread = threading.Thread(**thread_fn)
     thread.start()
     
     while thread.isAlive():
       thread.join(THREAD_TIMEOUT)
       
       # Each line the extractor printed counts as one unit of progress.
       output = [line for line in sys.stdout.getvalue().split('\n') if len(line) > 0]
       progress.setValue(progress.value() + len(output))
       if len(output) > 0:
         progress.setLabelText("Extracting %s..." % output[-1])
       
       # Fresh capture buffer so only new output is seen on the next poll.
       sys.stdout = cStringIO.StringIO()
   
   sys.stdout = stdout
   
   # Give us an ISO directory for the editor to place modified files in.
   progress.setLabelText("Copying ISO files...")
   
   # ISO directory needs to not exist for copytree.
   if os.path.isdir(self.edited_iso_dir):
     shutil.rmtree(self.edited_iso_dir)
   
   # One more thing we want threaded so it doesn't lock up the GUI.
   thread = threading.Thread(target = shutil.copytree, kwargs = {"src": self.iso_dir, "dst": self.edited_iso_dir})
   thread.start()
   
   while thread.isAlive():
     thread.join(THREAD_TIMEOUT)
     progress.setLabelText("Copying ISO files...")
     # It has to increase by some amount or it won't update and the UI will lock up.
     progress.setValue(progress.value() + 1)
     
   # shutil.copytree(self.iso_dir, self.edited_iso_dir)
   progress.setValue(progress.value() + 1)
   
   # Files we want to make blank, because they're unnecessary.
   blank_files = [
     os.path.join(self.edited_iso_dir, "PSP_GAME", "INSDIR", "UMDIMAGE.DAT"),
     os.path.join(self.edited_iso_dir, "PSP_GAME", "SYSDIR", "UPDATE", "DATA.BIN"),
     os.path.join(self.edited_iso_dir, "PSP_GAME", "SYSDIR", "UPDATE", "EBOOT.BIN"),
     os.path.join(self.edited_iso_dir, "PSP_GAME", "SYSDIR", "UPDATE", "PARAM.SFO"),
   ]
   
   # Opening "wb" and writing nothing truncates each file to zero bytes.
   for blank in blank_files:
     with open(blank, "wb") as f:
       pass
   
   # Copy the decrypted EBOOT into the ISO folder and apply our hacks to it.
   progress.setLabelText("Hacking EBOOT...")
   progress.setValue(progress.value() + 1)
   
   hacked_eboot = BitStream(filename = self.eboot_path)
   hacked_eboot, offset = apply_eboot_patches(hacked_eboot)
   with open(os.path.join(self.edited_iso_dir, "PSP_GAME", "SYSDIR", "EBOOT.BIN"), "wb") as f:
     hacked_eboot.tofile(f)
   # shutil.copy(self.eboot_path, os.path.join(self.edited_iso_dir, "PSP_GAME", "SYSDIR", "EBOOT.BIN"))
   
   progress.setLabelText("Extracting editor data...")
   progress.setValue(progress.value() + 1)
   
   # Extract the editor data.
   editor_data = zipfile.ZipFile("data/editor_data.zip", "r")
   editor_data.extractall(self.editor_data_dir)
   editor_data.close()
   
   progress.setValue(progress.maximum())
   progress.close()
   
   # Advance the wizard: this step is done, enable the next one.
   self.ui.grpStep4.setEnabled(False)
   self.ui.grpStep5.setEnabled(True)
Exemplo n.º 28
0
# coding: utf-8

from bitstring import BitStream

# Starting just past offset 0x436 (presumably the BMP header + palette for
# an 8-bit bitmap -- TODO confirm for other BMP flavors), insert a 0 bit
# before every 7 bits of the remaining data, expanding the stream.
s = BitStream(filename="t.bmp")
s.pos = 0x436 * 8

while s.pos <= s.len - 7:
    # insert() leaves pos just after the inserted bit; skip the next 7 data bits.
    s.insert('0b0', s.pos)
    s.pos += 7

# Use a context manager so the output is always closed, and open() instead
# of the file() builtin (which Python 3 removed).
with open('gen.bmp', 'wb') as f:
    s.tofile(f)
print('Done')