Beispiel #1
0
def build_clone_lists(input_dat):
    """ Formats a clone list appropriately

    Works out which clone list JSON file matches the input dat, loads it,
    and returns a CloneList object built from its keys. Returns None
    implicitly when no clone list file exists for the dat.
    """

    # Strip variant tags from the dat name so it matches clone list filenames
    remove_string = r' \((Parent-Clone|J64|ROM|Decrypted|Encrypted|BigEndian|ByteSwapped)\)'
    if re.search(remove_string, input_dat.name) is not None:
        dat_name = re.sub(remove_string, '', input_dat.name)
    else:
        dat_name = input_dat.name

    # Default clone list location; special-cased systems override it below.
    # Bug fix: previously the PSP branch left clone_file unassigned (NameError)
    # when the dat URL was neither No-Intro nor Redump.
    clone_file = './clonelists/' + dat_name + '.json'

    if 'GameCube' in dat_name and ('NKit GCZ' in dat_name or 'NKit ISO'
                                   in dat_name or 'NASOS' in dat_name):
        clone_file = './clonelists/Nintendo - GameCube.json'
    elif 'Wii U' in dat_name and 'WUX' in dat_name:
        clone_file = './clonelists/Nintendo - Wii U.json'
    # 'NASOS' was previously spelled 'NASOs' here, so Wii NASOS dats never
    # matched this branch (the GameCube branch uses 'NASOS').
    elif 'Wii' in dat_name and ('NKit GCZ' in dat_name or 'NKit ISO'
                                in dat_name or 'NASOS' in dat_name):
        clone_file = './clonelists/Nintendo - Wii.json'
    elif ('PlayStation Portable' in dat_name and '(PSN)' not in dat_name
          and '(PSX2PSP)' not in dat_name and '(UMD Music)' not in dat_name
          and '(UMD Video)' not in dat_name):
        # Both No-Intro and Redump cover PSP; pick the matching clone list
        if 'no-intro' in input_dat.url:
            clone_file = './clonelists/Sony - PlayStation Portable (No-Intro).json'
        elif 'redump' in input_dat.url:
            clone_file = './clonelists/Sony - PlayStation Portable (Redump).json'

    if os.path.exists(clone_file) and os.path.isfile(clone_file):
        try:
            with open(clone_file, 'r') as input_file_read:
                clonedata = json.load(input_file_read)

        except OSError as e:
            print(f'\n{Font.error_bold}* Error: {Font.end}{str(e)}\n')
            raise

        except ValueError:
            # json.load raises a ValueError subclass on malformed JSON
            printwrap(
                f'\n{Font.error_bold}* Error: "{os.path.abspath(clone_file)}"{Font.error} isn\'t valid JSON. Exiting...{Font.end}',
                'error')
            print('\n')
            raise

        # Missing keys in the clone list fall back to empty containers
        compilations = set()
        conditional_overrides = {}
        overrides = {}
        renames = {}

        if 'compilations' in clonedata:
            compilations.update(clonedata['compilations'])
        if 'overrides' in clonedata:
            overrides = clonedata['overrides']
        if 'conditional_overrides' in clonedata:
            conditional_overrides = clonedata['conditional_overrides']
        if 'renames' in clonedata:
            renames = clonedata['renames']

        return CloneList(compilations, overrides, conditional_overrides,
                         renames)
Beispiel #2
0
def build_regions(REGIONS):
    """ Imports regions and languages from a file

    Reads the JSON file named by REGIONS.filename and returns a populated
    Regions object. Exits with an error message if the file, or its region
    order key, is missing.
    """

    regions = Regions()

    if os.path.exists(REGIONS.filename) and os.path.isfile(REGIONS.filename):
        try:
            # The with block closes the file; no explicit close() needed
            with open(REGIONS.filename, 'r') as input_file_read:
                regiondata = json.load(input_file_read)

            # Set the implied languages
            if REGIONS.region_order in regiondata:
                regions.all.extend(regiondata[REGIONS.region_order])
                regions.implied_language = regiondata[REGIONS.region_order]

                # Set the default region order (JSON object key order)
                regions.region_order = list(regiondata[REGIONS.region_order])
            else:
                printwrap(
                    f'{Font.error_bold}* Error: {Font.error}The {Font.bold}'
                    f'{REGIONS.region_order}{Font.error} key is missing '
                    f'from {Font.bold}{REGIONS.filename}{Font.error}. It\'s needed '
                    'for Retool to know what regions and languages are available.'
                    f'{Font.end}', 'error')
                sys.exit()

            # Set the other language details
            if REGIONS.languages in regiondata:
                regions.languages_key = regiondata[REGIONS.languages]
                regions.languages_long = list(regiondata[REGIONS.languages])
                regions.languages_short = list(
                    regiondata[REGIONS.languages].values())

            return regions

        except OSError as e:
            print(f'\n{Font.error_bold}* Error: {Font.end}{str(e)}\n')
            raise
    else:
        printwrap(
            f'{Font.error_bold}* Error: {Font.error}The {Font.bold}'
            f'{REGIONS.filename}{Font.error} file is missing. Retool can\'t continue.'
            f'{Font.end}', 'error')
        sys.exit()
Beispiel #3
0
def build_tags(TAGS):
    """ Imports a list of tags from a file, to strip from titles during processing

    Reads the JSON file named by TAGS.filename and returns a populated Tags
    object. Warns about missing keys; exits if the file itself is missing.
    """

    tag_strings = Tags()

    if os.path.exists(TAGS.filename) and os.path.isfile(TAGS.filename):
        try:
            with open(TAGS.filename, 'r') as input_file_read:
                tag_file = json.load(input_file_read)

            if TAGS.ignore in tag_file:
                tag_strings.ignore = tag_file[TAGS.ignore]

                # Edition tags are ignored too. Use .get() so missing edition
                # keys only trigger the warning loop below, instead of a
                # KeyError here (which fired before the warning could print).
                for tag in (tag_file.get(TAGS.demote_editions, [])
                            + tag_file.get(TAGS.promote_editions, [])):
                    if tag not in tag_file[TAGS.ignore]:
                        tag_strings.ignore.append(tag)
            if TAGS.disc_rename in tag_file:
                tag_strings.disc_rename = tag_file[TAGS.disc_rename]
            if TAGS.promote_editions in tag_file:
                tag_strings.promote_editions = tag_file[
                    TAGS.promote_editions]
            if TAGS.demote_editions in tag_file:
                tag_strings.demote_editions = tag_file[
                    TAGS.demote_editions]

            # Error handling: warn about each expected key that's missing
            for section in [
                    TAGS.ignore, TAGS.disc_rename, TAGS.promote_editions,
                    TAGS.demote_editions
            ]:
                if section not in tag_file:
                    printwrap(
                        f'{Font.warning}* The {Font.bold}{section}{Font.warning}'
                        f' key is missing from {Font.bold}{TAGS.filename}'
                        f'{Font.warning}. Clone matching won\'t be accurate.'
                        f'{Font.end}')

            return tag_strings

        except OSError as e:
            print(f'\n{Font.error_bold}* Error: {Font.end}{str(e)}\n')
            raise
    else:
        printwrap(
            f'{Font.error_bold}* Error:{Font.error} The {Font.bold}{TAGS.filename}'
            f'{Font.error} file is missing.{Font.end}')
        sys.exit()
Beispiel #4
0
def build_clone_lists(dat_name):
    """ Formats a clone list appropriately

    Works out which clone list JSON file matches the dat name, loads it,
    and returns a CloneList object built from its keys. Returns None
    implicitly when no clone list file exists for the dat.
    """

    # Import JSON files that have the same name as dat_name + .json.
    # 'NASOS' was previously spelled 'NASOs' in the Wii branch, so Wii NASOS
    # dats never matched it (the GameCube branch uses 'NASOS').
    if 'GameCube' in dat_name and ('NKit GCZ' in dat_name or 'NKit ISO'
                                   in dat_name or 'NASOS' in dat_name):
        clone_file = './clonelists/Nintendo - GameCube.json'
    elif 'Wii U' in dat_name and 'WUX' in dat_name:
        clone_file = './clonelists/Nintendo - Wii U.json'
    elif 'Wii' in dat_name and ('NKit GCZ' in dat_name or 'NKit ISO'
                                in dat_name or 'NASOS' in dat_name):
        clone_file = './clonelists/Nintendo - Wii.json'
    else:
        clone_file = './clonelists/' + dat_name + '.json'

    if os.path.exists(clone_file) and os.path.isfile(clone_file):
        try:
            with open(clone_file, 'r') as input_file_read:
                clonedata = json.load(input_file_read)

        except OSError as e:
            print(f'\n{Font.error_bold}* Error: {Font.end}{str(e)}\n')
            raise

        except ValueError:
            # json.load raises a ValueError subclass on malformed JSON
            printwrap(
                f'\n{Font.error_bold}* Error: "{os.path.abspath(clone_file)}"{Font.error} isn\'t valid JSON. Exiting...{Font.end}',
                'error')
            print('\n')
            raise

        # Missing keys in the clone list fall back to empty containers
        compilations = set()
        conditional_overrides = {}
        overrides = {}
        renames = {}

        if 'compilations' in clonedata:
            compilations.update(clonedata['compilations'])
        if 'overrides' in clonedata:
            overrides = clonedata['overrides']
        if 'conditional_overrides' in clonedata:
            conditional_overrides = clonedata['conditional_overrides']
        if 'renames' in clonedata:
            renames = clonedata['renames']

        return CloneList(compilations, overrides, conditional_overrides,
                         renames)
Beispiel #5
0
def generate_config(region_data):
    """ Creates a default user-config.yaml if one doesn't already exist,
    then exits so the user can review it before running Retool again.
    Does nothing when the file is already present. """

    if not os.path.isfile('user-config.yaml'):
        try:
            with open('user-config.yaml', 'w') as output_file:
                def write_entry(string, comment=False):
                    # Commented entries are present but disabled by default
                    if comment:
                        output_file.writelines(f'\n  # - {string}')
                    else:
                        output_file.writelines(f'\n  - {string}')

                output_file.writelines(
                    '---\n# If the -l option is used, only include titles with the following languages.'
                )
                output_file.writelines(
                    '\n# Comment out languages you don\'t want.')
                output_file.writelines('\n- language filter:')

                # English is enabled by default; every other language starts
                # commented out. (Previously written with a placeholder-less
                # f-string instead of the helper.)
                write_entry('English')

                for language in region_data.languages_long:
                    if language != 'English':
                        write_entry(language, True)

                output_file.writelines(
                    '\n\n# The region order Retool follows. Comment out the regions you don\'t want.'
                )
                output_file.writelines('\n- region order:')

                for region in region_data.region_order:
                    write_entry(region)

                printwrap(
                    f'{Font.warning}* The {Font.warning_bold}user-config.yaml '
                    f'{Font.warning}file was missing, so a new one has been generated. '
                    'You might want to edit it to define a custom region order, or to '
                    f'filter specific languages. You can now run Retool '
                    f'normally.{Font.end}', 'error')
                sys.exit()

        except OSError as e:
            print(f'\n{Font.error_bold}* Error: {Font.end}{str(e)}\n')
            raise
Beispiel #6
0
def main():
    """ Entry point: builds 1G1R dats from a single dat file or a folder of
    dat files, writes the output dats, and prints a timed summary. """

    # Start a timer from when the process started
    start_time = time.time()

    # Splash screen
    os.system('cls' if os.name == 'nt' else 'clear')
    print(f'{Font.bold}\nRetool {__version__}{Font.end}')
    print('-----------')
    if len(sys.argv) == 1:
        printwrap(
            f'Creates 1G1R versions of Redump ({Font.underline}'
            f'http://redump.org/{Font.end}) dats.', 'no_indent'
        )

    # Generate regions and languages
    region_data = build_regions(RegionKeys())
    LANGUAGES = '|'.join(region_data.languages_short)

    # Regexes
    REGEX = Regex(LANGUAGES)

    # Generate user config file if it's missing (exits if it was generated)
    generate_config(region_data)

    # Check user input -- if none, or there's an error, available options will be shown
    user_input = check_input()

    # Import the user-config.yaml file and assign filtered languages and custom
    # region order.
    user_input = import_user_config(region_data, user_input)

    # Based on region counts from redump.org
    PRIORITY_REGIONS = [
        'USA', 'Japan', 'Europe', 'Germany', 'Poland', 'Italy',
        'France', 'Spain', 'Netherlands', 'Russia', 'Korea']

    # Generate tag strings
    user_input.tag_strings = build_tags(TagKeys())

    if os.path.isfile('.dev'):
        printverbose(
            user_input.verbose,
            f'{Font.warning_bold}* Operating in dev mode{Font.end}')

    # Process the input file or folder
    if os.path.isdir(user_input.input_file_name):
        is_folder = True
        dat_files = glob.glob(os.path.abspath(user_input.input_file_name) + '/*.dat')
        print('Processing folder...')
    else:
        is_folder = False
        dat_files = {user_input.input_file_name}

    file_count = len(dat_files)

    for i, dat_file in enumerate(dat_files):
        if file_count > 1:
            print(f'\n{Font.underline}Processing file '
                  f'{i+1}/{len(dat_files)}{Font.end}\n')

        # Process and get the details we need from the input file
        input_dat = process_input_dat(dat_file, is_folder)

        # process_input_dat returns None when a file in a batch can't be
        # handled -- skip to the next file instead of crashing on attribute
        # access below.
        if input_dat is None:
            continue

        # Import the system's clone lists, if they exist
        input_dat.clone_lists = build_clone_lists(input_dat.name)

        # Import scraped Redump metadata for titles
        input_dat.metadata = import_metadata(input_dat.name)

        # Get the stats from the original soup object before it's changed later
        print('* Gathering stats... ', sep=' ', end='', flush=True)
        stats = Stats(len(input_dat.soup.find_all('game')), user_input, input_dat)

        print('done.')

        # Provide dat details to reassure the user the correct file is being processed
        print(f'\n|  {Font.bold}DAT DETAILS{Font.end}')
        print(f'|  Description: {input_dat.description}')
        print(f'|  Author: {input_dat.author}')
        print(f'|  URL: {input_dat.url}')
        print(f'|  Version: {input_dat.version}\n')

        # For performance, change the region order so titles with a lot of regions are
        # processed first, and unknown regions are processed last. This doesn't affect
        # the user's region order when it comes to title selection.
        processing_region_order = [
            x for x in user_input.user_region_order if x in PRIORITY_REGIONS]
        processing_region_order.extend(
            [x for x in user_input.user_region_order if x not in PRIORITY_REGIONS and x != 'Unknown'])
        if 'Unknown' in user_input.user_region_order:
            processing_region_order.append('Unknown')

        # Convert each region's XML to dicts so we can more easily work with the data,
        # and determine each region's parent
        titles = Titles()

        compilations_found = set()

        for region in processing_region_order:
            print(
                f'* Checking dat for titles in provided regions... {region}',
                sep='', end='\r', flush=True
            )
            titles.regions[region] = dat_to_dict(
                region, region_data, input_dat, user_input,
                compilations_found, REGEX)

            # Clear the progress line
            sys.stdout.write("\033[K")

        # Deal with compilations
        if input_dat.clone_lists is not None:
            if user_input.no_compilations == True:
                missing_compilations = {
                    compilation for compilation in input_dat.clone_lists.compilations if compilation not in compilations_found}

                for compilation in missing_compilations:
                    printverbose(
                        user_input.verbose,
                        f'{Font.warning_bold}* Title in compilations list not found in dat or selected regions: '
                        f'{compilation}{Font.end}')

                stats.compilations_count = len(compilations_found)

        print('* Checking dat for titles in provided regions... done.')

        # Combine all regions' titles and choose a parent based on region order
        print('* Finding parents across regions... ', sep='', end='\r', flush=True)

        titles = choose_cross_region_parents(titles, user_input)

        print('* Finding parents across regions... done.')

        # Process clone lists
        if input_dat.clone_lists is not None:
            print('* Assigning clones from clone lists... ', sep='', end='\r', flush=True)

            titles = assign_clones(titles, input_dat, region_data, user_input, REGEX)

            sys.stdout.write("\033[K")
            print('* Assigning clones from clone lists... done.')

        # Get the clone count
        stats.clone_count = 0
        for group, disc_titles in titles.all.items():
            for disc_title in disc_titles:
                if disc_title.cloneof != '':
                    stats.clone_count += 1

        # Get final title count
        if user_input.legacy == False:
            stats.final_title_count = get_title_count(titles, is_folder) - stats.clone_count
        else:
            stats.final_title_count = get_title_count(titles, is_folder)

        # Name the output file
        output_file_name = (
            os.path.join(
                user_input.output_folder_name,
                f'{input_dat.name} ({str("{:,}".format(stats.final_title_count))}) ({input_dat.version}) '
                f'[1G1R]{user_input.user_options} (Retool {datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%SS")[:-1]}).dat'))

        # Write the output dat file
        write_dat_file(input_dat, user_input, output_file_name, stats, titles)

        # Report stats
        report_stats(stats, titles, user_input, input_dat, region_data)

        # Start the loop again if processing a folder. Bug fix: this was
        # previously a `return`, which aborted the entire batch (and skipped
        # the summary) as soon as one file produced no titles.
        if stats.final_title_count == 0 and is_folder:
            continue

    # Stop the timer
    stop_time = time.time()
    total_time_elapsed = str('{0:.2f}'.format(round(stop_time - start_time,2)))

    # Set the summary message if input was a folder and files were found
    if is_folder:
        if file_count > 0:
            if file_count == 1:
                file_noun = 'file'
            else:
                file_noun = 'files'

            file_count = str('{:,}'.format(file_count))

            finish_message = (
                f'{Font.success}* Finished processing {file_count} {file_noun} in the '
                f'{Font.bold}"{user_input.input_file_name}{Font.success}" folder in '
                f'{total_time_elapsed}s. 1G1R dats have been created in the '
                f'{Font.bold}"{user_input.output_folder_name}"{Font.success} folder.{Font.end}'
                )
        else:
            # Set the summary message if no files were found in input folder
            finish_message = (
                f'{Font.warning}* No files found to process in the '
                f'{Font.bold}"{user_input.input_file_name}"{Font.warning} folder.{Font.end}'
                )
    else:
        # Set the summary message if input was a single file
        finish_message = (
            f'{Font.success}* Finished adding '
            f'{str("{:,}".format(stats.final_title_count))}'
            f' unique titles to "{Font.bold}{output_file_name}" '
            f'{Font.success}in {total_time_elapsed}s.{Font.end}'
            )

    # Print the summary message
    print('\n')
    printwrap(f'{finish_message}\n')

    return
Beispiel #7
0
def process_input_dat(dat_file, is_folder):
    """ Prepares input dat file and converts to an object

    Returns a Dat object with the following populated:

    .name
    .description
    .version
    .author
    .url
    .soup

    Removes the following from a Dat object:

    .contents

    Returns None when processing a folder and the current file can't be
    handled, so the caller can move on to the next file.
    """

    # In batch mode, errors skip the file instead of aborting
    if is_folder:
        next_status = ' Skipping file...'
    else:
        next_status = ''

    printwrap(f'* Reading dat file: "{Font.bold}{dat_file}{Font.end}"')
    try:
        with open(dat_file, 'r') as input_file:
            print('* Validating dat file... ', sep=' ', end='', flush=True)
            input_dat = input_file.read()
    except OSError as e:
        printwrap(
            f'{Font.error_bold}* Error: {Font.error}{str(e)}.{Font.end}{next_status}',
            'error')
        if not is_folder:
            raise
        else:
            return

    # Check the dat file format -- if it's CLRMAMEPro format, convert it to LogiqX
    clrmame_header = re.search(r'^clrmamepro \($.*?^\)$', input_dat,
                               re.M | re.S)

    if clrmame_header:
        print('file is a CLRMAMEPro dat file.')
        input_dat = convert_clrmame_dat(clrmame_header, input_dat, is_folder)

        # Go to the next file in a batch operation if something went wrong.
        if input_dat == 'end_batch': return
    else:
        input_dat = Dat(input_dat)

        # Exit if there are entity or element tags to avoid abuse
        if '<!ENTITY' in input_dat.contents or '<!ELEMENT' in input_dat.contents:
            print('failed.')
            printwrap(
                f'{Font.error_bold} Error: {Font.error}Entity and element tags '
                f'aren\'t supported in dat files.{Font.end}{next_status}',
                'error')
            sys.exit()

        # Check for a valid Redump XML dat that follows the Logiqx dtd
        if ('<datafile>' in input_dat.contents
                and '<?xml' in input_dat.contents
                and '<game' in input_dat.contents):
            # Remove unexpected XML declarations from the file so we can check validity
            try:
                input_dat.contents = input_dat.contents.replace(
                    re.search(r'<\?xml.*?>', input_dat.contents)[0],
                    '<?xml version="1.0"?>')
            # re.search() returns None when no declaration matches, and
            # subscripting None raises TypeError (was a bare except)
            except TypeError:
                print('failed.')
                printwrap(
                    f'{Font.error_bold}* Error: {Font.error}File is missing an XML '
                    f'declaration. It\'s probably not a dat file.'
                    f'{next_status}{Font.end}', 'error')
                if not is_folder:
                    sys.exit()
                else:
                    return
            try:
                with open('datafile.dtd') as dtdfile:
                    dtd = etree.DTD(dtdfile)
                    try:
                        root = etree.XML(input_dat.contents)

                        if not dtd.validate(root):
                            print('failed.')
                            # Space after 'file ' restored -- the message
                            # previously read "XML filedoesn't conform"
                            printwrap(
                                f'{Font.error_bold}* Error: {Font.error}XML file '
                                f'doesn\'t conform to Logiqx dtd. '
                                f'{dtd.error_log.last_error}.'
                                f'{next_status}{Font.end}', 'error')
                            if not is_folder:
                                sys.exit()
                            else:
                                return
                    except etree.XMLSyntaxError as e:
                        print('failed.')
                        printwrap(
                            f'{Font.error_bold}* Error: {Font.error}XML file is '
                            f'malformed. {e}.{next_status}{Font.end}', 'error')
                        if not is_folder:
                            sys.exit()
                        else:
                            return
                    else:
                        print('file is a Logiqx dat file.')

            except OSError as e:
                printwrap(
                    f'{Font.error_bold}* Error: {str(e)}{next_status}{Font.end}',
                    'error')
                if not is_folder:
                    raise
                else:
                    return
        else:
            print('failed.')
            printwrap(
                f'{Font.error_bold}* Error: "{dat_file}"{Font.error} '
                f'isn\'t a compatible dat file.{next_status}{Font.end}',
                'error')
            if not is_folder:
                sys.exit()
            else:
                return

    # Convert contents to BeautifulSoup object, remove original contents attribute
    print('* Converting dat file to a searchable format... ',
          sep=' ',
          end='',
          flush=True)
    input_dat.soup = BeautifulSoup(input_dat.contents, "lxml-xml")
    del input_dat.contents
    print('done.')

    # Set input dat header details: fill any 'Unknown' attribute from the
    # matching header tag, and normalize empty values to 'Unknown'
    if input_dat.soup.find('header') is not None:
        for key, value in input_dat.__dict__.items():
            if (key != 'soup' and key != 'user_options' and value == 'Unknown'
                    and input_dat.soup.find(key) is not None):
                setattr(input_dat, key, input_dat.soup.find(key).string)
            elif value == '':
                setattr(input_dat, key, 'Unknown')

    # Remove Retool tag from name if it exists
    input_dat.name = input_dat.name.replace(' (Retool)', '')

    # Sanitize some header details which are used in the output filename
    # (Windows-invalid characters and reserved device names)
    characters = [':', '\\', '/', '<', '>', '"', '|', '?', '*']
    reserved_filenames = ['con', 'prn', 'aux', 'nul', 'com[1-9]', 'lpt[1-9]']

    for character in characters:
        if character in input_dat.name:
            input_dat.name = input_dat.name.replace(character, '-')
        if character in input_dat.version:
            input_dat.version = input_dat.version.replace(character, '-')

    for filename in reserved_filenames:
        if re.search('^' + filename + '$', input_dat.name) is not None:
            input_dat.name = 'Unknown'
        if re.search('^' + filename + '$', input_dat.version) is not None:
            input_dat.version = 'Unknown'

    return input_dat
Beispiel #8
0
def convert_clrmame_dat(clrmame_header, input_dat, is_folder):
    """ Converts CLRMAMEPro dat format to LogiqX dat format

    Returns a Dat object built from the converted XML, or 'end_batch' when
    the input isn't a recognizable dat and a folder is being processed.
    """

    def header_details(find_string, replace_string):
        """ Gets values for CLRMAMEPro dat header details """

        search_string = re.search(find_string, clrmame_header[0])

        if search_string is not None:
            return re.sub(replace_string, '', search_string.group(0)).strip()
        else:
            return ''

    dat_name = header_details(re.compile('.*?name.*'), 'name |(\")')
    dat_description = header_details(re.compile('.*?description.*'),
                                     'description |(\")')
    dat_category = header_details(re.compile('.*?category.*'),
                                  'category |(\")')
    dat_version = header_details(re.compile('.*?version.*'), 'version |(\")')
    dat_author = header_details(re.compile('.*?author.*'), 'author |(\")')

    convert_dat = []

    convert_dat.append('<?xml version="1.0"?>\n\
        <!DOCTYPE datafile PUBLIC "-//Logiqx//DTD ROM Management Datafile//EN" \
            "http://www.logiqx.com/Dats/datafile.dtd"><datafile>\n\t<header>')
    convert_dat.append(f'\t\t<name>{dat_name}</name>')
    convert_dat.append(f'\t\t<description>{dat_description}</description>')
    convert_dat.append(f'\t\t<version>{dat_version}</version>')
    convert_dat.append(f'\t\t<author>{dat_author}</author>\n\t</header>')

    # Now work through each of the title details
    dat_contents = re.findall(r'^game \($.*?^\)$', input_dat, re.M | re.S)
    if dat_contents:
        for item in dat_contents:
            xml_node = re.split('\n', item)
            regex = re.sub('name |(\")', '', xml_node[1].strip())
            convert_dat.append(
                f'\t<game name="{regex}">'
                f'\n\t\t<category>{dat_category}</category>\n\t\t<description>'
                f'{regex}</description>')
            for node in xml_node:
                # Strip once (previously re-stripped on every substitution,
                # plus a no-op `node = node` assignment)
                node = node.strip()
                if node.startswith('rom'):
                    # Rewrite the `rom ( name ... )` line as XML attributes
                    node = re.sub(r'^rom \( name ', '<rom name="', node)
                    node = re.sub(' size ', '" size="', node)
                    node = re.sub(' crc ', '" crc="', node)
                    node = re.sub(' md5 ', '" md5="', node)
                    node = re.sub(' sha1 ', '" sha1="', node)
                    node = re.sub(r' \)$', '" />', node)
                    convert_dat.append('\t\t' + node)
            convert_dat.append('\t</game>')
        convert_dat.append('</datafile>')

        convert_dat = '\n'.join(convert_dat)
    else:
        printwrap(
            f'{Font.error_bold} * Error: {Font.error}file isn\'t Logiqx XML or '
            f'CLRMAMEPro dat.{Font.end}', 'error')
        if not is_folder:
            sys.exit()
        else:
            return 'end_batch'
    return Dat(convert_dat, dat_name, dat_description, dat_version, dat_author)