def build_vdf(self):
    root = vdf.VDFDict()
    root["Updater"] = vdf.VDFDict()
    root["Updater"]["Information"] = vdf.VDFDict()
    root["Updater"]["Files"] = self.files
    root["Updater"]["Information"]["Version"] = vdf.VDFDict(
        {"Latest": str(self.version)})
    for note in self.notes:
        root["Updater"]["Information"]["Notes"] = note
    return root
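The Notes loop above relies on VDFDict accepting duplicate keys: with this mapper, assigning to a key that already exists adds another entry rather than overwriting the first. A minimal sketch of that behaviour (key names and values here are illustrative only):

import vdf

root = vdf.VDFDict()
root["Notes"] = vdf.VDFDict()
root["Notes"]["Note"] = "first release"
root["Notes"]["Note"] = "fixed crash on startup"  # kept as a second "Note" entry

# both duplicate keys survive serialization to KeyValues text
print(vdf.dumps(root, pretty=True))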
Example #2
    def load_data(self) -> None:
        if self.exists():
            data = vdf.load(open(self.path))
        else:
            data = vdf.VDFDict()
            data['InstallConfigStore'] = {'Software': {'Valve': {'Steam': {}}}}

        steam = data['InstallConfigStore']['Software']['Valve']['Steam']

        if 'CompatToolMapping' not in steam:
            steam['CompatToolMapping'] = {}
        else:
            stale_entries = []

            for game, mapping in steam['CompatToolMapping'].items():
                # remove entries that were disabled in the Steam UI by the user
                if ('name' in mapping and 'config' in mapping
                        and mapping['name'] == '' and mapping['config'] == ''):
                    stale_entries.append(game)

                # remove all entries added by Chimera (they will be re-added if still configured)
                elif 'Priority' in mapping and mapping['Priority'] in ('209', '229'):
                    stale_entries.append(game)

            for entry in stale_entries:
                del steam['CompatToolMapping'][entry]

        self.config_data = data
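A write-back counterpart is not shown above; a minimal sketch of one, assuming the same config layout that load_data() builds (the helper name is hypothetical), could look like this:

import vdf

def save_config(config_data, path):
    # hypothetical helper: serialize the (possibly cleaned) structure back
    # to disk in Valve's text KeyValues format
    with open(path, 'w') as config_file:
        vdf.dump(config_data, config_file, pretty=True)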
    def get_files(self):
        self.files = vdf.VDFDict()
        for root, dirs, files in os.walk(str(self.sm_formatter.local_root),
                                         topdown=True):
            root = Path(root)
            for file in files:
                file = root.joinpath(file)
                remote = self.sm_formatter.format(file)
                root_file_folder = self.sm_formatter.strip_local(file).parts[0]
                _type = self.type_map_SM.get(root_file_folder,
                                             self.type_map_SM[None])
                self.files[_type] = str(remote)

        if self.mod_formatter is not None:
            for root, dirs, files in os.walk(str(self.mod_formatter.local_root),
                                             topdown=True):
                root = Path(root)
                for file in files:
                    file = root.joinpath(file)
                    remote = self.mod_formatter.format(file)
                    file_path_parts = self.mod_formatter.strip_local(file).parts
                    root_file_folder = file_path_parts[0]
                    # skip the sm folder (parts is a tuple, so index it with a length guard)
                    if root_file_folder != "addons" and (
                            len(file_path_parts) < 2
                            or file_path_parts[1] != "sourcemod"):
                        _type = self.type_map_MOD.get(root_file_folder,
                                                      self.type_map_MOD[None])
                        self.files[_type] = str(remote)
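Because every assignment to self.files adds another entry, the resulting block ends up with one line per file under repeated type keys. A rough illustration of the shape this produces (type names and paths are made up):

import vdf

files = vdf.VDFDict()
files["Plugin"] = "addons/sourcemod/plugins/example.smx"
files["Plugin"] = "addons/sourcemod/plugins/other.smx"   # second "Plugin" entry
files["Source"] = "cfg/sourcemod/example.cfg"

root = vdf.VDFDict()
root["Files"] = files
print(vdf.dumps(root, pretty=True))  # every file appears under its own key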
Example #4
	def test_theater_directives(self,filename=theater_test):
		th = self.test_theater_instance(filename)
		obj = vdf.VDFDict()
		obj['#include'] = 'test.inc'
		th.process_directives(obj=obj)
		th.load_base(filename=filename,obj=th.theater)
		th.load_base(filename=filename,obj=th.theater)
Example #5
	def __init__(self, filename=None, path=None, paths=None, data=None):
		self.filename = filename
		self.files = vdf.VDFDict()
		self.processed = vdf.VDFDict()
		self.bases = {}
		self.paths = []
		self.theater_conditions = {}
		if paths is None:
			paths = [os.getcwd()]
		if not path is None:
			paths.append(path)
		for path in paths:
			self.add_path(path)
		self.set_filename(filename=filename)
		if not data is None:
			# TODO: Support loading string or dict as data, rather than a file.
			pass
Example #6
	def __init__(self, classname=None, entity=None):
		self.min = None
		self.max = None
		if entity is not None:
			self.parse_entity(entity=entity)
		else:
			self.entity = vdf.VDFDict()
			if classname is not None:
				self.classname = classname
    def __init__(self, data=None):
        if not isinstance(data, vdf.VDFDict):  # checks if data is already a VDFDict
            # otherwise data is expected to be a path to a VDF file
            data = vdf.parse(open(data),
                             mapper=vdf.VDFDict,
                             merge_duplicate_keys=False)
            self.d = vdf.VDFDict(data)
        else:
            self.d = data
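The merge_duplicate_keys=False argument is what keeps repeated keys apart when reading VDF text back in; with the default behaviour (merging enabled) they are collapsed. A small demonstration using vdf.loads on a string:

import vdf

text = '"Files"\n{\n    "Plugin"  "a.smx"\n    "Plugin"  "b.smx"\n}\n'

merged = vdf.loads(text)  # default mapper: the second "Plugin" wins
kept = vdf.loads(text, mapper=vdf.VDFDict, merge_duplicate_keys=False)

print(len(merged["Files"]))  # 1
print(len(kept["Files"]))    # 2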
Example #8
	def __init__(self, parent, name, bsp=None, nav=None, cpsetup_txt=None, overview_txt=None, overview_vtf=None, parsed_file=None, vmf_file=None, do_parse=True, decompile=True, unpack_files=True):
		"""
			Args:
		"""
		self.parent = parent
		self.name = name
		print("Checking map '{}'".format(name))
		# Load propdefs into Entity object
		Entity().load_propdefs(propdefs=self.parent.parent.config['map_entities_props'])
		self.map = vdf.VDFDict()
		self.map_files = {}
		self.map_files_checksums = {}
		self.map_files_paths = {}
		self.map_files_data = vdf.VDFDict()
		self.entities = []
		self.do_parse = do_parse
		self.decompile = decompile
		self.unpack_files = unpack_files
		self.find_map_files()
		self.parse_json()
		self.parse_map_files()
		self.export_parsed()
def get_tag_number(app):
    tag_num = ""

    if "tags" in app and isinstance(app["tags"], dict):
        # Have to create a copy to avoid: "RuntimeError: dictionary changed size during iteration"
        tags = app["tags"].copy()
        for tag in tags:
            # Search to see if a ProtonDB rank is already a tag; if so, just overwrite that tag
            if app["tags"][tag].startswith("ProtonDB Ranking:", 0, 17):
                if not tag_num:
                    tag_num = tag
                else:
                    # Delete dupe tags caused by a bug in previous versions; this check may be removed once it's no longer an issue
                    del app["tags"][tag]
        if not tag_num:
            # If no ProtonDB tags were found, use the next available number
            tag_num = str(len(app["tags"]))
    # If the tags key wasn't found, that means there are no tags for the game
    else:
        tag_num = "0"
        app["tags"] = vdf.VDFDict()

    return tag_num
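For reference, get_tag_number() can be exercised with a plain dictionary shaped like an app entry from sharedconfig.vdf (the tag values are illustrative):

app = {"tags": {"0": "favorite", "1": "ProtonDB Ranking: 2 Gold"}}
print(get_tag_number(app))   # "1" -- reuses the existing ProtonDB tag slot

app = {"tags": {"0": "favorite"}}
print(get_tag_number(app))   # "1" -- next free tag index

app = {}
print(get_tag_number(app))   # "0" -- no tags yet, an empty VDFDict is created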
Example #10
    def load_data(self) -> None:
        if self.exists():
            data = vdf.load(open(self.path))
        else:
            data = vdf.VDFDict()
            data['UserLocalConfigStore'] = {
                'Software': {
                    'Valve': {
                        'Steam': {}
                    }
                }
            }

        steam_input = data['UserLocalConfigStore']
        if 'Apps' not in steam_input:
            steam_input['Apps'] = {}

        launch_options = (
            data['UserLocalConfigStore']['Software']['Valve']['Steam'])
        if 'Apps' not in launch_options:
            launch_options['Apps'] = {}

        self.config_data = data
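Once the Apps sections exist, entries can be added and the structure written back out. The app ID, the 'LaunchOptions' key, and the file name below are assumptions used purely for illustration:

import vdf

data = vdf.VDFDict()
data['UserLocalConfigStore'] = {'Software': {'Valve': {'Steam': {'Apps': {}}}}}

apps = data['UserLocalConfigStore']['Software']['Valve']['Steam']['Apps']
apps['620'] = {'LaunchOptions': 'PROTON_LOG=1 %command%'}  # hypothetical entry

with open('localconfig.vdf', 'w') as out:
    vdf.dump(data, out, pretty=True)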
Example #11
def main(argv):
    usage = "Usage: ProtonDB-to-Steam-Library.py \n" \
          + "        -s <absolute path to sharedconfig.vdf> \n" \
          + "        -n (disables saving)"
    sharedconfig_path = ""
    skip_save = False

    ### From here until the matching comment below, we are just parsing the command-line arguments
    try:
        opts, _ = getopt.getopt(argv, "hs:n")

    except getopt.GetoptError:
        print(usage)
        sys.exit()

    for opt, arg in opts:
        if opt == "-h":
            print(usage)
            sys.exit()

        elif opt in "-s":
            if os.path.exists(arg):
                try:
                    vdf.load(open(arg))
                    sharedconfig_path = arg
                except:
                    print(arg)
                    print("Invalid path!")
                    sys.exit()

            # With ~ for user home
            elif os.path.exists(os.path.expanduser(arg)):
                try:
                    vdf.load(open(arg))
                    sharedconfig_path = os.path.expanduser(arg)

                except:
                    print(os.path.expanduser(arg))
                    print("Invalid path!")
                    sys.exit()

            else:
                print(arg)
                print("Invalid path!")
                sys.exit()

        elif opt in "-n":
            skip_save = True
    ### Done with command-line arguments

    # If sharedconfig_path was not set with a command-line argument, have get_sharedconfig_path() find it
    if not sharedconfig_path:
        sharedconfig_path = get_sharedconfig_path()

    print("Selected: " + sharedconfig_path)
    sharedconfig = vdf.load(open(sharedconfig_path))

    # Get which version of the configstore you have
    configstore = get_configstore_for_vdf(sharedconfig)

    for app_id in sharedconfig[configstore]["Software"]["Valve"]["Steam"][
            "Apps"]:
        try:
            # This has to be here because some Steam AppIDs are strings of text, which ProtonDB does not support. Check test01.vdf line 278 for an example.
            app_id = int(app_id)
            tag_num = ""

            # If the app is native, no need to check ProtonDB
            if is_native(str(app_id)):
                print(str(app_id) + " native")
                continue

            try:
                # Have to create a copy to avoid: "RuntimeError: dictionary changed size during iteration"
                tags = sharedconfig[configstore]["Software"]["Valve"]["Steam"][
                    "Apps"][str(app_id)]["tags"].copy()
                for tag in tags:
                    # Search to see if a ProtonDB rank is already a tag; if so, just overwrite that tag
                    if sharedconfig[configstore]["Software"]["Valve"]["Steam"][
                            "Apps"][str(app_id)]["tags"][tag].startswith(
                                "ProtonDB Ranking:", 0, 17):
                        if not tag_num:
                            tag_num = tag
                        else:
                            # Delete dupe tags caused by a bug in previous versions; this check may be removed once it's no longer an issue
                            del sharedconfig[configstore]["Software"]["Valve"][
                                "Steam"]["Apps"][str(app_id)]["tags"][tag]
                if not tag_num:
                    # If no ProtonDB tags were found, use the next available number
                    tag_num = str(
                        len(sharedconfig[configstore]["Software"]["Valve"]
                            ["Steam"]["Apps"][str(app_id)]["tags"]))
            # If the tags key wasn't found, that means there are no tags for the game
            except KeyError:
                tag_num = "0"
                sharedconfig[configstore]["Software"]["Valve"]["Steam"][
                    "Apps"][str(app_id)]["tags"] = vdf.VDFDict()

            protondb_rating = get_protondb_rating(app_id)
            print(str(app_id) + " " + protondb_rating)

            # The 1,2,etc. force the better ranks to be at the top, as Steam sorts these alphabetically
            possible_ranks = {
                "platinum": "ProtonDB Ranking: 1 Platinum",
                "gold": "ProtonDB Ranking: 2 Gold",
                "silver": "ProtonDB Ranking: 3 Silver",
                "bronze": "ProtonDB Ranking: 4 Bronze",
                "pending": "ProtonDB Ranking: 5 Pending",
                "unrated": "ProtonDB Ranking: 6 Unrated",
                "borked": "ProtonDB Ranking: 7 Borked",
            }

            # Try to inject the tag into the vdfDict; if the rating returned by ProtonDB isn't a key above, this raises a KeyError
            try:
                sharedconfig[configstore]["Software"]["Valve"]["Steam"][
                    "Apps"][str(app_id)]["tags"][tag_num] = possible_ranks[
                        protondb_rating]
            except KeyError:
                print("Unknown ProtonDB rating: " + protondb_rating +
                      "\n Please report this on GitHub!")

        except urllib.error.HTTPError:
            continue
        except ValueError:
            continue

    # skip_save will be True if -n is passed
    if not skip_save:
        print("WARNING: This may clear your current tags on Steam!")
        check = input("Would you like to save sharedconfig.vdf? (y/N)")
        if check.lower() in ("yes", "y"):
            # Output the edited vdfDict back to the original location
            vdf.dump(sharedconfig, open(sharedconfig_path, 'w'), pretty=True)
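For completeness, a script like this would normally end with the usual entry-point guard (not shown in the snippet above):

if __name__ == "__main__":
    main(sys.argv[1:])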
Example #12
class VDFItem(object):
    def __init__(self, key, text):
        self.key = key
        self.text = text

    @cached_property
    def location(self):
        return '{}'.format(self.key)

    def getid(self):
        return self.key

defaultCodec = 'UTF-16-LE'

vdfTranslationBase = vdf.VDFDict()
vdfTranslationBase['lang'] = vdf.VDFDict()
vdfTranslationBase['lang']['Language'] = 'English' # TODO
vdfTranslationBase['lang']['Tokens'] = vdf.VDFDict()

class VDFSerializer(object):
    def __init__(self, units, language):
        self.units = units
        self.language = language
    
    def __call__(self, handle):
        # language = pycountry.languages.get(alpha_2=self.language)

        # file_base = copy.deepcopy(vdfTranslationBase)
        file_base = vdf.loads(vdf.dumps(vdfTranslationBase)) # copy structure
        # file_base['lang']['Language'] = language.name
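        # The rest of __call__ is cut off above. A rough sketch of the likely
        # remainder, assuming self.units holds VDFItem-like objects (key/text)
        # and that the serialized result is written to the open handle:
        for unit in self.units:
            file_base['lang']['Tokens'][unit.key] = unit.text
        handle.write(vdf.dumps(file_base, pretty=True))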
Example #13
print('This program converts to and from the .txt format used by the Source engine and the .json format used by POEditor.'
      ' It can also convert an exported .json back to a .txt usable by Source. It first asks for a language: enter the'
      ' lowercase English name of the language, so it can find that language file. If you want to go from .json to .txt,'
      ' you must name your .json after the language it contains. Encoding a file will also create a "_ref_exp" file.')

lang = input("Language?\n")
option = input("DECODE (D) (.txt to .json) or ENCODE (E) (.json to .txt)?\n")

if option == "DECODE" or option == "D":
    print("Decoding...")
    d = vdf.load(open("momentum_" + lang + '.txt'), mapper=vdf.VDFDict)
    tokens = []
    for key, value in d['lang']['Tokens'].items():
        tokens.append({'term': key, 'definition': value})
    json.dump(tokens, open("momentum_" + lang + '.json', 'w'), indent=4, sort_keys=True)
    print('Tokens dumped to .json')

elif option == "ENCODE" or option == "E":
    print("Encoding...")
    with open(lang + '.json') as filez:
        jos = json.load(filez)
        mom = vdf.VDFDict([('lang', vdf.VDFDict([('Language', lang.title()), ('Tokens', vdf.VDFDict())]))])
        for key in jos:
            mom['lang']['Tokens'][key['term']] = key['definition']
        vdf.dump(mom, open('momentum_' + lang + '.txt', 'w', encoding='utf_16_le'), pretty=True)
        print('momentum_%s exported.' % lang)
        if lang == 'english':
            vdf.dump(mom, open('momentum_english_ref_exp.txt', 'w', encoding='utf-8'), pretty=True)
            print('momentum_english_ref_exp exported.')

else:
    print("Unknown command. DECODE/D or ENCODE/E")