Code Example #1
 def LoadConfig(self,
                config_yml_path: str = 'config.yml',
                paths_yml_path: str = 'paths.yml') -> None:
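     """Load config.yml and paths.yml, then parse the lexicon."""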
     self.config.cfg = YAMLConfig(config_yml_path)
     self.pathcfg.cfg = YAMLConfig(paths_yml_path).cfg[self.config.get(
         'codebase', 'vg')]
     self.lexicon = self.ParseLexiconText('lexicon.txt')
Code Example #2
File: hhvm_build.py Project: N3X15/hhvm-build
          log.info('HHVM Version set: %s', HHVM_VERSION)
          cmake.setFlag('HHVM_VERSION_OVERRIDE', HHVM_VERSION)
          #with open('hphp/system/idl/constants.idl.json', 'w') as f:
          #  f.write(REG_VERSION.sub('\1+' + NIGHTLY_DATE, version_file))
        else:
          branch = 'HHVM-{}'.format(HHVM_VERSION)

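        # Sync the repository: pull the selected branch and update submodules.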
        repo.quiet = False
        repo.CheckForUpdates(remote='origin', branch=branch, quiet=False)
        repo.Pull(remote='origin', branch=branch, cleanup=not args.disable_git_clean)
        repo.UpdateSubmodules()

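        # Load the distro-specific package configuration (package.yml).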
        distro_info = os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE, 'package.yml')
        distro_cfg = YAMLConfig(distro_info, template_dir='/', variables={
            'SOURCE_DIR': SOURCE_DIR,
            'DISTRO_DIR': os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE),
            'HHVMBUILD_DIR': HHVMBUILD_DIR
        })
        pbsteps = distro_cfg.get('prebuild', []) or []
        if pbsteps:
          log.info('Performing prebuild steps...')
          origpath = os.path.abspath(os.getcwd())
          RunCommandsIn(pbsteps, distro_cfg, {
              'SOURCE_DIR': SOURCE_DIR,
              'DISTRO_DIR': os.path.join(HHVMBUILD_DIR, 'hhvm', DISTRO_NAME, DISTRO_RELEASE),
              'HHVMBUILD_DIR': HHVMBUILD_DIR
          })
          os.chdir(origpath)
Code Example #3
def main():
    argp = argparse.ArgumentParser(description='Generation script for ss13-vox.')
    #argp.add_argument('--codebase', choices=['vg', 'tg'], default='vg', help='Which codebase to generate for. (Affects output code and paths.)')
    argp.add_argument('--threads', '-j', type=int, default=multiprocessing.cpu_count(), help='How many threads to use in ffmpeg.')
    #argp.add_argument('phrasefiles', nargs='+', type=str, help='A list of phrase files.')
    args = argp.parse_args()

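    # Ensure a temporary working directory exists for intermediate files.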
    if not os.path.isdir('tmp'):
        os.makedirs('tmp')

    DIST_DIR = 'dist'
    PREEX_SOUND = 'sound/vox/{ID}.wav'
    NUVOX_SOUND = 'sound/vox_{SEX}/{ID}.wav'
    voices = []
    vox_sounds_path = ''
    templatefile = ''

    config = BaseConfig()
    config.cfg = YAMLConfig('config.yml')
    pathcfg = BaseConfig()
    pathcfg.cfg = YAMLConfig('paths.yml').cfg[config.get('codebase', 'vg')]

    PREEX_SOUND = pathcfg.get('sound.old-vox', PREEX_SOUND)
    NUVOX_SOUND = pathcfg.get('sound.new-vox', NUVOX_SOUND)

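    # Resolve the configured voices and prepare per-sex phrase assignment buckets.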
    voice_assignments = {}
    all_voices = []
    default_voice: Voice = VoiceRegistry.Get(USSLTFemale.ID)
    sfx_voice: SFXVoice = SFXVoice()
    configured_voices: Dict[str, dict] = {}
    for sexID, voiceid in config.get('voices', {'fem': USSLTFemale.ID}).items():
        voice = VoiceRegistry.Get(voiceid)
        assert sexID != ''
        voice.assigned_sex = sexID
        if sexID in ('fem', 'mas'):
            sex = EVoiceSex(sexID)
            assert voice.SEX == sex
            voices += [voice]
        elif sexID == 'default':
            default_voice = voice
        voice_assignments[voice.assigned_sex] = []
        all_voices += [voice]
        configured_voices[sexID] = voice.serialize()

    voice_assignments[sfx_voice.assigned_sex] = []
    all_voices += [sfx_voice]
    configured_voices[sfx_voice.assigned_sex] = sfx_voice.serialize()

    vox_sounds_path = os.path.join(DIST_DIR, pathcfg.get('vox_sounds.path'))
    templatefile = pathcfg.get('vox_sounds.template')
    vox_data_path = os.path.join(DIST_DIR, pathcfg.get('vox_data'))

    DATA_DIR = os.path.join(DIST_DIR, 'data')
    os_utils.ensureDirExists(DATA_DIR)
    with log.info('Parsing lexicon...'):
        lexicon = ParseLexiconText('lexicon.txt')

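    # Collect phrases from the configured phrase files; duplicate IDs are fatal.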
    phrases = []
    phrasesByID = {}
    broked = False
    for filename in config.get('phrasefiles', ['announcements.txt', 'voxwords.txt']):
        for p in ParsePhraseListFrom(filename):
            if p.id in phrasesByID:
                duplicated = phrasesByID[p.id]
                log.critical('Duplicate phrase with ID %s in file %s on line %d! First instance in file %s on line %d.', p.id, p.deffile, p.defline, duplicated.deffile, duplicated.defline)
                broked = True
                continue
            phrases += [p]
            phrasesByID[p.id] = p
        if broked:
            sys.exit(1)

    soundsToKeep = set()
    for sound in OTHERSOUNDS:
        soundsToKeep.add(os.path.join(DIST_DIR, sound + '.ogg'))

    phrases.sort(key=lambda x: x.id)

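    # Apply per-phrase overrides and decide which voices should speak each phrase.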
    overrides = config.get('overrides', {})
    for phrase in phrases:
        if phrase.id in overrides:
            phrase.fromOverrides(overrides.get(phrase.id))
        phrase_voices = list(voices)
        # If it has a path, it's being manually specified.
        if '/' in phrase.id:
            phrase.filename = phrase.id + '.ogg'
            phrase_voices = [default_voice]
            soundsToKeep.add(os.path.abspath(os.path.join(DIST_DIR, phrase.filename)))
        else:
            phrase.filename = NUVOX_SOUND
            if phrase.hasFlag(EPhraseFlags.OLD_VOX):
                phrase_voices = [default_voice]
                phrase.filename = PREEX_SOUND.format(ID=phrase.id)
                for voice in ['fem', 'mas']:
                    phrase.files[voice] = FileData()
                    phrase.files[voice].filename = phrase.filename
                    phrase.files[voice].checksum = ''
                    phrase.files[voice].duration = phrase.override_duration or -1
                    phrase.files[voice].size     = phrase.override_size or -1
                    #voice_assignments[voice].append(phrase)
                soundsToKeep.add(os.path.abspath(os.path.join(DIST_DIR, phrase.filename)))
                continue

        if phrase.hasFlag(EPhraseFlags.SFX):
            phrase_voices = [sfx_voice]

        if not phrase.hasFlag(EPhraseFlags.OLD_VOX):
            log.info('%s - %r', phrase.id, [x.assigned_sex for x in phrase_voices])
            for v in phrase_voices:
                voice_assignments[v.assigned_sex].append(phrase)
                #phrase.files[v.assigned_sex] = fd
    #sys.exit(1)
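    # For each voice, dump its Festival lexicon script and generate audio for its assigned phrases.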
    for voice in all_voices:
        print(voice.ID, voice.assigned_sex)
        DumpLexiconScript(voice.FESTIVAL_VOICE_ID, lexicon.values(), 'tmp/VOXdict.lisp')
        for phrase in voice_assignments[voice.assigned_sex]:
            GenerateForWord(phrase, voice, soundsToKeep, args)
            for fd in phrase.files.values():
                soundsToKeep.add(os.path.abspath(os.path.join(DIST_DIR, fd.filename)))

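    # Render the sound-list template with the collected phrase and asset data.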
    jenv = jinja2.Environment(loader=jinja2.FileSystemLoader(['./templates']))
    jenv.add_extension('jinja2.ext.do') # {% do ... %}
    templ = jenv.get_template(templatefile)
    with log.info('Writing sound list to %s...', vox_sounds_path):
        os_utils.ensureDirExists(os.path.dirname(vox_sounds_path))
        assetcache = {}
        sound2id = {}
        with open(vox_sounds_path, 'w') as f:
            sexes = {
                'fem': [],
                'mas': [],
                'default': [],
                #'sfx': [],
            }
            for p in phrases:
                for k in p.files.keys():
                    assetcache[p.getAssetKey(k)] = p.files[k].filename
                    sound2id[p.files[k].filename] = p.getAssetKey(k)
                if p.hasFlag(EPhraseFlags.NOT_VOX):
                    continue
                for k in p.files.keys():
                    if p.hasFlag(EPhraseFlags.SFX):
                        for sid in ('fem', 'mas'):
                            if p not in sexes[sid]:
                                sexes[sid].append(p)
                    else:
                        sexes[k].append(p)
            f.write(templ.render(
                InitClass=InitClass,
                SEXES=sexes,
                ASSETCACHE=assetcache,
                SOUND2ID=sound2id,
                PHRASES=[p for p in phrases if not p.hasFlag(EPhraseFlags.NOT_VOX)]))
    soundsToKeep.add(os.path.abspath(vox_sounds_path))

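    # Write a machine-readable manifest of the configured voices and generated words.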
    os_utils.ensureDirExists(DATA_DIR)
    with open(os.path.join(DATA_DIR, 'vox_data.json'), 'w') as f:
        data = {
            'version': 2,
            'compiled': time.time(),
            'voices': configured_voices,
            'words': collections.OrderedDict({w.id: w.serialize() for w in phrases if '/' not in w.id}),
        }
        json.dump(data, f, indent=2)
    soundsToKeep.add(os.path.abspath(os.path.join(DATA_DIR, 'vox_data.json')))

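    # Record every file we intend to keep, then prune anything in dist/ that is no longer defined.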
    with open('tmp/written.txt', 'w') as f:
        for filename in sorted(soundsToKeep):
            f.write(f'{filename}\n')

    for root, _, files in os.walk(DIST_DIR, topdown=False):
        for name in files:
            filename = os.path.abspath(os.path.join(root, name))
            if filename not in soundsToKeep:
                log.warning('Removing {0} (no longer defined)'.format(filename))
                os.remove(filename)
Code Example #4
    def build(self):
        with os_utils.Chdir(self.base_path):
            os_utils.cmd([os_utils.which('yarn')],
                         echo=True,
                         show_output=True,
                         critical=True)
            os_utils.cmd([os_utils.which('grunt'), 'requirejs', 'uglify:main'],
                         echo=True,
                         show_output=True,
                         critical=True)


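# Make locally installed node binaries available on PATH.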
os_utils.ENV.prependTo('PATH', os.path.join('node_modules', '.bin'))

cfg = YAMLConfig('buildconf.yml')

bm = BuildMaestro()

# Add additional command-line arguments to the script here.
argp = bm.build_argparser()
argp.add_argument('--no-minify',
                  action='store_true',
                  default=False,
                  help="Don't minify anything.")
argp.add_argument('--deploy',
                  action='store_true',
                  help='Also run rsync/SSH deployment steps.')
# Parse args
args = bm.parse_args(argp)
Code Example #5
            "7za": os_utils.which("7z.exe"),
            "cmake": os_utils.which("cmake.exe"),
            "git": os_utils.which("git.exe"),
            "graphviz": "C:\\Program Files (x86)\\Graphviz2.38\\bin\\dot.exe",  # Default Graphviz2 install
            "hg": os_utils.which("hg.exe"),
            "perl": "C:\\Perl64\\bin\\perl.exe",  # ActiveState
            "python": os_utils.which("python.exe"),
            "ruby": os_utils.which("ruby.exe"),
            "svn": os_utils.which("svn.exe"),
        },
        "qt-base": "C:\\Qt\\Qt5.5.1\\5.5\\msvc2013_64",  # Not used. Yet.
    },
    "build": {"job-count": multiprocessing.cpu_count() * 2},
}

config = YAMLConfig("build.yml", config, variables={"nbits": "32"})
config.Load("user-config.yml", merge=True, defaults=userconfig)
EXECUTABLES = config.get("paths.executables")

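# Put 7-Zip's directory on PATH and select the Qt makespec.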
ENV.appendTo("PATH", os.path.dirname(EXECUTABLES["7za"]))
ENV.set("QMAKESPEC", config.get("qt-makespec", "win32-msvc2013"))

#: x64 or x86
short_arch = "x64" if config["architecture"] == "x86_64" else "x86"
#: 64 or 32
nbits = "64" if config["architecture"] == "x86_64" else "32"

superrepo = os.path.join("build", "modorganizer_super")
if not os.path.isdir(superrepo):
    os_utils.ensureDirExists(superrepo)
    with os_utils.Chdir(superrepo):
Code Example #6
File: config.py Project: N3X15/modorganizer-umbrella
    'graphviz':      os_utils.which('dot.exe'), #path_or_default("dot.exe",   "Graphviz2.38", "bin"),
    'cmake':         os_utils.which('cmake.exe'), #path_or_default("cmake.exe", "CMake", "bin"),
    'git':           os_utils.which('git.exe'), #path_or_default("git.exe",   "Git", "bin"),
    'hg':            os_utils.which('hg.exe'),
    'perl':          os_utils.which('perl.exe'),
    'ruby':          os_utils.which('ruby.exe'),
    'svn':           os_utils.which('svn.exe'),
    '7z':            os_utils.which('7z.exe'),
    # we need a python that matches the build architecture
    'python':        os_utils.which('python27.exe'),
    'visual_studio': os.path.realpath(
        os.path.join(get_from_hklm(r"SOFTWARE\Microsoft\VisualStudio\{}".format(config['vc_version']),
                                   "InstallDir", True),
                     "..", "..", "VC"
                     )
    )
}
config = YAMLConfig('build.yml', config)
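# Verify that every required tool was located.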
assert config.get('paths.graphviz') is not None
assert config.get('paths.cmake') is not None
assert config.get('paths.git') is not None
assert config.get('paths.hg') is not None
assert config.get('paths.perl') is not None
assert config.get('paths.ruby') is not None
assert config.get('paths.svn') is not None
assert config.get('paths.7z') is not None
assert config.get('paths.python') is not None
if missing_prerequisites:
    print('\nMissing prerequisites listed above - cannot continue')
    exit(1)