Example #1
0
def run_on_chunk(command):
  try:
    if JS_OPTIMIZER in command: # XXX hackish
      index = command.index(JS_OPTIMIZER)
      filename = command[index + 1]
    else:
      filename = command[1]
    if os.environ.get('EMCC_SAVE_OPT_TEMP') and os.environ.get('EMCC_SAVE_OPT_TEMP') != '0':
      saved = 'save_' + os.path.basename(filename)
      while os.path.exists(saved): saved = 'input' + str(int(saved.replace('input', '').replace('.txt', ''))+1) + '.txt'
      print('running js optimizer command', ' '.join([c if c != filename else saved for c in command]), file=sys.stderr)
      shutil.copyfile(filename, os.path.join(shared.get_emscripten_temp_dir(), saved))
    if shared.EM_BUILD_VERBOSE >= 3: print('run_on_chunk: ' + str(command), file=sys.stderr)
    proc = shared.run_process(command, stdout=subprocess.PIPE)
    output = proc.stdout
    assert proc.returncode == 0, 'Error in optimizer (return code ' + str(proc.returncode) + '): ' + output
    assert len(output) and not output.startswith('Assertion failed'), 'Error in optimizer: ' + output
    filename = temp_files.get(os.path.basename(filename) + '.jo.js').name
    f = open(filename, 'w')
    f.write(output)
    f.close()
    if DEBUG and not shared.WINDOWS: print('.', file=sys.stderr) # Skip debug progress indicator on Windows, since it doesn't buffer well with multiple threads printing to console.
    return filename
  except KeyboardInterrupt:
    # avoid throwing keyboard interrupts from a child process
    raise Exception()
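For context, a minimal sketch of how a caller might fan run_on_chunk out over several chunk files with a process pool; running in worker processes is also why the function above converts KeyboardInterrupt into a plain Exception. The driver function, chunk_files and extra_args here are assumptions for illustration only, not the surrounding tool's actual entry point; JS_OPTIMIZER and run_on_chunk are taken from the example above.
# Illustrative only: a hypothetical driver for run_on_chunk. It assumes each
# chunk has already been written to its own temp file.
import multiprocessing

def run_all_chunks(chunk_files, extra_args):
  # one command per chunk: the optimizer, the chunk file, then the pass names
  commands = [[JS_OPTIMIZER, f] + extra_args for f in chunk_files]
  with multiprocessing.Pool() as pool:
    # each worker returns the filename of its optimized output
    return pool.map(run_on_chunk, commands)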
Example #2
0
def run_on_chunk(command):
  try:
    if JS_OPTIMIZER in command: # XXX hackish
      index = command.index(JS_OPTIMIZER)
      filename = command[index + 1]
    else:
      filename = command[1]
    if os.environ.get('EMCC_SAVE_OPT_TEMP') and os.environ.get('EMCC_SAVE_OPT_TEMP') != '0':
      saved = 'save_' + os.path.basename(filename)
      while os.path.exists(saved): saved = 'input' + str(int(saved.replace('input', '').replace('.txt', ''))+1) + '.txt'
      print('running js optimizer command', ' '.join([c if c != filename else saved for c in command]), file=sys.stderr)
      shutil.copyfile(filename, os.path.join(shared.get_emscripten_temp_dir(), saved))
    if shared.EM_BUILD_VERBOSE >= 3: print('run_on_chunk: ' + str(command), file=sys.stderr)
    proc = shared.run_process(command, stdout=subprocess.PIPE)
    output = proc.stdout
    assert proc.returncode == 0, 'Error in optimizer (return code ' + str(proc.returncode) + '): ' + output
    assert len(output) and not output.startswith('Assertion failed'), 'Error in optimizer: ' + output
    filename = temp_files.get(os.path.basename(filename) + '.jo.js').name
    with open(filename, 'w') as f:
      f.write(output)
    if DEBUG and not shared.WINDOWS: print('.', file=sys.stderr) # Skip debug progress indicator on Windows, since it doesn't buffer well with multiple threads printing to console.
    return filename
  except KeyboardInterrupt:
    # avoid throwing keyboard interrupts from a child process
    raise Exception()
Example #3
0
def run():
    if shared.Settings.WASM_BACKEND:
        # The wasm backend does not suffer from the same problem as fastcomp, so it
        # doesn't need the filename hashing.
        cmd = [shared.LLVM_AR] + sys.argv[1:]
        return shared.run_process(cmd, stdin=sys.stdin, check=False).returncode

    try:
        args = substitute_response_files(sys.argv)
    except IOError as e:
        shared.exit_with_error(e)
    newargs = [shared.LLVM_AR] + args[1:]

    tmpdir = None
    response_filename = None

    # The 3 argument form of ar doesn't involve other files. For example
    # 'ar x libfoo.a'.
    if len(newargs) > 3:
        tmpdir = tempfile.mkdtemp(prefix='emar-')
        cmd = newargs[1]
        if 'r' in cmd or 'q' in cmd:
            # We are adding files to the archive.
            # Normally the output file is then arg 2, except in the case where the
            # a or b modifiers are used, in which case it's arg 3.
            if 'a' in cmd or 'b' in cmd:
                out_arg_index = 3
            else:
                out_arg_index = 2

            # Add a hash to colliding basenames, to make them unique.
            for j in range(out_arg_index + 1, len(newargs)):
                orig_name = newargs[j]
                full_name = os.path.abspath(orig_name)
                basename = os.path.basename(full_name)

                h = hashlib.md5(full_name.encode('utf-8')).hexdigest()[:8]
                parts = basename.split('.')
                parts[0] += '_' + h
                newname = '.'.join(parts)
                full_newname = os.path.join(tmpdir, newname)
                shutil.copyfile(orig_name, full_newname)
                newargs[j] = full_newname

        if shared.DEBUG:
            print('emar:', sys.argv, '  ==>  ', newargs, file=sys.stderr)
        response_filename = create_response_file(
            newargs[3:], shared.get_emscripten_temp_dir())
        newargs = newargs[:3] + ['@' + response_filename]

    if shared.DEBUG:
        print('emar:', sys.argv, '  ==>  ', newargs, file=sys.stderr)

    rtn = shared.run_process(newargs, stdin=sys.stdin, check=False).returncode
    if tmpdir:
        shutil.rmtree(tmpdir)
        shared.try_delete(response_filename)
    return rtn
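To make the hashing step above concrete, here is a tiny, self-contained illustration of the rename it performs when two inputs share a basename. The paths are made up; only the md5-prefix scheme mirrors the code above.
# Illustrative only: shows the hash-suffixed names produced for two object
# files whose basenames collide (llvm-ar keys archive members by basename).
import hashlib
import os

for orig in ['/proj/a/util.c.o', '/proj/b/util.c.o']:
  full = os.path.abspath(orig)
  h = hashlib.md5(full.encode('utf-8')).hexdigest()[:8]
  parts = os.path.basename(full).split('.')
  parts[0] += '_' + h
  # prints e.g. /proj/a/util.c.o -> util_1f3a9c2b.c.o (actual hash differs)
  print(orig, '->', '.'.join(parts))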
Example #4
0
def run():
    try:
        args = substitute_response_files(sys.argv)
    except IOError as e:
        shared.exit_with_error(e)
    newargs = [shared.LLVM_AR] + args[1:]

    to_delete = []

    # The 3 argument form of ar doesn't involve other files. For example
    # 'ar x libfoo.a'.
    if len(newargs) > 3:
        cmd = newargs[1]
        if 'r' in cmd or 'q' in cmd:
            # We are adding files to the archive.
            # Normally the output file is then arg 2, except in the case where the
            # a or b modifiers are used, in which case it's arg 3.
            if 'a' in cmd or 'b' in cmd:
                out_arg_index = 3
            else:
                out_arg_index = 2

            # Add a hash to colliding basenames, to make them unique.
            for j in range(out_arg_index + 1, len(newargs)):
                orig_name = newargs[j]
                full_name = os.path.abspath(orig_name)
                dirname = os.path.dirname(full_name)
                basename = os.path.basename(full_name)

                h = hashlib.md5(full_name.encode('utf-8')).hexdigest()[:8]
                parts = basename.split('.')
                parts[0] += '_' + h
                newname = '.'.join(parts)
                full_newname = os.path.join(dirname, newname)
                try:
                    shutil.copyfile(orig_name, full_newname)
                    newargs[j] = full_newname
                    to_delete.append(full_newname)
                except Exception:
                    # it is ok to fail here, we just don't get hashing
                    pass

        if shared.DEBUG:
            print('emar:', sys.argv, '  ==>  ', newargs, file=sys.stderr)

        response_filename = create_response_file(
            newargs[3:], shared.get_emscripten_temp_dir())
        to_delete += [response_filename]
        newargs = newargs[:3] + ['@' + response_filename]

    if shared.DEBUG:
        print('emar:', sys.argv, '  ==>  ', newargs, file=sys.stderr)

    rtn = shared.run_process(newargs, stdin=sys.stdin, check=False).returncode
    for d in to_delete:
        shared.try_delete(d)
    return rtn
Example #5
0
def run_on_chunk(command):
    try:
        file_suffix = '.js'
        index = command.index(DUPLICATE_FUNCTION_ELIMINATOR)
        filename = command[index + 1]

        if '--gen-hash-info' in command:
            file_suffix = '.json'

        if os.environ.get('EMCC_SAVE_OPT_TEMP') and os.environ.get('EMCC_SAVE_OPT_TEMP') != '0':
            saved = 'save_' + os.path.basename(filename)
            while os.path.exists(saved):
                saved = 'input' + str(
                    int(saved.replace('input', '').replace('.txt', '')) +
                    1) + '.txt'
            print('running DFE command',
                  ' '.join([c if c != filename else saved for c in command]),
                  file=sys.stderr)
            shutil.copyfile(
                filename, os.path.join(shared.get_emscripten_temp_dir(),
                                       saved))

        if shared.EM_BUILD_VERBOSE_LEVEL >= 3:
            print('run_on_chunk: ' + str(command), file=sys.stderr)

        proc = shared.run_process(command, stdout=subprocess.PIPE)
        output = proc.stdout
        assert proc.returncode == 0, 'Error in optimizer (return code ' + str(
            proc.returncode) + '): ' + output
        assert len(output) > 0 and not output.startswith(
            'Assertion failed'), 'Error in optimizer: ' + output
        filename = temp_files.get(
            os.path.basename(filename) + '.dfjo' + file_suffix).name

        f = open(filename, 'w')
        f.write(output)
        f.close()
        if DEBUG and not shared.WINDOWS:
            # Skip debug progress indicator on Windows, since it doesn't buffer
            # well with multiple threads printing to console.
            print('.', file=sys.stderr)
        return filename
    except KeyboardInterrupt:
        # avoid throwing keyboard interrupts from a child process
        raise Exception()
    except (TypeError, ValueError) as e:
        formatted_lines = traceback.format_exc().splitlines()

        print(">>>>>>>>>>>>>>>>>", file=sys.stderr)
        for formatted_line in formatted_lines:
            print(formatted_line, file=sys.stderr)
        print("<<<<<<<<<<<<<<<<<", file=sys.stderr)

        raise
Example #6
0
def run():
  args = substitute_response_files(sys.argv)
  newargs = [shared.LLVM_AR] + args[1:]

  to_delete = []

  # The 3 argument form of ar doesn't involve other files. For example
  # 'ar x libfoo.a'.
  if len(newargs) > 3:
    cmd = newargs[1]
    if 'r' in cmd:
      # we are adding files to the archive.
      # normally the output file is then arg 2, except in the case where the
      # a or b modifiers are used, in which case it's arg 3.
      if 'a' in cmd or 'b' in cmd:
        new_member_args_start = 4
      else:
        new_member_args_start = 3

      # we add a hash to each input, to make them unique as
      # possible, as llvm-ar cannot extract duplicate names
      # (and only the basename is used!)
      for j in range(new_member_args_start, len(newargs)):
        orig_name = newargs[j]
        full_name = os.path.abspath(orig_name)
        dir_name = os.path.dirname(full_name)
        base_name = os.path.basename(full_name)
        h = hashlib.md5(full_name.encode('utf-8')).hexdigest()[:8]
        parts = base_name.split('.')
        parts[0] += '_' + h
        newname = '.'.join(parts)
        full_newname = os.path.join(dir_name, newname)
        if not os.path.exists(full_newname):
          try: # it is ok to fail here, we just don't get hashing
            shutil.copyfile(orig_name, full_newname)
            newargs[j] = full_newname
            to_delete.append(full_newname)
          except Exception:
            pass

    if shared.DEBUG:
      print('emar:', sys.argv, '  ==>  ', newargs, file=sys.stderr)

    response_filename = create_response_file(newargs[3:], shared.get_emscripten_temp_dir())
    to_delete += [response_filename]
    newargs = newargs[:3] + ['@' + response_filename]

  if shared.DEBUG:
    print('emar:', sys.argv, '  ==>  ', newargs, file=sys.stderr)

  try:
    return shared.run_process(newargs, stdin=sys.stdin, check=False).returncode
  finally:
    for d in to_delete:
      shared.try_delete(d)
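The '@' + response_filename argument above relies on ar's response-file support: the member list is written to a file and the command line only carries '@<path>'. Below is a minimal sketch of what such a helper could look like; the function name, quoting scheme and '.rsp' suffix are assumptions for illustration, not emscripten's actual create_response_file.
# Illustrative only: writes each argument on its own line and returns the
# path, to be passed to ar as '@<path>'. Not the real create_response_file.
import os
import tempfile

def make_response_file(args, tmpdir):
  fd, path = tempfile.mkstemp(suffix='.rsp', dir=tmpdir, text=True)
  with os.fdopen(fd, 'w') as f:
    for a in args:
      if ' ' in a:
        # quote paths containing spaces so ar reads them as one argument
        a = '"%s"' % a
      f.write(a + '\n')
  return path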
Example #7
0
def run_on_chunk(command):
  try:
    file_suffix = '.js'
    index = command.index(DUPLICATE_FUNCTION_ELIMINATOR)
    filename = command[index + 1]

    if '--gen-hash-info' in command:
      file_suffix = '.json'

    if os.environ.get('EMCC_SAVE_OPT_TEMP') and os.environ.get('EMCC_SAVE_OPT_TEMP') != '0':
      saved = 'save_' + os.path.basename(filename)
      while os.path.exists(saved):
        saved = 'input' + str(int(saved.replace('input', '').replace('.txt', '')) + 1) + '.txt'
      print('running DFE command', ' '.join([c if c != filename else saved for c in command]), file=sys.stderr)
      shutil.copyfile(filename, os.path.join(shared.get_emscripten_temp_dir(), saved))

    if shared.EM_BUILD_VERBOSE >= 3:
      print('run_on_chunk: ' + str(command), file=sys.stderr)

    proc = shared.run_process(command, stdout=subprocess.PIPE)
    output = proc.stdout
    assert proc.returncode == 0, 'Error in optimizer (return code ' + str(proc.returncode) + '): ' + output
    assert len(output) and not output.startswith('Assertion failed'), 'Error in optimizer: ' + output
    filename = temp_files.get(os.path.basename(filename) + '.dfjo' + file_suffix).name

    f = open(filename, 'w')
    f.write(output)
    f.close()
    if DEBUG and not shared.WINDOWS:
      print('.', file=sys.stderr) # Skip debug progress indicator on Windows, since it doesn't buffer well with multiple threads printing to console.
    return filename
  except KeyboardInterrupt:
    # avoid throwing keyboard interrupts from a child process
    raise Exception()
  except (TypeError, ValueError):
    formatted_lines = traceback.format_exc().splitlines()

    print(">>>>>>>>>>>>>>>>>", file=sys.stderr)
    for formatted_line in formatted_lines:
        print(formatted_line, file=sys.stderr)
    print("<<<<<<<<<<<<<<<<<", file=sys.stderr)

    raise
Example #8
0
def run_on_js(filename,
              passes,
              extra_info=None,
              just_split=False,
              just_concat=False):
    with ToolchainProfiler.profile_block('js_optimizer.split_markers'):
        if not isinstance(passes, list):
            passes = [passes]

        js = utils.read_file(filename)
        if os.linesep != '\n':
            js = js.replace(os.linesep,
                            '\n')  # we assume \n in the splitting code

        # Find suffix
        suffix_marker = '// EMSCRIPTEN_GENERATED_FUNCTIONS'
        suffix_start = js.find(suffix_marker)
        suffix = ''
        if suffix_start >= 0:
            suffix_end = js.find('\n', suffix_start)
            suffix = js[suffix_start:suffix_end] + '\n'
            # if there is metadata, we will run only on the generated functions. If there isn't, we will run on everything.

        # Find markers
        start_funcs = js.find(start_funcs_marker)
        end_funcs = js.rfind(end_funcs_marker)

        if start_funcs < 0 or end_funcs < start_funcs or not suffix:
            shared.exit_with_error(
                'Invalid input file. Did not contain appropriate markers. (start_funcs: %s, end_funcs: %s, suffix_start: %s)'
                % (start_funcs, end_funcs, suffix_start))

        minify_globals = 'minifyNames' in passes
        if minify_globals:
            passes = [
                p if p != 'minifyNames' else 'minifyLocals' for p in passes
            ]
            start_asm = js.find(start_asm_marker)
            end_asm = js.rfind(end_asm_marker)
            assert (start_asm >= 0) == (end_asm >= 0)

        closure = 'closure' in passes
        if closure:
            passes = [p for p in passes
                      if p != 'closure']  # we will do it manually

        cleanup = 'cleanup' in passes
        if cleanup:
            passes = [p for p in passes
                      if p != 'cleanup']  # we will do it manually

    if not minify_globals:
        with ToolchainProfiler.profile_block('js_optimizer.no_minify_globals'):
            pre = js[:start_funcs + len(start_funcs_marker)]
            post = js[end_funcs + len(end_funcs_marker):]
            js = js[start_funcs + len(start_funcs_marker):end_funcs]
            if 'asm' not in passes:
                # can have Module[..] and inlining prevention code, push those to post
                class Finals:
                    buf = []

                def process(line):
                    if len(line) and (line.startswith(('Module[', 'if (globalScope)'))
                                      or line.endswith('["X"]=1;')):
                        Finals.buf.append(line)
                        return False
                    return True

                js = '\n'.join(filter(process, js.split('\n')))
                post = '\n'.join(Finals.buf) + '\n' + post
            post = end_funcs_marker + post
    else:
        with ToolchainProfiler.profile_block('js_optimizer.minify_globals'):
            # We need to split out the asm shell as well, for minification
            pre = js[:start_asm + len(start_asm_marker)]
            post = js[end_asm:]
            asm_shell = js[start_asm + len(start_asm_marker):start_funcs +
                           len(start_funcs_marker)] + '''
EMSCRIPTEN_FUNCS();
''' + js[end_funcs + len(end_funcs_marker):end_asm + len(end_asm_marker)]
            js = js[start_funcs + len(start_funcs_marker):end_funcs]

            # we assume there is a maximum of one new name per line
            minifier = Minifier(js)

            def check_symbol_mapping(p):
                if p.startswith('symbolMap='):
                    minifier.symbols_file = p.split('=', 1)[1]
                    return False
                if p == 'profilingFuncs':
                    minifier.profiling_funcs = True
                    return False
                return True

            passes = list(filter(check_symbol_mapping, passes))
            asm_shell_pre, asm_shell_post = minifier.minify_shell(
                asm_shell, 'minifyWhitespace' in passes).split('EMSCRIPTEN_FUNCS();')
            asm_shell_post = asm_shell_post.replace('});', '})')
            pre += asm_shell_pre + '\n' + start_funcs_marker
            post = end_funcs_marker + asm_shell_post + post

            minify_info = minifier.serialize()

            if extra_info:
                for key, value in extra_info.items():
                    assert key not in minify_info or value == minify_info[key], \
                        [key, value, minify_info[key]]
                    minify_info[key] = value

            # if DEBUG:
            #   print >> sys.stderr, 'minify info:', minify_info

    with ToolchainProfiler.profile_block(
            'js_optimizer.remove_suffix_and_split'):
        # remove suffix if no longer needed
        if suffix and 'last' in passes:
            suffix_start = post.find(suffix_marker)
            suffix_end = post.find('\n', suffix_start)
            post = post[:suffix_start] + post[suffix_end:]

        total_size = len(js)
        funcs = split_funcs(js, just_split)
        js = None

    with ToolchainProfiler.profile_block('js_optimizer.split_to_chunks'):
        # if we are making source maps, we want our debug numbering to start from the
        # top of the file, so avoid breaking the JS into chunks
        cores = shared.get_num_cores()

        if not just_split:
            intended_num_chunks = int(round(cores * NUM_CHUNKS_PER_CORE))
            chunk_size = min(
                MAX_CHUNK_SIZE,
                max(MIN_CHUNK_SIZE, total_size / intended_num_chunks))
            chunks = chunkify(funcs, chunk_size)
        else:
            # keep same chunks as before
            chunks = [f[1] for f in funcs]

        chunks = [chunk for chunk in chunks if len(chunk)]
        if DEBUG and len(chunks):
            print('chunkification: num funcs:',
                  len(funcs),
                  'actual num chunks:',
                  len(chunks),
                  'chunk size range:',
                  max(map(len, chunks)),
                  '-',
                  min(map(len, chunks)),
                  file=sys.stderr)
        funcs = None

        if len(chunks):
            serialized_extra_info = suffix_marker + '\n'
            if minify_globals:
                serialized_extra_info += '// EXTRA_INFO:' + json.dumps(
                    minify_info)
            elif extra_info:
                serialized_extra_info += '// EXTRA_INFO:' + json.dumps(
                    extra_info)
            with ToolchainProfiler.profile_block('js_optimizer.write_chunks'):

                def write_chunk(chunk, i):
                    temp_file = temp_files.get('.jsfunc_%d.js' % i).name
                    with open(temp_file, 'w') as f:
                        f.write(chunk)
                        f.write(serialized_extra_info)
                    return temp_file

                filenames = [
                    write_chunk(chunks[i], i) for i in range(len(chunks))
                ]
        else:
            filenames = []

    with ToolchainProfiler.profile_block('run_optimizer'):
        if len(filenames):
            commands = [
                config.NODE_JS + [ACORN_OPTIMIZER, f] + passes
                for f in filenames
            ]

            if os.environ.get('EMCC_SAVE_OPT_TEMP') and os.environ.get(
                    'EMCC_SAVE_OPT_TEMP') != '0':
                for filename in filenames:
                    saved = 'save_' + os.path.basename(filename)
                    while os.path.exists(saved):
                        saved = 'input' + str(
                            int(
                                saved.replace('input', '').replace('.txt', ''))
                            + 1) + '.txt'
                    shutil.copyfile(
                        filename,
                        os.path.join(shared.get_emscripten_temp_dir(), saved))

            filenames = shared.run_multiple_processes(
                commands, route_stdout_to_temp_files_suffix='js_opt.jo.js')

        for filename in filenames:
            temp_files.note(filename)

    with ToolchainProfiler.profile_block('split_closure_cleanup'):
        if closure or cleanup:
            # run on the shell code, everything but what we acorn-optimize
            start_asm = '// EMSCRIPTEN_START_ASM\n'
            end_asm = '// EMSCRIPTEN_END_ASM\n'
            cl_sep = 'wakaUnknownBefore(); var asm=wakaUnknownAfter(wakaGlobal,wakaEnv,wakaBuffer)\n'

            with temp_files.get_file('.cl.js') as cle:
                pre_1, pre_2 = pre.split(start_asm)
                post_1, post_2 = post.split(end_asm)
                with open(cle, 'w') as f:
                    f.write(pre_1)
                    f.write(cl_sep)
                    f.write(post_2)
                cld = cle
                if closure:
                    if DEBUG:
                        print('running closure on shell code', file=sys.stderr)
                    cld = building.closure_compiler(cld,
                                                    pretty='minifyWhitespace'
                                                    not in passes)
                    temp_files.note(cld)
                elif cleanup:
                    if DEBUG:
                        print('running cleanup on shell code', file=sys.stderr)
                    acorn_passes = ['JSDCE']
                    if 'minifyWhitespace' in passes:
                        acorn_passes.append('minifyWhitespace')
                    cld = building.acorn_optimizer(cld, acorn_passes)
                    temp_files.note(cld)
                coutput = utils.read_file(cld)

            coutput = coutput.replace('wakaUnknownBefore();', start_asm)
            after = 'wakaUnknownAfter'
            start = coutput.find(after)
            end = coutput.find(')', start)
            # If the closure comment to suppress useless code is present, we need to look one
            # brace past it, as the first is in there. Otherwise, the first brace is the
            # start of the function body (what we want).
            USELESS_CODE_COMMENT = '/** @suppress {uselessCode} */ '
            USELESS_CODE_COMMENT_BODY = 'uselessCode'
            brace = pre_2.find('{') + 1
            has_useless_code_comment = False
            if pre_2[brace:brace + len(USELESS_CODE_COMMENT_BODY)] == USELESS_CODE_COMMENT_BODY:
                brace = pre_2.find('{', brace) + 1
                has_useless_code_comment = True
            pre = coutput[:start] + '(' + (
                USELESS_CODE_COMMENT if has_useless_code_comment else
                '') + 'function(global,env,buffer) {\n' + pre_2[brace:]
            post = post_1 + end_asm + coutput[end + 1:]

    with ToolchainProfiler.profile_block('write_pre'):
        filename += '.jo.js'
        temp_files.note(filename)
        f = open(filename, 'w')
        f.write(pre)
        pre = None

    with ToolchainProfiler.profile_block('sort_or_concat'):
        if not just_concat:
            # sort functions by size, to make diffing easier and to improve aot times
            funcses = []
            for out_file in filenames:
                funcses.append(split_funcs(utils.read_file(out_file), False))
            funcs = [item for sublist in funcses for item in sublist]
            funcses = None
            if not os.environ.get('EMCC_NO_OPT_SORT'):
                funcs.sort(key=lambda x: (len(x[1]), x[0]), reverse=True)

            if 'last' in passes and len(funcs):
                count = funcs[0][1].count('\n')
                if count > 3000:
                    print('warning: Output contains some very large functions '
                          '(%s lines in %s), consider building source files with -Os or -Oz'
                          % (count, funcs[0][0]), file=sys.stderr)

            for func in funcs:
                f.write(func[1])
            funcs = None
        else:
            # just concat the outputs
            for out_file in filenames:
                f.write(utils.read_file(out_file))

    with ToolchainProfiler.profile_block('write_post'):
        f.write('\n')
        f.write(post)
        # No need to write suffix: if there was one, it is inside post which exists when suffix is there
        f.write('\n')
        f.close()

    return filename
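For reference, a toy sketch of the marker layout that run_on_js expects in its input file. The START/END ASM markers and the GENERATED_FUNCTIONS suffix match the strings used in the code above; the START/END FUNCS marker strings and the function bodies are assumptions for illustration.
# Illustrative only: everything between the FUNCS markers is what gets split
# into chunks; the GENERATED_FUNCTIONS line is the suffix metadata.
toy_input = '''var Module = {};
// EMSCRIPTEN_START_ASM
var asm = (function(global, env, buffer) {
// EMSCRIPTEN_START_FUNCS
function _foo() { return 1; }
function _bar() { return _foo() + 1; }
// EMSCRIPTEN_END_FUNCS
return { _foo: _foo, _bar: _bar };
});
// EMSCRIPTEN_END_ASM
// EMSCRIPTEN_GENERATED_FUNCTIONS: ["_foo","_bar"]
'''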
Example #9
0

"""Updates the arm_neon.h header taken from SIMDe
(https://github.com/simd-everywhere/simde) in system/include/neon
"""

import os
import subprocess
import sys

from os import path

sys.path.insert(1, path.dirname(path.dirname(path.abspath(__file__))))
from tools.shared import get_emscripten_temp_dir

tmpdir = get_emscripten_temp_dir()
emdir = path.join(path.dirname(path.realpath(__file__)), "..")


def main():
  if len(sys.argv) == 2:
    simde_dir = sys.argv[1]
  elif len(sys.argv) == 1:
    simde_dir = None
  else:
    print('''USAGE:
./simde_update.py [SIMDE_REPO_DIRECTORY]''')
    sys.exit(1)

  if not simde_dir:
    os.mkdir(path.join(tmpdir, "simde"))
    os.system("git clone git@github.com:simd-everywhere/simde " + path.join(tmpdir, "simde"))