Example no. 1
    def test_emconfig(self):
        restore_and_set_up()

        fd, custom_config_filename = tempfile.mkstemp(
            prefix='.emscripten_config_')

        orig_config = open(CONFIG_FILE, 'r').read()

        # Copy the ~/.emscripten config to a custom location.
        with os.fdopen(fd, "w") as f:
            f.write(orig_config)

        # Break the original config file with a syntax error so that any attempt to load it fails.
        open(CONFIG_FILE, 'w').write('asdfasdfasdfasdf\n\'\'\'' + orig_config)

        temp_dir = tempfile.mkdtemp(prefix='emscripten_temp_')

        with chdir(temp_dir):
            run_process([EMCC, '--em-config', custom_config_filename] +
                        MINIMAL_HELLO_WORLD + ['-O2'])
            result = run_js('a.out.js')

        self.assertContained('hello, world!', result)

        # Clean up created temp files.
        os.remove(custom_config_filename)
        if Settings.WASM_BACKEND:
            os.remove(custom_config_filename + "_sanity_wasm")
        else:
            os.remove(custom_config_filename + "_sanity")
        shutil.rmtree(temp_dir)
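This test writes the duplicated config through the tempfile.mkstemp/os.fdopen pair, which avoids leaking the OS-level file descriptor that mkstemp returns. A minimal standalone sketch of that pattern (the prefix and contents here are illustrative only):

import os
import tempfile

# mkstemp returns an already-open OS-level file descriptor plus a path;
# wrapping the fd with os.fdopen lets a `with` block close it exactly once.
fd, path = tempfile.mkstemp(prefix='.emscripten_config_')
with os.fdopen(fd, 'w') as f:  # fdopen takes ownership of the fd
    f.write('# copied config contents\n')
os.remove(path)  # the caller is responsible for unlinking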
Example no. 2
def emscript(infile, settings, outfile):
  """Runs the emscripten LLVM-to-JS compiler.

  Args:
    infile: The path to the input LLVM assembly file.
    settings: JSON-formatted string of settings that overrides the values
      defined in src/settings.js.
    outfile: The file where the output is written.
  """
  settings_file = get_temp_file('.txt').name # Save settings to a file to work around v8 issue 1579
  s = open(settings_file, 'w')
  s.write(settings)
  s.close()
  compiler = path_from_root('src', 'compiler.js')
  shared.run_js(shared.COMPILER_ENGINE, compiler, [settings_file, infile], stdout=outfile, cwd=path_from_root('src'))
  outfile.close()
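The emscript() above expects settings as a JSON string and an already-open outfile, which it closes itself once the compiler has written to it. A hedged usage sketch (the paths and the settings override are hypothetical):

import json

settings = json.dumps({'ASM_JS': 1})      # illustrative override only
outfile = open('module.js', 'w')          # emscript() calls outfile.close()
emscript('module.ll', settings, outfile)  # 'module.ll' is a placeholder path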
Example no. 3
def process_funcs(args):
  i, ll, settings_file, compiler, forwarded_file, libraries = args
  funcs_file = temp_files.get('.func_%d.ll' % i).name
  open(funcs_file, 'w').write(ll)
  out = shared.run_js(compiler, compiler_engine, [settings_file, funcs_file, 'funcs', forwarded_file] + libraries, stdout=subprocess.PIPE, cwd=path_from_root('src'))
  shared.try_delete(funcs_file)
  return out.split('//FORWARDED_DATA:')
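process_funcs() returns the compiler's stdout split on the literal //FORWARDED_DATA: marker, so callers receive the generated JS followed by a JSON metadata blob. A self-contained sketch of that protocol, with a fabricated payload:

import json

# The compiler emits generated JS, then the marker, then JSON metadata.
out = 'function _f() {}\n//FORWARDED_DATA:{"Functions": {"blockAddresses": {}}}'
func_js, forwarded = out.split('//FORWARDED_DATA:')
meta = json.loads(forwarded)
print(func_js.strip())                      # -> function _f() {}
print(meta['Functions']['blockAddresses'])  # -> {}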
Example no. 4
  def test_emconfig(self):
    restore_and_set_up()

    fd, custom_config_filename = tempfile.mkstemp(prefix='.emscripten_config_')

    orig_config = open(CONFIG_FILE, 'r').read()

    # Copy the ~/.emscripten config to a custom location.
    with os.fdopen(fd, "w") as f:
      f.write(orig_config)

    # Break the original config file with a syntax error so that any attempt to load it fails.
    open(CONFIG_FILE, 'w').write('asdfasdfasdfasdf\n\'\'\'' + orig_config)

    temp_dir = tempfile.mkdtemp(prefix='emscripten_temp_')

    with chdir(temp_dir):
      self.do([PYTHON, EMCC, '--em-config', custom_config_filename] + MINIMAL_HELLO_WORLD + ['-O2'])
      result = run_js('a.out.js')

    self.assertContained('hello, world!', result)

    # Clean up created temp files.
    os.remove(custom_config_filename)
    if Settings.WASM_BACKEND:
      os.remove(custom_config_filename + "_sanity_wasm")
    else:
      os.remove(custom_config_filename + "_sanity")
    shutil.rmtree(temp_dir)
Example no. 5
def process_funcs(args):
  i, funcs, meta, settings_file, compiler, forwarded_file, libraries = args
  ll = ''.join(funcs) + '\n' + meta
  funcs_file = temp_files.get('.func_%d.ll' % i).name
  open(funcs_file, 'w').write(ll)
  out = shared.run_js(compiler, compiler_engine, [settings_file, funcs_file, 'funcs', forwarded_file] + libraries, stdout=subprocess.PIPE, cwd=path_from_root('src'))
  shared.try_delete(funcs_file)
  return out
Example no. 6
def emscript(infile, settings, outfile, libraries=[]):
    """Runs the emscripten LLVM-to-JS compiler.

  Args:
    infile: The path to the input LLVM assembly file.
    settings: JSON-formatted string of settings that overrides the values
      defined in src/settings.js.
    outfile: The file where the output is written.
  """
    settings_file = temp_files.get(".txt").name  # Save settings to a file to work around v8 issue 1579
    s = open(settings_file, "w")
    s.write(settings)
    s.close()
    compiler = path_from_root("src", "compiler.js")
    shared.run_js(
        compiler, shared.COMPILER_ENGINE, [settings_file, infile] + libraries, stdout=outfile, cwd=path_from_root("src")
    )
    outfile.close()
Example no. 7
def emscript(infile, settings, outfile):
    """Runs the emscripten LLVM-to-JS compiler.

  Args:
    infile: The path to the input LLVM assembly file.
    settings: JSON-formatted string of settings that overrides the values
      defined in src/settings.js.
    outfile: The file where the output is written.
  """
    settings_file = temp_files.get(
        '.txt').name  # Save settings to a file to work around v8 issue 1579
    s = open(settings_file, 'w')
    s.write(settings)
    s.close()
    compiler = path_from_root('src', 'compiler.js')
    shared.run_js(compiler,
                  shared.COMPILER_ENGINE, [settings_file, infile],
                  stdout=outfile,
                  cwd=path_from_root('src'))
    outfile.close()
Example no. 8
def process_funcs(args):
    i, ll, settings_file, compiler, forwarded_file, libraries = args
    funcs_file = temp_files.get('.func_%d.ll' % i).name
    open(funcs_file, 'w').write(ll)
    out = shared.run_js(compiler,
                        compiler_engine,
                        [settings_file, funcs_file, 'funcs', forwarded_file] +
                        libraries,
                        stdout=subprocess.PIPE,
                        cwd=path_from_root('src'))
    shared.try_delete(funcs_file)
    return out.split('//FORWARDED_DATA:')
Example no. 9
  def test_emcc_caching(self):
    INCLUDING_MESSAGE = 'including X'
    BUILDING_MESSAGE = 'building X for cache'
    ERASING_MESSAGE = 'clearing cache'

    EMCC_CACHE = Cache.dirname

    restore_and_set_up()

    Cache.erase()
    assert not os.path.exists(EMCC_CACHE)

    with env_modify({'EMCC_DEBUG': '1'}):
      # Building a file that *does* need something *should* trigger cache
      # generation, but only the first time
      for filename, libname in [('hello_libcxx.cpp', 'libcxx')]:
        for i in range(3):
          print(filename, libname, i)
          self.clear()
          output = self.do([EMCC, '-O' + str(i), '-s', '--llvm-lto', '0', path_from_root('tests', filename), '--save-bc', 'a.bc', '-s', 'DISABLE_EXCEPTION_CATCHING=0'])
          # print '\n\n\n', output
          assert INCLUDING_MESSAGE.replace('X', libname) in output
          if libname == 'libc':
            assert INCLUDING_MESSAGE.replace('X', 'libcxx') not in output # we don't need libcxx in this code
          else:
            assert INCLUDING_MESSAGE.replace('X', 'libc') in output # libcxx always forces inclusion of libc
          assert (BUILDING_MESSAGE.replace('X', libname) in output) == (i == 0), 'Must only build the first time'
          self.assertContained('hello, world!', run_js('a.out.js'))
          assert os.path.exists(EMCC_CACHE)
          full_libname = libname + '.bc' if libname != 'libcxx' else libname + '.a'
          assert os.path.exists(os.path.join(EMCC_CACHE, full_libname))

    try_delete(CANONICAL_TEMP_DIR)
    restore_and_set_up()

    def ensure_cache():
      self.do([PYTHON, EMCC, '-O2', path_from_root('tests', 'hello_world.c')])

    # Manual cache clearing
    ensure_cache()
    self.assertTrue(os.path.exists(EMCC_CACHE))
    output = self.do([PYTHON, EMCC, '--clear-cache'])
    self.assertIn(ERASING_MESSAGE, output)
    self.assertFalse(os.path.exists(EMCC_CACHE))
    self.assertIn(SANITY_MESSAGE, output)

    # Changing LLVM_ROOT, even without altering .emscripten, clears the cache
    ensure_cache()
    with env_modify({'LLVM': 'waka'}):
      self.assertTrue(os.path.exists(EMCC_CACHE))
      output = self.do([PYTHON, EMCC])
      self.assertIn(ERASING_MESSAGE, output)
      self.assertFalse(os.path.exists(EMCC_CACHE))
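The test relies on env_modify to set EMCC_DEBUG only for the duration of the with block. A minimal sketch of such a context manager, written here for illustration and not taken from the test suite's helpers:

import os
from contextlib import contextmanager

@contextmanager
def env_modify(updates):
    # Apply the given environment overrides, then restore the old values
    # (or remove keys that did not exist) when the block exits.
    old = {k: os.environ.get(k) for k in updates}
    os.environ.update(updates)
    try:
        yield
    finally:
        for k, v in old.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v

with env_modify({'EMCC_DEBUG': '1'}):
    assert os.environ['EMCC_DEBUG'] == '1'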
Example no. 10
def process_funcs(args):
    i, funcs, meta, settings_file, compiler, forwarded_file, libraries = args
    ll = ''.join(funcs) + '\n' + meta
    funcs_file = temp_files.get('.func_%d.ll' % i).name
    open(funcs_file, 'w').write(ll)
    out = shared.run_js(compiler,
                        compiler_engine,
                        [settings_file, funcs_file, 'funcs', forwarded_file] +
                        libraries,
                        stdout=subprocess.PIPE,
                        cwd=path_from_root('src'))
    shared.try_delete(funcs_file)
    return out
Example no. 11
    def test_emcc_caching(self):
        BUILDING_MESSAGE = 'generating system library: X'
        ERASING_MESSAGE = 'clearing cache'

        restore_and_set_up()
        self.erase_cache()

        # Building a file that *does* need something *should* trigger cache
        # generation, but only the first time
        libname = 'libc++'
        for i in range(3):
            print(i)
            self.clear()
            output = self.do([
                EMCC, '-O' + str(i), '-s', '--llvm-lto', '0',
                path_from_root('tests', 'hello_libcxx.cpp'), '--save-bc',
                'a.bc', '-s', 'DISABLE_EXCEPTION_CATCHING=0'
            ])
            print('\n\n\n', output)
            assert (BUILDING_MESSAGE.replace('X', libname)
                    in output) == (i == 0), 'Must only build the first time'
            self.assertContained('hello, world!', run_js('a.out.js'))
            self.assertExists(Cache.dirname)
            full_libname = libname + '.bc' if libname != 'libc++' else libname + '.a'
            self.assertExists(os.path.join(Cache.dirname, full_libname))

        restore_and_set_up()

        # Manual cache clearing
        self.ensure_cache()
        self.assertTrue(os.path.exists(Cache.dirname))
        self.assertTrue(os.path.exists(Cache.root_dirname))
        output = self.do([PYTHON, EMCC, '--clear-cache'])
        self.assertIn(ERASING_MESSAGE, output)
        self.assertFalse(os.path.exists(Cache.dirname))
        self.assertFalse(os.path.exists(Cache.root_dirname))
        self.assertIn(SANITY_MESSAGE, output)

        # Changing LLVM_ROOT, even without altering .emscripten, clears the cache
        self.ensure_cache()
        make_fake_clang(path_from_root('tests', 'fake', 'bin', 'clang'),
                        expected_llvm_version())
        make_fake_llc(path_from_root('tests', 'fake', 'bin', 'llc'),
                      'js - JavaScript (asm.js, emscripten)')
        with env_modify({'LLVM': path_from_root('tests', 'fake', 'bin')}):
            self.assertTrue(os.path.exists(Cache.dirname))
            output = self.do([PYTHON, EMCC])
            self.assertIn(ERASING_MESSAGE, output)
            self.assertFalse(os.path.exists(Cache.dirname))
Example no. 12
def test():
    self.assertContained('hello, world!', run_js('a.out.js'))
Example no. 13
def emscript(infile, settings, outfile, libraries=[]):
  """Runs the emscripten LLVM-to-JS compiler. We parallelize as much as possible

  Args:
    infile: The path to the input LLVM assembly file.
    settings: JSON-formatted settings that override the values
      defined in src/settings.js.
    outfile: The file where the output is written.
  """

  compiler = path_from_root('src', 'compiler.js')

  # Parallelization: We run 3 phases:
  #   1 aka 'pre'  : Process types and metadata and so forth, and generate the preamble.
  #   2 aka 'funcs': Process functions. We can parallelize this, working on each function independently.
  #   3 aka 'post' : Process globals, generate postamble and finishing touches.

  if DEBUG: print >> sys.stderr, 'emscript: ll=>js'

  if jcache: shared.JCache.ensure()

  # Pre-scan ll and alter settings as necessary
  if DEBUG: t = time.time()
  ll = open(infile).read()
  scan(ll, settings)
  total_ll_size = len(ll)
  ll = None # allow collection
  if DEBUG: print >> sys.stderr, '  emscript: scan took %s seconds' % (time.time() - t)

  # Split input into the relevant parts for each phase
  pre = []
  funcs = [] # split up functions here, for parallelism later
  func_idents = []
  meta = [] # needed by each function XXX

  if DEBUG: t = time.time()
  in_func = False
  ll_lines = open(infile).readlines()
  for line in ll_lines:
    if in_func:
      funcs[-1][1].append(line)
      if line.startswith('}'):
        in_func = False
        funcs[-1] = (funcs[-1][0], ''.join(funcs[-1][1]))
        pre.append(line) # pre needs it too, so we know about all implemented functions
    else:
      if line.startswith(';'): continue
      if line.startswith('define '):
        in_func = True
        funcs.append((line, [line])) # use the entire line as the identifier
        pre.append(line) # pre needs it too, so we know about all implemented functions
      elif line.find(' = type { ') > 0:
        pre.append(line) # type
      elif line.startswith('!'):
        if line.startswith('!llvm.module'): continue # we can ignore that
        meta.append(line) # metadata
      else:
        pre.append(line) # pre needs it so we know about globals in pre and funcs. So emit globals there
  ll_lines = None
  meta = ''.join(meta)
  if DEBUG and len(meta) > 1024*1024: print >> sys.stderr, 'emscript warning: large amounts of metadata, will slow things down'
  if DEBUG: print >> sys.stderr, '  emscript: split took %s seconds' % (time.time() - t)

  #if DEBUG:
  #  print >> sys.stderr, '========= pre ================\n'
  #  print >> sys.stderr, ''.join(pre)
  #  print >> sys.stderr, '========== funcs ===============\n'
  #  for func in funcs:
  #    print >> sys.stderr, '\n// ===\n\n', ''.join(func)
  #  print >> sys.stderr, '=========================\n'

  # Save settings to a file to work around v8 issue 1579
  settings_file = temp_files.get('.txt').name
  def save_settings():
    global settings_text
    settings_text = json.dumps(settings)
    s = open(settings_file, 'w')
    s.write(settings_text)
    s.close()
  save_settings()

  # Phase 1 - pre
  if DEBUG: t = time.time()
  pre_file = temp_files.get('.pre.ll').name
  pre_input = ''.join(pre) + '\n' + meta
  out = None
  if jcache:
    keys = [pre_input, settings_text, ','.join(libraries)]
    shortkey = shared.JCache.get_shortkey(keys)
    out = shared.JCache.get(shortkey, keys)
    if out and DEBUG: print >> sys.stderr, '  loading pre from jcache'
  if not out:
    open(pre_file, 'w').write(pre_input)
    out = shared.run_js(compiler, shared.COMPILER_ENGINE, [settings_file, pre_file, 'pre'] + libraries, stdout=subprocess.PIPE, cwd=path_from_root('src'))
    if jcache:
      if DEBUG: print >> sys.stderr, '  saving pre to jcache'
      shared.JCache.set(shortkey, keys, out)
  pre, forwarded_data = out.split('//FORWARDED_DATA:')
  forwarded_file = temp_files.get('.json').name
  open(forwarded_file, 'w').write(forwarded_data)
  if DEBUG: print >> sys.stderr, '  emscript: phase 1 took %s seconds' % (time.time() - t)

  # Phase 2 - func

  cores = multiprocessing.cpu_count()
  assert cores >= 1
  if cores > 1:
    intended_num_chunks = cores * NUM_CHUNKS_PER_CORE
    chunk_size = max(MIN_CHUNK_SIZE, total_ll_size / intended_num_chunks)
    chunk_size += 3*len(meta) # keep ratio of lots of function code to meta (expensive to process, and done in each parallel task)
    chunk_size = min(MAX_CHUNK_SIZE, chunk_size)
  else:
    chunk_size = MAX_CHUNK_SIZE # if 1 core, just use the max chunk size

  if DEBUG: t = time.time()
  forwarded_json = json.loads(forwarded_data)
  indexed_functions = set()
  if settings.get('ASM_JS'):
    settings['EXPORTED_FUNCTIONS'] = forwarded_json['EXPORTED_FUNCTIONS']
    save_settings()

  chunks = shared.JCache.chunkify(funcs, chunk_size, 'emscript_files' if jcache else None)

  if jcache:
    # load chunks from cache where we can # TODO: ignore small chunks
    cached_outputs = []
    def load_from_cache(chunk):
      keys = [settings_text, forwarded_data, chunk]
      shortkey = shared.JCache.get_shortkey(keys) # TODO: share shortkeys with later code
      out = shared.JCache.get(shortkey, keys) # this is relatively expensive (pickling?)
      if out:
        cached_outputs.append(out)
        return False
      return True
    chunks = filter(load_from_cache, chunks)
    if len(cached_outputs) > 0:
      if out and DEBUG: print >> sys.stderr, '  loading %d funcchunks from jcache' % len(cached_outputs)
    else:
      cached_outputs = []

  # TODO: minimize size of forwarded data from funcs to what we actually need

  if cores == 1 and total_ll_size < MAX_CHUNK_SIZE: assert len(chunks) == 1, 'no point in splitting up without multiple cores'

  if len(chunks) > 0:
    if DEBUG: print >> sys.stderr, '  emscript: phase 2 working on %d chunks %s (intended chunk size: %.2f MB, meta: %.2f MB, forwarded: %.2f MB, total: %.2f MB)' % (len(chunks), ('using %d cores' % cores) if len(chunks) > 1 else '', chunk_size/(1024*1024.), len(meta)/(1024*1024.), len(forwarded_data)/(1024*1024.), total_ll_size/(1024*1024.))

    commands = [(i, chunks[i], meta, settings_file, compiler, forwarded_file, libraries) for i in range(len(chunks))]

    if len(chunks) > 1:
      pool = multiprocessing.Pool(processes=cores)
      outputs = pool.map(process_funcs, commands, chunksize=1)
    elif len(chunks) == 1:
      outputs = [process_funcs(commands[0])]
  else:
    outputs = []

  if jcache:
    # save chunks to cache
    for i in range(len(chunks)):
      chunk = chunks[i]
      keys = [settings_text, forwarded_data, chunk]
      shortkey = shared.JCache.get_shortkey(keys)
      shared.JCache.set(shortkey, keys, outputs[i])
    if out and DEBUG and len(chunks) > 0: print >> sys.stderr, '  saving %d funcchunks to jcache' % len(chunks)

  if jcache: outputs += cached_outputs # TODO: preserve order

  outputs = [output.split('//FORWARDED_DATA:') for output in outputs]

  if DEBUG: print >> sys.stderr, '  emscript: phase 2 took %s seconds' % (time.time() - t)
  if DEBUG: t = time.time()

  # merge forwarded data
  if settings.get('ASM_JS'):
    all_exported_functions = set(settings['EXPORTED_FUNCTIONS']) # both asm.js and otherwise
    for additional_export in ['_malloc', '_free']: # additional functions to export from asm, if they are implemented
      all_exported_functions.add(additional_export)
    exported_implemented_functions = set()
  for func_js, curr_forwarded_data in outputs:
    curr_forwarded_json = json.loads(curr_forwarded_data)
    forwarded_json['Types']['preciseI64MathUsed'] = forwarded_json['Types']['preciseI64MathUsed'] or curr_forwarded_json['Types']['preciseI64MathUsed']
    for key, value in curr_forwarded_json['Functions']['blockAddresses'].iteritems():
      forwarded_json['Functions']['blockAddresses'][key] = value
    for key in curr_forwarded_json['Functions']['indexedFunctions'].iterkeys():
      indexed_functions.add(key)
    if settings.get('ASM_JS'):
      for key in curr_forwarded_json['Functions']['implementedFunctions'].iterkeys():
        if key in all_exported_functions: exported_implemented_functions.add(key)
    for key, value in curr_forwarded_json['Functions']['unimplementedFunctions'].iteritems():
      forwarded_json['Functions']['unimplementedFunctions'][key] = value

  if settings.get('ASM_JS'):
    parts = pre.split('// ASM_LIBRARY FUNCTIONS\n')
    if len(parts) > 1:
      pre = parts[0]
      outputs.append([parts[1]])
  funcs_js = ''.join([output[0] for output in outputs])

  outputs = None
  if DEBUG: print >> sys.stderr, '  emscript: phase 2b took %s seconds' % (time.time() - t)
  if DEBUG: t = time.time()

  # calculations on merged forwarded data
  forwarded_json['Functions']['indexedFunctions'] = {}
  i = 2
  for indexed in indexed_functions:
    #print >> sys.stderr, 'indaxx', indexed, i
    forwarded_json['Functions']['indexedFunctions'][indexed] = i # make sure not to modify this python object later - we use it in indexize
    i += 2
  forwarded_json['Functions']['nextIndex'] = i

  indexing = forwarded_json['Functions']['indexedFunctions']
  def indexize(js):
    return re.sub(r"'{{ FI_([\w\d_$]+) }}'", lambda m: str(indexing.get(m.groups(0)[0]) or 0), js)

  blockaddrs = forwarded_json['Functions']['blockAddresses']
  def blockaddrsize(js):
    return re.sub(r'{{{ BA_([\w\d_$]+)\|([\w\d_$]+) }}}', lambda m: str(blockaddrs[m.groups(0)[0]][m.groups(0)[1]]), js)

  #if DEBUG: outfile.write('// pre\n')
  outfile.write(blockaddrsize(indexize(pre)))
  pre = None

  #if DEBUG: outfile.write('// funcs\n')

  # forward
  forwarded_data = json.dumps(forwarded_json)
  forwarded_file = temp_files.get('.2.json').name
  open(forwarded_file, 'w').write(indexize(forwarded_data))
  if DEBUG: print >> sys.stderr, '  emscript: phase 2c took %s seconds' % (time.time() - t)

  # Phase 3 - post
  if DEBUG: t = time.time()
  post_file = temp_files.get('.post.ll').name
  open(post_file, 'w').write('\n') # no input, just processing of forwarded data
  out = shared.run_js(compiler, shared.COMPILER_ENGINE, [settings_file, post_file, 'post', forwarded_file] + libraries, stdout=subprocess.PIPE, cwd=path_from_root('src'))
  post, last_forwarded_data = out.split('//FORWARDED_DATA:')
  last_forwarded_json = json.loads(last_forwarded_data)

  if settings.get('ASM_JS'):
    simple = os.environ.get('EMCC_SIMPLE_ASM')
    class Counter:
      i = 0
    pre_tables = last_forwarded_json['Functions']['tables']['pre']
    del last_forwarded_json['Functions']['tables']['pre']

    # Find function table calls without function tables generated for them
    for use in set(re.findall(r'{{{ FTM_[\w\d_$]+ }}}', funcs_js)):
      sig = use[8:len(use)-4]
      if sig not in last_forwarded_json['Functions']['tables']:
        if DEBUG: print >> sys.stderr, 'add empty function table', sig
        last_forwarded_json['Functions']['tables'][sig] = 'var FUNCTION_TABLE_' + sig + ' = [0,0];\n'

    def make_table(sig, raw):
      i = Counter.i
      Counter.i += 1
      bad = 'b' + str(i)
      params = ','.join(['p%d' % p for p in range(len(sig)-1)])
      coercions = ';'.join(['p%d = %sp%d%s' % (p, '+' if sig[p+1] != 'i' else '', p, '' if sig[p+1] != 'i' else '|0') for p in range(len(sig)-1)]) + ';'
      ret = '' if sig[0] == 'v' else ('return %s0' % ('+' if sig[0] != 'i' else ''))
      return ('function %s(%s) { %s abort(%d); %s };' % (bad, params, coercions, i, ret), raw.replace('[0,', '[' + bad + ',').replace(',0,', ',' + bad + ',').replace(',0,', ',' + bad + ',').replace(',0]', ',' + bad + ']').replace(',0]', ',' + bad + ']'))
    infos = [make_table(sig, raw) for sig, raw in last_forwarded_json['Functions']['tables'].iteritems()]
    function_tables_defs = '\n'.join([info[0] for info in infos] + [info[1] for info in infos])

    maths = ['Math.' + func for func in ['floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'atan2', 'exp', 'log', 'ceil']]
    if settings['USE_MATH_IMUL']:
      maths += ['Math.imul']
    fundamentals = ['Math', 'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array']
    math_envs = ['Runtime.bitshift64', 'Math.min'] # TODO: move min to maths
    asm_setup = '\n'.join(['var %s = %s;' % (f.replace('.', '_'), f) for f in math_envs])
    basic_funcs = ['abort', 'assert', 'asmPrintInt', 'asmPrintFloat'] + [m.replace('.', '_') for m in math_envs]
    basic_vars = ['STACKTOP', 'STACK_MAX', 'tempDoublePtr', 'ABORT']
    basic_float_vars = ['NaN', 'Infinity']
    if forwarded_json['Types']['preciseI64MathUsed']:
      basic_funcs += ['i64Math_' + op for op in ['add', 'subtract', 'multiply', 'divide', 'modulo']]
      asm_setup += '''
var i64Math_add = function(a, b, c, d) { i64Math.add(a, b, c, d) };
var i64Math_subtract = function(a, b, c, d) { i64Math.subtract(a, b, c, d) };
var i64Math_multiply = function(a, b, c, d) { i64Math.multiply(a, b, c, d) };
var i64Math_divide = function(a, b, c, d, e) { i64Math.divide(a, b, c, d, e) };
var i64Math_modulo = function(a, b, c, d, e) { i64Math.modulo(a, b, c, d, e) };
'''
    asm_runtime_funcs = ['stackAlloc', 'stackSave', 'stackRestore', 'setThrew'] + ['setTempRet%d' % i for i in range(10)]
    # function tables
    def asm_coerce(value, sig):
      if sig == 'v': return value
      return ('+' if sig != 'i' else '') + value + ('|0' if sig == 'i' else '')
        
    function_tables = ['dynCall_' + table for table in last_forwarded_json['Functions']['tables']]
    function_tables_impls = []
    for sig in last_forwarded_json['Functions']['tables'].iterkeys():
      args = ','.join(['a' + str(i) for i in range(1, len(sig))])
      arg_coercions = ' '.join(['a' + str(i) + '=' + asm_coerce('a' + str(i), sig[i]) + ';' for i in range(1, len(sig))])
      coerced_args = ','.join([asm_coerce('a' + str(i), sig[i]) for i in range(1, len(sig))])
      ret = ('return ' if sig[0] != 'v' else '') + asm_coerce('FUNCTION_TABLE_%s[index&{{{ FTM_%s }}}](%s)' % (sig, sig, coerced_args), sig[0])
      function_tables_impls.append('''
  function dynCall_%s(index%s%s) {
    index = index|0;
    %s
    %s;
  }
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))

    # calculate exports
    exported_implemented_functions = list(exported_implemented_functions)
    exports = []
    if not simple:
      for export in exported_implemented_functions + asm_runtime_funcs + function_tables:
        exports.append("%s: %s" % (export, export))
      exports = '{ ' + ', '.join(exports) + ' }'
    else:
      exports = '_main'
    # calculate globals
    try:
      del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
    except:
      pass
    # If no named globals, only need externals
    global_vars = map(lambda g: g['name'], filter(lambda g: settings['NAMED_GLOBALS'] or g.get('external') or g.get('unIndexable'), forwarded_json['Variables']['globals'].values()))
    global_funcs = ['_' + x for x in forwarded_json['Functions']['libraryFunctions'].keys()]
    def math_fix(g):
      return g if not g.startswith('Math_') else g.split('_')[1];
    asm_global_funcs = ''.join(['  var ' + g.replace('.', '_') + '=global.' + g + ';\n' for g in maths]) + \
                       ''.join(['  var ' + g + '=env.' + math_fix(g) + ';\n' for g in basic_funcs + global_funcs])
    asm_global_vars = ''.join(['  var ' + g + '=env.' + g + '|0;\n' for g in basic_vars + global_vars]) + \
                      ''.join(['  var ' + g + '=+env.' + g + ';\n' for g in basic_float_vars])
    # sent data
    the_global = '{ ' + ', '.join([math_fix(s) + ': ' + s for s in fundamentals]) + ' }'
    sending = '{ ' + ', '.join([math_fix(s) + ': ' + s for s in basic_funcs + global_funcs + basic_vars + basic_float_vars + global_vars]) + ' }'
    # received
    if not simple:
      receiving = ';\n'.join(['var ' + s + ' = Module["' + s + '"] = asm.' + s for s in exported_implemented_functions + function_tables])
    else:
      receiving = 'var _main = Module["_main"] = asm;'
    # finalize
    funcs_js = '''
%s
function asmPrintInt(x) {
  Module.print('int ' + x);// + ' ' + new Error().stack);
}
function asmPrintFloat(x) {
  Module.print('float ' + x);// + ' ' + new Error().stack);
}
var asm = (function(global, env, buffer) {
  'use asm';
  var HEAP8 = new global.Int8Array(buffer);
  var HEAP16 = new global.Int16Array(buffer);
  var HEAP32 = new global.Int32Array(buffer);
  var HEAPU8 = new global.Uint8Array(buffer);
  var HEAPU16 = new global.Uint16Array(buffer);
  var HEAPU32 = new global.Uint32Array(buffer);
  var HEAPF32 = new global.Float32Array(buffer);
  var HEAPF64 = new global.Float64Array(buffer);
''' % (asm_setup,) + '\n' + asm_global_vars + '''
  var __THREW__ = 0;
  var undef = 0;
  var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
''' + ''.join(['''
  var tempRet%d = 0;''' % i for i in range(10)]) + '\n' + asm_global_funcs + '''
  function stackAlloc(size) {
    size = size|0;
    var ret = 0;
    ret = STACKTOP;
    STACKTOP = (STACKTOP + size)|0;
    STACKTOP = ((STACKTOP + 3)>>2)<<2;
    return ret|0;
  }
  function stackSave() {
    return STACKTOP|0;
  }
  function stackRestore(top) {
    top = top|0;
    STACKTOP = top;
  }
  function setThrew(threw) {
    threw = threw|0;
    __THREW__ = threw;
  }
''' + ''.join(['''
  function setTempRet%d(value) {
    value = value|0;
    tempRet%d = value;
  }
''' % (i, i) for i in range(10)]) + funcs_js.replace('\n', '\n  ') + '''

  %s

  return %s;
})(%s, %s, buffer);
%s;
Runtime.stackAlloc = function(size) { return asm.stackAlloc(size) };
Runtime.stackSave = function() { return asm.stackSave() };
Runtime.stackRestore = function(top) { asm.stackRestore(top) };
''' % (pre_tables + '\n'.join(function_tables_impls) + '\n' + function_tables_defs.replace('\n', '\n  '), exports, the_global, sending, receiving)

    # Set function table masks
    def function_table_maskize(js):
      masks = {}
      default = None
      for sig, table in last_forwarded_json['Functions']['tables'].iteritems():
        masks[sig] = str(table.count(','))
        default = sig
      def fix(m):
        sig = m.groups(0)[0]
        return masks[sig]
      return re.sub(r'{{{ FTM_([\w\d_$]+) }}}', lambda m: fix(m), js) # masks[m.groups(0)[0]]
    funcs_js = function_table_maskize(funcs_js)
  else:
    function_tables_defs = '\n'.join([table for table in last_forwarded_json['Functions']['tables'].itervalues()])
    outfile.write(function_tables_defs)
  outfile.write(blockaddrsize(indexize(funcs_js)))
  funcs_js = None

  outfile.write(indexize(post))
  if DEBUG: print >> sys.stderr, '  emscript: phase 3 took %s seconds' % (time.time() - t)

  outfile.close()
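Phase 2 above sizes its chunks from the core count and metadata length, then delegates the actual split to shared.JCache.chunkify. A standalone sketch of the greedy, size-bounded grouping such a chunkifier performs (compare the inline loop in Example no. 21; this is not JCache's actual implementation):

def chunkify(funcs, chunk_size):
    # Pack function bodies, in order, into chunks of roughly chunk_size
    # characters; a single oversized function still gets its own chunk.
    chunks, curr = [], ''
    for func in funcs:
        if curr and len(curr) + len(func) >= chunk_size:
            chunks.append(curr)
            curr = ''
        curr += func
    if curr:
        chunks.append(curr)
    return chunks

print(chunkify(['aaaa', 'bb', 'cccc', 'd'], 6))  # -> ['aaaa', 'bb', 'ccccd']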
Example no. 14
def test():
  self.assertContained('hello, world!', run_js('a.out.js'))
Example no. 15
def inspect_code(headers, cpp_opts, structs, defines):
    show('Generating C code...')

    code = ['#include <stdio.h>', '#include <stddef.h>']
    # Include all the needed headers.
    for path in headers:
        code.append('#include "' + path + '"')

    code.append('int main() {')
    c_descent('structs', code)
    for name, struct in structs.items():
        gen_inspect_code([name], struct, code)

    c_ascent(code)
    c_descent('defines', code)
    for name, type_ in defines.items():
        # Add the necessary python type, if missing.
        if '%' not in type_:
            if type_[-1] in ('d', 'i', 'u'):
                # integer
                type_ = 'i%' + type_
            elif type_[-1] in ('f', 'F', 'e', 'E', 'g', 'G'):
                # float
                type_ = 'f%' + type_
            elif type_[-1] in ('x', 'X', 'a', 'A', 'c', 's'):
                # hexadecimal or string
                type_ = 's%' + type_

        c_set(name, type_, name, code)

    code.append('return 0;')
    code.append('}')

    # Write the source code to a temporary file.
    src_file = tempfile.mkstemp('.c')
    js_file = tempfile.mkstemp('.js')

    os.write(src_file[0], '\n'.join(code))

    # Close all unneeded FDs.
    os.close(src_file[0])
    os.close(js_file[0])

    # Remove dangerous env modifications
    safe_env = os.environ.copy()
    for opt in ['EMCC_FORCE_STDLIBS', 'EMCC_ONLY_FORCED_STDLIBS']:
        if opt in safe_env:
            del safe_env[opt]

    # Use binaryen, if necessary
    binaryen = os.environ.get('EMCC_WASM_BACKEND_BINARYEN')
    if binaryen:
        cpp_opts += ['-s', 'BINARYEN=1']

    info = []

    try:
        try:
            # Compile the program.
            show('Compiling generated code...')
            subprocess.check_call(
                [shared.PYTHON, shared.EMCC] + cpp_opts + [
                    '-o', js_file[1], src_file[1], '-s',
                    'BOOTSTRAPPING_STRUCT_INFO=1', '-s',
                    'WARN_ON_UNDEFINED_SYMBOLS=0', '-Oz', '--js-opts', '0',
                    '--memory-init-file', '0'
                ],
                env=safe_env
            )  # -Oz optimizes enough to avoid warnings on code size/num locals
        except:
            sys.stderr.write('FAIL: Compilation failed!\n')
            sys.exit(1)

        # Run the compiled program.
        show('Calling generated program...')
        try:
            info = shared.run_js(js_file[1]).splitlines()
        except subprocess.CalledProcessError:
            sys.stderr.write('FAIL: Running the generated program failed!\n')
            sys.exit(1)

    finally:
        # Remove all temporary files.
        os.unlink(src_file[1])

        if os.path.exists(js_file[1]):
            os.unlink(js_file[1])

    # Parse the output of the program into a dict.
    return parse_c_output(info)
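The defines loop above normalizes each bare printf-style suffix by prefixing the Python-side tag (i%, f%, or s%) that the output parser will read it under. That classification in isolation:

def tag_define_type(type_):
    # Mirror of the branch above: leave already-tagged formats alone and
    # prefix the rest according to their final printf conversion character.
    if '%' in type_:
        return type_
    if type_[-1] in ('d', 'i', 'u'):
        return 'i%' + type_   # integer
    if type_[-1] in ('f', 'F', 'e', 'E', 'g', 'G'):
        return 'f%' + type_   # float
    if type_[-1] in ('x', 'X', 'a', 'A', 'c', 's'):
        return 's%' + type_   # hexadecimal or string
    return type_

assert tag_define_type('d') == 'i%d'
assert tag_define_type('g') == 'f%g'
assert tag_define_type('s%s') == 's%s'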
Example no. 16
def emscript(infile, settings, outfile, libraries=[]):
    """Runs the emscripten LLVM-to-JS compiler. We parallelize as much as possible

  Args:
    infile: The path to the input LLVM assembly file.
    settings: JSON-formatted settings that override the values
      defined in src/settings.js.
    outfile: The file where the output is written.
  """

    compiler = path_from_root('src', 'compiler.js')

    # Parallelization: We run 3 phases:
    #   1 aka 'pre'  : Process types and metadata and so forth, and generate the preamble.
    #   2 aka 'funcs': Process functions. We can parallelize this, working on each function independently.
    #   3 aka 'post' : Process globals, generate postamble and finishing touches.

    if DEBUG: print >> sys.stderr, 'emscript: ll=>js'

    if jcache: shared.JCache.ensure()

    # Pre-scan ll and alter settings as necessary
    if DEBUG: t = time.time()
    ll = open(infile).read()
    scan(ll, settings)
    total_ll_size = len(ll)
    ll = None  # allow collection
    if DEBUG:
        print >> sys.stderr, '  emscript: scan took %s seconds' % (
            time.time() - t)

    # Split input into the relevant parts for each phase
    pre = []
    funcs = []  # split up functions here, for parallelism later
    func_idents = []
    meta = []  # needed by each function XXX

    if DEBUG: t = time.time()
    in_func = False
    ll_lines = open(infile).readlines()
    for line in ll_lines:
        if in_func:
            funcs[-1][1].append(line)
            if line.startswith('}'):
                in_func = False
                funcs[-1] = (funcs[-1][0], ''.join(funcs[-1][1]))
                pre.append(
                    line
                )  # pre needs it too, so we know about all implemented functions
        else:
            if line.startswith(';'): continue
            if line.startswith('define '):
                in_func = True
                funcs.append(
                    (line, [line]))  # use the entire line as the identifier
                pre.append(
                    line
                )  # pre needs it too, so we know about all implemented functions
            elif line.find(' = type { ') > 0:
                pre.append(line)  # type
            elif line.startswith('!'):
                meta.append(line)  # metadata
            else:
                pre.append(
                    line
                )  # pre needs it so we know about globals in pre and funcs. So emit globals there
    ll_lines = None
    meta = ''.join(meta)
    if DEBUG and len(meta) > 1024 * 1024:
        print >> sys.stderr, 'emscript warning: large amounts of metadata, will slow things down'
    if DEBUG:
        print >> sys.stderr, '  emscript: split took %s seconds' % (
            time.time() - t)

    #if DEBUG:
    #  print >> sys.stderr, '========= pre ================\n'
    #  print >> sys.stderr, ''.join(pre)
    #  print >> sys.stderr, '========== funcs ===============\n'
    #  for func in funcs:
    #    print >> sys.stderr, '\n// ===\n\n', ''.join(func)
    #  print >> sys.stderr, '=========================\n'

    # Save settings to a file to work around v8 issue 1579
    settings_file = temp_files.get('.txt').name
    settings_text = json.dumps(settings)
    s = open(settings_file, 'w')
    s.write(settings_text)
    s.close()

    # Phase 1 - pre
    if DEBUG: t = time.time()
    pre_file = temp_files.get('.pre.ll').name
    pre_input = ''.join(pre) + '\n' + meta
    out = None
    if jcache:
        keys = [pre_input, settings_text, ','.join(libraries)]
        shortkey = shared.JCache.get_shortkey(keys)
        out = shared.JCache.get(shortkey, keys)
        if out and DEBUG: print >> sys.stderr, '  loading pre from jcache'
    if not out:
        open(pre_file, 'w').write(pre_input)
        out = shared.run_js(compiler,
                            shared.COMPILER_ENGINE,
                            [settings_file, pre_file, 'pre'] + libraries,
                            stdout=subprocess.PIPE,
                            cwd=path_from_root('src'))
        if jcache:
            if DEBUG: print >> sys.stderr, '  saving pre to jcache'
            shared.JCache.set(shortkey, keys, out)
    pre, forwarded_data = out.split('//FORWARDED_DATA:')
    forwarded_file = temp_files.get('.json').name
    open(forwarded_file, 'w').write(forwarded_data)
    if DEBUG:
        print >> sys.stderr, '  emscript: phase 1 took %s seconds' % (
            time.time() - t)

    # Phase 2 - func

    cores = multiprocessing.cpu_count()
    assert cores >= 1
    if cores > 1:
        intended_num_chunks = cores * NUM_CHUNKS_PER_CORE
        chunk_size = max(MIN_CHUNK_SIZE, total_ll_size / intended_num_chunks)
        chunk_size += 3 * len(
            meta
        )  # keep ratio of lots of function code to meta (expensive to process, and done in each parallel task)
        chunk_size = min(MAX_CHUNK_SIZE, chunk_size)
    else:
        chunk_size = MAX_CHUNK_SIZE  # if 1 core, just use the max chunk size

    if DEBUG: t = time.time()
    forwarded_json = json.loads(forwarded_data)
    indexed_functions = set()

    chunks = shared.JCache.chunkify(funcs, chunk_size,
                                    'emscript_files' if jcache else None)

    if jcache:
        # load chunks from cache where we can # TODO: ignore small chunks
        cached_outputs = []

        def load_from_cache(chunk):
            keys = [settings_text, forwarded_data, chunk]
            shortkey = shared.JCache.get_shortkey(
                keys)  # TODO: share shortkeys with later code
            out = shared.JCache.get(
                shortkey, keys)  # this is relatively expensive (pickling?)
            if out:
                cached_outputs.append(out)
                return False
            return True

        chunks = filter(load_from_cache, chunks)
        if len(cached_outputs) > 0:
            if out and DEBUG:
                print >> sys.stderr, '  loading %d funcchunks from jcache' % len(
                    cached_outputs)
        else:
            cached_outputs = []

    # TODO: minimize size of forwarded data from funcs to what we actually need

    if cores == 1 and total_ll_size < MAX_CHUNK_SIZE:
        assert len(
            chunks) == 1, 'no point in splitting up without multiple cores'

    if len(chunks) > 0:
        if DEBUG:
            print >> sys.stderr, '  emscript: phase 2 working on %d chunks %s (intended chunk size: %.2f MB, meta: %.2f MB, forwarded: %.2f MB, total: %.2f MB)' % (
                len(chunks),
                ('using %d cores' % cores) if len(chunks) > 1 else '',
                chunk_size / (1024 * 1024.), len(meta) /
                (1024 * 1024.), len(forwarded_data) /
                (1024 * 1024.), total_ll_size / (1024 * 1024.))

        commands = [(i, chunks[i], meta, settings_file, compiler,
                     forwarded_file, libraries) for i in range(len(chunks))]

        if len(chunks) > 1:
            pool = multiprocessing.Pool(processes=cores)
            outputs = pool.map(process_funcs, commands, chunksize=1)
        elif len(chunks) == 1:
            outputs = [process_funcs(commands[0])]
    else:
        outputs = []

    if jcache:
        # save chunks to cache
        for i in range(len(chunks)):
            chunk = chunks[i]
            keys = [settings_text, forwarded_data, chunk]
            shortkey = shared.JCache.get_shortkey(keys)
            shared.JCache.set(shortkey, keys, outputs[i])
        if out and DEBUG and len(chunks) > 0:
            print >> sys.stderr, '  saving %d funcchunks to jcache' % len(
                chunks)

    if jcache: outputs += cached_outputs  # TODO: preserve order

    outputs = [output.split('//FORWARDED_DATA:') for output in outputs]

    if DEBUG:
        print >> sys.stderr, '  emscript: phase 2 took %s seconds' % (
            time.time() - t)
    if DEBUG: t = time.time()

    funcs_js = ''.join([output[0] for output in outputs])

    for func_js, curr_forwarded_data in outputs:
        # merge forwarded data
        curr_forwarded_json = json.loads(curr_forwarded_data)
        forwarded_json['Types']['preciseI64MathUsed'] = forwarded_json[
            'Types']['preciseI64MathUsed'] or curr_forwarded_json['Types'][
                'preciseI64MathUsed']
        for key, value in curr_forwarded_json['Functions'][
                'blockAddresses'].iteritems():
            forwarded_json['Functions']['blockAddresses'][key] = value
        for key in curr_forwarded_json['Functions'][
                'indexedFunctions'].iterkeys():
            indexed_functions.add(key)
    outputs = None
    if DEBUG:
        print >> sys.stderr, '  emscript: phase 2b took %s seconds' % (
            time.time() - t)
    if DEBUG: t = time.time()

    # calculations on merged forwarded data
    forwarded_json['Functions']['indexedFunctions'] = {}
    i = 2
    for indexed in indexed_functions:
        forwarded_json['Functions']['indexedFunctions'][
            indexed] = i  # make sure not to modify this python object later - we use it in indexize
        i += 2
    forwarded_json['Functions']['nextIndex'] = i

    indexing = forwarded_json['Functions']['indexedFunctions']

    def indexize(js):
        return re.sub(r'{{{ FI_([\w\d_$]+) }}}',
                      lambda m: str(indexing[m.groups(0)[0]]), js)

    blockaddrs = forwarded_json['Functions']['blockAddresses']

    def blockaddrsize(js):
        return re.sub(
            r'{{{ BA_([\w\d_$]+)\|([\w\d_$]+) }}}',
            lambda m: str(blockaddrs[m.groups(0)[0]][m.groups(0)[1]]), js)

    #if DEBUG: outfile.write('// pre\n')
    outfile.write(blockaddrsize(indexize(pre)))
    pre = None

    #if DEBUG: outfile.write('// funcs\n')
    outfile.write(blockaddrsize(indexize(funcs_js)))
    funcs_js = None

    # forward
    forwarded_data = json.dumps(forwarded_json)
    forwarded_file = temp_files.get('.2.json').name
    open(forwarded_file, 'w').write(indexize(forwarded_data))
    if DEBUG:
        print >> sys.stderr, '  emscript: phase 2c took %s seconds' % (
            time.time() - t)

    # Phase 3 - post
    if DEBUG: t = time.time()
    post_file = temp_files.get('.post.ll').name
    open(post_file,
         'w').write('\n')  # no input, just processing of forwarded data
    out = shared.run_js(compiler,
                        shared.COMPILER_ENGINE,
                        [settings_file, post_file, 'post', forwarded_file] +
                        libraries,
                        stdout=subprocess.PIPE,
                        cwd=path_from_root('src'))
    #if DEBUG: outfile.write('// post\n')
    outfile.write(indexize(out))
    if DEBUG:
        print >> sys.stderr, '  emscript: phase 3 took %s seconds' % (
            time.time() - t)

    outfile.close()
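The indexize() in this version substitutes each {{{ FI_name }}} placeholder with the numeric index assigned to that function. The same substitution as a self-contained sketch, with a fabricated index table:

import re

indexing = {'_main': 2, '_malloc': 4}  # fabricated indices for illustration

def indexize(js):
    # Replace {{{ FI_name }}} placeholders with the function's index.
    return re.sub(r'{{{ FI_([\w\d_$]+) }}}',
                  lambda m: str(indexing[m.group(1)]), js)

print(indexize('call_indirect({{{ FI__main }}});'))  # -> call_indirect(2);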
Example no. 17
def inspect_code(headers, cpp_opts, structs, defines):
  show('Generating C code...')
  
  code = ['#include <stdio.h>', '#include <stddef.h>']
  # Include all the needed headers.
  for path in headers:
    code.append('#include "' + path + '"')
  
  code.append('int main() {')
  c_descent('structs', code)
  for name, struct in structs.items():
    gen_inspect_code([name], struct, code)
  
  c_ascent(code)
  c_descent('defines', code)
  for name, type_ in defines.items():
    # Add the necessary python type, if missing.
    if '%' not in type_:
      if type_[-1] in ('d', 'i', 'u'):
        # integer
        type_ = 'i%' + type_
      elif type_[-1] in ('f', 'F', 'e', 'E', 'g', 'G'):
        # float
        type_ = 'f%' + type_
      elif type_[-1] in ('x', 'X', 'a', 'A', 'c', 's'):
        # hexadecimal or string
        type_ = 's%' + type_
    
    c_set(name, type_, name, code)
  
  code.append('return 0;')
  code.append('}')
  
  # Write the source code to a temporary file.
  src_file = tempfile.mkstemp('.c')
  js_file = tempfile.mkstemp('.js')
  
  os.write(src_file[0], shared.asbytes('\n'.join(code)))
  
  # Close all unneeded FDs.
  os.close(src_file[0])
  os.close(js_file[0])

  # Remove dangerous env modifications
  safe_env = os.environ.copy()
  for opt in ['EMCC_FORCE_STDLIBS', 'EMCC_ONLY_FORCED_STDLIBS']:
    if opt in safe_env:
      del safe_env[opt]

  # Use binaryen, if necessary
  binaryen = os.environ.get('EMCC_WASM_BACKEND_BINARYEN')
  if binaryen:
    cpp_opts += ['-s', 'BINARYEN=1']

  info = []

  try:
    try:
      # Compile the program.
      show('Compiling generated code...')
      subprocess.check_call([shared.PYTHON, shared.EMCC] + cpp_opts + ['-o', js_file[1], src_file[1], '-s', 'BOOTSTRAPPING_STRUCT_INFO=1', '-s', 'WARN_ON_UNDEFINED_SYMBOLS=0', '-Oz', '--js-opts', '0', '--memory-init-file', '0'], env=safe_env) # -Oz optimizes enough to avoid warnings on code size/num locals
    except:
      sys.stderr.write('FAIL: Compilation failed!\n')
      sys.exit(1)

    # Run the compiled program.
    show('Calling generated program...')
    try:
      info = shared.run_js(js_file[1]).splitlines()
    except subprocess.CalledProcessError:
      sys.stderr.write('FAIL: Running the generated program failed!\n')
      sys.exit(1)

  finally:
    # Remove all temporary files.
    os.unlink(src_file[1])
    
    if os.path.exists(js_file[1]):
      os.unlink(js_file[1])
  
  # Parse the output of the program into a dict.
  return parse_c_output(info)
Example no. 18
  def test_binaryen(self):
    import tools.ports.binaryen as binaryen
    tag_file = Cache.get_path('binaryen_tag_' + binaryen.TAG + '.txt')

    assert not os.environ.get('BINARYEN') # must not have binaryen env var set

    # test in 2 modes - with BINARYEN_ROOT in the config file, set to '', and without it entirely
    for binaryen_root_in_config in [1, 0]:
      print('binaryen_root_in_config:', binaryen_root_in_config)

      def prep():
        restore_and_set_up()
        print('clearing ports...')
        print(self.do([PYTHON, EMCC, '--clear-ports']))
        wipe()
        self.do([PYTHON, EMCC]) # first run stage
        try_delete(tag_file)
        # if BINARYEN_ROOT is set, we don't build the port. Check we do build it if not
        if binaryen_root_in_config:
          config = open(CONFIG_FILE).read()
          assert '''BINARYEN_ROOT = os.path.expanduser(os.getenv('BINARYEN', ''))''' in config, config # setup created it to be ''
          print('created config:')
          print(config)
          restore_and_set_up()
          config = open(CONFIG_FILE).read()
          config = config.replace('BINARYEN_ROOT', '''BINARYEN_ROOT = os.path.expanduser(os.getenv('BINARYEN', '')) # ''')
        else:
          restore_and_set_up()
          config = open(CONFIG_FILE).read()
          config = config.replace('BINARYEN_ROOT', '#')
        print('modified config:')
        print(config)
        open(CONFIG_FILE, 'w').write(config)

      print('build using embuilder')
      prep()
      run_process([PYTHON, EMBUILDER, 'build', 'binaryen'])
      assert os.path.exists(tag_file)
      run_process([PYTHON, EMCC] + MINIMAL_HELLO_WORLD + ['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"'])
      self.assertContained('hello, world!', run_js('a.out.js'))

      print('see we show an error for emmake (we cannot build natively under emmake)')
      prep()
      try_delete('a.out.js')
      out = self.do([PYTHON, path_from_root('emmake.py'), EMCC] + MINIMAL_HELLO_WORLD + ['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"'])
      assert not os.path.exists(tag_file)
      assert not os.path.exists('a.out.js')
      self.assertContained('For example, for binaryen, do "python embuilder.py build binaryen"', out)

      if not binaryen_root_in_config:
        print('build on demand')
        for side_module in (False, True):
          print(side_module)
          prep()
          assert not os.path.exists(tag_file)
          try_delete('a.out.js')
          try_delete('a.out.wasm')
          cmd = [PYTHON, EMCC]
          if not side_module:
            cmd += MINIMAL_HELLO_WORLD
          else:
            # EM_ASM doesn't work in a wasm side module, build a normal program
            cmd += [path_from_root('tests', 'hello_world.c'), '-s', 'SIDE_MODULE=1']
          cmd += ['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"']
          run_process(cmd)
          assert os.path.exists(tag_file)
          assert os.path.exists('a.out.wasm')
          if not side_module:
            assert os.path.exists('a.out.js')
            self.assertContained('hello, world!', run_js('a.out.js'))
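try_delete in this test clears a possibly-absent output file before rebuilding. A hypothetical reimplementation of such a helper, for illustration only:

import os
import shutil

def try_delete(path):
    # Remove a file, or fall back to removing a directory tree, ignoring
    # the error if the path does not exist at all.
    try:
        os.remove(path)
    except OSError:
        shutil.rmtree(path, ignore_errors=True)

try_delete('a.out.js')  # succeeds silently even if a.out.js is absent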
Example no. 19
def inspect_code(headers, cpp_opts, structs, defines):
  code = ['#include <stdio.h>', '#include <stddef.h>']
  # Include all the needed headers.
  for path in headers:
    code.append('#include "' + path + '"')

  code.append('int main() {')
  c_descent('structs', code)
  for name, struct in structs.items():
    gen_inspect_code([name], struct, code)

  c_ascent(code)
  c_descent('defines', code)
  for name, type_ in defines.items():
    # Add the necessary python type, if missing.
    if '%' not in type_:
      if type_[-1] in ('d', 'i', 'u'):
        # integer
        type_ = 'i%' + type_
      elif type_[-1] in ('f', 'F', 'e', 'E', 'g', 'G'):
        # float
        type_ = 'f%' + type_
      elif type_[-1] in ('x', 'X', 'a', 'A', 'c', 's'):
        # hexadecimal or string
        type_ = 's%' + type_

    c_set(name, type_, name, code)

  code.append('return 0;')
  code.append('}')

  # Write the source code to a temporary file.
  src_file = tempfile.mkstemp('.c')
  show('Generating C code... ' + src_file[1])
  os.write(src_file[0], shared.asbytes('\n'.join(code)))

  js_file = tempfile.mkstemp('.js')

  # Close all unneeded FDs.
  os.close(src_file[0])
  os.close(js_file[0])

  # Force linking only compiler-rt, instead of the default system libraries
  env = os.environ.copy()
  env['EMCC_FORCE_STDLIBS'] = 'libcompiler_rt'
  env['EMCC_ONLY_FORCED_STDLIBS'] = '1'

  info = []
  # Compile the program.
  show('Compiling generated code...')
  # -Oz optimizes enough to avoid warnings on code size/num locals
  cmd = [shared.PYTHON, shared.EMCC] + cpp_opts + ['-o', js_file[1], src_file[1],
                                                   '-O0', '--js-opts', '0', '--memory-init-file', '0',
                                                   '-Werror', '-Wno-format',
                                                   '-s', 'BOOTSTRAPPING_STRUCT_INFO=1',
                                                   '-s', 'WARN_ON_UNDEFINED_SYMBOLS=0',
                                                   '-s', 'STRICT=1',
                                                   '-s', 'SINGLE_FILE=1']

  if not shared.Settings.WASM_BACKEND:
    # Avoid the binaryen dependency if we are only using fastcomp
    cmd += ['-s', 'WASM=0']
  if shared.Settings.LTO:
    cmd += ['-flto=' + shared.Settings.LTO]

  show(cmd)
  try:
    subprocess.check_call(cmd, env=env)
  except subprocess.CalledProcessError as e:
    sys.stderr.write('FAIL: Compilation failed!: %s\n' % e.cmd)
    sys.exit(1)

  # Run the compiled program.
  show('Calling generated program... ' + js_file[1])
  try:
    info = shared.run_js(js_file[1]).splitlines()
  except subprocess.CalledProcessError:
    sys.stderr.write('FAIL: Running the generated program failed!\n')
    sys.exit(1)

  # Remove all temporary files.
  os.unlink(src_file[1])

  if os.path.exists(js_file[1]):
    os.unlink(js_file[1])

  # Parse the output of the program into a dict.
  return parse_c_output(info)
Example no. 20
    def test_emcc(self):
        SANITY_FAIL_MESSAGE = 'sanity check failed to run'

        # emcc should check sanity if no ${EM_CONFIG}_sanity
        restore_and_set_up()
        time.sleep(1)
        assert not os.path.exists(
            SANITY_FILE)  # restore is just the settings, not the sanity
        output = self.check_working(EMCC)
        self.assertContained(SANITY_MESSAGE, output)
        # EMCC should have checked sanity successfully
        old_sanity = open(SANITY_FILE).read()
        self.assertNotContained(SANITY_FAIL_MESSAGE, output)

        # emcc run again should not sanity check, because the sanity file is newer
        output = self.check_working(EMCC)
        self.assertNotContained(SANITY_MESSAGE, output)
        self.assertNotContained(SANITY_FAIL_MESSAGE, output)

        # incorrect sanity contents mean we *must* check
        open(SANITY_FILE, 'w').write('wakawaka')
        output = self.check_working(EMCC)
        self.assertContained(SANITY_MESSAGE, output)

        # correct sanity contents mean we need not check
        open(SANITY_FILE, 'w').write(old_sanity)
        output = self.check_working(EMCC)
        self.assertNotContained(SANITY_MESSAGE, output)

        # but with EMCC_DEBUG=1 we should check
        with env_modify({'EMCC_DEBUG': '1'}):
            output = self.check_working(EMCC)
        try_delete(CANONICAL_TEMP_DIR)

        self.assertContained(SANITY_MESSAGE, output)
        output = self.check_working(EMCC)
        self.assertNotContained(SANITY_MESSAGE, output)

        # also with -v, with or without inputs
        output = self.check_working([EMCC, '-v'], SANITY_MESSAGE)
        output = self.check_working([EMCC, '-v'] + MINIMAL_HELLO_WORLD + [],
                                    SANITY_MESSAGE)

        # Make sure the test runner didn't do anything to the setup
        output = self.check_working(EMCC)
        self.assertNotContained(SANITY_MESSAGE, output)
        self.assertNotContained(SANITY_FAIL_MESSAGE, output)

        # emcc should also check sanity if the file is outdated
        open(CONFIG_FILE, 'a').write('# extra stuff\n')
        output = self.check_working(EMCC)
        self.assertContained(SANITY_MESSAGE, output)
        self.assertNotContained(SANITY_FAIL_MESSAGE, output)

        # emcc should be configurable directly from EM_CONFIG without any config file
        restore_and_set_up()
        config = open(CONFIG_FILE, 'r').read()
        open('main.cpp', 'w').write('''
      #include <stdio.h>
      int main() {
        printf("hello from emcc with no config file\\n");
        return 0;
      }
    ''')

        wipe()
        with env_modify({'EM_CONFIG': config}):
            run_process([EMCC, 'main.cpp', '-o', 'a.out.js'])

        self.assertContained('hello from emcc with no config file',
                             run_js('a.out.js'))
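test_emcc encodes the rule that emcc re-runs its sanity checks when the sanity stamp is missing, older than the config file, or holds unexpected contents. A hedged sketch of that freshness rule (a hypothetical helper, not emcc's actual code):

import os

def sanity_is_stale(config_file, sanity_file, expected):
    # Re-check when the stamp is missing, older than the config file,
    # or when its recorded contents no longer match what we expect.
    if not os.path.exists(sanity_file):
        return True
    if os.path.getmtime(config_file) > os.path.getmtime(sanity_file):
        return True
    with open(sanity_file) as f:
        return f.read() != expected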
Example no. 21
def emscript(infile, settings, outfile, libraries=[]):
  """Runs the emscripten LLVM-to-JS compiler. We parallelize as much as possible

  Args:
    infile: The path to the input LLVM assembly file.
    settings: JSON-formatted settings that override the values
      defined in src/settings.js.
    outfile: The file where the output is written.
  """

  compiler = path_from_root('src', 'compiler.js')

  # Parallelization: We run 3 phases:
  #   1 aka 'pre'  : Process types and metadata and so forth, and generate the preamble.
  #   2 aka 'funcs': Process functions. We can parallelize this, working on each function independently.
  #   3 aka 'post' : Process globals, generate postamble and finishing touches.

  if DEBUG: print >> sys.stderr, 'emscript: ll=>js'

  # Pre-scan ll and alter settings as necessary
  if DEBUG: t = time.time()
  ll = open(infile).read()
  scan(ll, settings)
  total_ll_size = len(ll)
  ll = None # allow collection
  if DEBUG: print >> sys.stderr, '  emscript: scan took %s seconds' % (time.time() - t)

  # Split input into the relevant parts for each phase
  pre = []
  funcs = [] # split up functions here, for parallelism later
  meta = [] # needed by each function XXX

  if DEBUG: t = time.time()
  in_func = False
  ll_lines = open(infile).readlines()
  for line in ll_lines:
    if in_func:
      funcs[-1].append(line)
      if line.startswith('}'):
        in_func = False
        funcs[-1] = ''.join(funcs[-1])
        pre.append(line) # pre needs it too, so we know about all implemented functions
    else:
      if line.startswith('define '):
        in_func = True
        funcs.append([line])
        pre.append(line) # pre needs it too, so we know about all implemented functions
      elif line.find(' = type { ') > 0:
        pre.append(line) # type
      elif line.startswith('!'):
        meta.append(line) # metadata
      else:
        pre.append(line) # pre needs it so we know about globals in pre and funcs. So emit globals there
  ll_lines = None
  meta = ''.join(meta)
  if DEBUG and len(meta) > 1024*1024: print >> sys.stderr, 'emscript warning: large amounts of metadata, will slow things down'
  if DEBUG: print >> sys.stderr, '  emscript: split took %s seconds' % (time.time() - t)

  #if DEBUG:
  #  print >> sys.stderr, '========= pre ================\n'
  #  print >> sys.stderr, ''.join(pre)
  #  print >> sys.stderr, '========== funcs ===============\n'
  #  for func in funcs:
  #    print >> sys.stderr, '\n// ===\n\n', ''.join(func)
  #  print >> sys.stderr, '=========================\n'

  # Save settings to a file to work around v8 issue 1579
  settings_file = temp_files.get('.txt').name
  s = open(settings_file, 'w')
  s.write(json.dumps(settings))
  s.close()

  # Phase 1 - pre
  if DEBUG: t = time.time()
  pre_file = temp_files.get('.pre.ll').name
  open(pre_file, 'w').write(''.join(pre) + '\n' + meta)
  out = shared.run_js(compiler, shared.COMPILER_ENGINE, [settings_file, pre_file, 'pre'] + libraries, stdout=subprocess.PIPE, cwd=path_from_root('src'))
  pre, forwarded_data = out.split('//FORWARDED_DATA:')
  forwarded_file = temp_files.get('.json').name
  open(forwarded_file, 'w').write(forwarded_data)
  if DEBUG: print >> sys.stderr, '  emscript: phase 1 took %s seconds' % (time.time() - t)

  # Phase 2 - func

  cores = multiprocessing.cpu_count()
  assert cores >= 1
  if cores > 1:
    intended_num_chunks = cores * NUM_CHUNKS_PER_CORE
    chunk_size = max(MIN_CHUNK_SIZE, total_ll_size / intended_num_chunks)
    chunk_size += 3*len(meta) # keep ratio of lots of function code to meta (expensive to process, and done in each parallel task)
    chunk_size = min(MAX_CHUNK_SIZE, chunk_size)
  else:
    chunk_size = MAX_CHUNK_SIZE # if 1 core, just use the max chunk size

  if DEBUG: t = time.time()
  forwarded_json = json.loads(forwarded_data)
  indexed_functions = set()
  chunks = [] # bundles of functions
  curr = ''
  for i in range(len(funcs)):
    func = funcs[i]
    if len(curr) + len(func) < chunk_size:
      curr += func
    else:
      chunks.append(curr)
      curr = func
  if curr:
    chunks.append(curr)
    curr = ''
  if cores == 1 and total_ll_size < MAX_CHUNK_SIZE: assert len(chunks) == 1, 'no point in splitting up without multiple cores'
  if DEBUG: print >> sys.stderr, '  emscript: phase 2 working on %d chunks %s (intended chunk size: %.2f MB, meta: %.2f MB, forwarded: %.2f MB, total: %.2f MB)' % (len(chunks), ('using %d cores' % cores) if len(chunks) > 1 else '', chunk_size/(1024*1024.), len(meta)/(1024*1024.), len(forwarded_data)/(1024*1024.), total_ll_size/(1024*1024.))

  commands = [(i, chunks[i] + '\n' + meta, settings_file, compiler, forwarded_file, libraries) for i in range(len(chunks))]

  if len(chunks) > 1:
    pool = multiprocessing.Pool(processes=cores)
    outputs = pool.map(process_funcs, commands, chunksize=1)
  else:
    outputs = [process_funcs(commands[0])]

  funcs_js = ''.join([output[0] for output in outputs])

  for func_js, curr_forwarded_data in outputs:
    # merge forwarded data
    curr_forwarded_json = json.loads(curr_forwarded_data)
    forwarded_json['Types']['preciseI64MathUsed'] = forwarded_json['Types']['preciseI64MathUsed'] or curr_forwarded_json['Types']['preciseI64MathUsed']
    for key, value in curr_forwarded_json['Functions']['blockAddresses'].iteritems():
      forwarded_json['Functions']['blockAddresses'][key] = value
    for key in curr_forwarded_json['Functions']['indexedFunctions'].iterkeys():
      indexed_functions.add(key)
  outputs = None
  if DEBUG: print >> sys.stderr, '  emscript: phase 2 took %s seconds' % (time.time() - t)
  if DEBUG: t = time.time()

  # calculations on merged forwarded data
  forwarded_json['Functions']['indexedFunctions'] = {}
  i = 2
  for indexed in indexed_functions:
    forwarded_json['Functions']['indexedFunctions'][indexed] = i # make sure not to modify this python object later - we use it in indexize
    i += 2
  forwarded_json['Functions']['nextIndex'] = i

  indexing = forwarded_json['Functions']['indexedFunctions']
  def indexize(js):
    return re.sub(r'{{{ FI_([\w\d_$]+) }}}', lambda m: str(indexing[m.groups(0)[0]]), js)

  blockaddrs = forwarded_json['Functions']['blockAddresses']
  def blockaddrsize(js):
    return re.sub(r'{{{ BA_([\w\d_$]+)\|([\w\d_$]+) }}}', lambda m: str(blockaddrs[m.groups(0)[0]][m.groups(0)[1]]), js)

  if DEBUG: outfile.write('// pre\n')
  outfile.write(blockaddrsize(indexize(pre)))
  pre = None

  if DEBUG: outfile.write('// funcs\n')
  outfile.write(blockaddrsize(indexize(funcs_js)))
  funcs_js = None

  # forward
  forwarded_data = json.dumps(forwarded_json)
  forwarded_file = temp_files.get('.2.json').name
  open(forwarded_file, 'w').write(indexize(forwarded_data))
  if DEBUG: print >> sys.stderr, '  emscript: phase 2b took %s seconds' % (time.time() - t)

  # Phase 3 - post
  if DEBUG: t = time.time()
  post_file = temp_files.get('.post.ll').name
  open(post_file, 'w').write('\n') # no input, just processing of forwarded data
  out = shared.run_js(compiler, shared.COMPILER_ENGINE, [settings_file, post_file, 'post', forwarded_file] + libraries, stdout=subprocess.PIPE, cwd=path_from_root('src'))
  if DEBUG: outfile.write('// post\n')
  outfile.write(indexize(out))
  if DEBUG: print >> sys.stderr, '  emscript: phase 3 took %s seconds' % (time.time() - t)

  outfile.close()
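
Phase 2 above packs functions into chunks with a greedy size budget. The same logic as a standalone helper (illustrative names; the extra guard avoids emitting an empty first chunk when a single function already exceeds the budget):

def chunkify_greedy(funcs, chunk_size):
  # Greedily pack function bodies into chunks of roughly chunk_size characters.
  chunks = []
  curr = ''
  for func in funcs:
    if curr and len(curr) + len(func) >= chunk_size:
      chunks.append(curr)
      curr = ''
    curr += func
  if curr:
    chunks.append(curr)
  return chunks

Each chunk later has the metadata appended (see the commands list above), since every parallel task needs it to process its functions.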
Esempio n. 22
0
import os, sys, re, json, shutil
from subprocess import Popen, PIPE, STDOUT

exec(open(os.path.expanduser('~/.emscripten'), 'r').read())

sys.path.append(EMSCRIPTEN_ROOT)
import tools.shared as emscripten

for name, suffix, x, y, ref in [['syntensity_lobby_s', 'j2k', 40, 30, 'reference.raw'],
                                ['relax', 'jp2', 400, 300, 'relax.raw']]:
  print 'testing: ' + name + '.' + suffix
  data = str(map(ord, open(name + '.' + suffix, 'r').read()))
  raw = emscripten.run_js('test.js', SPIDERMONKEY_ENGINE, [sys.argv[1], data, suffix])
  sets = raw.split('*')
  output = eval('[' + sets[1] + ']')
  width = output[0]
  height = output[1]
  data = ''.join([chr(item) for item in output[2:]])

  out = open('generated.raw', 'wb')
  out.write(data)
  out.close()

  # check

  assert width == x, 'Failed to generate proper width: %d' % width
  assert height == y, 'Failed to generate proper height: %d' % height

  reference = open(ref, 'rb').read()
  assert reference == data, 'Failed to generate proper output :('
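
The script ships the image to JavaScript as a stringified list of byte values and decodes the printed integers back into raw bytes. The same round trip in isolation (a sketch; bytearray keeps it portable across Python 2 and 3):

def file_to_js_literal(path):
  # Serialize a binary file as a literal list of byte values, e.g. '[255, 79, ...]'.
  with open(path, 'rb') as f:
    return str(list(bytearray(f.read())))

def values_to_bytes(values):
  # Decode the integer list parsed from the JS output back into raw bytes.
  return bytes(bytearray(values))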
Esempio n. 23
0
import os, sys, re, json, shutil
from subprocess import Popen, PIPE, STDOUT, call

exec(open(os.path.expanduser('~/.emscripten'), 'r').read())

sys.path.append(EMSCRIPTEN_ROOT)
import tools.shared as emscripten

output = emscripten.run_js('test.js', emscripten.NODE_JS)
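
run_js here simply executes a script under the configured JS engine and returns its stdout. A rough, assumed equivalent built on subprocess (the real helper in tools/shared.py also handles engine argument lists, timeouts, and output checking):

import subprocess

def run_js_sketch(script, engine, args=None):
  # Run the script under the given engine and return its captured stdout as text.
  cmd = (engine if isinstance(engine, list) else [engine]) + [script] + (args or [])
  return subprocess.check_output(cmd).decode('utf-8')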
Esempio n. 24
0
  def test_nodejs_sockets_echo(self):
    # This test checks that sockets work when the client code is run in Node.js
    # Run with ./runner.py sockets.test_nodejs_sockets_echo
    if NODE_JS not in JS_ENGINES:
      self.skipTest('node is not present')

    sockets_include = '-I' + path_from_root('tests', 'sockets')

    harnesses = [
      (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 59162), 0),
      (CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 59164), 1)
    ]

    if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
      harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59160), 0)]

    # Basic test of node client against both a Websockified and compiled echo server.
    for harness, datagram in harnesses:
      with harness:
        run_process([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], stdout=PIPE, stderr=PIPE)

        out = run_js('client.js', engine=NODE_JS, full_output=True)
        self.assertContained('do_msg_read: read 14 bytes', out)

    if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
      # Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
      # server because as long as the subprotocol list contains binary it will configure itself to accept binary
      # This test also checks that the connect url contains the correct subprotocols.
      print("\nTesting compile time WebSocket configuration.\n")
      for harness in [
        WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59166)
      ]:
        with harness:
          run_process([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-s', 'SOCKET_DEBUG=1', '-s', 'WEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166'], stdout=PIPE, stderr=PIPE)

          out = run_js('client.js', engine=NODE_JS, full_output=True)
          self.assertContained('do_msg_read: read 14 bytes', out)
          self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)

      # Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
      # In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
      # the connection would fail without us specifying a valid WebSocket URL in the configuration.
      print("\nTesting runtime WebSocket configuration.\n")
      for harness in [
        WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59168)
      ]:
        with harness:
          open(os.path.join(self.get_dir(), 'websocket_pre.js'), 'w').write('''
          var Module = {
            websocket: {
              url: 'ws://localhost:59168/testA/testB',
              subprotocol: 'text, base64, binary',
            }
          };
          ''')

          run_process([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js', 'websocket_pre.js', '-s', 'SOCKET_DEBUG=1', '-DSOCKK=12345'], stdout=PIPE, stderr=PIPE)

          out = run_js('client.js', engine=NODE_JS, full_output=True)
          self.assertContained('do_msg_read: read 14 bytes', out)
          self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
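
Each harness above is used as a context manager: entering it starts the echo server, leaving it tears the server down, so a failed assertion cannot leak a listening process. A hypothetical minimal shape (the real CompiledServerHarness and WebsockifyServerHarness also compile the server and wait for the port to open):

import subprocess

class ServerHarnessSketch(object):
  # Hypothetical: launch an already-built server on enter, kill it on exit.
  def __init__(self, cmd, listen_port):
    self.cmd = cmd
    self.listen_port = listen_port
    self.proc = None

  def __enter__(self):
    self.proc = subprocess.Popen(self.cmd)
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    self.proc.terminate()
    self.proc.wait()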
Esempio n. 25
0
    def test_nodejs_sockets_echo(self):
        # This test checks that sockets work when the client code is run in Node.js
        # Run with ./runner.py sockets.test_nodejs_sockets_echo
        if NODE_JS not in JS_ENGINES:
            self.skipTest('node is not present')

        sockets_include = '-I' + path_from_root('tests', 'sockets')

        harnesses = [(CompiledServerHarness(
            os.path.join('sockets', 'test_sockets_echo_server.c'),
            [sockets_include, '-DTEST_DGRAM=0'], 59162), 0),
                     (CompiledServerHarness(
                         os.path.join('sockets', 'test_sockets_echo_server.c'),
                         [sockets_include, '-DTEST_DGRAM=1'], 59164), 1)]

        if not WINDOWS:  # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
            harnesses += [(WebsockifyServerHarness(
                os.path.join('sockets', 'test_sockets_echo_server.c'),
                [sockets_include], 59160), 0)]

        # Basic test of node client against both a Websockified and compiled echo server.
        for harness, datagram in harnesses:
            with harness:
                run_process([
                    PYTHON, EMCC,
                    path_from_root('tests', 'sockets',
                                   'test_sockets_echo_client.c'), '-o',
                    'client.js',
                    '-DSOCKK=%d' % harness.listen_port,
                    '-DTEST_DGRAM=%d' % datagram
                ],
                            stdout=PIPE,
                            stderr=PIPE)

                out = run_js('client.js', engine=NODE_JS, full_output=True)
                self.assertContained('do_msg_read: read 14 bytes', out)

        if not WINDOWS:  # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
            # Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
            # server because as long as the subprotocol list contains binary it will configure itself to accept binary
            # This test also checks that the connect url contains the correct subprotocols.
            print("\nTesting compile time WebSocket configuration.\n")
            for harness in [
                    WebsockifyServerHarness(
                        os.path.join('sockets', 'test_sockets_echo_server.c'),
                        [sockets_include], 59166)
            ]:
                with harness:
                    run_process([
                        PYTHON, EMCC,
                        path_from_root('tests', 'sockets',
                                       'test_sockets_echo_client.c'), '-o',
                        'client.js', '-s', 'SOCKET_DEBUG=1', '-s',
                        'WEBSOCKET_SUBPROTOCOL="base64, binary"',
                        '-DSOCKK=59166'
                    ],
                                stdout=PIPE,
                                stderr=PIPE)

                    out = run_js('client.js', engine=NODE_JS, full_output=True)
                    self.assertContained('do_msg_read: read 14 bytes', out)
                    self.assertContained([
                        'connect: ws://127.0.0.1:59166, base64,binary',
                        'connect: ws://127.0.0.1:59166/, base64,binary'
                    ], out)

            # Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
            # In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
            # the connection would fail without us specifying a valid WebSocket URL in the configuration.
            print("\nTesting runtime WebSocket configuration.\n")
            for harness in [
                    WebsockifyServerHarness(
                        os.path.join('sockets', 'test_sockets_echo_server.c'),
                        [sockets_include], 59168)
            ]:
                with harness:
                    open(os.path.join(self.get_dir(), 'websocket_pre.js'),
                         'w').write('''
          var Module = {
            websocket: {
              url: 'ws://localhost:59168/testA/testB',
              subprotocol: 'text, base64, binary',
            }
          };
          ''')

                    run_process([
                        PYTHON, EMCC,
                        path_from_root('tests', 'sockets',
                                       'test_sockets_echo_client.c'), '-o',
                        'client.js', '--pre-js', 'websocket_pre.js', '-s',
                        'SOCKET_DEBUG=1', '-DSOCKK=12345'
                    ],
                                stdout=PIPE,
                                stderr=PIPE)

                    out = run_js('client.js', engine=NODE_JS, full_output=True)
                    self.assertContained('do_msg_read: read 14 bytes', out)
                    self.assertContained(
                        'connect: ws://localhost:59168/testA/testB, text,base64,binary',
                        out)
Esempio n. 26
0
  def test_emcc(self):
    SANITY_FAIL_MESSAGE = 'sanity check failed to run'

    # emcc should check sanity if no ${EM_CONFIG}_sanity
    restore_and_set_up()
    time.sleep(1)
    assert not os.path.exists(SANITY_FILE) # restore is just the settings, not the sanity
    output = self.check_working(EMCC)
    self.assertContained(SANITY_MESSAGE, output)
    assert os.path.exists(SANITY_FILE) # EMCC should have checked sanity successfully
    assert mtime(SANITY_FILE) > mtime(CONFIG_FILE)
    assert generate_sanity() == open(SANITY_FILE).read()
    self.assertNotContained(SANITY_FAIL_MESSAGE, output)

    # emcc run again should not sanity check, because the sanity file is newer
    output = self.check_working(EMCC)
    self.assertNotContained(SANITY_MESSAGE, output)
    self.assertNotContained(SANITY_FAIL_MESSAGE, output)

    # correct sanity contents mean we need not check
    open(SANITY_FILE, 'w').write(generate_sanity())
    output = self.check_working(EMCC)
    self.assertNotContained(SANITY_MESSAGE, output)

    # incorrect sanity contents mean we *must* check
    open(SANITY_FILE, 'w').write('wakawaka')
    output = self.check_working(EMCC)
    self.assertContained(SANITY_MESSAGE, output)

    # but with EMCC_DEBUG=1 we should check
    with env_modify({'EMCC_DEBUG': '1'}):
      output = self.check_working(EMCC)
    try_delete(CANONICAL_TEMP_DIR)

    self.assertContained(SANITY_MESSAGE, output)
    output = self.check_working(EMCC)
    self.assertNotContained(SANITY_MESSAGE, output)

    # also with -v, with or without inputs
    output = self.check_working([EMCC, '-v'], SANITY_MESSAGE)
    output = self.check_working([EMCC, '-v'] + MINIMAL_HELLO_WORLD, SANITY_MESSAGE)

    # Make sure the test runner didn't do anything to the setup
    output = self.check_working(EMCC)
    self.assertNotContained(SANITY_MESSAGE, output)
    self.assertNotContained(SANITY_FAIL_MESSAGE, output)

    # emcc should also check sanity if the file is outdated
    time.sleep(0.1)
    restore_and_set_up()
    assert mtime(SANITY_FILE) < mtime(CONFIG_FILE)
    output = self.check_working(EMCC)
    self.assertContained(SANITY_MESSAGE, output)
    assert mtime(SANITY_FILE) >= mtime(CONFIG_FILE)
    self.assertNotContained(SANITY_FAIL_MESSAGE, output)

    # emcc should be configurable directly from EM_CONFIG without any config file
    restore_and_set_up()
    config = open(CONFIG_FILE, 'r').read()
    open('main.cpp', 'w').write('''
      #include <stdio.h>
      int main() {
        printf("hello from emcc with no config file\\n");
        return 0;
      }
    ''')

    wipe()
    with env_modify({'EM_CONFIG': config}):
      run_process([PYTHON, EMCC, 'main.cpp', '-o', 'a.out.js'])

    self.assertContained('hello from emcc with no config file', run_js('a.out.js'))
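
The sanity behaviour exercised above reduces to an mtime comparison between the config file and its _sanity sibling: a missing or older sanity file means the checks must run again. As a sketch, under the file-naming assumptions visible in this test:

import os

def sanity_is_stale(config_file, sanity_file):
  # Re-run the sanity checks when the cached sanity file is missing or is
  # older than the config file that produced it.
  if not os.path.exists(sanity_file):
    return True
  return os.path.getmtime(sanity_file) < os.path.getmtime(config_file)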
Esempio n. 27
0
import os, sys, re, json, shutil
from subprocess import Popen, PIPE, STDOUT, call

exec(open(os.path.expanduser('~/.emscripten'), 'r').read())

sys.path.append(EMSCRIPTEN_ROOT)
import tools.shared as emscripten

output = emscripten.run_js('test.js', emscripten.NODE_JS)
Esempio n. 28
0
def emscript(infile, settings, outfile, libraries=[]):
    """Runs the emscripten LLVM-to-JS compiler. We parallelize as much as possible

  Args:
    infile: The path to the input LLVM assembly file.
    settings: JSON-formatted settings that override the values
      defined in src/settings.js.
    outfile: The file where the output is written.
  """

    compiler = path_from_root('src', 'compiler.js')

    # Parallelization: We run 3 phases:
    #   1 aka 'pre'  : Process types and metadata and so forth, and generate the preamble.
    #   2 aka 'funcs': Process functions. We can parallelize this, working on each function independently.
    #   3 aka 'post' : Process globals, generate postamble and finishing touches.

    if DEBUG: print >> sys.stderr, 'emscript: ll=>js'

    if jcache: shared.JCache.ensure()

    # Pre-scan ll and alter settings as necessary
    if DEBUG: t = time.time()
    ll = open(infile).read()
    scan(ll, settings)
    total_ll_size = len(ll)
    ll = None  # allow collection
    if DEBUG:
        print >> sys.stderr, '  emscript: scan took %s seconds' % (
            time.time() - t)

    # Split input into the relevant parts for each phase
    pre = []
    funcs = []  # split up functions here, for parallelism later
    func_idents = []
    meta = []  # needed by each function XXX

    if DEBUG: t = time.time()
    in_func = False
    ll_lines = open(infile).readlines()
    for line in ll_lines:
        if in_func:
            funcs[-1][1].append(line)
            if line.startswith('}'):
                in_func = False
                funcs[-1] = (funcs[-1][0], ''.join(funcs[-1][1]))
                pre.append(
                    line
                )  # pre needs it too, so we know about all implemented functions
        else:
            if line.startswith(';'): continue
            if line.startswith('define '):
                in_func = True
                funcs.append(
                    (line, [line]))  # use the entire line as the identifier
                pre.append(
                    line
                )  # pre needs it too, so we know about all implemented functions
            elif line.find(' = type { ') > 0:
                pre.append(line)  # type
            elif line.startswith('!'):
                if line.startswith('!llvm.module'):
                    continue  # we can ignore that
                meta.append(line)  # metadata
            else:
                pre.append(
                    line
                )  # pre needs it so we know about globals in pre and funcs. So emit globals there
    ll_lines = None
    meta = ''.join(meta)
    if DEBUG and len(meta) > 1024 * 1024:
        print >> sys.stderr, 'emscript warning: large amounts of metadata, will slow things down'
    if DEBUG:
        print >> sys.stderr, '  emscript: split took %s seconds' % (
            time.time() - t)

    #if DEBUG:
    #  print >> sys.stderr, '========= pre ================\n'
    #  print >> sys.stderr, ''.join(pre)
    #  print >> sys.stderr, '========== funcs ===============\n'
    #  for func in funcs:
    #    print >> sys.stderr, '\n// ===\n\n', ''.join(func)
    #  print >> sys.stderr, '=========================\n'

    # Save settings to a file to work around v8 issue 1579
    settings_file = temp_files.get('.txt').name

    def save_settings():
        global settings_text
        settings_text = json.dumps(settings)
        s = open(settings_file, 'w')
        s.write(settings_text)
        s.close()

    save_settings()

    # Phase 1 - pre
    if DEBUG: t = time.time()
    pre_file = temp_files.get('.pre.ll').name
    pre_input = ''.join(pre) + '\n' + meta
    out = None
    if jcache:
        keys = [pre_input, settings_text, ','.join(libraries)]
        shortkey = shared.JCache.get_shortkey(keys)
        out = shared.JCache.get(shortkey, keys)
        if out and DEBUG: print >> sys.stderr, '  loading pre from jcache'
    if not out:
        open(pre_file, 'w').write(pre_input)
        out = shared.run_js(compiler,
                            shared.COMPILER_ENGINE,
                            [settings_file, pre_file, 'pre'] + libraries,
                            stdout=subprocess.PIPE,
                            cwd=path_from_root('src'))
        if jcache:
            if DEBUG: print >> sys.stderr, '  saving pre to jcache'
            shared.JCache.set(shortkey, keys, out)
    pre, forwarded_data = out.split('//FORWARDED_DATA:')
    forwarded_file = temp_files.get('.json').name
    open(forwarded_file, 'w').write(forwarded_data)
    if DEBUG:
        print >> sys.stderr, '  emscript: phase 1 took %s seconds' % (
            time.time() - t)

    # Phase 2 - func

    cores = multiprocessing.cpu_count()
    assert cores >= 1
    if cores > 1:
        intended_num_chunks = cores * NUM_CHUNKS_PER_CORE
        chunk_size = max(MIN_CHUNK_SIZE, total_ll_size / intended_num_chunks)
        chunk_size += 3 * len(
            meta
        )  # keep ratio of lots of function code to meta (expensive to process, and done in each parallel task)
        chunk_size = min(MAX_CHUNK_SIZE, chunk_size)
    else:
        chunk_size = MAX_CHUNK_SIZE  # if 1 core, just use the max chunk size

    if DEBUG: t = time.time()
    forwarded_json = json.loads(forwarded_data)
    indexed_functions = set()
    if settings.get('ASM_JS'):
        settings['EXPORTED_FUNCTIONS'] = forwarded_json['EXPORTED_FUNCTIONS']
        save_settings()

    chunks = shared.JCache.chunkify(funcs, chunk_size,
                                    'emscript_files' if jcache else None)

    if jcache:
        # load chunks from cache where we can # TODO: ignore small chunks
        cached_outputs = []

        def load_from_cache(chunk):
            keys = [settings_text, forwarded_data, chunk]
            shortkey = shared.JCache.get_shortkey(
                keys)  # TODO: share shortkeys with later code
            out = shared.JCache.get(
                shortkey, keys)  # this is relatively expensive (pickling?)
            if out:
                cached_outputs.append(out)
                return False
            return True

        chunks = filter(load_from_cache, chunks)
        if len(cached_outputs) > 0:
            if out and DEBUG:
                print >> sys.stderr, '  loading %d funcchunks from jcache' % len(
                    cached_outputs)
        else:
            cached_outputs = []

    # TODO: minimize size of forwarded data from funcs to what we actually need

    if cores == 1 and total_ll_size < MAX_CHUNK_SIZE:
        assert len(
            chunks) == 1, 'no point in splitting up without multiple cores'

    if len(chunks) > 0:
        if DEBUG:
            print >> sys.stderr, '  emscript: phase 2 working on %d chunks %s (intended chunk size: %.2f MB, meta: %.2f MB, forwarded: %.2f MB, total: %.2f MB)' % (
                len(chunks),
                ('using %d cores' % cores) if len(chunks) > 1 else '',
                chunk_size / (1024 * 1024.), len(meta) /
                (1024 * 1024.), len(forwarded_data) /
                (1024 * 1024.), total_ll_size / (1024 * 1024.))

        commands = [(i, chunks[i], meta, settings_file, compiler,
                     forwarded_file, libraries) for i in range(len(chunks))]

        if len(chunks) > 1:
            pool = multiprocessing.Pool(processes=cores)
            outputs = pool.map(process_funcs, commands, chunksize=1)
        elif len(chunks) == 1:
            outputs = [process_funcs(commands[0])]
    else:
        outputs = []

    if jcache:
        # save chunks to cache
        for i in range(len(chunks)):
            chunk = chunks[i]
            keys = [settings_text, forwarded_data, chunk]
            shortkey = shared.JCache.get_shortkey(keys)
            shared.JCache.set(shortkey, keys, outputs[i])
        if out and DEBUG and len(chunks) > 0:
            print >> sys.stderr, '  saving %d funcchunks to jcache' % len(
                chunks)

    if jcache: outputs += cached_outputs  # TODO: preserve order

    outputs = [output.split('//FORWARDED_DATA:') for output in outputs]

    if DEBUG:
        print >> sys.stderr, '  emscript: phase 2 took %s seconds' % (
            time.time() - t)
    if DEBUG: t = time.time()

    # merge forwarded data
    if settings.get('ASM_JS'):
        all_exported_functions = set(
            settings['EXPORTED_FUNCTIONS'])  # both asm.js and otherwise
        for additional_export in settings[
                'DEFAULT_LIBRARY_FUNCS_TO_INCLUDE']:  # additional functions to export from asm, if they are implemented
            all_exported_functions.add('_' + additional_export)
        exported_implemented_functions = set()
    for func_js, curr_forwarded_data in outputs:
        curr_forwarded_json = json.loads(curr_forwarded_data)
        forwarded_json['Types']['preciseI64MathUsed'] = forwarded_json[
            'Types']['preciseI64MathUsed'] or curr_forwarded_json['Types'][
                'preciseI64MathUsed']
        for key, value in curr_forwarded_json['Functions'][
                'blockAddresses'].iteritems():
            forwarded_json['Functions']['blockAddresses'][key] = value
        for key in curr_forwarded_json['Functions'][
                'indexedFunctions'].iterkeys():
            indexed_functions.add(key)
        if settings.get('ASM_JS'):
            for key in curr_forwarded_json['Functions'][
                    'implementedFunctions'].iterkeys():
                if key in all_exported_functions:
                    exported_implemented_functions.add(key)
        for key, value in curr_forwarded_json['Functions'][
                'unimplementedFunctions'].iteritems():
            forwarded_json['Functions']['unimplementedFunctions'][key] = value

    if settings.get('ASM_JS'):
        parts = pre.split('// ASM_LIBRARY FUNCTIONS\n')
        if len(parts) > 1:
            pre = parts[0]
            outputs.append([parts[1]])
    funcs_js = ''.join([output[0] for output in outputs])

    outputs = None
    if DEBUG:
        print >> sys.stderr, '  emscript: phase 2b took %s seconds' % (
            time.time() - t)
    if DEBUG: t = time.time()

    # calculations on merged forwarded data
    forwarded_json['Functions']['indexedFunctions'] = {}
    i = 2
    for indexed in indexed_functions:
        #print >> sys.stderr, 'indaxx', indexed, i
        forwarded_json['Functions']['indexedFunctions'][
            indexed] = i  # make sure not to modify this python object later - we use it in indexize
        i += 2
    forwarded_json['Functions']['nextIndex'] = i

    indexing = forwarded_json['Functions']['indexedFunctions']

    def indexize(js):
        return re.sub(r"'{{ FI_([\w\d_$]+) }}'",
                      lambda m: str(indexing.get(m.groups(0)[0]) or 0), js)
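    # Illustration (assumed values, not compiler output): with
    # indexing == {'_foo': 2}, indexize("var fp = '{{ FI__foo }}';")
    # returns "var fp = 2;", and names with no index fall back to 0.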

    blockaddrs = forwarded_json['Functions']['blockAddresses']

    def blockaddrsize(js):
        return re.sub(
            r'{{{ BA_([\w\d_$]+)\|([\w\d_$]+) }}}',
            lambda m: str(blockaddrs[m.groups(0)[0]][m.groups(0)[1]]), js)

    #if DEBUG: outfile.write('// pre\n')
    outfile.write(blockaddrsize(indexize(pre)))
    pre = None

    #if DEBUG: outfile.write('// funcs\n')

    # forward
    forwarded_data = json.dumps(forwarded_json)
    forwarded_file = temp_files.get('.2.json').name
    open(forwarded_file, 'w').write(indexize(forwarded_data))
    if DEBUG:
        print >> sys.stderr, '  emscript: phase 2c took %s seconds' % (
            time.time() - t)

    # Phase 3 - post
    if DEBUG: t = time.time()
    post_file = temp_files.get('.post.ll').name
    open(post_file,
         'w').write('\n')  # no input, just processing of forwarded data
    out = shared.run_js(compiler,
                        shared.COMPILER_ENGINE,
                        [settings_file, post_file, 'post', forwarded_file] +
                        libraries,
                        stdout=subprocess.PIPE,
                        cwd=path_from_root('src'))
    post, last_forwarded_data = out.split('//FORWARDED_DATA:')
    last_forwarded_json = json.loads(last_forwarded_data)

    if settings.get('ASM_JS'):
        simple = os.environ.get('EMCC_SIMPLE_ASM')

        class Counter:
            i = 0

        pre_tables = last_forwarded_json['Functions']['tables']['pre']
        del last_forwarded_json['Functions']['tables']['pre']

        # Find function table calls without function tables generated for them
        for use in set(re.findall(r'{{{ FTM_[\w\d_$]+ }}}', funcs_js)):
            sig = use[8:len(use) - 4]
            if sig not in last_forwarded_json['Functions']['tables']:
                if DEBUG: print >> sys.stderr, 'add empty function table', sig
                last_forwarded_json['Functions']['tables'][
                    sig] = 'var FUNCTION_TABLE_' + sig + ' = [0,0];\n'

        def make_table(sig, raw):
            i = Counter.i
            Counter.i += 1
            bad = 'b' + str(i)
            params = ','.join(['p%d' % p for p in range(len(sig) - 1)])
            coercions = ';'.join([
                'p%d = %sp%d%s' % (p, '+' if sig[p + 1] != 'i' else '', p,
                                   '' if sig[p + 1] != 'i' else '|0')
                for p in range(len(sig) - 1)
            ]) + ';'
            ret = '' if sig[0] == 'v' else ('return %s0' %
                                            ('+' if sig[0] != 'i' else ''))
            return ('function %s(%s) { %s abort(%d); %s };' %
                    (bad, params, coercions, i, ret),
                    raw.replace('[0,', '[' + bad + ',').replace(
                        ',0,', ',' + bad +
                        ',').replace(',0,', ',' + bad + ',').replace(
                            ',0]',
                            ',' + bad + ']').replace(',0]', ',' + bad + ']'))
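        # Example (assumed input): for sig 'v' and raw
        # 'var FUNCTION_TABLE_v = [0,_main,0];', make_table yields an abort
        # stub like 'function b0() { ; abort(0);  };' and the table rewritten
        # as 'var FUNCTION_TABLE_v = [b0,_main,b0];', so a call through an
        # empty slot aborts instead of jumping to address 0.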

        infos = [
            make_table(sig, raw) for sig, raw in
            last_forwarded_json['Functions']['tables'].iteritems()
        ]
        function_tables_defs = '\n'.join([info[0] for info in infos] +
                                         [info[1] for info in infos])

        asm_setup = ''
        maths = [
            'Math.' + func for func in [
                'floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos',
                'asin', 'atan', 'atan2', 'exp', 'log', 'ceil'
            ]
        ]
        if settings['USE_MATH_IMUL']:
            maths += ['Math.imul']
            asm_setup += 'if (!Math.imul) Math.imul = function(x, y) { return (x*y)|0 }; // not a real polyfill since semantics not identical, but close and fairly fast\n'
        fundamentals = [
            'Math', 'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array',
            'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array'
        ]
        math_envs = ['Runtime.bitshift64',
                     'Math.min']  # TODO: move min to maths
        asm_setup += '\n'.join(
            ['var %s = %s;' % (f.replace('.', '_'), f) for f in math_envs])
        basic_funcs = [
            'abort', 'assert', 'asmPrintInt', 'asmPrintFloat',
            'copyTempDouble', 'copyTempFloat'
        ] + [m.replace('.', '_') for m in math_envs]
        if settings['SAFE_HEAP']:
            basic_funcs += [
                'SAFE_HEAP_LOAD', 'SAFE_HEAP_STORE', 'SAFE_HEAP_CLEAR'
            ]
        basic_vars = ['STACKTOP', 'STACK_MAX', 'tempDoublePtr', 'ABORT']
        basic_float_vars = ['NaN', 'Infinity']
        if forwarded_json['Types']['preciseI64MathUsed']:
            basic_funcs += [
                'i64Math_' + op
                for op in ['add', 'subtract', 'multiply', 'divide', 'modulo']
            ]
            asm_setup += '''
var i64Math_add = function(a, b, c, d) { i64Math.add(a, b, c, d) };
var i64Math_subtract = function(a, b, c, d) { i64Math.subtract(a, b, c, d) };
var i64Math_multiply = function(a, b, c, d) { i64Math.multiply(a, b, c, d) };
var i64Math_divide = function(a, b, c, d, e) { i64Math.divide(a, b, c, d, e) };
var i64Math_modulo = function(a, b, c, d, e) { i64Math.modulo(a, b, c, d, e) };
'''
        asm_runtime_funcs = [
            'stackAlloc', 'stackSave', 'stackRestore', 'setThrew'
        ] + ['setTempRet%d' % i for i in range(10)]

        # function tables
        def asm_coerce(value, sig):
            if sig == 'v': return value
            return ('+' if sig != 'i' else '') + value + ('|0' if sig == 'i'
                                                          else '')

        function_tables = [
            'dynCall_' + table
            for table in last_forwarded_json['Functions']['tables']
        ]
        function_tables_impls = []
        for sig in last_forwarded_json['Functions']['tables'].iterkeys():
            args = ','.join(['a' + str(i) for i in range(1, len(sig))])
            arg_coercions = ' '.join([
                'a' + str(i) + '=' + asm_coerce('a' + str(i), sig[i]) + ';'
                for i in range(1, len(sig))
            ])
            coerced_args = ','.join(
                [asm_coerce('a' + str(i), sig[i]) for i in range(1, len(sig))])
            ret = ('return ' if sig[0] != 'v' else '') + asm_coerce(
                'FUNCTION_TABLE_%s[index&{{{ FTM_%s }}}](%s)' %
                (sig, sig, coerced_args), sig[0])
            function_tables_impls.append('''
  function dynCall_%s(index%s%s) {
    index = index|0;
    %s
    %s;
  }
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))
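        # For sig 'ii' (int return, one int argument) the loop above emits,
        # roughly:
        #   function dynCall_ii(index,a1) {
        #     index = index|0;
        #     a1=a1|0;
        #     return FUNCTION_TABLE_ii[index&{{{ FTM_ii }}}](a1|0)|0;
        #   }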

        # calculate exports
        exported_implemented_functions = list(exported_implemented_functions)
        exports = []
        if not simple:
            for export in exported_implemented_functions + asm_runtime_funcs + function_tables:
                exports.append("%s: %s" % (export, export))
            exports = '{ ' + ', '.join(exports) + ' }'
        else:
            exports = '_main'
        # calculate globals
        try:
            del forwarded_json['Variables']['globals'][
                '_llvm_global_ctors']  # not a true variable
        except:
            pass
        # If no named globals, only need externals
        global_vars = map(
            lambda g: g['name'],
            filter(
                lambda g: settings['NAMED_GLOBALS'] or g.get(
                    'external') or g.get('unIndexable'),
                forwarded_json['Variables']['globals'].values()))
        global_funcs = [
            '_' + x
            for x in forwarded_json['Functions']['libraryFunctions'].keys()
        ]

        def math_fix(g):
            return g if not g.startswith('Math_') else g.split('_')[1]
        asm_global_funcs = ''.join(['  var ' + g.replace('.', '_') + '=global.' + g + ';\n' for g in maths]) + \
                           ''.join(['  var ' + g + '=env.' + math_fix(g) + ';\n' for g in basic_funcs + global_funcs])
        asm_global_vars = ''.join(['  var ' + g + '=env.' + g + '|0;\n' for g in basic_vars + global_vars]) + \
                          ''.join(['  var ' + g + '=+env.' + g + ';\n' for g in basic_float_vars])
        # sent data
        the_global = '{ ' + ', '.join(
            [math_fix(s) + ': ' + s for s in fundamentals]) + ' }'
        sending = '{ ' + ', '.join([
            math_fix(s) + ': ' + s for s in basic_funcs + global_funcs +
            basic_vars + basic_float_vars + global_vars
        ]) + ' }'
        # received
        if not simple:
            receiving = ';\n'.join([
                'var ' + s + ' = Module["' + s + '"] = asm.' + s
                for s in exported_implemented_functions + function_tables
            ])
        else:
            receiving = 'var _main = Module["_main"] = asm;'
        # finalize
        funcs_js = '''
%s
function asmPrintInt(x, y) {
  Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
}
function asmPrintFloat(x, y) {
  Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
var asm = (function(global, env, buffer) {
  'use asm';
  var HEAP8 = new global.Int8Array(buffer);
  var HEAP16 = new global.Int16Array(buffer);
  var HEAP32 = new global.Int32Array(buffer);
  var HEAPU8 = new global.Uint8Array(buffer);
  var HEAPU16 = new global.Uint16Array(buffer);
  var HEAPU32 = new global.Uint32Array(buffer);
  var HEAPF32 = new global.Float32Array(buffer);
  var HEAPF64 = new global.Float64Array(buffer);
''' % (asm_setup, ) + '\n' + asm_global_vars + '''
  var __THREW__ = 0;
  var undef = 0;
  var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
''' + ''.join(['''
  var tempRet%d = 0;''' % i
               for i in range(10)]) + '\n' + asm_global_funcs + '''
  function stackAlloc(size) {
    size = size|0;
    var ret = 0;
    ret = STACKTOP;
    STACKTOP = (STACKTOP + size)|0;
    STACKTOP = ((STACKTOP + 3)>>2)<<2;
    return ret|0;
  }
  function stackSave() {
    return STACKTOP|0;
  }
  function stackRestore(top) {
    top = top|0;
    STACKTOP = top;
  }
  function setThrew(threw) {
    threw = threw|0;
    __THREW__ = threw;
  }
''' + ''.join([
                   '''
  function setTempRet%d(value) {
    value = value|0;
    tempRet%d = value;
  }
''' % (i, i) for i in range(10)
               ]) + funcs_js + '''

  %s

  return %s;
})(%s, %s, buffer);
%s;
Runtime.stackAlloc = function(size) { return asm.stackAlloc(size) };
Runtime.stackSave = function() { return asm.stackSave() };
Runtime.stackRestore = function(top) { asm.stackRestore(top) };
''' % (pre_tables + '\n'.join(function_tables_impls) + '\n' +
        function_tables_defs.replace('\n', '\n  '), exports, the_global,
        sending, receiving)

        # Set function table masks
        def function_table_maskize(js):
            masks = {}
            default = None
            for sig, table in last_forwarded_json['Functions'][
                    'tables'].iteritems():
                masks[sig] = str(table.count(','))
                default = sig

            def fix(m):
                sig = m.groups(0)[0]
                return masks[sig]

            return re.sub(r'{{{ FTM_([\w\d_$]+) }}}', lambda m: fix(m),
                          js)  # masks[m.groups(0)[0]]

        funcs_js = function_table_maskize(funcs_js)
    else:
        function_tables_defs = '\n'.join([
            table for table in last_forwarded_json['Functions']
            ['tables'].itervalues()
        ])
        outfile.write(function_tables_defs)
    outfile.write(blockaddrsize(indexize(funcs_js)))
    funcs_js = None

    outfile.write(indexize(post))
    if DEBUG:
        print >> sys.stderr, '  emscript: phase 3 took %s seconds' % (
            time.time() - t)

    outfile.close()
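
A closing note on the {{{ FTM_sig }}} placeholders that function_table_maskize substitutes: counting the commas in a power-of-two-sized table literal yields length-1, exactly the mask that keeps index&mask in bounds. The substitution in isolation (assumed table text):

import re

tables = {'ii': 'var FUNCTION_TABLE_ii = [b0,_foo,b1,_bar];'}  # assumed example
masks = dict((sig, str(text.count(','))) for sig, text in tables.items())

js = 'FUNCTION_TABLE_ii[index&{{{ FTM_ii }}}](a1|0)|0'
js = re.sub(r'{{{ FTM_([\w\d_$]+) }}}', lambda m: masks[m.group(1)], js)
# js is now 'FUNCTION_TABLE_ii[index&3](a1|0)|0'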