def prepare(self):
    """Fetch the library sources into 'temp/'.

    If a "version" is pinned in self.settings, download and unpack the
    release zip; otherwise clone the tip of master.  If a "patch" entry is
    present, apply it on top of the fetched tree.
    """
    if "version" in self.settings:
        version = self.settings["version"]
        # self.url presumably is a format string with one %s slot for the
        # version — TODO confirm against the class definition.
        download(self.url % (version), self.zipfile)
        unzip(self.zipfile, 'temp')
        # Flatten the versioned top-level directory of the zip into temp/.
        cp('temp/imgui-%s/' % (version), 'temp/')  # TODO: mv would be cleaner
    else:
        git_clone(self.repo, 'master', 'temp')
    if "patch" in self.settings:
        # Apply the user-supplied patch from inside the source tree.
        with cd('temp/'):
            patch(self.settings["patch"])
def __fit(self, content_image, style_image, epoch_num, callback=None):
    """Optimize an image to match content/style targets, coarse to fine.

    Runs the optimization at strides 4, 2, 1 (restricted by
    self.resolution_num), upsampling the result of each resolution as the
    starting point of the next.  This variant hard-codes two model outputs
    (presumably conv3_2/conv4_2-style layers — TODO confirm against
    self.model) instead of iterating over configured layer names.

    :param content_image: content target, NCHW array — assumed shape
        (1, 3, H, W); TODO confirm with callers.
    :param style_image: style target, same layout as content_image.
    :param epoch_num: iterations to run per resolution.
    :param callback: optional fn(epoch, link.x, loss_info) called per epoch.
    :return: the optimized image Variable (link.x) of the final resolution.
    """
    xp = self.xp
    input_image = None
    height, width = content_image.shape[-2:]  # height is unused below
    base_epoch = 0
    for stlide in [4, 2, 1][-self.resolution_num:]:
        # Skip resolutions where the downsampled image would be too small.
        if width // stlide < 64:
            continue
        # Strided slicing downsamples by simple subsampling.
        content_x = Variable(xp.asarray(content_image[:,:,::stlide,::stlide]), volatile=True)
        if self.keep_color:
            # Transfer only luminance of the style so content colors survive.
            style_x = Variable(util.luminance_only(xp.asarray(style_image[:,:,::stlide,::stlide]), content_x.data), volatile=True)
        else:
            style_x = Variable(xp.asarray(style_image[:,:,::stlide,::stlide]), volatile=True)
        #content_layer_names = self.content_layer_names
        # self.model here returns a 2-tuple of layer activations; only the
        # second is used as the content target.
        _,content_layer4_2 = self.model(content_x)
        #content_layers = [(name, content_layers[name]) for name in content_layer_names]
        #style_layer_names = self.style_layer_names
        style_layer3_2,style_layer4_2 = self.model(style_x)
        # Extract 3x3 patches used for nearest-neighbor style matching.
        style_patch3_2 = util.patch(style_layer3_2,ksize=3)
        style_patch4_2 = util.patch(style_layer4_2,ksize=3)
        # Leftover experiment code for caching style patches on disk:
        #np.save("../data/"+self.style_id+"style_3_2_0_"+str(stlide)+".npy",cuda.to_cpu(style_patch3_2[0]))
        #np.save("../data/"+self.style_id+"style_3_2_1_"+str(stlide)+".npy",cuda.to_cpu(style_patch3_2[1]))
        #np.save("../data/"+self.style_id+"style_4_2_0_"+str(stlide)+".npy",cuda.to_cpu(style_patch4_2[0]))
        #np.save("../data/"+self.style_id+"style_4_2_1_"+str(stlide)+".npy",cuda.to_cpu(style_patch4_2[1]))
        #style_patch3_2=(xp.array(np.load("../data/"+self.style_id+"style_3_2_0_"+str(stlide)+".npy")),xp.array(np.load("../data/"+self.style_id+"style_3_2_1_"+str(stlide)+".npy")))
        #style_patch4_2=(xp.array(np.load("../data/"+self.style_id+"style_4_2_0_"+str(stlide)+".npy")),xp.array(np.load("../data/"+self.style_id+"style_4_2_1_"+str(stlide)+".npy")))
        if input_image is None:
            # First resolution: seed from the content image or from noise.
            if self.initial_image == 'content':
                input_image = xp.asarray(content_image[:,:,::stlide,::stlide])
            else:
                input_image = xp.random.uniform(-20, 20, size=content_x.data.shape).astype(np.float32)
        else:
            # Subsequent resolutions: 2x upsample the previous result by
            # pixel duplication, then crop to the exact target size.
            input_image = input_image.repeat(2, 2).repeat(2, 3)
            h, w = content_x.data.shape[-2:]
            input_image = input_image[:,:,:h,:w]
        # Wrap the image in a Link so the optimizer treats it as a parameter.
        link = chainer.Link(x=input_image.shape)
        if self.device_id >= 0:
            link.to_gpu()
        link.x.data[:] = xp.asarray(input_image)
        self.optimizer.setup(link)
        for epoch in six.moves.range(epoch_num):
            # NOTE(review): this call passes the hard-coded layers/patches;
            # it does not match the (link, content_layers, style_patches)
            # signature of the generic __fit_one in this file — presumably
            # this variant pairs with a matching __fit_one elsewhere; verify.
            loss_info = self.__fit_one(link, content_layer4_2.data, style_patch3_2,style_patch4_2)
            if callback:
                callback(base_epoch + epoch, link.x, loss_info)
        base_epoch += epoch_num
        input_image = link.x.data
    # Python 2 print statement: elapsed wall-clock time since self.start_time.
    print time.time()-self.start_time,"s"
    return link.x
def __fit_one(self, link, content_layers, style_patches):
    """Perform one optimization step on the image held in ``link.x``.

    Accumulates content, style (nearest-neighbor patch matching) and
    total-variation losses, backpropagates, and applies one optimizer
    update.

    :param link: chainer.Link whose ``x`` parameter is the image being fit.
    :param content_layers: iterable of (name, target_activation) pairs.
    :param style_patches: iterable of (name, patch, patch_norm) triples.
    :return: list of (loss_name, float_value) pairs for reporting.
    """
    xp = self.xp
    link.zerograds()
    activations = self.model(link.x)
    # When preserving color, style terms are computed on a grayscale view.
    transfer_activations = self.model(util.gray(link.x)) if self.keep_color else activations
    report = []
    total = Variable(xp.zeros((), dtype=np.float32))
    # Content terms: MSE between current and target activations.
    for layer_name, target in content_layers:
        c_loss = self.content_weight * F.mean_squared_error(
            activations[layer_name], target)
        report.append(('content_' + layer_name, float(c_loss.data)))
        total += c_loss
    # Style terms: MSE between each patch and its nearest style patch.
    for layer_name, s_patch, s_patch_norm in style_patches:
        current = util.patch(transfer_activations[layer_name])
        nearest = util.nearest_neighbor_patch(current, s_patch, s_patch_norm)
        s_loss = self.style_weight * F.mean_squared_error(current, nearest)
        report.append(('style_' + layer_name, float(s_loss.data)))
        total += s_loss
    # Total-variation regularizer keeps the image smooth.
    tv = self.tv_weight * util.total_variation(link.x)
    report.append(('tv', float(tv.data)))
    total += tv
    total.backward()
    self.optimizer.update()
    return report
def __fit(self, content_image, style_image, epoch_num, callback=None):
    """Optimize an image toward the content/style targets, coarse to fine.

    Runs the fit at strides 4, 2, 1 (limited by ``self.resolution_num``),
    reusing each resolution's result — upsampled 2x — as the next starting
    point.

    :param content_image: content target, NCHW array.
    :param style_image: style target, same layout.
    :param epoch_num: iterations per resolution.
    :param callback: optional fn(epoch, link.x, loss_info) invoked each epoch.
    :return: the optimized image Variable from the final resolution.
    """
    xp = self.xp
    input_image = None
    height, width = content_image.shape[-2:]
    base_epoch = 0
    for stride in [4, 2, 1][-self.resolution_num:]:
        # Skip resolutions where the subsampled width would be too small.
        if width // stride < 64:
            continue
        content_sub = content_image[:, :, ::stride, ::stride]
        content_x = Variable(xp.asarray(content_sub), volatile=True)
        style_sub = xp.asarray(style_image[:, :, ::stride, ::stride])
        if self.keep_color:
            # Use only the style's luminance so content colors survive.
            style_x = Variable(util.luminance_only(style_sub, content_x.data),
                               volatile=True)
        else:
            style_x = Variable(style_sub, volatile=True)
        # Content targets: named activations of the content image.
        all_content = self.model(content_x)
        content_layers = [(name, all_content[name])
                          for name in self.content_layer_names]
        # Style targets: patches plus their norms, per configured layer.
        style_layers = self.model(style_x)
        style_patches = []
        for name in self.style_layer_names:
            p = util.patch(style_layers[name])
            p_norm = F.expand_dims(F.sum(p ** 2, axis=1) ** 0.5, 1)
            style_patches.append((name, p, p_norm))
        if input_image is None:
            # First resolution: seed from content or uniform noise.
            if self.initial_image == 'content':
                input_image = xp.asarray(content_sub)
            else:
                input_image = xp.random.uniform(
                    -20, 20, size=content_x.data.shape).astype(np.float32)
        else:
            # 2x upsample the previous result by duplication, crop to size.
            input_image = input_image.repeat(2, 2).repeat(2, 3)
            h, w = content_x.data.shape[-2:]
            input_image = input_image[:, :, :h, :w]
        # Expose the image as an optimizable parameter.
        link = chainer.Link(x=input_image.shape)
        if self.device_id >= 0:
            link.to_gpu()
        link.x.data[:] = xp.asarray(input_image)
        self.optimizer.setup(link)
        for epoch in six.moves.range(epoch_num):
            loss_info = self.__fit_one(link, content_layers, style_patches)
            if callback:
                callback(base_epoch + epoch, link.x, loss_info)
        base_epoch += epoch_num
        input_image = link.x.data
    return link.x
def load_kernel(self):
    """Build the OpenCL search kernel for this device, using a cached
    binary when one is available.

    Side effects: sets ``self.context``, ``self.program``, ``self.kernel``
    and (when unset) ``self.worksize``; may write a "<md5>.elf" binary
    cache file in the working directory.

    Fixes: file handles are now managed with ``with`` blocks, so the
    kernel source and the cache-write handle are closed even when a read,
    build, or write raises (the originals leaked on exception).
    """
    self.context = cl.Context([self.device], None, None)
    if self.device.extensions.find("cl_amd_media_ops") != -1:
        self.defines += " -DBITALIGN"
        # These AMD GPU families support BFI_INT, enabled by binary patching
        # below.  NOTE(review): nesting reconstructed from the collapsed
        # source; BFI_INT requires cl_amd_media_ops — confirm.
        if self.device_name in [
            "Cedar", "Redwood", "Juniper", "Cypress", "Hemlock",
            "Caicos", "Turks", "Barts", "Cayman", "Antilles",
            "Wrestler", "Zacate", "WinterPark", "BeaverCreek",
        ]:
            self.defines += " -DBFI_INT"
    # Read the kernel source; 'with' guarantees the handle is closed.
    with open("phatk.cl", "r") as kernel_file:
        kernel = kernel_file.read()
    # Cache key covers platform, device, defines and source, so any change
    # invalidates the cached binary.
    m = md5()
    m.update("".join([self.device.platform.name, self.device.platform.version,
                      self.device.name, self.defines, kernel]))
    cache_name = "%s.elf" % m.hexdigest()
    try:
        with open(cache_name, "rb") as binary:
            self.program = cl.Program(self.context, [self.device],
                                      [binary.read()]).build(self.defines)
    except (IOError, cl.LogicError):
        # No usable cache: compile from source, patch in BFI_INT when
        # requested, then write the binary cache for next time.
        self.program = cl.Program(self.context, kernel).build(self.defines)
        if self.defines.find("-DBFI_INT") != -1:
            patchedBinary = patch(self.program.binaries[0])
            self.program = cl.Program(self.context, [self.device],
                                      [patchedBinary]).build(self.defines)
        with open(cache_name, "wb") as binaryW:
            binaryW.write(self.program.binaries[0])
    self.kernel = self.program.search
    if not self.worksize:
        self.worksize = self.kernel.get_work_group_info(
            cl.kernel_work_group_info.WORK_GROUP_SIZE, self.device)
def fix(self, fn):
    """Apply the registered byte-level patch for function *fn*.

    Looks *fn* up in ``self.fixes`` (mapping name -> (instructions, fix)
    byte sequences) and rewrites ``self.opcodes`` accordingly.  When *fn*
    is unknown, logs the available fixes and leaves the shellcode intact.
    """
    if fn not in self.fixes:
        logging.error(
            "The given function does not exist in the self.fixes table.")
        available = ", ".join(name for name in self.fixes.keys())
        logging.info("Available self.fixes: {}".format(available))
        logging.info("\n[i] Shellcode is unmodified.\n")
        return
    instructions, replacement = self.fixes[fn]
    logging.info("Patching shellcode for {}".format(fn))
    logging.info("Replacing bytes:\n\n{}\n----\n{}\n".format(
        pp_hexdump(instructions), pp_hexdump(replacement)))
    self.opcodes = patch(self.opcodes, instructions, replacement)
def load_kernel(self):
    """Build the OpenCL search kernel for this device, preferring a cached
    compiled binary; falls back to compiling phatk.cl from source.

    Side effects: sets self.context, self.program, self.kernel and (when
    unset) self.worksize; may write a "<md5>.elf" cache file.
    """
    self.context = cl.Context([self.device], None, None)
    if (self.device.extensions.find('cl_amd_media_ops') != -1):
        self.defines += ' -DBITALIGN'
        # AMD GPU families that support the BFI_INT instruction, enabled by
        # patching the compiled binary below.  NOTE(review): nesting
        # reconstructed from collapsed source — BFI_INT presumably requires
        # cl_amd_media_ops; confirm.
        if self.device_name in ['Cedar', 'Redwood', 'Juniper', 'Cypress',
                                'Hemlock', 'Caicos', 'Turks', 'Barts',
                                'Cayman', 'Antilles', 'Wrestler', 'Zacate',
                                'WinterPark', 'BeaverCreek']:
            self.defines += ' -DBFI_INT'
    kernel_file = open('phatk.cl', 'r')
    kernel = kernel_file.read()
    kernel_file.close()
    # Cache key covers platform, device, defines and kernel source so any
    # change invalidates the cached binary.
    m = md5()
    m.update(''.join([self.device.platform.name,
                      self.device.platform.version, self.device.name,
                      self.defines, kernel]))
    cache_name = '%s.elf' % m.hexdigest()
    binary = None
    try:
        # Fast path: build from the cached device binary.
        binary = open(cache_name, 'rb')
        self.program = cl.Program(self.context, [self.device],
                                  [binary.read()]).build(self.defines)
    except (IOError, cl.LogicError):
        # Cache missing or stale: compile from source, optionally patch the
        # binary for BFI_INT, then write the cache for next time.
        self.program = cl.Program(self.context, kernel).build(self.defines)
        if (self.defines.find('-DBFI_INT') != -1):
            patchedBinary = patch(self.program.binaries[0])
            self.program = cl.Program(self.context, [self.device],
                                      [patchedBinary]).build(self.defines)
        binaryW = open(cache_name, 'wb')
        binaryW.write(self.program.binaries[0])
        binaryW.close()
    finally:
        if binary:
            binary.close()
    self.kernel = self.program.search
    if not self.worksize:
        # Default the work size to the device's maximum work-group size.
        self.worksize = self.kernel.get_work_group_info(
            cl.kernel_work_group_info.WORK_GROUP_SIZE, self.device)
def setup(config):
    """Fetch, patch and build the DangSan toolchain, then emit a SPEC
    config file and a test shrc.

    NOTE(review): block structure (if-nesting) was reconstructed from a
    whitespace-collapsed source; verify against the original repository.
    Python 2 (uses print statements).

    :param config: one of the module-level ``configs`` entries; configs[1]
        and configs[2] select the instrumented (DangSan) builds.
    """
    workdir = os.path.abspath(os.getcwd())
    if not os.path.exists('dangsan'):
        util.git.clone(dangsan_repo)
    if not os.path.exists('llvm-svn'):
        # Fresh LLVM checkout: apply the gold-plugin and safestack patches.
        util.llvm.checkout_svn(llvm_svn_commit)
        os.chdir('llvm-svn')
        util.patch(
            os.path.join(workdir, 'dangsan', 'patches',
                         'LLVM-gold-plugins-3.8.diff'), '-p0')
        util.patch(
            os.path.join(workdir, 'dangsan', 'patches',
                         'LLVM-safestack-3.8.diff'), '-p0')
        os.chdir('projects/compiler-rt')
        util.patch(
            os.path.join(workdir, 'dangsan', 'patches',
                         'COMPILERRT-safestack-3.8.diff'), '-p0')
        os.chdir(workdir)
    if not os.path.exists('bin/ld'):
        # prepare_gold presumably installs bin/ld and bin/ld.gold; the
        # default ld is then replaced by the gold linker — TODO confirm.
        prepare_gold(binutils_version)
        os.remove('bin/ld')
        shutil.copy('bin/ld.gold', 'bin/ld')
    if not os.path.exists('llvm-build'):
        os.mkdir('llvm-build')
        util.llvm.compile(
            src_dir=os.path.abspath('llvm-svn'),
            build_dir=os.path.abspath('llvm-build'),
            install_dir=os.getcwd(),
            options=['-DLLVM_BINUTILS_INCDIR=%s/include' % workdir])
        util.llvm.install(os.path.abspath('llvm-build'))
    if not glob.glob('gperftools*'):
        util.git.clone(gperftools_repo)
        os.chdir('gperftools')
        util.git.checkout(gperftools_commit)
        # gperftools patch for both baseline and DangSan
        util.git.patch(
            os.path.join(workdir, 'dangsan', 'patches',
                         'GPERFTOOLS_SPEEDUP.patch'))
    if config == configs[1] or config == configs[2]:
        os.chdir(workdir)
        # Build metapagetable
        if not os.path.exists('gperftools-metalloc'):
            shutil.move('gperftools', 'gperftools-metalloc')
        if not os.path.exists('metapagetable'):
            # Metalloc build-time configuration, passed via the environment.
            config_fixedcompression = 'false'
            config_metadatabytes = 8
            config_deepmetadata = 'false'
            config_alloc_size_hook = 'dang_alloc_size_hook'
            metalloc_options = []
            metalloc_options.append('-DFIXEDCOMPRESSION=%s' %
                                    config_fixedcompression)
            metalloc_options.append('-DMETADATABYTES=%d' %
                                    config_metadatabytes)
            metalloc_options.append('-DDEEPMETADATA=%s' %
                                    config_deepmetadata)
            metalloc_options.append('-DALLOC_SIZE_HOOK=%s' %
                                    config_alloc_size_hook)
            os.environ['METALLOC_OPTIONS'] = ' '.join(metalloc_options)
            shutil.copytree(os.path.join('dangsan', 'metapagetable'),
                            'metapagetable')
            os.chdir('metapagetable')
            util.make('config')
            util.make()
            os.chdir(workdir)
        # Apply DanSan patches for gperftool
        os.chdir('gperftools-metalloc')
        util.git.patch(
            os.path.join(os.path.dirname(__file__),
                         'GPERFTOOLS_DANGSAN.patch'))
        util.run('autoreconf -fi')
        util.configure('--prefix=%s' % workdir)
        util.make('-j4')
        util.make('install')
        os.chdir(workdir)
    # build llvm-plugins
    llvmplugins_dir = os.path.join(workdir, 'llvm-plugins')
    libplugins_so = os.path.join(llvmplugins_dir, 'libplugins.so')
    if config == configs[1] or config == configs[2]:
        if not os.path.exists('staticlib'):
            # The freshly-built toolchain must be first on PATH for make.
            os.environ['PATH'] = ':'.join(
                [os.path.join(workdir, 'bin'), os.environ['PATH']])
            shutil.copytree('dangsan/staticlib', 'staticlib')
            os.chdir('staticlib')
            util.make('METAPAGETABLEDIR=%s' %
                      os.path.join(workdir, 'metapagetable', 'obj'))
            os.chdir(workdir)
        if not os.path.exists(libplugins_so):
            os.environ['PATH'] = ':'.join(
                [os.path.join(workdir, 'bin'), os.environ['PATH']])
            os.chdir('dangsan/llvm-plugins')
            util.make('-j4 GOLDINSTDIR=%s TARGETDIR=%s' %
                      (workdir, llvmplugins_dir))
            os.chdir(workdir)
    # Create spec config file
    print 'creating spec config file...'
    spec_cfg = '-'.join([command, config]) + '.cfg'
    if os.path.exists(spec_cfg):
        os.remove(spec_cfg)
    cflags = ['-flto']
    cxxflags = ['-flto']
    ldflags = []
    if config == configs[1] or config == configs[2]:
        # DangSan instrumentation: safe-stack plus the LTO plugin passes.
        cflags.append('-fsanitize=safe-stack')
        cxxflags.append('-fsanitize=safe-stack')
        cxxflags.append('-DSOPLEX_DANGSAN_MASK')
        ldflags.append('-Wl,-plugin-opt=-load=%s' % libplugins_so)
        ldflags.append('-Wl,-plugin-opt=%s' % '-largestack=false')
        # ldflags.append('-Wl,-plugin-opt=%s' % '-stats') # option not found
        ldflags.append('-Wl,-plugin-opt=%s' % '-mergedstack=false')
        ldflags.append('-Wl,-plugin-opt=%s' % '-stacktracker')
        ldflags.append('-Wl,-plugin-opt=%s' % '-globaltracker')
        ldflags.append('-Wl,-plugin-opt=%s' % '-pointertracker')
        ldflags.append('-Wl,-plugin-opt=%s' % '-FreeSentryLoop')
        ldflags.append('-Wl,-plugin-opt=%s' % '-custominline')
        ldflags.append(
            '-Wl,-whole-archive,-l:libmetadata.a,-no-whole-archive'
            )  # staticlib
        ldflags.append(
            '@%s' % os.path.join(workdir, 'metapagetable', 'obj',
                                 'linker-options'))
    extra_libs = ['-ltcmalloc', '-lpthread', '-lunwind']
    extra_libs.append('-L%s' % os.path.join(workdir, 'lib'))
    if config == configs[1] or config == configs[2]:
        extra_libs.append('-ldl')
        extra_libs.append('-L%s' % os.path.join(workdir, 'staticlib', 'obj'))
    if config == configs[2]:
        # configs[2] presumably is the debug variant — TODO confirm.
        cflags.append('-g')
        cxxflags.append('-g')
    cc = [os.path.abspath(os.path.join(workdir, 'bin', 'clang'))]
    cc.extend(cflags)
    cxx = [os.path.abspath(os.path.join(workdir, 'bin', 'clang++'))]
    cxx.extend(cxxflags)
    cld = list(cc)
    cld.extend(ldflags)
    cxxld = list(cxx)
    cxxld.extend(ldflags)
    util.spec.create_config(dir=os.getcwd(), file=spec_cfg,
                            name='-'.join([command, config]),
                            cc=' '.join(cc), cxx=' '.join(cxx),
                            cld=' '.join(cld), cxxld=' '.join(cxxld),
                            extra_libs=' '.join(extra_libs))
    # create shrc for testing
    print('creating test directory...')
    if not os.path.exists('test'):
        testdir = os.path.join(os.path.dirname(__file__), 'test')
        shutil.copytree(testdir, os.path.join(workdir, 'test'))
    if os.path.exists('test/shrc'):
        os.remove('test/shrc')
    util.test.create_shrc(dir=os.path.abspath('test'), cc=' '.join(cc),
                          cxx=' '.join(cxx), ld=' '.join(cld),
                          ldflags=' '.join(extra_libs))
def setup(config):
    """Download, patch and build LLVM 4.0.0 plus the LowFat-instrumented
    toolchain, then emit a SPEC config file.

    NOTE(review): block structure (if-nesting) was reconstructed from a
    whitespace-collapsed source; verify against the original repository.
    Python 2 (uses print statements).

    :param config: one of the module-level ``configs``; configs[0] is the
        plain baseline, the others enable LowFat instrumentation.
    """
    workdir = os.path.abspath(os.getcwd())
    configdir = os.path.join(os.path.dirname(__file__), 'config')
    INSTRUMENTATION_PATH = os.path.abspath(
        'llvm/lib/Transforms/Instrumentation')
    if not os.path.exists('llvm'):
        util.llvm.download(llvmver, os.getcwd())
        # Apply patches common to both baseline and LowFat
        bug81066_patch = os.path.join(os.path.dirname(__file__),
                                      'bug81066.patch')
        os.chdir('llvm/projects/compiler-rt')
        util.patch(os.path.abspath(bug81066_patch), '-p0')
        os.chdir(workdir)
    if not os.path.exists('llvm-4.0.0') and not os.path.exists('llvm-lowfat'):
        # Baseline (unpatched) toolchain build.
        os.mkdir('llvm-4.0.0')
        util.llvm.compile(src_dir=os.path.abspath('llvm'),
                          build_dir=os.path.abspath('llvm-4.0.0'),
                          install_dir=workdir)
    if config != configs[0]:
        # Build the LowFat compiler with the baseline clang.
        os.environ['CC'] = os.path.abspath('llvm-4.0.0/bin/clang')
        os.environ['CXX'] = os.path.abspath('llvm-4.0.0/bin/clang++')
        if not os.path.exists('llvm-lowfat'):
            print 'patching llvm/clang/compiler-rt version ' + llvmver + '...'
            llvm_patch = os.path.join(os.path.dirname(__file__), 'llvm.patch')
            clang_patch = os.path.join(os.path.dirname(__file__),
                                       'clang.patch')
            compilerrt_patch = os.path.join(os.path.dirname(__file__),
                                            'compiler-rt.patch')
            legacy_compilerrt_patch = os.path.join(
                os.path.dirname(__file__), 'legacy_compiler-rt.patch')
            modern_compilerrt_patch = os.path.join(
                os.path.dirname(__file__), 'modern_compiler-rt.patch')
            os.chdir('llvm')
            util.patch(os.path.abspath(llvm_patch))
            os.chdir(workdir)
            os.chdir('llvm/tools/clang')
            util.patch(os.path.abspath(clang_patch))
            os.chdir(workdir)
            os.chdir('llvm/projects/compiler-rt')
            util.patch(os.path.abspath(compilerrt_patch))
            # Pick the legacy patch when the CPU lacks any of the BMI1,
            # BMI2 or ABM instruction-set extensions.
            cpuinfo = util.stdout(['cat', '/proc/cpuinfo']).strip().split('\n')
            legacy = False
            if not [s for s in cpuinfo if ' bmi1' in s]:
                legacy = True
            if not [s for s in cpuinfo if ' bmi2' in s]:
                legacy = True
            if not [s for s in cpuinfo if ' abm' in s]:
                legacy = True
            if legacy:
                util.patch(os.path.abspath(legacy_compilerrt_patch))
            else:
                util.patch(os.path.abspath(modern_compilerrt_patch))
            os.chdir(workdir)
            print 'building the LowFat config builder...'
            CONFIG = 'sizes.cfg 32'
            os.chdir(os.path.join(os.path.dirname(__file__), 'config'))
            util.make()
            util.run('./lowfat-config ' + CONFIG)
            util.make('lowfat-check-config')
            util.run('./lowfat-check-config')
            os.chdir(workdir)
            print 'copying the LowFat config files...'
            # The generated config is shared by the runtime, the LLVM pass
            # and clang's Basic library.
            RUNTIME_PATH = os.path.abspath(
                'llvm/projects/compiler-rt/lib/lowfat')
            shutil.copy(os.path.join(configdir, 'lowfat_config.h'),
                        RUNTIME_PATH)
            shutil.copy(os.path.join(configdir, 'lowfat_config.c'),
                        RUNTIME_PATH)
            CLANGLIB_PATH = os.path.abspath('llvm/tools/clang/lib/Basic')
            shutil.copy(os.path.join(RUNTIME_PATH, 'lowfat_config.c'),
                        os.path.join(INSTRUMENTATION_PATH,
                                     'lowfat_config.inc'))
            shutil.copy(os.path.join(RUNTIME_PATH, 'lowfat_config.h'),
                        os.path.join(INSTRUMENTATION_PATH,
                                     'lowfat_config.h'))
            shutil.copy(os.path.join(RUNTIME_PATH, 'lowfat_config.h'),
                        os.path.join(CLANGLIB_PATH, 'lowfat_config.h'))
            shutil.copy(os.path.join(RUNTIME_PATH, 'lowfat.h'),
                        os.path.join(INSTRUMENTATION_PATH, 'lowfat.h'))
            os.mkdir('llvm-lowfat')
            util.llvm.compile(src_dir=os.path.abspath('llvm'),
                              build_dir=os.path.abspath('llvm-lowfat'),
                              install_dir=workdir)
        # NOTE(review): the following paths are relative to llvm-lowfat;
        # reconstructed nesting assumes this chdir applies on every run.
        os.chdir('llvm-lowfat')
        if not os.path.exists('lib/LowFat'):
            if not os.path.exists('lib'):
                os.mkdir('lib')
            os.mkdir('lib/LowFat')
            shutil.copy(os.path.join(configdir, 'lowfat.ld'),
                        os.path.join('lib/LowFat', 'lowfat.ld'))
        if not os.path.exists('bin/lowfat-ptr-info'):
            os.chdir(configdir)
            util.make('lowfat-ptr-info')
            shutil.copy('lowfat-ptr-info',
                        os.path.join(workdir, 'llvm-lowfat', 'bin'))
        os.chdir(workdir)
        if not os.path.exists('LowFat.so'):
            # Compile the standalone LowFat LLVM pass plugin with the
            # baseline clang++ and the baseline llvm-config flags.
            clangxx = os.path.join('llvm-4.0.0', 'bin', 'clang++')
            llvm_config = os.path.join('llvm-4.0.0', 'bin', 'llvm-config')
            cxxflags = util.stdout([llvm_config, '--cxxflags']).rstrip('\n')
            includedir = util.stdout([llvm_config,
                                      '--includedir']).rstrip('\n')
            cmd = ' '.join([clangxx, '-DLOWFAT_PLUGIN',
                            os.path.join(INSTRUMENTATION_PATH, 'LowFat.cpp'),
                            '-c -Wall -O2', '-I' + INSTRUMENTATION_PATH,
                            '-o LowFat.o', cxxflags, includedir])
            util.run(cmd)
            ldflags = util.stdout([llvm_config, '--ldflags']).rstrip('\n')
            cmd = ' '.join([clangxx,
                            '-shared -rdynamic -o LowFat.so LowFat.o',
                            ldflags])
            util.run(cmd)
            os.chdir(workdir)
    # Create spec config file
    print 'creating spec config file...'
    spec_cfg = '-'.join([command, config]) + '.cfg'
    if os.path.exists(spec_cfg):
        os.remove(spec_cfg)
    if config == configs[0]:
        path = os.path.join('llvm-4.0.0', 'bin')
        cflags = []
    else:
        path = os.path.join('llvm-lowfat', 'bin')
        cflags = ['-fsanitize=lowfat']
        blacklist = os.path.join(os.path.dirname(__file__), 'blacklist.txt')
        cflags.extend(['-mllvm', '-lowfat-no-check-blacklist=%s' % blacklist])
        # Work around known overflows in SPEC benchmarks.
        cflags.append('-DPATCH_PERLBENCH_OVERFLOW')
        cflags.append('-DPATCH_H264REF_OVERFLOW')
        if config == configs[2]:
            cflags.append('-g')
            cflags.append('-fno-inline-functions')
        if config == configs[3]:
            cflags.append('-mllvm -lowfat-no-abort')
    cc = [os.path.abspath(os.path.join(path, 'clang'))]
    cc.extend(cflags)
    cxx = [os.path.abspath(os.path.join(path, 'clang++'))]
    cxx.extend(cflags)
    util.spec.create_config(dir=os.getcwd(), file=spec_cfg,
                            name='-'.join([command, config]),
                            cc=' '.join(cc), cxx=' '.join(cxx))
# Prepare a debug-helper deployment for the app: snapshot the current
# isccomponent, patch it to a debug image, create a route, and wait until
# the helper responds.
# NOTE(review): this is a script fragment — old_isccomp, route_doc, app,
# patch and create_from_template are defined outside the visible chunk, and
# the final try block's except clause lies beyond it.
if os.path.exists(old_isccomp) or os.path.exists(route_doc):
    # BUG(review): bare string expression — this message is never printed.
    'Already prepared!'
    sys.exit(1)
namespace = subprocess.check_output(
    ['kubectl', 'config', 'view', '--minify', '-o',
     'jsonpath={..namespace}']).strip().decode()
print('current namespace: ' + namespace)
ocp_console = subprocess.check_output(
    ['kubectl', 'get', '-n', 'openshift-console', '-o',
     'jsonpath={..spec.host}', 'route', 'console'])
print('OCP console: ' + ocp_console)
# Derive the apps.* wildcard domain from the console host.
domain = re.sub('.*apps', 'apps', ocp_console)
debug_helper_url = '%s-debug-helper.%s' % (app, domain)
# Snapshot the current component so it can be restored later.
os.system('kubectl get isccomponent %s -o yaml >%s' % (app, old_isccomp))
# Swap in the debug-helper image and expose its ports.
patch('isccomponent', app,
      {'spec': {'action': {'service': {'ports': [3200, 12424],
                                       'proto': 'http'},
                           'image': {'repository':
                                     'gsturov/%s-with-debug-helper' % app,
                                     'tag': 'latest'}}}})
# Bump the sequence label to force a redeploy.
patch('iscsequence', app,
      {'spec': {'labels': {'generation': '%s' % uuid.uuid1()}}})
while True:
    s = subprocess.check_output(['kubectl', 'get', 'deployment', app, '-o',
                                 'yaml']).strip().decode()
    if 'gsturov' in s:
        break
    else:
        print('waiting for the deployment to get updated...')
        time.sleep(2)
time.sleep(2)
create_from_template('debug-helper-route-template.yaml', route_doc,
                     app = app, namespace = namespace,
                     debug_helper_url = debug_helper_url)
# Poll the helper until it answers; the matching except clause (presumably
# retry-on-connection-error) is outside the visible chunk.
while True:
    try:
        status = urllib.urlopen('http://%s' % debug_helper_url).read()
        if 'GET' in status:
            break
#!/usr/bin/python
# Restore script: undo the debug-helper preparation for the app by
# replacing the saved isccomponent snapshot, forcing a redeploy, and
# deleting the debug-helper route.
app = 'car-connector-config'

import os, sys, subprocess, re, json, time, uuid, urllib

from util import patch

old_isccomp = 'old-%s.yaml' % app
route_doc = '%s-debug-helper-route.yaml' % app

if not os.path.exists(old_isccomp) and not os.path.exists(route_doc):
    # Fix: this message was a bare string expression and was never shown;
    # wrap it in print so the user learns why the script exits.
    print('Cannot restore: no data!')
    sys.exit(1)

if os.path.exists(old_isccomp):
    # Restore the saved component definition and force a redeploy by
    # bumping the sequence generation label.
    os.system('kubectl replace --force -f %s' % old_isccomp)
    patch('iscsequence', app,
          {'spec': {'labels': {'generation': '%s' % uuid.uuid1()}}})
    os.remove(old_isccomp)

if os.path.exists(route_doc):
    # Remove the debug-helper route created during preparation.
    os.system('kubectl delete -f %s' % route_doc)
    os.remove(route_doc)