def __init__(self, name, start_calico=True, dind=True):
    """Set up a test host, optionally as a docker-in-docker container.

    :param name: Name of the host; also the container name when dind=True.
    :param start_calico: When True, start the calico node and verify the
        driver came up.
    :param dind: When True, run a privileged "calico/dind" container with
        a consul-backed docker daemon; otherwise use the local daemon.
    """
    self.name = name
    self.dind = dind
    self.workloads = set()

    # This variable is used to assert on destruction that this object was
    # cleaned up. If not used as a context manager, users of this object
    self._cleaned = False

    if dind:
        # Remove any stale container with the same name (rc 1 = not found).
        docker.rm("-f", self.name, _ok_code=[0, 1])
        docker.run("--privileged",
                   "-v", os.getcwd() + ":/code",
                   "--name", self.name,
                   "-e", "DOCKER_DAEMON_ARGS="
                         "--kv-store=consul:%s:8500" % utils.get_ip(),
                   "-tid", "calico/dind")
        self.ip = docker.inspect(
            "--format", "{{ .NetworkSettings.IPAddress }}",
            self.name).stdout.rstrip()
        self.ip6 = docker.inspect(
            "--format", "{{ .NetworkSettings.GlobalIPv6Address }}",
            self.name).stdout.rstrip()

        # Make sure docker is up
        docker_ps = partial(self.execute, "docker ps")
        retry_until_success(docker_ps, ex_class=CalledProcessError)
        self.execute(
            "docker load --input /code/calico_containers/calico-node.tar && "
            "docker load --input /code/calico_containers/busybox.tar")
    else:
        self.ip = get_ip()

    if start_calico:
        self.start_calico_node()
        self.assert_driver_up()
def _oneNodeSwarmThread():
    """Create a swarm manager in dk and also connect dk as a node."""
    options = optionsFromClient(dk)
    if options:
        dockersh.run(options,
                     '-p 2386:2376',
                     'swarm', 'manage',
                     '172.17.0.28:2376')
def __init__(self, name, start_calico=True, dind=True):
    """Set up a test host, optionally as a docker-in-docker container.

    :param name: Name of the host; also the container name when dind=True.
    :param start_calico: When True, start the calico node after setup.
    :param dind: When True, run a privileged "calico/dind" container;
        otherwise use the local docker daemon.
    """
    self.name = name
    self.dind = dind
    self.workloads = set()

    # This variable is used to assert on destruction that this object was
    # cleaned up. If not used as a context manager, users of this object
    self._cleaned = False

    if dind:
        # TODO use pydocker
        # Remove any stale container with the same name (rc 1 = not found).
        docker.rm("-f", self.name, _ok_code=[0, 1])
        docker.run("--privileged",
                   "-v", os.getcwd() + ":/code",
                   "--name", self.name,
                   "-tid", "calico/dind")
        self.ip = docker.inspect(
            "--format", "{{ .NetworkSettings.IPAddress }}",
            self.name).stdout.rstrip()
        self.ip6 = docker.inspect(
            "--format", "{{ .NetworkSettings.GlobalIPv6Address }}",
            self.name).stdout.rstrip()

        # Make sure docker is up
        docker_ps = partial(self.execute, "docker ps")
        retry_until_success(docker_ps, ex_class=CalledProcessError,
                            retries=100)
        self.execute(
            "docker load --input /code/calico_containers/calico-node.tar && "
            "docker load --input /code/calico_containers/busybox.tar")
    else:
        self.ip = get_ip()

    if start_calico:
        self.start_calico_node()
def __build_base_image(self, config):
    """Build the base docker image for the test run, reusing a cached one.

    If an image matching the configured container tag already exists,
    nothing is done; otherwise a container is built from
    ``config.base_image`` by running the configured build commands and
    committed as ``nose-docker:<tag>``.

    :param config: Configuration object providing ``get_container_tag()``,
        ``base_image`` and ``build_commands``.
    """
    # This does not seem to be the right way to get the container tag?
    self.container_tag = config.get_container_tag()
    all_images = docker.images(a=True)
    # BUG FIX: removed a leftover `import pdb; pdb.set_trace()` debugging
    # breakpoint that would halt every run here.
    if self.container_tag in all_images:
        return
    print("Container not found or changes in watched files. "
          "Rebuilding base container (%s)..." % self.container_tag)
    base_name = 'nose-docker-base-%s' % self.container_tag
    all_containers = docker.ps(a=True)
    if base_name in all_containers:
        docker.rm('-f', base_name)
    docker.run('--name=%s' % base_name,
               '-v', '%s:/app' % abspath(os.curdir),
               config.base_image, '/bin/bash',
               c="cd /app && %s" % (" && ".join(config.build_commands)),
               _out=sys.stdout)
    docker.commit(base_name, 'nose-docker:%s' % self.container_tag)
def __init__(self, name, start_calico=True, as_num=None):
    """
    Create a container using an image made for docker-in-docker.
    Load saved images into it.

    :param name: Name of the host container.
    :param start_calico: When True, start the calico node and check that
        powerstrip came up.
    :param as_num: BGP AS number to record on the host.
    """
    self.name = name
    # BUG FIX: was `self.as_num = None`, which silently discarded the
    # caller-supplied `as_num` argument.
    self.as_num = as_num
    pwd = sh.pwd().stdout.rstrip()
    docker.run("--privileged", "-v", pwd + ":/code",
               "--name", self.name, "-tid", "jpetazzo/dind")

    # Since `calicoctl node` doesn't fix ipv6 forwarding and module
    # loading, we must manually fix it
    self.calicoctl("checksystem --fix")

    self.ip = docker.inspect("--format",
                             "{{ .NetworkSettings.IPAddress }}",
                             self.name).stdout.rstrip()
    self.ip6 = docker.inspect("--format",
                              "{{ .NetworkSettings.GlobalIPv6Address }}",
                              self.name).stdout.rstrip()

    # Make sure docker is up
    docker_ps = partial(self.execute, "docker ps")
    retry_until_success(docker_ps, ex_class=ErrorReturnCode)
    self.execute("docker load --input /code/calico_containers/calico-node.tar")
    self.execute("docker load --input /code/calico_containers/busybox.tar")
    if start_calico:
        self.start_calico_node()
        self.assert_powerstrip_up()
def __build_base_image(self, config):
    """Build the base docker image for the test run, reusing a cached one.

    If an image matching the configured container tag already exists,
    nothing is done; otherwise a container is built from
    ``config.base_image`` by running the configured build commands and
    committed as ``nose-docker:<tag>``.

    :param config: Configuration object providing ``get_container_tag()``,
        ``base_image`` and ``build_commands``.
    """
    # This does not seem to be the right way to get the container tag?
    self.container_tag = config.get_container_tag()
    all_images = docker.images(a=True)
    # BUG FIX: removed a leftover `import pdb; pdb.set_trace()` debugging
    # breakpoint that would halt every run here.
    if self.container_tag in all_images:
        return
    print("Container not found or changes in watched files. "
          "Rebuilding base container (%s)..." % self.container_tag)
    base_name = 'nose-docker-base-%s' % self.container_tag
    all_containers = docker.ps(a=True)
    if base_name in all_containers:
        docker.rm('-f', base_name)
    docker.run('--name=%s' % base_name,
               '-v', '%s:/app' % abspath(os.curdir),
               config.base_image, '/bin/bash',
               c="cd /app && %s" % (" && ".join(config.build_commands)),
               _out=sys.stdout)
    docker.commit(base_name, 'nose-docker:%s' % self.container_tag)
def _oneNodeSwarmThread():
    """Create a swarm manager in dk and also connect dk as a node."""
    options = optionsFromClient(dk)
    if not options:
        return
    dockersh.run(options,
                 '-p 2386:2376',
                 'swarm', 'manage',
                 '172.17.0.28:2376')
def main(dataset, standalone=False):
    """Start the populated kolab container for the given dataset.

    :param dataset: Dataset name used to pick the populated image tag.
    :param standalone: Forwarded to ports() to choose port publishing.
    """
    run_args = [
        "-d",
        "-h", settings.HOSTNAME,
        "-v", "/sys/fs/cgroup:/sys/fs/cgroup:ro",
    ]
    run_args.extend(ports(standalone))
    run_args.append(settings.kolabimagename(settings.populatedTag(dataset)))
    docker.run(*run_args)
def main(dataset):
    """Start the populated kolab container with its service ports published."""
    image = settings.kolabimagename(settings.populatedTag(dataset))
    args = ["-d"]
    # Publish the standard kolab service ports (http, imap, https,
    # submission, sieve).
    for mapping in ("80:80", "143:143", "443:443", "587:587", "4190:4190"):
        args += ["-p", mapping]
    args += ["-h", settings.HOSTNAME,
             "-v", "/sys/fs/cgroup:/sys/fs/cgroup:ro",
             image]
    docker.run(*args)
def run_test_in_docker(self, test, full_name, result):
    """Run one test inside the prepared docker image and record the outcome.

    A non-zero nosetests exit makes docker.run raise; the xunit XML is
    recovered from the captured stdout and parsed for failures/errors.
    """
    try:
        exit_with_proper_code = 'EXIT=$?; cat /app/nosetests.xml; exit $EXIT'
        xml = docker.run(
            '--rm',
            '-v', '%s:/app' % abspath(os.curdir),
            'nose-docker:%s' % self.container_tag,
            '/bin/bash',
            c="cd /app && echo 'running tests for %s...' && nosetests --with-xunit %s; %s" % (
                full_name, full_name, exit_with_proper_code),
        )
        result.addSuccess(test)
    except sh.ErrorReturnCode:
        exc = sys.exc_info()[1]
        # Everything before the XML prolog is test chatter; strip it.
        xml = exc.stdout[exc.stdout.index('<?xml'):]
        root = etree.fromstring(xml)
        failures = FAILURE_SELECTOR(root)
        if failures:
            message = su.unescape(failures[0].text).replace('\\n', '\n')
            result.addFailure(test, message)
        errors = ERROR_SELECTOR(root)
        if errors:
            result.addError(test, su.unescape(errors[0].text))
    finally:
        result.testsRun += 1
def run_test_in_docker(self, test, full_name, result):
    """Run one test inside the prepared docker image and record the outcome.

    A non-zero nosetests exit makes docker.run raise; the xunit XML is
    recovered from the captured stdout and parsed for failures/errors.
    """
    try:
        exit_with_proper_code = 'EXIT=$?; cat /app/nosetests.xml; exit $EXIT'
        xml = docker.run(
            '--rm',
            '-v', '%s:/app' % abspath(os.curdir),
            'nose-docker:%s' % self.container_tag,
            '/bin/bash',
            c="cd /app && echo 'running tests for %s...' && nosetests --with-xunit %s; %s" % (
                full_name, full_name, exit_with_proper_code),
        )
        result.addSuccess(test)
    except sh.ErrorReturnCode:
        exc = sys.exc_info()[1]
        # Everything before the XML prolog is test chatter; strip it.
        xml = exc.stdout[exc.stdout.index('<?xml'):]
        root = etree.fromstring(xml)
        failures = FAILURE_SELECTOR(root)
        if failures:
            message = su.unescape(failures[0].text).replace('\\n', '\n')
            result.addFailure(test, message)
        errors = ERROR_SELECTOR(root)
        if errors:
            result.addError(test, su.unescape(errors[0].text))
    finally:
        result.testsRun += 1
def main():
    """Build the docs test image, generate the docs inside it, then render
    the local templates into the output directory.

    Raises AssertionError if the image build does not yield exactly one
    image or the expected doc files are missing afterwards.
    """
    target_directory = prepare_target()
    output_path = pathlib.Path('./docs').resolve()
    test_matrix = [{
        "DIST": "centos7",
        "PYTHON": "3.6",
        "IMAGE_IDENTIFIER": "autotest",
        "SUITE": "docs",
        "RUN_HTTPD": False,
    }]
    build_images = sh.Command(f"{target_directory}/tools/test/build_images.py")
    build_out = build_images(
        # FIX: dropped extraneous f-prefix from a literal with no
        # placeholders (ruff F541).
        "-o", "list", "etc/docker/test",
        _cwd=target_directory,
        _in=json.dumps(test_matrix),
        _err=sys.stderr,
    )
    image_list = json.loads(build_out.stdout)
    assert len(image_list) == 1, "should have returned exactly one image"
    docker.run(
        "--volume", f"{output_path}:/opt/rucio/docs:Z",
        "--env", "SUITE=docs",
        "--env", "RUCIO_HOME=/usr/local/src/rucio",
        image_list[0],
        "sh", "-c", "tools/test/install_script.sh && tools/generate_doc.py",
        _in=sys.stdin,
        _out=sys.stdout,
        _err=sys.stderr,
    )
    # assert generated doc
    assert os.path.exists("docs/rucio_client_api.md")
    assert os.path.exists("docs/rucio_rest_api.md")
    assert os.path.exists("docs/bin/")
    # render templates
    templates_dir: str = os.path.join(os.path.dirname(__file__), "templates")
    assert os.path.exists(templates_dir)
    render_templates(templates_dir, output_path)
def run_command_in_docker_container_and_return_output(self, command: list,
                                                      *args, **kwargs):
    """
    Run a command (with arguments) in a fresh docker container and
    return its output.

    :param command: A list of strings, e.g. ["echo", "hello"].
    :param args: NOTE(review): accepted but never forwarded to docker.run
        (only **kwargs is) — confirm whether callers rely on this.
    :return: The command output as a string.
    """
    # FIX: annotation was the invalid form `[str]` (a list object, not a
    # type); use the builtin `list` instead.
    docker_command = docker.run("--rm=true", "--cap-add=SYS_PTRACE",
                                self.docker_image, command,
                                **kwargs)  # type: sh.RunningCommand
    return str(docker_command)
def start_etcd(self):
    """
    Start the single-node etcd cluster.

    The etcd process runs within its own container, outside the host
    containers. It uses port mapping and the base machine's IP to
    communicate.
    """
    client_url = "http://%s:2379" % self.ip
    peer_url = "http://%s:2380" % self.ip
    docker.run(
        "--detach",
        "--publish", "2379:2379",
        "--publish", "2380:2380",
        "--name", "etcd",
        "quay.io/coreos/etcd:v2.0.11",
        name="calico",
        advertise_client_urls=client_url,
        listen_client_urls="http://0.0.0.0:2379",
        initial_advertise_peer_urls=peer_url,
        listen_peer_urls="http://0.0.0.0:2380",
        initial_cluster_token="etcd-cluster-2",
        initial_cluster="calico=%s" % peer_url,
        initial_cluster_state="new",
    )
def main(dataset): tmpname="kolab/kolabtestcontainer:tmppopulated" imagename="kolab/kolabtestcontainer:populated-"+dataset basedir = "{c.SCRIPT_DIR}/kolabpopulated".format(c=config) print("Building tmpcontainer...") docker.build(settings.dockerCacheString(), "-t", tmpname, "{basedir}/.".format(basedir=basedir)) print("Starting tmpcontainer...") container = docker.run("-d", "-h", settings.HOSTNAME, "-v", "/sys/fs/cgroup:/sys/fs/cgroup:ro", "-v", "{basedir}/{dataset}/:/data/".format(basedir=basedir, dataset=dataset), '-v', "{c.SCRIPT_DIR}/kolab/populate/:/populate".format(c=config), tmpname).rstrip() try: # Wait for imap to become available on imaps://localhost:993 time.sleep(5) print "Populate OU..." docker("exec", container, "python2", "/populate/populate_ou.py", _out=sys.stdout) print "Populate users..." docker("exec", container, "python2", "/populate/populate_users.py", _out=sys.stdout) print "Populate resources..." docker("exec", container, "python2", "/populate/populate_resources.py", _out=sys.stdout) print("Running populate.sh...") docker("exec", container, "/data/populate.sh", _out=sys.stdout) # Give kolabd some time to create all mailboxes time.sleep(5) docker("exec", container, "patch", "-R", "/etc/kolab/kolab.conf", "/data/kolab.conf.diff", _out=sys.stdout) docker("exec", container, "patch", "-R", "/etc/roundcubemail/calendar.inc.php", "/data/calendar.inc.php.diff", _out=sys.stdout) docker("exec", container, "patch", "-R", "/etc/roundcubemail/config.inc.php", "/data/config.inc.php.diff", _out=sys.stdout) docker("exec", container, "patch", "-R", "/etc/roundcubemail/kolab_addressbook.inc.php", "/data/kolab_addressbook.inc.php.diff", _out=sys.stdout) docker("exec", container, "patch", "-R", "/etc/roundcubemail/kolab_auth.inc.php", "/data/kolab_auth.inc.php.diff", _out=sys.stdout) docker("exec", container, "patch", "-R", "/etc/roundcubemail/password.inc.php", "/data/password.inc.php.diff", _out=sys.stdout) print("Comitting results to: {}".format(imagename)) 
docker.commit(container, imagename) except: print("Unexpected error:", sys.exc_info()[0]) traceback.print_exc(file=sys.stdout) print("Failed to setup container") docker.stop(container) docker.rm(container)
def main(dataset):
    """Build a populated kolab image for `dataset` and commit it."""
    tmpname = "kolab/kolabtestcontainer:tmppopulated"
    imagename = "kolab/kolabtestcontainer:populated-" + dataset
    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))

    print("Building tmpcontainer...")
    docker.build("-t", tmpname, script_dir + "/kolabpopulated/.")

    print("Starting tmpcontainer...")
    container = docker.run(
        "-d",
        "-h", settings.HOSTNAME,
        "-v", "/sys/fs/cgroup:/sys/fs/cgroup:ro",
        "-v", script_dir + "/kolabpopulated/" + dataset + "/:/data/",
        tmpname).rstrip()

    # Wait for imap to become available on imaps://localhost:993
    time.sleep(5)
    print("Running populate.sh...")
    docker("exec", container, "/data/populate.sh", _out=process_output)

    print("Comitting results...")
    docker.commit(container, imagename)
    docker.stop(container)
    docker.rm(container)
def main():
    """Build the kolab base image: run setup-kolab in a temp container,
    apply the roundcube T243 fix, and commit the result."""
    tmpname = settings.kolabimagename("tmpbase")
    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))

    print("Building tmpcontainer...")
    docker("build", "-t", tmpname, script_dir + "/kolab/")

    print("Starting tmpcontainer...")
    print(script_dir + "/fixRoundcubeT243.sh:/usr/share/roundcubemail/fixRoundcubeT243.sh")
    container = docker.run(
        "-d",
        "-h", settings.HOSTNAME,
        "-v", "/sys/fs/cgroup:/sys/fs/cgroup:ro",
        "-v", script_dir + "/kolab/fixRoundcubeT243.sh:/usr/share/roundcubemail/fixRoundcubeT243.sh",
        tmpname).rstrip()

    print("Setting up kolab")
    docker("exec", container, "setup-kolab", "--default",
           "--timezone=" + settings.TIMEZONE,
           "--directory-manager-pwd=" + settings.LDAPPW,
           "--mysqlserver=new",
           _out=process_output)

    print("Fixing roundcube")
    docker("exec", container, "bash",
           "/usr/share/roundcubemail/fixRoundcubeT243.sh",
           _out=process_output)
    docker("exec", container, "systemctl", "restart", "httpd",
           _out=process_output)

    print("Comitting results...")
    docker.commit(container, settings.kolabimagename("base"))
    docker.stop(container)
    docker.rm(container)
def run_inference(self, docker_name, package: str, docker_args: List[str],
                  fuzzer_image: str, build_file: str,
                  inference_command_args: List[str],
                  timeout_per_package: float, qemu: bool = False):
    """Run the input-inference step for one package in docker.

    Builds (or reuses) the package image, toggles the -Q flag to match the
    recorded qemu mode, then runs the inference command with a timeout.

    :param docker_name: Container name used when stopping on SIGINT.
    :param package: Package to infer inputs for.
    :param docker_args: Arguments passed to `docker run` (mutated: a
        --cpus limit is prepended).
    :param fuzzer_image: Base fuzzer image used to build the package image.
    :param build_file: JSON file caching the built image name / qemu mode.
    :param inference_command_args: Arguments for the inference command
        (mutated: -Q may be added/removed).
    :param timeout_per_package: Timeout in seconds for the docker run.
    :param qemu: Build in qemu mode if the image must be (re)built.
    :return: True on success or recoverable termination, False on failure.
    """
    inference_command = None
    from celery.platforms import signals

    def int_handler(signum, frame):
        print("Int handler!")
        if inference_command is not None:
            try:
                # It should not take longer than 120 seconds to kill a
                # docker container, right????
                docker_command.stop(docker_name, _timeout=120)
            except sh.ErrorReturnCode:
                return True
            except sh.TimeoutException:
                # It took too long to kill the docker container - ignore
                # for now, we want to continue fuzzing.
                return True
            return True
        else:
            return True

    signals['INT'] = int_handler
    print("Now working on {0}".format(package))
    try:
        if os.path.exists(build_file):
            with open(build_file, "r") as jsonfp:
                build_dict = json.load(jsonfp)
            package_image_name = build_dict["docker_image_name"]
        else:
            package_image_name = package + "_" + str(uuid.uuid4())[:8]
            if not os.path.exists(os.path.dirname(build_file)):
                os.mkdir(os.path.dirname(build_file))
        # TODO: There is an issue with qemu here. Fix this!
        package_image_name = helpers.docker_builder.return_current_package_image(
            package=package, fuzzer_image=fuzzer_image,
            package_image=package_image_name,
            json_output_path=build_file, qemu=qemu)
        print("docker run", " ".join(docker_args), package_image_name,
              " ".join(str(x) for x in inference_command_args))
        with open(build_file) as build_filefp:
            build_dict = json.load(build_filefp)
        # Keep the -Q flag consistent with the recorded qemu build mode.
        if build_dict["qemu"] and "-Q" not in inference_command_args:
            inference_command_args.append("-Q")
        elif not build_dict["qemu"] and "-Q" in inference_command_args:
            inference_command_args.remove("-Q")
        docker_args.insert(0, '--cpus=1.0')
        inference_command = docker_command.run(
            docker_args, package_image_name, inference_command_args,
            _out=sys.stdout,
            _timeout=timeout_per_package)  # type: sh.RunningCommand
        if inference_command.exit_code != 0:
            # BUG FIX: message was printed with an unformatted "{0}"
            # placeholder and a stray positional argument.
            print("Something went wrong for package {0}".format(package))
            return False
        if not KEEP_IMAGES:
            docker_command.rmi("-f", package_image_name)
        print("Done! Returning True")
        return True
    except sh.ErrorReturnCode as e:
        print("Inference error:")
        print("STDOUT:\n", e.stdout.decode("utf-8"))
        print("STDERR:\n", e.stderr.decode("utf-8"))
        print("command line: {0}".format(e.full_cmd))
        logger.error("Inference error:")
        # BUG FIX: logging's lazy %-formatting needs placeholders; the
        # extra argument was previously lost / raised inside logging.
        logger.error("STDOUT:\n%s", e.stdout.decode("utf-8"))
        logger.error("STDERR:\n%s", e.stderr.decode("utf-8"))
        logger.error("command line: {0}".format(e.full_cmd))
        return False
    except sh.TimeoutException:
        print("Inferring {0} timed out... Next one!".format(package))
        return True
    except sh.SignalException_SIGKILL:
        print("Killed")
        return True
    return True
def build_and_commit(package: str, fuzzer_image: str,
                     json_output_path: str = None, qemu=False,
                     timeout=None) -> str:
    """
    Build a package inside a docker container and then commit the
    container to an image.

    :param package: Package name to build.
    :param fuzzer_image: Fuzzer base image to run the builder in.
    :param json_output_path: Optional path for a JSON record of the
        resulting image name, qemu mode and build time.
    :param qemu: Pass -Q to the builder.
    :param timeout: Seconds before the build is aborted.
    :return: The committed image name, or None on timeout/failure.
    """
    start = time.time()
    docker_image_name = package + "_" + str(uuid.uuid4())[:8]
    docker_container_name = str(uuid.uuid4())

    # Single arg list instead of two near-identical docker.run calls;
    # qemu mode only appends the -Q flag.
    builder_args = [
        '--cpus=0.90', "--privileged",
        "--name", docker_container_name,
        "--entrypoint", "python",
        fuzzer_image,
        "/inputinferer/configfinder/builder_wrapper.py",
        "-p", package,
    ]
    if qemu:
        builder_args.append("-Q")
    try:
        build_process = docker.run(
            *builder_args,
            _out=sys.stdout,
            _ok_code=[config_settings.BUILDER_BUILD_NORMAL,
                      config_settings.BUILDER_BUILD_FAILED,
                      config_settings.BUILDER_BUILD_QEMU],
            _timeout=timeout)  # type: sh.RunningCommand
    except sh.TimeoutException:
        print("Building {0} timed out!".format(package))
        return None

    exit_code = build_process.exit_code
    if exit_code == -1:
        # NOTE(review): sh exit codes are normally non-negative; this
        # looks like it was meant to test BUILDER_BUILD_FAILED — confirm.
        print("Failed to build image for package {0}, not commiting".format(
            package))
        return None

    docker.commit(docker_container_name, docker_image_name, _out=sys.stdout)
    end = time.time()
    if json_output_path is not None:
        json_dict = {"docker_image_name": docker_image_name}
        if exit_code == config_settings.BUILDER_BUILD_NORMAL:
            json_dict["qemu"] = False
        elif exit_code == config_settings.BUILDER_BUILD_QEMU:
            json_dict["qemu"] = True
        json_dict["time"] = end - start
        with open(json_output_path, "w") as json_output_fp:
            json.dump(json_dict, json_output_fp)
    docker.rm(docker_container_name)  # Remove the container after commit
    return docker_image_name
def run(package: str, configuration_dir: str, binary_name: str, qemu: bool,
        minimize: bool, timeout: float, fuzz_duration: float):
    """End-to-end pipeline for one pacman package: build, find the input
    vector, optionally minimize the seeds, then fuzz. Each stage runs in
    docker and is wrapped with `time` for reporting."""
    if os.path.exists(configuration_dir):
        print("Skipping {0}. Directory already exists".format(
            configuration_dir))
        return
    reset(package)
    print("Starting qemu={0},minimize={1},Fuzzing timeout={2}".format(
        qemu, minimize, timeout))
    start = time.time()
    container_name = package + "_build"  # +"_"+str(uuid.uuid4())[:8]
    image_name = package + "_image"  # +"_"+str(uuid.uuid4())[:8]
    timecommand = sh.Command("time")  # type: sh.Command

    # Stage 1: build the package inside the fuzzer image.
    build_args = [
        "--name", container_name, "--entrypoint", "python", "pacmanfuzzer",
        "/inputinferer/configfinder/builder_wrapper.py", "-p", package,
    ]
    if qemu:
        build_args.append("-Q")
    with timecommand(_with=True):
        print("Building")
        build_process = docker.run(build_args, _ok_code=[0, 1, 2],
                                   _out=sys.stdout,
                                   _err=sys.stderr)  # type: sh.RunningCommand
    if not qemu and build_process.exit_code == 2:
        print("WITHOUT QEMU: Failed")
        return
    with timecommand(_with=True):
        docker.commit([container_name, image_name], _out=sys.stdout,
                      _err=sys.stderr)

    # Stage 2: find the input vector.
    finder_args = [
        "--rm", "--cap-add=SYS_PTRACE",
        "-v", configuration_dir + ":/results",
        "--entrypoint", "python", image_name,
        "/inputinferer/configfinder/config_finder_for_pacman_package.py",
        "-p", package, "-v", "/results/",
    ]
    if qemu:
        finder_args.append("-Q")
    with timecommand(_with=True):
        print("Finding the input vector")
        input_process = docker.run(finder_args, _out=sys.stdout,
                                   _err=sys.stderr)
    print(input_process.cmd)

    with open(os.path.join(configuration_dir,
                           package + "/" + binary_name + ".json")) as binary_name_fp:
        config_dict = json.load(binary_name_fp)[0]
    seeds = config_dict["file_type"]
    parameter = config_dict["parameter"]
    binary_path = config_dict["binary_path"]
    result_dir = os.path.join(configuration_dir, package + "/" + binary_name)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)

    # Stage 3 (optional): minimize the seed set.
    if minimize:
        minimize_args = [
            "--rm", "--cap-add=SYS_PTRACE",
            "-v", configuration_dir + ":/results",
            "--entrypoint", "python", image_name,
            "/inputinferer/configfinder/controller.py", "minimize",
            "-p", package, "-v", "/results", "-s", seeds,
            "--parameter=" + parameter, "-b", binary_path,
            "-afile", binary_name + ".afl_config",
        ]
        if qemu:
            minimize_args.append("-Q")
        with timecommand(_with=True):
            print("Minimizing")
            docker.run(minimize_args, _out=sys.stdout, _err=sys.stderr)
        with open(os.path.join(configuration_dir,
                               package + "/" + binary_name + ".afl_config")) as afl_config_fp:
            seeds = json.load(afl_config_fp)["min_seeds_dir"]

    # Stage 4: fuzz.
    fuzz_args = [
        "--rm", "--cap-add=SYS_PTRACE",
        "-v", configuration_dir + ":/results",
        "--entrypoint", "python", image_name,
        "/inputinferer/configfinder/controller.py", "evalfuzz",
    ]
    if fuzz_duration:
        fuzz_args += ["-ft", fuzz_duration]
    if timeout:
        fuzz_args += ["-t", timeout]
    fuzz_args += [
        "-p", package, "-v", "/results", "-s", seeds,
        "--parameter=" + parameter, "-b", binary_path,
        "-afile", binary_name + ".afl_config",
    ]
    if qemu:
        fuzz_args.append("-Q")
    with timecommand(_with=True):
        print("Fuzzing")
        docker.run(fuzz_args, _out=sys.stdout, _err=sys.stderr)
    print("Done")
    end = time.time()
    print("Time elapsed: ", str(end - start))
def main(dataset): tmpname = "kolab/kolabtestcontainer:tmppopulated" imagename = "kolab/kolabtestcontainer:populated-" + dataset basedir = "{c.SCRIPT_DIR}/kolabpopulated".format(c=config) print("Building tmpcontainer...") docker.build("-t", tmpname, "{basedir}/.".format(basedir=basedir)) print("Starting tmpcontainer...") container = docker.run( "-d", "-h", settings.HOSTNAME, "-v", "/sys/fs/cgroup:/sys/fs/cgroup:ro", "-v", "{basedir}/{dataset}/:/data/".format(basedir=basedir, dataset=dataset), '-v', "{c.SCRIPT_DIR}/kolab/populate/:/populate".format(c=config), tmpname).rstrip() try: # Wait for imap to become available on imaps://localhost:993 time.sleep(5) print "Populate OU..." docker("exec", container, "python2", "/populate/populate_ou.py", _out=sys.stdout) print "Populate users..." docker("exec", container, "python2", "/populate/populate_users.py", _out=sys.stdout) print "Populate resources..." docker("exec", container, "python2", "/populate/populate_resources.py", _out=sys.stdout) print("Running populate.sh...") docker("exec", container, "/data/populate.sh", _out=sys.stdout) # Give kolabd some time to create all mailboxes time.sleep(5) docker("exec", container, "patch", "-R", "/etc/kolab/kolab.conf", "/data/kolab.conf.diff", _out=sys.stdout) docker("exec", container, "patch", "-R", "/etc/roundcubemail/calendar.inc.php", "/data/calendar.inc.php.diff", _out=sys.stdout) docker("exec", container, "patch", "-R", "/etc/roundcubemail/config.inc.php", "/data/config.inc.php.diff", _out=sys.stdout) docker("exec", container, "patch", "-R", "/etc/roundcubemail/kolab_addressbook.inc.php", "/data/kolab_addressbook.inc.php.diff", _out=sys.stdout) docker("exec", container, "patch", "-R", "/etc/roundcubemail/kolab_auth.inc.php", "/data/kolab_auth.inc.php.diff", _out=sys.stdout) docker("exec", container, "patch", "-R", "/etc/roundcubemail/password.inc.php", "/data/password.inc.php.diff", _out=sys.stdout) print("Comitting results to: {}".format(imagename)) docker.commit(container, 
imagename) except: print("Unexpected error:", sys.exc_info()[0]) traceback.print_exc(file=sys.stdout) print("Failed to setup container") docker.stop(container) docker.rm(container)
def run_minimizer(self, docker_name, package: str, docker_args: List[str],
                  fuzzer_image: str, build_file: str,
                  fuzzer_command_args: List[str],
                  timeout_per_package: float) -> (str, bool):
    """Run the seed-minimizer for one package inside docker.

    :param docker_name: Container name used when stopping on SIGINT.
    :param package: Package whose seeds are minimized.
    :param docker_args: Arguments passed to `docker run` (mutated: a
        --cpus limit is prepended).
    :param fuzzer_image: Base fuzzer image used to build the package image.
    :param build_file: JSON file caching the built image name.
    :param fuzzer_command_args: Arguments for the minimizer command.
    :param timeout_per_package: Unused here — the minimizer enforces its
        own timeouts internally.
    :return: Tuple (package, success/continue flag).
    """
    minimizer_command = None
    from celery.platforms import signals

    def int_handler(signum, frame):
        print("Int handler!")
        if minimizer_command is not None:
            try:
                # It should not take longer than 120 seconds to kill a
                # docker container, right????
                docker_command.stop(docker_name, _timeout=120)
            except sh.ErrorReturnCode:
                return package, True
            except sh.TimeoutException:
                # It took too long to kill the docker container - ignore
                # for now, we want to continue fuzzing.
                return package, True
            return package, True
        else:
            return package, True

    signals['INT'] = int_handler
    try:
        if os.path.exists(build_file):
            with open(build_file, "r") as jsonfp:
                build_dict = json.load(jsonfp)
            package_image_name = build_dict["docker_image_name"]
        else:
            package_image_name = package + "_" + str(uuid.uuid4())[:8]
        package_image_name = helpers.docker_builder.return_current_package_image(
            package=package, fuzzer_image=fuzzer_image,
            package_image=package_image_name, json_output_path=build_file)
        docker_args.insert(0, '--cpus=1.0')
        print("Invoking the minimizing docker")
        # No timeout here, the timeouts are built into the minimizer.
        minimizer_command = docker_command.run(
            docker_args, package_image_name, fuzzer_command_args,
            _out=sys.stdout, _bg=True)
        minimizer_command.wait()
        if minimizer_command.exit_code != 0:
            # BUG FIX: message was printed with an unformatted "{0}"
            # placeholder and a stray positional argument.
            print("Something went wrong for package {0}".format(package))
            return package, False
        print("Done! Returning True")
        return package, True
    except sh.ErrorReturnCode as e:
        print("Minimizer error:")
        print("STDOUT:\n", e.stdout.decode("utf-8"))
        print("STDERR:\n", e.stderr.decode("utf-8"))
        print("command line: {0}".format(e.full_cmd))
        return package, False
    except sh.TimeoutException:
        print("Minimizing {0} timed out... Next one!".format(package))
        return package, True
    except sh.SignalException_SIGKILL:
        print("Killed")
        return package, True
    return package, True
def run_fuzzer(self, docker_name, package: str, docker_args: List[str],
               base_image: str, build_file: str,
               fuzzer_command_args: List[str],
               timeout_per_package: float) -> (str, bool):
    """Run the fuzzer for one package inside docker, with a per-package
    timeout.

    :param docker_name: Container name used when stopping on SIGINT/timeout.
    :param package: Package to fuzz.
    :param docker_args: Arguments passed to `docker run`.
    :param base_image: Base fuzzer image used to build the package image.
    :param build_file: JSON file caching the built image name.
    :param fuzzer_command_args: Arguments for the fuzzer command.
    :param timeout_per_package: Timeout in seconds for the fuzzing run.
    :return: Tuple (package, success/continue flag).
    """
    fuzzer_command = None
    from celery.platforms import signals

    def int_handler(signum, frame):
        print("Int handler!")
        if fuzzer_command is not None:
            try:
                # It should not take longer than 120 seconds to kill a
                # docker container, right????
                docker_command.stop(docker_name, _timeout=120)
            except sh.ErrorReturnCode:
                return package, True
            except sh.TimeoutException:
                # It took too long to kill the docker container - ignore
                # for now, we want to continue fuzzing.
                return package, True
            return package, True
        else:
            return package, True

    signals['INT'] = int_handler
    try:
        if os.path.exists(build_file):
            with open(build_file, "r") as jsonfp:
                build_dict = json.load(jsonfp)
            package_image_name = build_dict["docker_image_name"]
        else:
            package_image_name = package + "_" + str(uuid.uuid4())[:8]
        # TODO: Limit build process to one cpu
        package_image_name = helpers.docker_builder.return_current_package_image(
            package=package, fuzzer_image=base_image,
            package_image=package_image_name, json_output_path=build_file)
        if package_image_name is None:
            # BUG FIX: previously returned bare `False`, breaking callers
            # that unpack the declared (package, flag) tuple.
            return package, False
        # docker_args.insert(0,'--cpus=0.90')
        print("Invoking the fuzzing docker")
        # TODO: This throws an exception in the background thread right
        # now, which seems to be a bug in sh:
        # https://github.com/amoffat/sh/issues/399. For now, we are
        # ignoring the issue.
        fuzzer_command = docker_command.run(
            docker_args, package_image_name, fuzzer_command_args,
            _out=sys.stdout, _bg=True, _timeout=timeout_per_package)
        fuzzer_command.wait()
        if fuzzer_command.exit_code != 0:
            # BUG FIX: message was printed with an unformatted "{0}"
            # placeholder and a stray positional argument.
            print("Something went wrong for package {0}".format(package))
            return package, False
        print("Done! Returning True")
        return package, True
    except sh.ErrorReturnCode as e:
        print("afl-fuzz error:")
        print("STDOUT:\n", e.stdout.decode("utf-8"))
        print("STDERR:\n", e.stderr.decode("utf-8"))
        print("command line: {0}".format(e.full_cmd))
        return package, False
    except sh.TimeoutException:
        print("Fuzzing {0} timed out... Next one!".format(package))
        try:
            docker_command.stop(docker_name)
        except sh.ErrorReturnCode:
            # Container is already removed
            pass
        return package, True
    except sh.SignalException_SIGKILL:
        print("Killed")
        return package, True
    return package, True