Example #1
    def output_after_run_set(self,
                             runSet,
                             cputime=None,
                             walltime=None,
                             energy={}):
        """
        The method output_after_run_set() stores the times of a run set in XML.
        @params cputime, walltime: accumulated times of the run set
        """

        self.add_values_to_run_set_xml(runSet, cputime, walltime, energy)

        # write results to files
        runSet.xml_file.replace(self._result_xml_to_string(runSet.xml))

        if len(runSet.blocks) > 1:
            for block in runSet.blocks:
                blockFileName = self.get_filename(runSet.name,
                                                  block.name + ".xml")
                util.write_file(
                    self._result_xml_to_string(
                        self.runs_to_xml(runSet, block.runs, block.name)),
                    blockFileName)
                self.all_created_files.append(blockFileName)

        self.txt_file.append(
            self.run_set_to_text(runSet, True, cputime, walltime, energy))
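
Every example on this page passes the content first, followed by one or more path components, to util.write_file. A minimal sketch of the helper that is consistent with these call sites (the real function in benchexec/util.py may differ in details):

import os

def write_file(content, *path):
    """Write content to the file at os.path.join(*path), overwriting it."""
    with open(os.path.join(*path), "w") as file:
        return file.write(content)
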
Example #2
    def kill_all_tasks(self):
        """
        Kill all tasks in this cgroup and all of its child cgroups forcefully.
        Additionally, the child cgroups will be deleted.
        """
        def kill_all_tasks_in_cgroup_recursively(cgroup, delete):
            for dirpath, dirs, _files in os.walk(cgroup, topdown=False):
                for subCgroup in dirs:
                    subCgroup = os.path.join(dirpath, subCgroup)
                    kill_all_tasks_in_cgroup(subCgroup, ensure_empty=delete)

                    if delete:
                        remove_cgroup(subCgroup)

            kill_all_tasks_in_cgroup(cgroup, ensure_empty=delete)

        # First, we go through all cgroups recursively while they are frozen and kill
        # all processes. This helps against fork bombs and prevents processes from
        # creating new subgroups while we are trying to kill everything.
        # But this is only possible if we have freezer, and all processes will stay
        # until they are thawed (so we cannot check for cgroup emptiness and we cannot
        # delete subgroups).
        if FREEZER in self.per_subsystem:
            cgroup = self.per_subsystem[FREEZER]
            freezer_file = os.path.join(cgroup, "freezer.state")

            util.write_file("FROZEN", freezer_file)
            kill_all_tasks_in_cgroup_recursively(cgroup, delete=False)
            util.write_file("THAWED", freezer_file)

        # Second, we go through all cgroups again, kill what is left,
        # check for emptiness, and remove subgroups.
        # Furthermore, we do this for all hierarchies, not only the one with freezer.
        for cgroup in self.paths:
            kill_all_tasks_in_cgroup_recursively(cgroup, delete=True)
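
kill_all_tasks_in_cgroup itself is not shown on this page. A hypothetical implementation for the cgroup v1 layout assumed above, where the member PIDs are listed in the cgroup's "tasks" file:

import os
import signal
import time

def kill_all_tasks_in_cgroup(cgroup, ensure_empty=True):
    # Hypothetical helper: SIGKILL every PID listed in <cgroup>/tasks and,
    # if ensure_empty is set, repeat until that file stays empty.
    tasks_file = os.path.join(cgroup, "tasks")
    while True:
        with open(tasks_file) as tasks:
            pids = [int(line) for line in tasks if line.strip()]
        for pid in pids:
            try:
                os.kill(pid, signal.SIGKILL)
            except ProcessLookupError:
                pass  # process already exited
        if not (ensure_empty and pids):
            return
        time.sleep(0.1)  # give the kernel time to reap the killed processes

Frozen processes act on signals only after thawing, which is why the first, frozen pass above calls this with ensure_empty (passed as delete) set to False.
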
Example #3
    def __init__(self, cgroups, pid_to_kill, callbackFn=lambda reason: None):
        super(KillProcessOnOomThread, self).__init__()
        self.name = "KillProcessOnOomThread-" + self.name
        self._finished = threading.Event()
        self._pid_to_kill = pid_to_kill
        self._cgroups = cgroups
        self._callback = callbackFn

        cgroup = cgroups[MEMORY]  # for raw access
        ofd = os.open(os.path.join(cgroup, "memory.oom_control"), os.O_WRONLY)
        try:
            # Important to use CLOEXEC, otherwise the benchmarked tool inherits
            # the file descriptor.
            self._efd = _libc.eventfd(0, _EFD_CLOEXEC)

            try:
                util.write_file(f"{self._efd} {ofd}", cgroup, "cgroup.event_control")

                # If everything worked, disable Kernel-side process killing.
                # This is not allowed if memory.use_hierarchy is enabled,
                # but we don't care.
                try:
                    os.write(ofd, b"1")
                except OSError as e:
                    logging.debug(
                        "Failed to disable kernel-side OOM killer: error %s (%s)",
                        e.errno,
                        e.strerror,
                    )
            except OSError as e:
                os.close(self._efd)
                raise e
        finally:
            os.close(ofd)
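
The thread's run() method is not part of this example. A sketch of how it might consume the notification, assuming the cgroup v1 convention that the kernel makes the eventfd readable when the cgroup runs out of memory:

import os
import signal

# Hypothetical run() method of KillProcessOnOomThread, matching the
# constructor above: block until an OOM event arrives, then notify the
# caller and kill the benchmarked process.
def run(self):
    os.read(self._efd, 8)  # blocks, then returns an 8-byte event counter
    if not self._finished.is_set():
        self._callback("memory")
        os.kill(self._pid_to_kill, signal.SIGKILL)
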
Example #4
def execute_benchmark(benchmark, output_handler):
    if not _justReprocessResults:
        # build input for cloud
        (cloudInput, numberOfRuns) = getCloudInput(benchmark)
        cloudInputFile = os.path.join(benchmark.log_folder, 'cloudInput.txt')
        util.write_file(cloudInput, cloudInputFile)
        output_handler.all_created_files.append(cloudInputFile)

        # install cloud and dependencies
        ant = subprocess.Popen(["ant", "resolve-benchmark-dependencies"],
                               cwd=_ROOT_DIR,
                               shell=util.is_windows())
        ant.communicate()
        ant.wait()

        # start cloud and wait for exit
        logging.debug("Starting cloud.")
        if benchmark.config.debug:
            logLevel = "FINER"
        else:
            logLevel = "INFO"
        heapSize = benchmark.config.cloudClientHeap + numberOfRuns // 10  # 100 MB and 100 kB per run
        lib = os.path.join(_ROOT_DIR, "lib", "java-benchmark", "vcloud.jar")
        cmdLine = [
            "java", "-Xmx" + str(heapSize) + "m", "-jar", lib, "benchmark",
            "--loglevel", logLevel
        ]
        if benchmark.config.cloudMaster:
            cmdLine.extend(["--master", benchmark.config.cloudMaster])
        if benchmark.config.debug:
            cmdLine.extend(["--print-new-files", "true"])

        walltime_before = time.time()

        cloud = subprocess.Popen(cmdLine,
                                 stdin=subprocess.PIPE,
                                 shell=util.is_windows())
        try:
            (out, err) = cloud.communicate(cloudInput.encode('utf-8'))
        except KeyboardInterrupt:
            stop()
        returnCode = cloud.wait()

        walltime_after = time.time()
        usedWallTime = walltime_after - walltime_before

        if returnCode:
            if STOPPED_BY_INTERRUPT:
                output_handler.set_error('interrupted')
            else:
                errorMsg = "Cloud return code: {0}".format(returnCode)
                logging.warning(errorMsg)
                output_handler.set_error(errorMsg)
    else:
        returnCode = 0
        usedWallTime = None

    handleCloudResults(benchmark, output_handler, usedWallTime)

    return returnCode
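
Regarding the heap-size computation above: benchmark.config.cloudClientHeap is presumably a value in megabytes (100 by default, matching the "100 MB" in the comment), and numberOfRuns // 10 adds one megabyte per ten runs, i.e. the advertised 100 kB per run.
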
Example #5
    def generate_tables_and_compare_csv(self, args, table_prefix, result_prefix=None,
                                        diff_prefix=None, result_diff_prefix=None,
                                        expected_counts=None):
        output, csv_file, csv_diff_file = \
            self.generate_tables_and_check_produced_files(args, table_prefix, diff_prefix)

        generated = util.read_file(csv_file)
        expected_file = [here, 'expected', (result_prefix or table_prefix) + '.csv']
        if OVERWRITE_MODE:
            util.write_file(generated, *expected_file)
        else:
            self.assertMultiLineEqual(generated, util.read_file(*expected_file))

        if diff_prefix:
            generated_diff = util.read_file(csv_diff_file)
            expected_diff_file = [here, 'expected', (result_diff_prefix or diff_prefix) + '.csv']
            if OVERWRITE_MODE:
                util.write_file(generated_diff, *expected_diff_file)
            else:
                self.assertMultiLineEqual(generated_diff, util.read_file(*expected_diff_file))

        if expected_counts:
            # output of table-generator should end with statistics about regressions
            counts = output[output.find('REGRESSIONS'):].strip()
            self.assertMultiLineEqual(expected_counts, counts)
        else:
            self.assertNotIn('REGRESSIONS', output)
            self.assertNotIn('STATS', output)
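
OVERWRITE_MODE is defined elsewhere in this test module; when set, the tests regenerate the expected files instead of comparing against them. A hypothetical definition (the real switch may look different):

import os

# Hypothetical: run the tests with OVERWRITE_EXPECTED_FILES=1 to regenerate
# the expected CSV files instead of asserting against them.
OVERWRITE_MODE = os.environ.get("OVERWRITE_EXPECTED_FILES") == "1"
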
Example #6
    def append(self, newContent, keep=True):
        """
        Add content to the represented file.
        If keep is False, the new content will be forgotten during the next call
        to this method.
        """
        content = self.__content + newContent
        if keep:
            self.__content = content

        if self.__needsRewrite:
            # Replace the content of the file.
            # A temporary file is used to avoid loss of data through an interrupt.
            tmpFilename = self.__filename + ".tmp"

            util.write_file(content, tmpFilename)

            os.rename(tmpFilename, self.__filename)
        else:
            with open(self.__filename, "a") as file:
                file.write(newContent)

        self.__needsRewrite = not keep
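
Hypothetical usage of this class (its constructor appears in Example #16 below), showing the keep semantics: content appended with keep=False is dropped again by the next call:

writer = FileWriter("results.txt", "header\n")       # hypothetical file name
writer.append("intermediate results\n", keep=False)  # written, but forgotten
writer.append("final results\n")                     # replaces the previous append
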
Example #7
    def append(self, newContent, keep=True):
        """
        Add content to the represented file.
        If keep is False, the new content will be forgotten during the next call
        to this method.
        """
        content = self.__content + newContent
        if keep:
            self.__content = content

        if self.__needsRewrite:
            # Replace the content of the file.
            # A temporary file is used to avoid loss of data through an interrupt.
            tmpFilename = self.filename + ".tmp"

            util.write_file(content, tmpFilename)

            os.rename(tmpFilename, self.filename)
        else:
            with open(self.filename, "a") as file:
                file.write(newContent)

        self.__needsRewrite = not keep
Example #8
 def test_nonwritable_file(self):
     temp_file = os.path.join(self.base_dir, "tempfile")
     util.write_file("", temp_file)
     os.chmod(temp_file, 0)
     util.rmtree(self.base_dir)
     self.assertFalse(os.path.exists(self.base_dir),
                      "Failed to remove directory with non-writable file")
Example #9
 def create_and_delete_directory(self, mode):
     tempdir = os.path.join(self.base_dir, "tempdir")
     os.mkdir(tempdir)
     util.write_file("", tempdir, "tempfile")
     os.chmod(tempdir, mode)
     util.rmtree(self.base_dir)
     self.assertFalse(os.path.exists(self.base_dir), "Failed to remove directory")
Example #10
    def __init__(self, cgroups, kill_process_fn, process, callbackFn=lambda reason: None):
        super(KillProcessOnOomThread, self).__init__()
        self.daemon = True
        self._finished = threading.Event()
        self._process = process
        self._cgroups = cgroups
        self._callback = callbackFn
        self._kill_process = kill_process_fn

        cgroup = cgroups[MEMORY]  # for raw access
        ofd = os.open(os.path.join(cgroup, 'memory.oom_control'), os.O_WRONLY)
        try:
            # Important to use CLOEXEC, otherwise the benchmarked tool inherits
            # the file descriptor.
            self._efd = _libc.eventfd(0, _EFD_CLOEXEC)

            try:
                util.write_file('{} {}'.format(self._efd, ofd),
                                cgroup, 'cgroup.event_control')

                # If everything worked, disable Kernel-side process killing.
                # This is not allowed if memory.use_hierarchy is enabled,
                # but we don't care.
                try:
                    os.write(ofd, '1'.encode('ascii'))
                except OSError as e:
                    logging.debug("Failed to disable kernel-side OOM killer: error %s (%s)",
                                  e.errno, e.strerror)
            except EnvironmentError as e:
                os.close(self._efd)
                raise e
        finally:
            os.close(ofd)
Example #11
 def _prepareInputfile(self, inputfile):
     with open(inputfile, "r") as f:
         content = f.read()
     content = content.replace("ERROR;", "ERROR_LOCATION;").replace(
         "ERROR:", "ERROR_LOCATION:").replace(
             "errorFn();", "goto ERROR_LOCATION; ERROR_LOCATION:;")
     newFilename = inputfile + "_acsar.c"
     # Note: util.write_file takes the content first, then the target path.
     util.write_file(content, newFilename)
     return newFilename
Example #12
 def set_value(self, subsystem, option, value):
     """
     Write the given value for the given subsystem.
     Do not include the subsystem name in the option name.
     Only call this method if the given subsystem is available.
     """
     assert subsystem in self
     util.write_file(str(value), self.per_subsystem[subsystem], subsystem + '.' + option)
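
A hypothetical call, assuming MEMORY is the subsystem constant "memory": this writes the string "1073741824" to <cgroup>/memory.limit_in_bytes, the cgroup v1 file holding the memory limit.

cgroups.set_value(MEMORY, "limit_in_bytes", 1024 * 1024 * 1024)
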
Example #13
 def _prepareInputfile(self, inputfile):
     with open(inputfile, "r") as f:
         content = f.read()
     content = content.replace(
         "ERROR;", "ERROR_LOCATION;").replace(
         "ERROR:", "ERROR_LOCATION:").replace(
         "errorFn();", "goto ERROR_LOCATION; ERROR_LOCATION:;")
     newFilename = inputfile + "_acsar.c"
     # Note: util.write_file takes the content first, then the target path.
     util.write_file(content, newFilename)
     return newFilename
Example #14
def execute_benchmark(benchmark, output_handler):
    if not _justReprocessResults:
        # build input for cloud
        (cloudInput, numberOfRuns) = getCloudInput(benchmark)
        cloudInputFile = os.path.join(benchmark.log_folder, 'cloudInput.txt')
        util.write_file(cloudInput, cloudInputFile)
        output_handler.all_created_files.append(cloudInputFile)

        # install cloud and dependencies
        ant = subprocess.Popen(["ant", "resolve-benchmark-dependencies"],
                               cwd=_ROOT_DIR,
                               shell=util.is_windows())
        ant.communicate()
        ant.wait()

        # start cloud and wait for exit
        logging.debug("Starting cloud.")
        if benchmark.config.debug:
            logLevel = "FINER"
        else:
            logLevel = "INFO"
        heapSize = benchmark.config.cloudClientHeap + numberOfRuns // 10  # 100 MB and 100 kB per run
        lib = os.path.join(_ROOT_DIR, "lib", "java-benchmark", "vcloud.jar")
        cmdLine = ["java", "-Xmx"+str(heapSize)+"m", "-jar", lib, "benchmark", "--loglevel", logLevel]
        if benchmark.config.cloudMaster:
            cmdLine.extend(["--master", benchmark.config.cloudMaster])
        if benchmark.config.debug:
            cmdLine.extend(["--print-new-files", "true"])

        walltime_before = time.time()

        cloud = subprocess.Popen(cmdLine, stdin=subprocess.PIPE, shell=util.is_windows())
        try:
            (out, err) = cloud.communicate(cloudInput.encode('utf-8'))
        except KeyboardInterrupt:
            stop()
        returnCode = cloud.wait()

        walltime_after = time.time()
        usedWallTime = walltime_after - walltime_before

        if returnCode:
            if STOPPED_BY_INTERRUPT:
                output_handler.set_error('interrupted')
            else:
                errorMsg = "Cloud return code: {0}".format(returnCode)
                logging.warning(errorMsg)
                output_handler.set_error(errorMsg)
    else:
        returnCode = 0
        usedWallTime = None

    handleCloudResults(benchmark, output_handler, usedWallTime)

    return returnCode
Example #15
def setup_container_system_config(basedir):
    """Create a minimal system configuration for use in a container.
    @param basedir: The root directory of the container as bytes.
    """
    etc = os.path.join(basedir, b"etc")
    if not os.path.exists(etc):
        os.mkdir(etc)

    for file, content in CONTAINER_ETC_FILE_OVERRIDE.items():
        util.write_file(content, etc, file)

    os.symlink(b"/proc/self/mounts", os.path.join(etc, b"mtab"))
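
CONTAINER_ETC_FILE_OVERRIDE is not shown here. Since basedir and etc are bytes, its keys must be bytes as well; a hypothetical shape (BenchExec's real entries differ):

# Hypothetical: file names below etc/ mapped to their replacement content.
CONTAINER_ETC_FILE_OVERRIDE = {
    b"hostname": "localhost\n",
    b"passwd": "root:x:0:0:root:/root:/bin/bash\n",
}
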
Example #16
    def __init__(self, filename, content):
        """
        The constructor of FileWriter creates the file.
        If the file exists, it will be OVERWRITTEN without a message!
        """

        self.__filename = filename
        self.__needsRewrite = False
        self.__content = content

        # Open file with "w" at least once so it will be overwritten.
        util.write_file(content, self.__filename)
Example #17
    def __init__(self, filename, content):
        """
        The constructor of FileWriter creates the file.
        If the file exists, it will be OVERWRITTEN without a message!
        """

        self.filename = filename
        self.__needsRewrite = False
        self.__content = content

        # Open file with "w" at least once so it will be overwritten.
        util.write_file(content, self.filename)
Example #18
def setup_container_system_config(basedir, mountdir=None):
    """Create a minimal system configuration for use in a container.
    @param basedir: The directory where the configuration files should be placed as bytes.
    @param mountdir: If present, bind mounts to the configuration files will be added below
        this path (given as bytes).
    """
    etc = os.path.join(basedir, b"etc")
    if not os.path.exists(etc):
        os.mkdir(etc)

    for file, content in CONTAINER_ETC_FILE_OVERRIDE.items():
        # Create "basedir/etc/file"
        util.write_file(content, etc, file)
        if mountdir:
            # Create bind mount to "mountdir/etc/file"
            make_bind_mount(
                os.path.join(etc, file), os.path.join(mountdir, b"etc", file), private=True)

    os.symlink(b"/proc/self/mounts", os.path.join(etc, b"mtab"))
Example #19
def setup_container_system_config(basedir, mountdir, dir_modes):
    """Create a minimal system configuration for use in a container.
    @param basedir: The directory where the configuration files should be placed (bytes)
    @param mountdir: The base directory of the mount hierarchy in the container (bytes).
    @param dir_modes: All directory modes in the container.
    """
    # If overlayfs is not used for /etc, we need additional bind mounts
    # for files in /etc that we want to override, like /etc/passwd
    symlinks_required = determine_directory_mode(dir_modes,
                                                 b"/etc") != DIR_OVERLAY

    etc = os.path.join(basedir, b"etc")
    if not os.path.exists(etc):
        os.mkdir(etc)

    for file, content in CONTAINER_ETC_FILE_OVERRIDE.items():
        # Create "basedir/etc/file"
        util.write_file(content, etc, file)
        if symlinks_required:
            # Create bind mount to "mountdir/etc/file"
            make_bind_mount(
                os.path.join(etc, file),
                os.path.join(mountdir, b"etc", file),
                private=True,
            )

    os.symlink(b"/proc/self/mounts", os.path.join(etc, b"mtab"))
    # Bind mounts for symlinks are not possible, so do nothing for "mountdir/etc/mtab".
    # This is not a problem because most systems have the correct symlink anyway.

    if not os.path.isdir(mountdir.decode() + CONTAINER_HOME):
        logging.warning(
            "Home directory in container should be %(h)s but this directory "
            "cannot be created due to directory mode of parent directory. "
            "It is recommended to use '--overlay-dir %(p)s' or '--hidden-dir %(p)s' "
            "and overwrite directory modes for subdirectories where necessary.",
            {
                "h": CONTAINER_HOME,
                "p": os.path.dirname(CONTAINER_HOME)
            },
        )
Example #20
    def output_after_run_set(self, runSet, cputime=None, walltime=None, energy={}):
        """
        The method output_after_run_set() stores the times of a run set in XML.
        @params cputime, walltime: accumulated times of the run set
        """

        self.add_values_to_run_set_xml(runSet, cputime, walltime, energy)

        # write results to files
        runSet.xml_file.replace(self._result_xml_to_string(runSet.xml))

        if len(runSet.blocks) > 1:
            for block in runSet.blocks:
                blockFileName = self.get_filename(runSet.name, block.name + ".xml")
                util.write_file(
                    self._result_xml_to_string(self.runs_to_xml(runSet, block.runs, block.name)),
                    blockFileName
                )
                self.all_created_files.append(blockFileName)

        self.txt_file.append(self.run_set_to_text(runSet, True, cputime, walltime, energy))
Example #21
def setup_user_mapping(pid, uid=os.getuid(), gid=os.getgid()):
    """Write uid_map and gid_map in /proc to create a user mapping
    that maps our user from outside the container to the same user inside the container
    (and no other users are mapped).
    @see: http://man7.org/linux/man-pages/man7/user_namespaces.7.html
    @param pid: The PID of the process in the container.
    """
    proc_child = os.path.join("/proc", str(pid))
    try:
        uid_map = "{0} {1} 1".format(uid, os.getuid()) # map uid internally to our uid externally
        util.write_file(uid_map, proc_child, "uid_map")
    except IOError as e:
        logging.warning("Creating UID mapping into container failed: %s", e)

    try:
        util.write_file("deny", proc_child, "setgroups")
    except IOError as e:
        # Not all systems have this file (depends on the kernel version),
        # but if it does not exist, we do not need to write to it.
        if e.errno != errno.ENOENT:
            logging.warning("Could not write to setgroups file in /proc: %s", e)

    try:
        gid_map = "{0} {1} 1".format(gid, os.getgid()) # map gid internally to our gid externally
        util.write_file(gid_map, proc_child, "gid_map")
    except IOError as e:
        logging.warning("Creating GID mapping into container failed: %s", e)
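
The format of both map files is "<ID inside the namespace> <ID outside the namespace> <range length>" (see user_namespaces(7)). For a user with UID 1000, the code above therefore writes the single line "1000 1000 1", mapping exactly that one user into the container.
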
Example #22
    def __init__(self,
                 cgroups,
                 kill_process_fn,
                 process,
                 callbackFn=lambda reason: None):
        super(KillProcessOnOomThread, self).__init__()
        self.daemon = True
        self._finished = threading.Event()
        self._process = process
        self._cgroups = cgroups
        self._callback = callbackFn
        self._kill_process = kill_process_fn

        cgroup = cgroups[MEMORY]  # for raw access
        ofd = os.open(os.path.join(cgroup, 'memory.oom_control'), os.O_WRONLY)
        try:
            # Important to use CLOEXEC, otherwise the benchmarked tool inherits
            # the file descriptor.
            self._efd = _libc.eventfd(0, _EFD_CLOEXEC)

            try:
                util.write_file('{} {}'.format(self._efd, ofd), cgroup,
                                'cgroup.event_control')

                # If everything worked, disable Kernel-side process killing.
                # This is not allowed if memory.use_hierarchy is enabled,
                # but we don't care.
                try:
                    os.write(ofd, '1'.encode('ascii'))
                except OSError as e:
                    logging.debug(
                        "Failed to disable kernel-side OOM killer: error %s (%s)",
                        e.errno, e.strerror)
            except EnvironmentError as e:
                os.close(self._efd)
                raise e
        finally:
            os.close(ofd)
Example #23
 def _prepareInputfile(self, inputfile):
     with open(inputfile, "r") as f:
         content = f.read()
     content = content.replace("goto ERROR;", "assert(0);")
     newFilename = "tmp_benchmark_feaver.c"
     # Note: util.write_file takes the content first, then the target path.
     util.write_file(content, newFilename)
     return newFilename
Example #24
 def assert_file_content_equals(self, content, file):
     if OVERWRITE_MODE:
         util.write_file(content, *file)
     else:
         self.assertMultiLineEqual(content, util.read_file(*file))
Example #25
 def test_writable_file(self):
     util.write_file("", self.base_dir, "tempfile")
     util.rmtree(self.base_dir)
     self.assertFalse(os.path.exists(self.base_dir),
                      "Failed to remove directory with file")
Example #26
 def try_write_to_freezer(content):
     try:
         util.write_file(content, freezer_file)
     except IOError:
         pass # expected if freezer not enabled, we try killing without it
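
Assumed context for this helper, with names taken from Example #2: freezer_file points at the freezer.state file of a cgroup, and the helper wraps freeze and thaw around the actual killing:

freezer_file = os.path.join(cgroup, "freezer.state")  # as in Example #2

try_write_to_freezer("FROZEN")   # best effort; a no-op if freezer is unavailable
# ... kill all tasks in the cgroup ...
try_write_to_freezer("THAWED")
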
Example #27
    def saveResult(self, run, task):
        taskKey = task['key']
        log_file = run['logFile']
        headers = {'Accept':'text/plain'}

        fileNames = []
        for file in task['files']:
            fileNames.append(file['name'])

        try:
            util.write_file(json.dumps(task), log_file+'.stdOut')
        except:
            logging.debug('Could not save task '+taskKey)

        statisticsProcessed = False
        if APPENGINE_SETTINGS['statisticsFileName'] in fileNames:
            try:
                uri = self.benchmark.config.appengineURI+'/tasks/'+taskKey+'/files/' + APPENGINE_SETTINGS['statisticsFileName']
                request = urllib2.Request(uri, headers=headers)
                response = urllib2.urlopen(request).read().decode()
                util.write_file(response, log_file)
                statisticsProcessed = True
            except:
                statisticsProcessed = False
                logging.exception('Could not save statistics of'+taskKey)
        else:
            statisticsProcessed = True

        if APPENGINE_SETTINGS['errorFileName'] in fileNames:
            try:
                uri = self.benchmark.config.appengineURI+'/tasks/'+taskKey+'/files/' + APPENGINE_SETTINGS['errorFileName']
                request = urllib2.Request(uri, headers=headers)
                response = urllib2.urlopen(request).read().decode()
                response = 'Task Key: {}\n{}'.format(task['key'], response)
                util.write_file(response, log_file+'.stdErr')
            except:
                logging.exception('Error while retrieving result file for '+taskKey)

        headers = {'Content-type':'application/json', 'Accept':'application/json'}
        markedAsProcessed = False
        if statisticsProcessed:
            try:
                uri = self.benchmark.config.appengineURI+'/tasksets/'+self.tasksetKey+'/tasks'
                request = urllib2.Request(uri, json.dumps([taskKey]).encode(), headers=headers)
                request.get_method = lambda: 'PUT'
                urllib2.urlopen(request)
                self.finishedTasks += 1
                markedAsProcessed = True
                logging.info('Stored result of task {0} in file {1}'.format(taskKey, log_file))
                try:
                    with open(self.benchmark.output_base_name+'.Processed_Tasks.txt', 'a') as f:
                        f.write(taskKey+'\n')
                except:
                    pass
                logging.debug('Task {} finished. Status: {}'.format(taskKey, task['status']))
            except:
                logging.debug('The task {} could not be marked as processed.'.format(taskKey))

        if self.benchmark.config.appengineDeleteWhenDone and markedAsProcessed:
            try:
                uri = self.benchmark.config.appengineURI+'/tasks/'+taskKey
                request = urllib2.Request(uri, headers=headers)
                request.get_method = lambda: 'DELETE'
                urllib2.urlopen(request).read()
            except:
                logging.exception('The task {} could not be deleted.'.format(taskKey))
Example #28
def execute_benchmark(benchmark, output_handler):
    if not _justReprocessResults:
        # build input for cloud
        (cloudInput, numberOfRuns) = getCloudInput(benchmark)
        if benchmark.config.debug:
            cloudInputFile = os.path.join(benchmark.log_folder, 'cloudInput.txt')
            util.write_file(cloudInput, cloudInputFile)
            output_handler.all_created_files.add(cloudInputFile)
        meta_information = json.dumps({
            "tool": {
                "name": benchmark.tool_name,
                "revision": benchmark.tool_version,
                "benchexec-module": benchmark.tool_module,
            },
            "benchmark": benchmark.name,
            "timestamp": benchmark.instance,
            "generator": "benchmark.vcloud.py",
        })

        # install cloud and dependencies
        ant = subprocess.Popen(["ant", "resolve-benchmark-dependencies"],
                               cwd=_ROOT_DIR,
                               shell=util.is_windows())
        ant.communicate()
        ant.wait()

        # start cloud and wait for exit
        logging.debug("Starting cloud.")
        if benchmark.config.debug:
            logLevel = "FINER"
        else:
            logLevel = "INFO"
        heapSize = benchmark.config.cloudClientHeap + numberOfRuns // 10  # 100 MB and 100 kB per run
        lib = os.path.join(_ROOT_DIR, "lib", "java-benchmark", "vcloud.jar")
        cmdLine = ["java", "-Xmx" + str(heapSize) + "m", "-jar", lib, "benchmark",
                   "--loglevel", logLevel,
                   "--run-collection-meta-information", meta_information,
                   "--environment", formatEnvironment(benchmark.environment()),
                   "--max-log-file-size", str(benchmark.config.maxLogfileSize),
                   "--debug", str(benchmark.config.debug)]
        if benchmark.config.cloudMaster:
            cmdLine.extend(["--master", benchmark.config.cloudMaster])
        if benchmark.config.debug:
            cmdLine.extend(["--print-new-files", "true"])

        walltime_before = time.time()

        cloud = subprocess.Popen(cmdLine, stdin=subprocess.PIPE, shell=util.is_windows())
        try:
            cloud.communicate(cloudInput.encode('utf-8'))
        except KeyboardInterrupt:
            stop()
        returnCode = cloud.wait()

        walltime_after = time.time()
        usedWallTime = walltime_after - walltime_before

        if returnCode:
            if STOPPED_BY_INTERRUPT:
                output_handler.set_error('interrupted')
            else:
                errorMsg = "Cloud return code: {0}".format(returnCode)
                logging.warning(errorMsg)
                output_handler.set_error(errorMsg)
    else:
        returnCode = 0
        usedWallTime = None

    handleCloudResults(benchmark, output_handler, usedWallTime)

    return returnCode
Example #29
 def test_nonwritable_file(self):
     temp_file = os.path.join(self.base_dir, "tempfile")
     util.write_file("", temp_file)
     os.chmod(temp_file, 0)
     util.rmtree(self.base_dir)
     self.assertFalse(os.path.exists(self.base_dir), "Failed to remove directory with non-writable file")
Example #30
 def test_writable_file(self):
     util.write_file("", self.base_dir, "tempfile")
     util.rmtree(self.base_dir)
     self.assertFalse(os.path.exists(self.base_dir), "Failed to remove directory with file")
Example #31
    def saveResult(self, run, task):
        taskKey = task['key']
        log_file = run['logFile']
        headers = {'Accept': 'text/plain'}

        fileNames = []
        for file in task['files']:
            fileNames.append(file['name'])

        try:
            util.write_file(json.dumps(task), log_file + '.stdOut')
        except:
            logging.debug('Could not save task ' + taskKey)

        statisticsProcessed = False
        if APPENGINE_SETTINGS['statisticsFileName'] in fileNames:
            try:
                uri = self.benchmark.config.appengineURI + '/tasks/' + taskKey + '/files/' + APPENGINE_SETTINGS[
                    'statisticsFileName']
                request = urllib2.Request(uri, headers=headers)
                response = urllib2.urlopen(request).read().decode()
                util.write_file(response, log_file)
                statisticsProcessed = True
            except:
                statisticsProcessed = False
                logging.exception('Could not save statistics of ' + taskKey)
        else:
            statisticsProcessed = True

        if APPENGINE_SETTINGS['errorFileName'] in fileNames:
            try:
                uri = self.benchmark.config.appengineURI + '/tasks/' + taskKey + '/files/' + APPENGINE_SETTINGS[
                    'errorFileName']
                request = urllib2.Request(uri, headers=headers)
                response = urllib2.urlopen(request).read().decode()
                response = 'Task Key: {}\n{}'.format(task['key'], response)
                util.write_file(response, log_file + '.stdErr')
            except:
                logging.exception('Error while retrieving result file for ' +
                                  taskKey)

        headers = {
            'Content-type': 'application/json',
            'Accept': 'application/json'
        }
        markedAsProcessed = False
        if statisticsProcessed:
            try:
                uri = self.benchmark.config.appengineURI + '/tasksets/' + self.tasksetKey + '/tasks'
                request = urllib2.Request(uri,
                                          json.dumps([taskKey]).encode(),
                                          headers=headers)
                request.get_method = lambda: 'PUT'
                urllib2.urlopen(request)
                self.finishedTasks += 1
                markedAsProcessed = True
                logging.info('Stored result of task {0} in file {1}'.format(
                    taskKey, log_file))
                try:
                    with open(
                            self.benchmark.output_base_name +
                            '.Processed_Tasks.txt', 'a') as f:
                        f.write(taskKey + '\n')
                except:
                    pass
                logging.debug('Task {} finished. Status: {}'.format(
                    taskKey, task['status']))
            except:
                logging.debug(
                    'The task {} could not be marked as processed.'.format(
                        taskKey))

        if self.benchmark.config.appengineDeleteWhenDone and markedAsProcessed:
            try:
                uri = self.benchmark.config.appengineURI + '/tasks/' + taskKey
                request = urllib2.Request(uri, headers=headers)
                request.get_method = lambda: 'DELETE'
                urllib2.urlopen(request).read()
            except:
                logging.exception(
                    'The task {} could not be deleted.'.format(taskKey))