示例#1
0
def run():
    """Run user-submitted code through the ``bbm`` interpreter.

    Reads ``code`` and ``stdin`` from the POSTed form, writes the code to a
    unique temp file, executes it with a 15-second timeout, and returns a
    JSON payload with decoded stdout/stderr and a timeout flag.
    """
    is_timeout = False
    code = request.form.get('code')
    # BUG FIX: 'stdin' may be absent from the form (get() returns None);
    # default to empty input instead of crashing on None.encode() below.
    stdin = request.form.get('stdin') or ''
    code_filename = "/tmp/" + str(uuid4())
    try:
        with open(code_filename, "w") as code_file:
            code_file.write(code)
        p = Popen(
            ['bbm', code_filename],
            stdout=PIPE,
            stdin=PIPE,
            stderr=PIPE
        )
        stdout, stderr = p.communicate(input=stdin.encode('utf-8'), timeout=15)
    except TimeoutExpired:
        is_timeout = True
        p.kill()
        # Reap the killed child and collect whatever output it produced.
        stdout, stderr = p.communicate()
    finally:
        # Always clean up the temp file, even on failure.
        remove(code_filename)
    stdout = stdout.decode('utf-8')
    stderr = stderr.decode('utf-8')
    return jsonify({
        'stdout': stdout,
        'stderr': stderr,
        'is_timeout': is_timeout
    })
示例#2
0
def runcmd(cmd, input=None, stringio=True, **kwargs):
    """Run *cmd*, optionally feeding it *input*, and return ``(stdout, stderr)``.

    Raises TimeoutExpired if *timeout* (popped from kwargs) elapses, and
    SubProcessError when the command exits non-zero.  String commands are
    shlex-split; list commands under shell=True are shell-quoted.
    """
    if six.PY2:
        # BUG FIX: TimeoutExpired was caught/raised below but never imported
        # here; on PY2 it only exists in the subprocess32 backport, so the
        # handler would die with NameError instead of reporting the timeout.
        from subprocess32 import Popen, PIPE, TimeoutExpired
    else:
        from subprocess import Popen, PIPE, TimeoutExpired
    timeout = kwargs.pop('timeout', None)
    if input:
        kwargs['stdin'] = PIPE
    if 'bufsize' not in kwargs:
        kwargs['bufsize'] = -1
    if isinstance(cmd, six.string_types):
        import shlex
        cmd = shlex.split(cmd)
    elif kwargs.get('shell'):
        from six.moves import shlex_quote
        cmd = [shlex_quote(arg) for arg in cmd]
    process = Popen(cmd, universal_newlines=stringio, stdout=PIPE, stderr=PIPE, **kwargs)
    try:
        output, error = process.communicate(input=input, timeout=timeout)
    except TimeoutExpired:
        process.kill()
        # Reap the killed child and attach its partial output to the error.
        output, error = process.communicate()
        raise TimeoutExpired(process.args, timeout, output=output)
    retcode = process.poll()
    if retcode:
        raise SubProcessError(retcode, process.args, output=output, error=error)
    return output, error
示例#3
0
def git_get_tracked(dir="."):
    """Return the upstream tracking ref (e.g. ``origin/master``) of *dir*'s HEAD."""
    rev_parse = ["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"]
    process = Popen(rev_parse, cwd=dir, stdout=PIPE)
    out, _ = process.communicate()
    return out.strip()
def install_dropbox_helper(package = None, fullpath = None, tmp = False):
    """Run the helper installer for *package* (or an explicit *fullpath*).

    With ``tmp`` set, performs a temporary install and returns the helpers
    directory inside the path the installer reported.  Raises InstallError
    when the installer is missing, exits non-zero, or reports no path.
    """
    if not os.path.exists(HELPER_INSTALLER_PATH):
        raise InstallError('no installer')
    assert package or fullpath
    if not fullpath:
        fullpath = get_package_location(package)
    TRACE('Installing %s', package or fullpath)
    action = 'install-tmp' if tmp else 'install'
    proc = Popen([HELPER_INSTALLER_PATH, action, BUILD_KEY, fullpath],
                 stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        # Surface whatever the installer printed before failing.
        if out:
            TRACE('stdout: %s', out)
        if err:
            TRACE('stderr: %s', err)
        raise InstallError('Installer returned %d' % proc.returncode)
    if not tmp:
        return
    # Temporary installs report their location as <path>...</path> on stdout;
    # the last such line wins.
    path = None
    for line in out.split('\n'):
        match = re.match('\\<path\\>(?P<path>.+)\\</path\\>', line)
        if match:
            path = match.group('path')
    if path:
        return os.path.join(path, REL_HELPERS_DIR)
    raise InstallError('no path')
示例#5
0
    def process(self, json_data, namedblobfile=None):
        """Run sablon over the template with *json_data* as its context.

        Renders into a temporary directory, stores stdout/stderr/returncode
        on self, and on success reads the produced .docx bytes into
        self.file_data.  Returns self; the temp directory is always removed.
        """
        tmp_dir = tempfile.mkdtemp(prefix='opengever.core.sablon_')
        out_path = join(tmp_dir, 'sablon_output.docx')

        # Either materialise the supplied blob or let the stored template
        # write itself into the working directory.
        if namedblobfile is not None:
            tpl_path = join(tmp_dir, namedblobfile.filename)
            with open(tpl_path, 'wb') as tpl_file:
                tpl_file.write(namedblobfile.data)
        else:
            tpl_path = self.template.as_file(tmp_dir)

        try:
            sablon_bin = environ.get('SABLON_BIN', 'sablon')
            proc = Popen([sablon_bin, tpl_path, out_path],
                         stdin=PIPE, stdout=PIPE, stderr=PIPE)
            self.stdout, self.stderr = proc.communicate(input=json_data)
            self.returncode = proc.returncode
            if self.is_processed_successfully():
                with open(out_path, 'rb') as produced:
                    self.file_data = produced.read()
        finally:
            shutil.rmtree(tmp_dir)

        return self
示例#6
0
class DummySim(ProcessWorkerThread):
    """Worker thread that evaluates points by writing them to a unique file
    and running the external ./sphere_ext_files binary on it."""

    def handle_eval(self, record):
        """Evaluate one record; the temporary input file is always removed."""
        # This gives a file name / directory name that no other thread can use
        my_unique_filename = my_gen.next_filename()
        my_unique_filename = str(my_unique_filename) + ".txt"

        # Write the point to the input file; 'with' closes the handle even
        # if the write fails (previously the handle leaked on error).
        with open(my_unique_filename, 'w') as f:
            f.write(array2str(record.params[0]))

        # Run the objective function and pass the filename of the input file
        self.process = Popen(['./sphere_ext_files', my_unique_filename],
                             stdout=PIPE)
        out = self.process.communicate()[0]

        # Parse the output
        try:
            val = float(out)  # This raises ValueError if out is not a float
            self.finish_success(record, val)
        except ValueError:
            logging.warning("Function evaluation crashed/failed")
            self.finish_failure(record)
        # Remove the input file once, on both success and failure paths
        # (previously duplicated in each branch).
        os.remove(my_unique_filename)
示例#7
0
class DummySim(ProcessWorkerThread):
    """Worker thread that evaluates points by writing them to a unique file
    and running the external ./sphere_ext_files binary on it."""

    def handle_eval(self, record):
        """Evaluate one record; the temporary input file is always removed."""
        # This gives a file name / directory name that no other thread can use
        my_unique_filename = my_gen.next_filename()
        my_unique_filename = str(my_unique_filename) + ".txt"

        # Write the point to the input file; 'with' guarantees the handle is
        # closed even if the write fails (previously it leaked on error).
        with open(my_unique_filename, 'w') as f:
            f.write(array2str(record.params[0]))

        # Run the objective function and pass the filename of the input file
        self.process = Popen(['./sphere_ext_files', my_unique_filename],
                             stdout=PIPE)
        out = self.process.communicate()[0]

        # Parse the output
        try:
            val = float(out)  # This raises ValueError if out is not a float
            self.finish_success(record, val)
        except ValueError:
            logging.warning("Function evaluation crashed/failed")
            self.finish_failure(record)
        # Remove the input file once, on both success and failure paths
        # (previously duplicated in each branch).
        os.remove(my_unique_filename)
示例#8
0
class CppSim(MPIProcessWorker):
    """MPI worker that shells out to an external Delft3D simulation script."""

    def eval(self, record_id, params, extra_args=None):
        """Launch run_simulation.py with the point in *params* and record the
        resulting float objective; cancel the record on bad output."""
        try:
            # Debug trace of the evaluation request.
            with open("TEST_AGAIN.txt", "a") as fp:
                fp.write("this is try to launch process%s\n" % (rank))
                fp.write("this is try to launch process%s\n" % (str(record_id)))
                fp.write("this is try to launch process%s\n" % (str(params[0])))
                fp.write("this is try to launch process%s\n" % (array2str(params[0])))
                fp.write("this is try to launch process%s\n" % (str(params[1])))
                fp.write("this is try to launch process%s\n" % (str(params[2])))
                fp.write("this is try to launch process%s\n" % (type(params)))
            workingdir = "/home/users/nus/e0022672/delft3d/examples/"
            cmd = ["python", workingdir + "run_simulation.py",
                   array2str(params[0]), str(params[1]), str(params[2])]
            self.process = Popen(cmd, stdout=PIPE)
            out = self.process.communicate()[0]
            self.finish_success(record_id, float(out))
        except ValueError:
            logging.info("WARNING: Incorrect output or crashed evaluation")
            self.finish_cancel(record_id)
示例#9
0
def check_container_status_rkt():
    """
    Checks and prints the calico/node container status when running in rkt.

    Exits the whole process with status 1 when the container list cannot be
    obtained or no calico-node container is found.
    NOTE(review): Python 2 module (print statements below).
    """
    list_cmd = ["sudo", "rkt", "list"]
    p = Popen(list_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()
    # Each findall() match is a tuple of capture groups; index 2 is used as
    # the status column below -- TODO confirm against RKT_CONTAINER_RE.
    containers = RKT_CONTAINER_RE.findall(stdout)

    if p.returncode:
        print "Unable to list rkt containers: '%s'" % stderr.strip()
        sys.exit(1)

    if len(containers) == 0:
        print "calico-node container not running"
        sys.exit(1)
    else:
        # Get statuses for all calico/node containers, and determine
        # if any are running.
        statuses = [c[2] for c in containers]
        running = "running" in statuses

        # If one is running, status is "running".  Else, use the status of
        # the first container.
        status = "running" if running else statuses[0]

        # Print status.  If it at least one is running, this will display
        # "running" status.
        print "calico-node container status: %s" % status
示例#10
0
def pipe(cmd, stdin=None, timeout=None, env=None):
    """Feed *stdin* to *cmd* and capture its output.

    Returns a dict with 'stdout', 'stderr' and 'killed'; 'killed' is True
    when the command exceeded *timeout* and its process group was SIGTERMed
    (stdout/stderr are then None).  *env* entries are layered on top of the
    current environment.
    """
    # Build the child environment only when extra variables were requested.
    child_env = None
    if env:
        child_env = environ.copy()
        child_env.update(env)

    # setsid detaches the child into its own process group, so the whole
    # group (including grandchildren) can be signalled on timeout.
    proc = Popen(cmd,
                 stdin=PIPE if stdin else None,
                 stdout=PIPE,
                 stderr=PIPE,
                 preexec_fn=setsid,
                 env=child_env)

    try:
        out, err = proc.communicate(input=stdin, timeout=timeout)
        result = {'stdout': out, 'stderr': err, 'killed': False}
    except TimeoutExpired:
        # Kill the whole process group, which includes all children.
        killpg(getpgid(proc.pid), SIGTERM)
        result = {'stdout': None, 'stderr': None, 'killed': True}

    proc.wait()  # Reaps zombies

    return result
示例#11
0
    def process(self, json_data, namedblobfile=None):
        """Render the sablon template with *json_data* as its context.

        Stores stdout/stderr/returncode on self, raises
        SablonProcessingFailed when sablon reports an error, and on success
        reads the generated .docx into self.file_data.  Returns self; the
        temporary working directory is always removed.
        """
        workdir = tempfile.mkdtemp(prefix='opengever.core.sablon_')
        docx_path = join(workdir, 'sablon_output.docx')

        # Either materialise the supplied blob or let the stored template
        # write itself into the working directory.
        if namedblobfile is not None:
            tpl_path = join(workdir, namedblobfile.filename)
            with open(tpl_path, 'wb') as tpl:
                tpl.write(namedblobfile.data)
        else:
            tpl_path = self.template.as_file(workdir)

        try:
            sablon_bin = environ.get('SABLON_BIN', 'sablon')
            proc = Popen(
                [sablon_bin, tpl_path, docx_path],
                stdin=PIPE, stdout=PIPE, stderr=PIPE)
            self.stdout, self.stderr = proc.communicate(input=json_data)
            self.returncode = proc.returncode
            if not self.is_processed_successfully():
                raise SablonProcessingFailed(self.stderr)
            with open(docx_path, 'rb') as produced:
                self.file_data = produced.read()
        finally:
            shutil.rmtree(workdir)

        return self
示例#12
0
def run_timed(cmd, stdin=None, timeout=None):
    """Run *cmd* (optionally feeding *stdin*) and time it.

    Returns a dict with 'stdout', 'stderr', 'killed' (True when the command
    exceeded *timeout* and its process group was SIGTERMed), 'time' in
    seconds, and -- when a timeout was given -- the 'timeout' value itself.
    """
    started = default_timer()
    # setsid gives the child its own process group so the whole tree can be
    # signalled if it must be killed.
    proc = Popen(cmd,
                 stdin=PIPE if stdin else None,
                 stdout=PIPE,
                 stderr=PIPE,
                 preexec_fn=setsid)

    try:
        out, err = proc.communicate(input=stdin, timeout=timeout)
        result = {'stdout': out, 'stderr': err, 'killed': False}
    except TimeoutExpired:
        # Kill the whole process group, which includes all children.
        killpg(getpgid(proc.pid), SIGTERM)
        result = {'stdout': None, 'stderr': None, 'killed': True}

    proc.wait()  # Reaps zombies

    result['time'] = default_timer() - started
    if timeout is not None:
        result['timeout'] = timeout
    return result
 def _clone_repo(self):
     """Execute clone_from method.

     Performs a shallow (depth 1) clone of self._git_url into
     self._repo_path and waits for it to finish.
     """
     cmd = ['git', 'clone', '--depth', '1', self._git_url, self._repo_path]
     # BUG FIX: '%r' was passed as a second print() argument (logging style),
     # so the command line was never interpolated into the message.
     print('Executing command: %r' % ' '.join(cmd))
     proc = Popen(cmd)
     # stdout/stderr are not PIPEd, so both come back as None; communicate()
     # just waits for the clone to complete.
     stdout, stderr = proc.communicate()
示例#14
0
 def run_judge_client(self):
     """Run judge-client.py on the attacker/defender programs and record the
     winner, termination reason and replay; raises JudgeClientException on
     any crash or unrecognised output."""
     self.copy_assets()
     args = ["python", os.path.realpath('judge-client.py'), './attacker', './defender']
     self.log += ['Running: ' + ' '.join(args)]
     proc = Popen(args, cwd=self.base_dir, stdin=PIPE, stdout=PIPE, stderr=PIPE)
     out, err = proc.communicate()
     self.log += [str(err)]
     if proc.returncode:
         self.log += ["Judge client crashed with return code %d." % proc.returncode]
         raise JudgeClientException("judge client crashed.")
     # Protocol: line 0 = winner, line 1 = reason, rest = replay data.
     lines = out.split('\n')
     winner = lines[0]
     if winner == "attacker":
         self.record.attacker_wins()
     elif winner == "defender":
         self.record.defender_wins()
     else:
         self.log += ["Judge client return unknown winner %s." % winner]
         raise JudgeClientException("unknown winner.")
     status_by_reason = {
         "Finished": ExecutionRecord.STATUS_FINISHED,
         "IllegalMovement": ExecutionRecord.STATUS_ILLEGAL_MOVE,
         "IllegalOutput": ExecutionRecord.STATUS_BAD_FORMAT,
         "TLE": ExecutionRecord.STATUS_TLE,
         "Crashed": ExecutionRecord.STATUS_RUNTIME_ERROR,
     }
     reason = lines[1]
     if reason in status_by_reason:
         self.record.status = status_by_reason[reason]
     else:
         self.log += ["Judge client return unknown reason %s." % reason]
         raise JudgeClientException("unknown reason.")
     self.record.replay = lines[2:]
示例#15
0
def check_container_status_rkt():
    """
    Checks and prints the calico/node container status when running in rkt.

    Exits the whole process with status 1 when the container list cannot be
    obtained or no calico-node container is found.
    NOTE(review): Python 2 module (print statements below).
    """
    list_cmd = ["sudo", "rkt", "list"]
    p = Popen(list_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()
    # Each findall() match is a tuple of capture groups; index 2 is used as
    # the status column below -- TODO confirm against RKT_CONTAINER_RE.
    containers = RKT_CONTAINER_RE.findall(stdout)

    if p.returncode:
        print "Unable to list rkt containers: '%s'" % stderr.strip()
        sys.exit(1)

    if len(containers) == 0:
        print "calico-node container not running"
        sys.exit(1)
    else:
        # Get statuses for all calico/node containers, and determine
        # if any are running.
        statuses = [c[2] for c in containers]
        running = "running" in statuses

        # If one is running, status is "running".  Else, use the status of
        # the first container.
        status = "running" if running else statuses[0]

        # Print status.  If it at least one is running, this will display
        # "running" status.
        print "calico-node container status: %s" % status
示例#16
0
def invoke_side_effects(argv):
    """Run every configured static-analysis checker over the C sources of a
    GCC invocation, writing one XML analysis report per (checker, source).

    Each checker script gets a fresh path to write XML results to; if it
    times out or writes nothing, a synthetic "failed analysis" report is
    emitted so the failure itself is captured.
    """
    log("invoke_side_effects: %s"
        % ' '.join(sys.argv))

    gccinv = GccInvocation(argv)

    # Try to run each side effect in a subprocess, passing in a path
    # for the XML results to be written to.
    # Cover a multitude of possible failures by detecting if no output
    # was written, and capturing *that* as a failure
    for sourcefile in gccinv.sources:
        if sourcefile.endswith('.c'): # FIXME: other extensions?
            for script, genname in [('invoke-cppcheck', 'cppcheck'),
                                    ('invoke-clang-analyzer', 'clang-analyzer'),
                                    ('invoke-cpychecker', 'cpychecker'),

                                    # Uncomment the following to test a
                                    # checker that fails to write any XML:
                                    # ('echo', 'failing-checker'),

                                    ]:
                # NamedTemporaryFile is used only to reserve a unique path;
                # it is deleted on close so the checker can create it fresh.
                # NOTE(review): there is a small race between close and the
                # checker run where another process could claim the name.
                with tempfile.NamedTemporaryFile() as f:
                    dstxmlpath = f.name
                assert not os.path.exists(dstxmlpath)

                # Restrict the invocation to just one source file at a
                # time:
                singleinv = gccinv.restrict_to_one_source(sourcefile)
                singleargv = singleinv.argv

                TIMEOUT=60
                t = Timer()

                args = [script, dstxmlpath] + singleargv
                log('invoking args: %r' % args)
                p = Popen(args,
                          stdout=PIPE, stderr=PIPE)
                try:
                    out, err = p.communicate(timeout=TIMEOUT)
                    write_streams(script, out, err)

                    if os.path.exists(dstxmlpath):
                        with open(dstxmlpath) as f:
                            analysis = Analysis.from_xml(f)
                    else:
                        # Checker ran but produced no XML: record the failure
                        # along with whatever it printed.
                        analysis = make_failed_analysis(genname, sourcefile, t,
                                                        msgtext=('Unable to locate XML output from %s'
                                                                 % script),
                                                        failureid='no-output-found')
                        analysis.set_custom_field('stdout', out)
                        analysis.set_custom_field('stderr', err)
                        analysis.set_custom_field('returncode', p.returncode)
                except TimeoutExpired:
                    # NOTE(review): the timed-out child is not kill()ed here,
                    # so it may linger after the timeout is recorded.
                    analysis = make_failed_analysis(genname, sourcefile, t,
                                                    msgtext='Timeout running %s' % genname,
                                                    failureid='timeout')
                    analysis.set_custom_field('timeout', TIMEOUT)
                analysis.set_custom_field('gcc-invocation', ' '.join(argv))
                write_analysis_as_xml(analysis)
示例#17
0
def get_docker_git_revs(docker_image, paths):
    """Read the git-rev files at *paths* inside *docker_image*.

    Returns one rev string per path.  If the container output does not yield
    exactly one line per requested path, returns empty strings for all.
    """
    cat_cmd = ["docker", "run", "--rm", docker_image, "cat"] + paths
    process = Popen(cat_cmd, stdout=PIPE)
    out, _ = process.communicate()
    revs = out.strip().split('\n')
    if len(revs) == len(paths):
        return revs
    return [""] * len(paths)
示例#18
0
    def do_interview(self,
                     hosts=None,
                     num_of_segments=1,
                     directory_pairs=None,
                     has_mirrors=False):
        """
        hosts: list of hosts to expand to
        num_of_segments: number of segments to expand
        directory_pairs: list of tuple directory pairs where first element is the primary directory and the 2nd is the mirror.

        Note: this code is done with the assumption of primary only cluster.
        There is an assumption that the user knows the kind of cluster to expand.

        Returns: string output, int returncode

        NOTE(review): stdin.write() is fed str answers, which assumes
        Python 2 / text-mode pipes -- confirm before porting to Python 3.
        NOTE(review): the default directory_pairs is a 2-tuple of strings,
        whose len() is 2 and so fails the length check below when
        num_of_segments is the default 1 -- presumably it was meant to be
        [('/tmp/foo', '')]; confirm with callers.
        """
        if directory_pairs is None:
            directory_pairs = ('/tmp/foo', '')

        if num_of_segments != len(directory_pairs):
            raise Exception(
                "Amount of directory_pairs needs to be the same amount as the segments."
            )

        # If working_directory is None, then Popen will use the directory where
        # the python code is being ran.
        p1 = Popen(["gpexpand"],
                   stdout=PIPE,
                   stdin=PIPE,
                   cwd=self.working_directory)

        # Very raw form of doing the interview part of gpexpand.
        # May be pexpect is the way to do this if we really need it to be more complex
        # Cannot guarantee that this is not flaky either.

        # Would you like to initiate a new System Expansion Yy|Nn (default=N):
        p1.stdin.write("y\n")

        # **Enter a blank line to only add segments to existing hosts**[]:
        p1.stdin.write("%s\n" % (",".join(hosts) if hosts else ""))

        if has_mirrors:
            #What type of mirroring strategy would you like? spread|grouped (default=grouped):
            p1.stdin.write("\n")

        #How many new primary segments per host do you want to add? (default=0):
        p1.stdin.write("%s\n" % num_of_segments)

        # Enter new primary data directory #<number primary segment>
        for directory in directory_pairs:
            primary, mirror = directory
            p1.stdin.write("%s\n" % primary)
            if mirror:
                p1.stdin.write("%s\n" % mirror)

        output, err = p1.communicate()

        return output, p1.wait()
示例#19
0
    def _stop_node(self, container_id):
        """Kill and force-remove the docker container *container_id*, logging
        whether each step acted on the expected container."""
        proc = Popen(["docker", "kill", container_id], stdout=PIPE)
        (stdoutdata, stderrdata) = proc.communicate()
        stopped_container_id = self._get_container_id(stdoutdata)
        stop_successful = container_id == stopped_container_id

        proc = Popen(["docker", "rm", "-f", container_id], stdout=PIPE)
        (stdoutdata, stderrdata) = proc.communicate()
        removed_container_id = self._get_container_id(stdoutdata)
        # BUG FIX: previously compared against stopped_container_id
        # (copy-paste), so remove_successful just mirrored stop_successful.
        remove_successful = container_id == removed_container_id

        self.logger.log(
            "stop_node", {
                "container_id": container_id,
                "is_head": container_id == self.head_container_id,
                "stop_success": stop_successful,
                "remove_success": remove_successful
            })
示例#20
0
def get_relationship(refname, other_refname):
    """Return the (ahead, behind) commit counts between two git refs."""
    rev_list_cmd = [
        "git", "rev-list", "--left-right", "--count",
        refname + "..." + other_refname
    ]
    process = Popen(rev_list_cmd, stdout=PIPE)
    out, _ = process.communicate()
    # git prints "<left>\t<right>"; left = commits only in refname (ahead),
    # right = commits only in other_refname (behind).
    ahead, behind = (int(count) for count in out.strip().split())
    return (ahead, behind)
示例#21
0
def execute(command, env_file, name, profile, region):
    """Inject env. variables into an executable via <name> and/or <env_file>

    Variables come from a KEY=VALUE env file (values prefixed 'ssm:' are
    resolved through the parameter store) and/or from all parameters under
    the SSM path *name*.  The merged variables are layered on top of the
    current environment and *command* is executed with them.  Returns the
    command's exit code, or None when there is nothing to execute.
    """
    # command is a tuple
    if len(command) == 0:
        click.echo("nothing to execute")
        return

    env_dict = {}
    if env_file:
        env_vars = []
        # BUG FIX: the file handle was never closed; 'with' guarantees it.
        with open(env_file, 'r') as f:
            for line in f:
                if line.startswith('#'):
                    continue
                key, value = line.strip().split('=', 1)
                env_vars.append({'name': key, 'value': value})
        # Resolve 'ssm:' references through the parameter store.
        for env_var in env_vars:
            value = env_var['value']
            if value.startswith('ssm:'):
                secretKey = value[4:]
                out = get_param(secretKey, profile, region)
                env_var['value'] = out['Value']

        for env_var in env_vars:
            key = env_var['name']
            env_dict[key] = env_var['value']
            click.echo("injected %s" % key)

    if name:
        # Parameter-store paths must be absolute.
        path = name if name[0] == '/' else '/' + name
        params = get_parameters_by_path(path, profile, region)
        for param in params:
            key = formatKey(param['Name'])
            # (A redundant second formatKey(key) call was removed here.)
            value = param['Value']
            env_dict[key] = value
            click.echo("injected %s" % key)

    cmd_env = environ.copy()
    cmd_env.update(env_dict)

    p = Popen(command,
            universal_newlines=True,
            bufsize=0,
            shell=False,
            env=cmd_env)
    _, _ = p.communicate()

    return p.returncode
示例#22
0
class CppSim(ProcessWorkerThread):
    """Worker thread that evaluates points via the external ./sphere_ext binary."""

    def handle_eval(self, record):
        """Run ./sphere_ext on the record's point; cancel on bad output."""
        try:
            self.process = Popen(['./sphere_ext', array2str(record.params[0])],
                                 stdout=PIPE)
            raw = self.process.communicate()[0]
            # float() raises ValueError when the binary printed garbage.
            self.finish_success(record, float(raw))
        except ValueError:
            self.finish_cancelled(record)
            logging.info("WARNING: Incorrect output or crashed evaluation")
示例#23
0
class CppSim(MPIProcessWorker):
    """MPI worker that evaluates points via the external ./sphere_ext binary."""

    def eval(self, record_id, params, extra_args=None):
        """Run ./sphere_ext on the point in *params*; cancel on bad output."""
        try:
            self.process = Popen(['./sphere_ext', array2str(params[0])],
                                 stdout=PIPE)
            raw = self.process.communicate()[0]
            # float() raises ValueError when the binary printed garbage.
            self.finish_success(record_id, float(raw))
        except ValueError:
            logging.info("WARNING: Incorrect output or crashed evaluation")
            self.finish_cancelled(record_id)
示例#24
0
class DummySim(ProcessWorkerThread):
    """Worker thread that evaluates points via the external ./sphere_ext binary."""

    def handle_eval(self, record):
        """Run ./sphere_ext on the record's point and report success/failure."""
        self.process = Popen(['./sphere_ext', array2str(record.params[0])],
                             stdout=PIPE)
        raw = self.process.communicate()[0]
        try:
            # float() raises ValueError when the binary printed garbage.
            self.finish_success(record, float(raw))
        except ValueError:
            logging.warning("Function evaluation crashed/failed")
            self.finish_failure(record)
示例#25
0
    def objfunction(self, x):
        """Evaluate one hyper-parameter point *x* by running the Torch MNIST
        script and logging the results to a per-run CSV file.

        Returns the validation result (float).  Raises ValueError on a
        dimension mismatch or when the subprocess exits non-zero.
        NOTE(review): Python 2 module (print statement near the end).
        """
        if len(x) != self.dim:
            raise ValueError('Dimension mismatch')

        self.f_eval_count = self.f_eval_count + 1
        # Unique ids for this evaluation and for the whole optimisation run.
        experimentId = 'p-'+str(len(x))+'-'+str(self.f_eval_count)+'-'+self.seed+'-'+self.server
        fileId = 'p-'+str(len(x))+'-'+self.seed+'-'+self.server
        m = self.hyper_map

        # Build the 'th eval_mnist_GPU.lua ...' command line, mapping each
        # hyper-parameter name to its index in x via self.hyper_map.
        exp_arg = []
        exp_arg.append('th'),  # NOTE(review): stray trailing comma; harmless no-op
        exp_arg.append('eval_mnist_GPU.lua')
        exp_arg.append('--mean')
        exp_arg.append(str(x[m['mean']]))
        exp_arg.append('--std')
        exp_arg.append(str(x[m['std']]))
        exp_arg.append('--learnRate')
        exp_arg.append(str(x[m['learnRate']]))
        exp_arg.append('--momentum')
        exp_arg.append(str(x[m['momentum']]))
        exp_arg.append('--epochs')
        exp_arg.append(str(x[m['epochs']]))
        exp_arg.append('--hiddenNodes')
        exp_arg.append(str(x[m['hiddenNodes']]))
        exp_arg.append('--experimentId')
        exp_arg.append(experimentId)
        exp_arg.append('--seed')
        exp_arg.append(self.seed)

        millis_start = int(round(time.time() * 1000))
        proc = Popen(exp_arg, stdout=PIPE)
        out, err = proc.communicate()

        if proc.returncode == 0:
            # Output format appears to be 'validationResult###testResult'
            # -- inferred from the split below; confirm against the script.
            results = out.split('###')
            result = float(results[0])
            testResult = float(results[1])
            millis = int(round(time.time() * 1000))
            f_eval_time = millis - millis_start

            # Track the best (lowest) validation result seen so far.
            if self.bestResult > result:
                self.bestResult = result

            row = [self.bestResult, f_eval_time, result, testResult, self.f_eval_count, millis]
            for xi in range(0, len(x)):
                row.append(x[xi])
            with open('logs/'+fileId+'-output.csv', 'a') as f:
                writer = csv.writer(f)
                writer.writerow(row)

            return result
        else:
            # NOTE(review): stderr is not PIPEd above, so err is always None.
            print err
            raise ValueError('Function evaluation error')
示例#26
0
class DummySim(ProcessWorkerThread):
    """Worker thread evaluating the external ./sphere_ext objective."""

    def handle_eval(self, record):
        """Run ./sphere_ext on the record's point and report the result."""
        self.process = Popen(['./sphere_ext', array2str(record.params[0])],
                             stdout=PIPE)
        raw_output = self.process.communicate()[0]
        try:
            # float() raises ValueError when the output is not numeric.
            self.finish_success(record, float(raw_output))
        except ValueError:
            logging.warning("Function evaluation crashed/failed")
            self.finish_failure(record)
示例#27
0
def execute_command(command, env=None, timeout=None):
    """Run *command* through the shell and capture its result.

    Returns (exit_code, stdout, stderr) as (int, str, str).  Any failure to
    launch or communicate (including a timeout) is reported as code 255 with
    the traceback in the error string instead of raising.
    """
    import traceback
    try:
        p = Popen(command, env=env, shell=True, stdout=PIPE, stderr=PIPE)
        output, error = p.communicate(timeout=timeout)
        code = p.returncode
        output = output.decode(errors='replace')
        error = error.decode(errors='replace')
    except Exception:
        # BUG FIX: was `traeback.format_exc()` (NameError), which replaced
        # the real failure with a NameError in this best-effort handler.
        output = ''
        error = traceback.format_exc()
        code = 255
    return code, output, error
示例#28
0
def crawl_and_capture(crawlCommand):
    """Run *crawlCommand* from this script's directory and print its stdout.

    NOTE(review): Python 2 module (print statement).  The bare except can
    realistically only fire from communicate()/strip(); at that point stderr
    may still be None, so the re-raised Exception can carry no detail.
    """
    ps = Popen(crawlCommand,
               stdout=PIPE,
               stderr=PIPE,
               cwd=os.path.dirname(os.path.abspath(__file__)))
    stdout, stderr = None, None
    try:
        stdout, stderr = ps.communicate()
        stdout = stdout.strip()
        print stdout
    except:
        raise Exception(stderr)
示例#29
0
    def gen_XML_files(self):
        """Run doxygen on self.filename and generate XML into self.outdir.

        The doxygen config is the template at self.conf with INPUT and
        OUTPUT_DIRECTORY appended, piped to `doxygen -` on stdin.
        """
        # Run source code analysis and generate xml files
        input_file = 'INPUT=' + self.filename
        output_dir = 'OUTPUT_DIRECTORY=' + self.outdir
        cmd_1 = ['cat', self.conf]
        p1 = Popen(cmd_1, stdout=PIPE)
        doxy_conf = p1.communicate()[0]
        doxy_conf = doxy_conf + input_file + '\n'
        doxy_conf = doxy_conf + output_dir

        cmd = 'doxygen -'
        cmd_2 = cmd.split()
        p2 = Popen(cmd_2, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        # On rare occasions, doxygen may 'hang' on input files, not delivering
        # any result. Use a timeout to work around such situations.
        # BUG FIX: feed the config through communicate(input=...) instead of
        # a raw stdin.write() -- writing directly can deadlock once the pipe
        # buffer fills while doxygen's output is not being drained.
        try:
            p2.communicate(input=doxy_conf, timeout=5 * 60)  # Stop trying after 5 minutes
        except TimeoutExpired:
            log.warning("Doxygen got stuck, cancelling analysis run")
            p2.kill()
示例#30
0
    def run_benchmark(self,
                      workload_script,
                      benchmark_iteration,
                      log_start_fn,
                      waited_time_limit=None):
        """Run *workload_script* inside the head container and collect stats.

        Polls communicate() in short slices so long-running benchmarks can be
        progress-logged and killed once *waited_time_limit* is exceeded.
        Returns {"success", "return_code", "stats"}; stats are parsed from a
        'BENCHMARK_STATS: {...}' line in the benchmark's stdout.
        NOTE(review): Python 2 module (print statements).  With the default
        waited_time_limit=None, min(10, None) evaluates to None in Python 2,
        i.e. communicate() waits with no timeout; in Python 3 it would raise.
        """
        proc = Popen([
            "docker", "exec", self.head_container_id, "/bin/bash", "-c",
            "RAY_BENCHMARK_ENVIRONMENT=stress RAY_BENCHMARK_ITERATION={} RAY_REDIS_ADDRESS={}:6379 RAY_NUM_WORKERS={} python {}"
            .format(benchmark_iteration, self.head_container_ip,
                    self.num_workers, workload_script)
        ],
                     stdout=PIPE,
                     stderr=PIPE)

        log_start_fn(proc.pid)
        start_time = time.time()
        done = False
        while not done:
            try:
                # Wait in <=10s slices so progress can be logged and the
                # overall limit enforced between slices.
                (stdoutdata, stderrdata) = proc.communicate(
                    timeout=min(10, waited_time_limit))
                done = True
            except (subprocess32.TimeoutExpired):
                waited_time = time.time() - start_time
                if waited_time_limit and waited_time > waited_time_limit:
                    self.logger.log(
                        "killed", {
                            "pid": proc.pid,
                            "waited_time": waited_time,
                            "waited_time_limit": waited_time_limit
                        })
                    proc.kill()
                    return {"success": False, "return_code": None, "stats": {}}
                else:
                    self.logger.log(
                        "waiting", {
                            "pid": proc.pid,
                            "time_waited": waited_time,
                            "waited_time_limit": waited_time_limit
                        })
        # Extract the single JSON stats line, if the benchmark printed one.
        m = re.search('^BENCHMARK_STATS: ({.*})$', stdoutdata, re.MULTILINE)
        if m:
            output_stats = json.loads(m.group(1))
        else:
            output_stats = {}

        print stdoutdata
        print stderrdata
        return {
            "success": proc.returncode == 0,
            "return_code": proc.returncode,
            "stats": output_stats
        }
示例#31
0
def check_output(*popenargs, **kwargs):
    r"""Run a command and return its standard output as a byte string.

    Raises CalledProcessError (return code in ``returncode``, captured
    output in ``output``) when the command exits non-zero, and
    TimeoutExpired when ``timeout`` seconds elapse first.  Arguments
    mirror the Popen constructor; ``stdout`` is reserved for internal
    use (pass ``stderr=STDOUT`` to fold stderr into the result).

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'
    """

    deadline = kwargs.pop('timeout', None)
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')

    # Refuse to spawn anything once a global shutdown has been requested.
    if _kill_processes.is_set():
        raise TerminateSignaled()

    child = Popen(stdout=PIPE, *popenargs, **kwargs)
    _processes.append(child)

    try:
        captured, _ = child.communicate(timeout=deadline)
        _processes.remove(child)

    except TimeoutExpired:
        # Drop the child from the registry before killing and reaping it.
        _processes.remove(child)
        child.kill()
        captured, _ = child.communicate()
        raise TimeoutExpired(child.args, deadline, output=captured)

    status = child.poll()
    if status:
        raise CalledProcessError(status, child.args, output=captured)
    return captured
示例#32
0
 def run(self):
     '''
     Execute a module as a bash command. Open handles file object as input.
     Log output and/or errors.

     Raises JteratorError when the child reports an error (non-zero exit
     status or 'Error' in its stderr) or when Popen rejects the command.
     '''
     command = self.bake_command()
     try:
         process = Popen(command,
                         stdin=self.streams['input'],
                         stdout=self.streams['output'],
                         stderr=self.streams['error'])
         # Prepare handles input.
         input_data = None
         if self.streams['input'] == PIPE:
             input_data = open(self.handles).readlines()
             # We have to provide the temporary filename to the modules.
             for i, line in enumerate(input_data):
                 # Replace the value of the 'hdf5_filename' key.
                 # Doing this via YAML should be safer than raw string edits.
                 if re.match('hdf5_filename', line):
                     hdf5_key = yaml.load(line)
                     hdf5_key['hdf5_filename'] = self.tmp_filename
                     input_data[i] = yaml.dump(hdf5_key,
                                               default_flow_style=False)
             # Create the new handles string.
             input_data = ''.join(input_data)
         # Execute sub-process.
         (stdoutdata, stderrdata) = process.communicate(input=input_data)
         # Write output and errors if 'logging' is requested by user
         if self.logging_level is not None:
             self.write_output_and_errors(stdoutdata, stderrdata)
         # Modify for nicer output to command line.
         ignore_list = ['INFO:']
         if any([re.search(x, stderrdata) for x in ignore_list]):
             newstderrdata = str()
             for line in stderrdata.split('\n'):
                 if not any([re.search(x, line) for x in ignore_list]):
                     newstderrdata = newstderrdata + line
             stderrdata = newstderrdata
         print(stdoutdata)
         print(stderrdata)
         # BUG FIX: '.close' was referenced without being called, so STDIN
         # was never closed; also guard against process.stdin being None
         # when input comes from a file object rather than a pipe.
         if process.stdin is not None:
             process.stdin.close()
         # Take care of any errors during the execution.
         if process.returncode > 0 or re.search('Error', stderrdata):
             raise JteratorError(self.get_error_message(process,
                                 input_data, stdoutdata, stderrdata))
     except ValueError as error:
         raise JteratorError('Failed running \'%s\'. Reason: \'%s\'' %
                             (command, str(error)))
示例#33
0
def invoke_real_executable(argv):
    """Run the real compiler for *argv* and parse its captured stderr.

    Stderr is forwarded to this process' stderr and also fed to
    parse_gcc_stderr together with timing stats.  Returns the child's
    exit code; a KeyboardInterrupt stops waiting and returns whatever
    return code the Popen object currently reports.
    """
    # Removed dead `if 0:` debug-logging branch that could never execute.
    args = [get_real_executable(argv)] + argv[1:]
    p = Popen(args, stderr=PIPE)
    try:
        t = Timer()
        out, err = p.communicate()
        sys.stderr.write(err)
        parse_gcc_stderr(err,
                         stats=make_stats(t))
    except KeyboardInterrupt:
        # Best-effort: caller only needs the return code.
        pass
    return p.returncode
示例#34
0
    def process(self, json_data):
        """Render the Sablon template with *json_data* into a .docx file.

        Stores stdout/stderr/returncode of the sablon call on ``self``
        and, on success, the rendered document bytes in
        ``self.file_data``.  The temporary working directory is always
        removed, even on failure.
        """
        tmpdir_path = tempfile.mkdtemp(prefix="opengever.core.sablon_")
        output_path = join(tmpdir_path, "sablon_output.docx")
        template_path = self.template.as_file(tmpdir_path)

        try:
            # The sablon binary location can be overridden via SABLON_BIN.
            sablon_path = environ.get("SABLON_BIN", "sablon")
            # Renamed local: the old name 'subprocess' shadowed the stdlib
            # module of the same name.
            sablon_proc = Popen([sablon_path, template_path, output_path],
                                stdin=PIPE, stdout=PIPE, stderr=PIPE)
            self.stdout, self.stderr = sablon_proc.communicate(input=json_data)
            self.returncode = sablon_proc.returncode
            if self.is_processed_successfully():
                with open(output_path, "rb") as outfile:
                    self.file_data = outfile.read()
        finally:
            shutil.rmtree(tmpdir_path)
示例#35
0
 def _get_container_ip(self, container_id):
     """Return the bridge-network IP address of a docker container.

     Raises RuntimeError when `docker inspect` output does not start
     with a dotted-quad address.
     """
     proc = Popen([
         "docker", "inspect",
         "--format={{.NetworkSettings.Networks.bridge.IPAddress}}",
         container_id
     ],
                  stdout=PIPE,
                  stderr=PIPE)
     (stdoutdata, _) = proc.communicate()
     # Raw string: the old non-raw literal relied on '\.' surviving
     # unescaped, which raises invalid-escape warnings on modern Pythons.
     p = re.compile(r"([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})")
     m = p.match(stdoutdata)
     if not m:
         raise RuntimeError("Container IP not found")
     else:
         return m.group(1)
    def _execute(cmd, tmo, stdin_str, log, separate_stderr):
        """Run *cmd* (a shell-style string) with a timeout.

        With separate_stderr the two streams are captured independently
        and a three-tuple (rc, stdout, stderr) is returned; otherwise
        stderr is folded into stdout, the combined text right-stripped,
        and a two-tuple (rc, stdout) is returned.  A non-zero exit
        status raises Exception carrying the captured output.
        """
        if log:
            logging.info(">> running '%s' (timeout=%d)" % (cmd, tmo))
        child_stdin = None if stdin_str is None else PIPE
        if separate_stderr:
            child = Popen(shlex.split(cmd), stderr=PIPE, stdout=PIPE,
                          stdin=child_stdin)
            out, err = child.communicate(timeout=tmo, input=stdin_str)
        else:
            child = Popen(shlex.split(cmd), stderr=STDOUT, stdout=PIPE,
                          stdin=child_stdin)
            out = child.communicate(timeout=tmo, input=stdin_str)[0].rstrip()
            err = ''
        if log:
            logging.info(">>> '%s' returned=%s\nstdout=%s\nstderr=%s" % \
                         (cmd, str(child.returncode), str(out), str(err)))
        if int(child.returncode) != 0:
            raise Exception("%s: returned=%s\nstdout=%s\nstderr=%s" % \
                            (cmd, str(child.returncode), str(out), str(err)))
        if separate_stderr:
            return child.returncode, out, err

        return child.returncode, out
示例#37
0
文件: base.py 项目: naphatkrit/easyci
    def run(self, *args, **kwargs):
        """Execute *args* as a command and return its captured stdout.

        Runs inside ``self.path`` when set, with an emptied environment
        so GIT_WORK_TREE in particular cannot leak in.  Raises
        CommandError on a non-zero exit status.
        """
        if self.path is not None:
            # self.path is only None while __init__ is still running.
            kwargs.setdefault('cwd', self.path)

        # NOTE if we do want to make a copy of environmental variables,
        # we must remove GIT_WORK_TREE
        kwargs.update(env={}, stdout=PIPE, stderr=PIPE)

        child = Popen(args, **kwargs)
        out, err = child.communicate()
        if child.returncode != 0:
            raise CommandError(args[0], child.returncode, out, err)
        return out
示例#38
0
文件: base.py 项目: naphatkrit/easyci
    def run(self, *args, **kwargs):
        """Run a command, returning its stdout; raise CommandError on failure.

        The working directory defaults to ``self.path`` (when set) and
        the child gets a scrubbed environment.
        """
        if self.path is not None:
            # self.path is None only during __init__.
            kwargs.setdefault("cwd", self.path)

        # NOTE if we do want to make a copy of environmental variables,
        # we must remove GIT_WORK_TREE
        kwargs["env"] = {}
        kwargs["stdout"] = kwargs["stderr"] = PIPE

        child_proc = Popen(args, **kwargs)
        stdout_data, stderr_data = child_proc.communicate()
        rc = child_proc.returncode
        if rc != 0:
            raise CommandError(args[0], rc, stdout_data, stderr_data)
        return stdout_data
示例#39
0
    def clone(cls, remote_url, path):
        """Clone the remote and return a GitVcs object pointed at the new repo.

        :param str remote_url: the URL to clone from
        :param str path: path to clone to

        :rtype: GitVcs
        :returns: a GitVcs object for the new cloned repo

        :raises tigerhost.vcs.base.CommandError:
        """
        cmd = ['git', 'clone', '--recursive', remote_url, path]
        child = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = child.communicate()
        if child.returncode != 0:
            raise CommandError(cmd[0], child.returncode, out, err)
        return cls(path=path)
示例#40
0
    def clone(cls, remote_url, path):
        """Clone ``remote_url`` into ``path`` and wrap it in a GitVcs.

        :param str remote_url: the URL to clone from
        :param str path: path to clone to
        :rtype: GitVcs
        :returns: a GitVcs object for the new cloned repo
        :raises tigerhost.vcs.base.CommandError: on a non-zero git exit
        """
        args = ['git', 'clone', '--recursive', remote_url, path]
        process = Popen(args, stdout=PIPE, stderr=PIPE)
        stdout, stderr = process.communicate()
        rc = process.returncode
        if rc:
            raise CommandError(args[0], rc, stdout, stderr)
        return cls(path=path)
示例#41
0
 def run_compiler(self, language, filename, executable_name):
     """Compile *filename* with gcc/g++; raise CompileErrorException on failure.

     language: truthy selects g++ (C++), falsy selects gcc (C).
     The compile is bounded by self.COMPILE_TIMEOUT seconds.
     """
     try:
         from subprocess import TimeoutExpired
     except ImportError:  # Python 2 with the subprocess32 backport
         from subprocess32 import TimeoutExpired
     args = ["g++" if language else "gcc", "-static", "-w", "-O2", filename,
             "-o", executable_name]
     self.log += ['Running: ' + ' '.join(args)]
     proc = Popen(args,
                  cwd=self.base_dir, stdin=PIPE, stdout=PIPE, stderr=PIPE)
     try:
         output = proc.communicate(timeout=self.COMPILE_TIMEOUT)
         self.log += [str(output[1])]
     except TimeoutExpired:
         # BUG FIX: the timeout previously escaped as an uncaught
         # exception; the old `proc.poll() is None` branch was dead code
         # because communicate() only returns once the child has exited.
         self.log += ['Compile timeout.']
         try:
             proc.kill()
         except Exception:
             pass
         proc.communicate()  # reap the killed compiler
     self.log += ["Compiler returns %d." % proc.returncode]
     if proc.returncode:
         raise CompileErrorException()
示例#42
0
    def stop(self, job, msg, exit_code):
        """
        Stop running job and remove it from PBS queue.

        :param job: :py:class:`Job` instance
        :param msg: Message that will be passed to the user
        :param exit_code: Exit code the job is marked with on success

        :raises CisError: when qdel reports the job is in the wrong state
            (exit code 170) and the kill should be retried later.
        :raises OSError: when qdel exits non-zero for any other reason.
        """
        _pbs_id = ''

        # Get Job PBS ID
        try:
            _pbs_id = str(job.scheduler.id)
        except Exception:
            # BUG FIX: was a bare `except:`, which would also swallow
            # SystemExit / KeyboardInterrupt.
            job.die('@PBS - Unable to read PBS job ID', exc_info=True)
            return

        # Run qdel
        logger.debug("@PBS - Killing job")
        # @TODO Seperate users for each sevice
        # Run qdel with proper user permissions
        # _user = self.jm.services[job.service].config['username']
        # _comm = "/usr/bin/qdel %s" % _pbs_id
        # _sudo = "/usr/bin/sudo /bin/su -c \"%s\" %s" % (_comm, _user)
        # _opts = ['/usr/bin/ssh', '-t', '-t', 'local', _sudo]
        _opts = ['/usr/bin/qdel', '-b', str(conf.pbs_timeout), _pbs_id]
        logger.log(VERBOSE, "@PBS - Running command: %s", _opts)
        _proc = Popen(_opts, stdout=PIPE, stderr=STDOUT)
        _output = _proc.communicate()[0]
        logger.log(VERBOSE, _output)
        # Check return code. If qdel was not killed by signal Popen will
        # not rise an exception
        # @TODO handle temporary communication timeouts with pbs server
        if _proc.returncode == 170:  # Job in wrong state (e.g. exiting)
            logger.debug("@PBS - Wait with job kill: /usr/bin/qdel "
                         "returned 170 exit code (%s)", _output)
            raise CisError("PBS qdel wrong job state")
        if _proc.returncode != 0:
            raise OSError((
                _proc.returncode,
                "/usr/bin/qdel returned non zero exit code.\n%s" %
                str(_output)
            ))

        # Mark as killed by user
        job.mark(msg, exit_code)
    def invoke(self, argv):
        """Run per-source side-effect analyses, then invoke the real driver.

        For each '.c' source file in the gcc invocation, a restricted
        single-source invocation (with '-c' appended so no link step is
        attempted) is handed to every tool in self.side_effects and each
        resulting analysis is written out as XML.  Afterwards the full,
        unmodified command line is run through the real driver; its
        stdout/stderr are forwarded to self.ctxt and its exit status is
        stored in self.returncode.
        """
        # NOTE(review): this logs sys.argv, not the argv parameter —
        # confirm that is intended.
        self.log("Driver.invoke: %s"
            % ' '.join(sys.argv))

        gccinv = GccInvocation(argv)

        self.log('  gccinv.sources: %r' % gccinv.sources)

        # Run the side effects on each source file:
        for sourcefile in gccinv.sources:
            self.log('    sourcefile: %r' % sourcefile)
            if sourcefile.endswith('.c'): # FIXME: other extensions?
                single_source_gccinv = gccinv.restrict_to_one_source(sourcefile)

                # Avoid linker errors due to splitting up the build into
                # multiple gcc invocations:
                single_source_gccinv.argv += ['-c']

                self.log('    single_source_gccinv: %r' % single_source_gccinv)
                for side_effect in self.side_effects:
                    analysis = self.invoke_tool(side_effect,
                                                single_source_gccinv,
                                                sourcefile)
                    #analysis.set_custom_field('gcc-invocation', ' '.join(argv))
                    self.write_analysis_as_xml(analysis)

        # Now run the real driver.
        # Note that we already ran the real gcc earlier as a
        # side-effect per source-file, capturing warnings there.
        # We have to do it separately from here since the invocation
        # might cover multiple source files.

        argv = [self.real_driver] + gccinv.argv[1:]
        env=os.environ.copy()
        # Force untranslated (English) diagnostics so downstream parsing
        # sees stable messages.
        # FIXME: this probably shouldn't be hardcoded
        env['LANG'] = 'C'
        p = Popen(argv,
                  stdout=PIPE, stderr=PIPE, env=env)
        out, err = p.communicate()
        self.ctxt.stdout.write(out)
        self.ctxt.stderr.write(err)
        self.returncode = p.returncode
示例#44
0
 def run_judge_client(self):
     """Run judge-client.py over the two bots and record the outcome.

     The client's stdout is parsed as: line 0 = winner, line 1 = reason,
     remainder = replay.  Raises JudgeClientException when the client
     crashes or reports an unknown winner/reason.
     """
     self.copy_assets()
     args = [
         "python",
         os.path.realpath('judge-client.py'), './attacker', './defender'
     ]
     self.log += ['Running: ' + ' '.join(args)]
     proc = Popen(args,
                  cwd=self.base_dir,
                  stdin=PIPE,
                  stdout=PIPE,
                  stderr=PIPE)
     stdout_data, stderr_data = proc.communicate()
     self.log += [str(stderr_data)]
     if proc.returncode:
         self.log += [
             "Judge client crashed with return code %d." % proc.returncode
         ]
         raise JudgeClientException("judge client crashed.")
     lines = stdout_data.split('\n')
     winner = lines[0]
     if winner == "attacker":
         self.record.attacker_wins()
     elif winner == "defender":
         self.record.defender_wins()
     else:
         self.log += ["Judge client return unknown winner %s." % winner]
         raise JudgeClientException("unknown winner.")
     reason = lines[1]
     # Map the reported reason onto an execution status.
     status_by_reason = {
         "Finished": ExecutionRecord.STATUS_FINISHED,
         "IllegalMovement": ExecutionRecord.STATUS_ILLEGAL_MOVE,
         "IllegalOutput": ExecutionRecord.STATUS_BAD_FORMAT,
         "TLE": ExecutionRecord.STATUS_TLE,
         "Crashed": ExecutionRecord.STATUS_RUNTIME_ERROR,
     }
     if reason not in status_by_reason:
         self.log += ["Judge client return unknown reason %s." % reason]
         raise JudgeClientException("unknown reason.")
     self.record.status = status_by_reason[reason]
     self.record.replay = lines[2:]
示例#45
0
def check_output(*popenargs, **kwargs):
    """
    Re-implement check_output from subprocess32, but with a timeout that
    kills the whole child process group.

    The child is started in its own session (preexec_fn=os.setsid), so
    on timeout SIGINT can be delivered to every process in its group.

    :raises TimeoutExpired: when ``timeout`` seconds elapse first.
    :raises CalledProcessError: when the command exits non-zero.

    See https://github.com/google/python-subprocess32/blob/master/subprocess32.py#L606
    """
    timeout = kwargs.pop('timeout', None)
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = Popen(stdout=PIPE, preexec_fn=os.setsid, *popenargs, **kwargs)
    try:
        output = process.communicate(timeout=timeout)[0]
    except TimeoutExpired as error:
        # setsid above made the child a session/group leader, so its pid
        # doubles as the process-group id.
        os.killpg(process.pid, signal.SIGINT)
        # BUG FIX: reap the signalled child so it cannot linger as a
        # zombie (previously we re-raised without ever waiting on it).
        process.communicate()
        raise error
    retcode = process.poll()
    if retcode:
        raise CalledProcessError(retcode, process.args, output=output)
    return output
示例#46
0
class CppSim(ProcessWorkerThread):
    """Worker that evaluates a point by running the ./sphere_ext_files binary."""

    def handle_eval(self, record):
        """Write record.params[0] to the input file, run the external
        simulator, and report the parsed float result back.

        A malformed or missing value from the child (ValueError) cancels
        the record instead of crashing the worker; the input file is
        removed in both paths.
        """
        try:
            # BUG FIX: use a context manager so the input-file handle is
            # closed even if the write raises (the old open/close pair
            # leaked the handle on error).
            with open(self.my_filename, 'w') as f:
                f.write(array2str(record.params[0]))

            self.process = Popen(['./sphere_ext_files', self.my_filename],
                                 stdout=PIPE,
                                 bufsize=1,
                                 universal_newlines=True)
            val = self.process.communicate()[0]

            self.finish_success(record, float(val))
            os.remove(self.my_filename)  # Remove input file
        except ValueError:
            logging.info("WARNING: Incorrect output or crashed evaluation")
            os.remove(self.my_filename)  # Remove input file
            self.finish_cancelled(record)
示例#47
0
 def _start_worker_node(self, mem_size, shm_size, num_workers):
     """Start one worker container joined to the head node's Redis.

     mem_size / shm_size are optional docker limits (e.g. "1g"); when
     falsy the corresponding flag is omitted entirely.  Raises
     RuntimeError if no container id can be parsed from docker's output.
     """
     mem_arg = ["--memory=" + mem_size] if mem_size else []
     shm_arg = ["--shm-size=" + shm_size] if shm_size else []
     # BUG FIX: "--shm-size=" + shm_size was also appended unconditionally
     # below, duplicating the flag and raising TypeError when shm_size is
     # None; shm_arg already carries it when set.
     proc = Popen(["docker", "run", "-d"] + mem_arg + shm_arg + [
         "ray-project/benchmark",
         "/ray/scripts/start_ray.sh", "--redis-address={:s}:6379".format(
             self.head_container_ip),
         "--num-workers={:d}".format(num_workers)
     ],
                  stdout=PIPE)
     (stdoutdata, _) = proc.communicate()  # stderr is not piped
     container_id = self._get_container_id(stdoutdata)
     if not container_id:
         raise RuntimeError("Failed to find container id")
     self.worker_container_ids.append(container_id)
     self.logger.log(
         "start_node", {
             "container_id": container_id,
             "is_head": False,
             "num_workers": num_workers,
             "shm_size": shm_size
         })
 def _run_subprocess(self, sourcefile, argv, env=None):
     """
     Support for running the bulk of the side effect in a subprocess,
     with timeout support.

     Returns the analysis produced by handle_output, or a synthetic
     'timeout' failure analysis when self.timeout seconds elapse.
     """
     self.log('%s: _run_subprocess(%r, %r)' % (self.name, sourcefile, argv))
     if 0:
         self.log('env: %s' % env)
     p = Popen(argv,
               stdout=PIPE, stderr=PIPE, env=env)
     try:
         t = Timer()
         out, err = p.communicate(timeout=self.timeout)
         self.ctxt.write_streams(argv[0], out, err)
         result = SubprocessResult(sourcefile, argv, p.returncode, out, err, t)
         analysis = self.handle_output(result)
         return analysis
     except TimeoutExpired:
         # BUG FIX: kill and reap the child on timeout; previously it was
         # left running (and unreaped) while we reported the failure.
         p.kill()
         p.communicate()
         analysis = self._make_failed_analysis(sourcefile, t,
                                               msgtext='Timeout running %s' % self.name,
                                               failureid='timeout')
         analysis.set_custom_field('timeout', self.timeout)
         analysis.set_custom_field('command-line', ' '.join(argv))
         return analysis
示例#49
0
def run_popen_with_timeout(command_string, timeout):
    """
    Run a sub-program in subprocess.Popen and SIGTERM it if the specified
    timeout has passed.

    Returns a tuple (success, stdout, stderr); success is False when the
    watchdog had to kill the process.  Note: nothing is written to the
    child's stdin (the old docstring wrongly claimed input was passed).
    """
    kill_check = threading.Event()

    def _kill_process_after_a_timeout(pid):
        os.kill(pid, signal.SIGTERM)
        kill_check.set()  # tell the main routine that we had to kill
        # use SIGKILL if hard to kill...
        return

    p = Popen(command_string, bufsize=1, shell=True,
              stdin=PIPE, stdout=PIPE, stderr=PIPE)
    pid = p.pid
    watchdog = threading.Timer(timeout, _kill_process_after_a_timeout, args=(pid,))
    watchdog.start()
    stdout, stderr = p.communicate()
    watchdog.cancel()  # if it's still waiting to run
    # BUG FIX: Event.isSet() is a deprecated alias removed in Python 3.12;
    # use is_set().
    success = not kill_check.is_set()
    kill_check.clear()
    return (success, stdout, stderr)
示例#50
0
def change_password(username, current_password, new_password):
    """Change *username*'s Samba password via smbpasswd.

    Feeds current/new password over smbpasswd's stdin (-s) and maps its
    output onto a translated message for the web UI.  Returns None when
    no known output pattern matched.
    """
    msg_to_web = None
    
    # Popen executes smbpasswd as a child program in a new process,
    # with arguments to the program smbpasswd in a list [].
    #
    # The -s option causes smbpasswd to read from stdin instead of prompting the user.
    # The -U [username] option allows a user to change his/her own password.
    #
    # stdin, stdout and stderr are assigned as PIPE so that we can
    # use communicate() to provide input to stdin of smbpasswd and
    # get message of both success and error back from the program.
    #
    # shell=False in order to avoid the security risk with shell 
    # injection from unsanitized input such as "input; rm -rf /".
    smbpasswd_proc = Popen([u"smbpasswd", u"-s", u"-r", SMB_SERVER, u"-U", username],
                                 stdout=PIPE, stdin=PIPE, stderr=STDOUT, shell=False)

    try:
        # Space, '.' and newline are inconsistently used in the output from smbpasswd
        # and therefore we strip those characters from the end of the output so that
        # we can do sane regex matching without fearing that one day someone will fix
        # this and break our application.
        smbpasswd_output = (smbpasswd_proc.communicate(
                                    input=(current_password + u'\n'
                                           + new_password   + u'\n'
                                           + new_password   + u'\n')
                                           .encode("UTF-8"), timeout=30)[0]
                                           ).rstrip(u' .\n') 
    except TimeoutExpired:
        smbpasswd_proc.kill()
        # BUG FIX: this branch used to log `smbpasswd_output`, a name that
        # is never bound when communicate() raises, turning every timeout
        # into a NameError.  Reap the killed child and log whatever
        # partial output it produced instead.
        partial_output = smbpasswd_proc.communicate()[0]
        log_to_file(u"TIME_OUT: User: %s: subprocess.communicate timed out." % username)
        smbpasswd_output_to_logfile(partial_output)
        return u"The operation timed out. Please contact your system administrator."

    # According to the output from smbpasswd, decide what message should be shown 
    # in the log and on the web page. 
    if smbpasswd_output.endswith(u'NT_STATUS_LOGON_FAILURE'):
        msg_to_web = translate_message(u"change_password", u"1") 
        log_to_file("AUTH_FAIL: User: %s entered invalid USERNAME or PASSWORD." % username)
        smbpasswd_output_to_logfile(smbpasswd_output)

    # Not all configurations of samba provides this information.
    # "map to guest = bad user" is needed in /etc/samba/smb.conf to make this work.         
    elif smbpasswd_output.endswith(u'NT_STATUS_RPC_PROTOCOL_ERROR'):
        msg_to_web = translate_message(u"change_password", u"2")
        log_to_file(u"Error: User: %s: Incorrect USERNAME" % username)
        smbpasswd_output_to_logfile(smbpasswd_output)

    elif smbpasswd_output.endswith(u'NT_STATUS_UNSUCCESSFUL'):
        msg_to_web = translate_message(u"change_password", u"3")
        log_to_file(u"Error: Could not connect to the Samba server. " 
                    u"Server down or unreachable.")
        smbpasswd_output_to_logfile(smbpasswd_output)

    elif smbpasswd_output.endswith(u'NT_STATUS_INVALID_PARAMETER'):
        msg_to_web = translate_message(u"change_password", u"4")
        log_to_file(u"Error: Invalid parameter detected for smbpasswd.")
        smbpasswd_output_to_logfile(smbpasswd_output)

    elif smbpasswd_output.endswith(u'Error was : Password restriction'):
        msg_to_web = translate_message(u"change_password", u"5")
        log_to_file(u"Error: User: %s tried to change her/his password. But it did " 
                    u"not conform to the policy set by Samba" %  username)
        smbpasswd_output_to_logfile(smbpasswd_output)
    
    elif smbpasswd_output.startswith(u'Unable to find an IP address for'):
        msg_to_web = translate_message(u"change_password", u"6")
        log_to_file(u"ServerName_Error: Server name/address in gsmbpasswd.conf is invalid.")
        smbpasswd_output_to_logfile(smbpasswd_output)
     
    elif smbpasswd_output.startswith(u'Password changed for user'):
        msg_to_web = translate_message(u"change_password", u"7")
        log_to_file(u"SUCCESS: User: %s changed password successfully." % username)
        smbpasswd_output_to_logfile(smbpasswd_output)
        
    return msg_to_web
示例#51
0
                return_code = call(['./gdc', filepath, os.path.join(directory[0], dir_entry[:-5] + '.exec')], 
                    timeout=15)
                if return_code == 0:
                    print 'COMPILATION SUCCESSFUL'
                    comp_success = True
                else:
                    print 'COMPILATION FAILED'
                    summary_results[dir_entry[:-5]] = ('fail', directory[0])
            except:
                print 'compile executable. Stop.'
                continue;

            if (comp_success):
                out_child = Popen('./' + os.path.join(directory[0], dir_entry[:-5]) + '.exec', 
                    shell=True, stdout=PIPE, stderr=PIPE)
                output = out_child.communicate()[0]
                
                output_filepath = os.path.join(directory[0], dir_entry[:-5] + '.outgdc')
                with open(output_filepath, 'w') as intermediate_output:
                    intermediate_output.write(output)

                out_filepath = os.path.join(directory[0], dir_entry[:-5] + '.out')
                output_filepath = os.path.join(directory[0], dir_entry[:-5] + '.outgdc')

                if (os.path.exists(out_filepath)):
                    diff_command = ['diff', '-bB', out_filepath, output_filepath]
                    diff_child = Popen(diff_command, stdout=PIPE)
                    diff_output = diff_child.communicate()[0]

                    if diff_output.strip() == '':
                        print 'PASSED TEST'
示例#52
0
import datetime
import re
from subprocess32 import (PIPE, Popen, call)

'''
PreCluster submission.

Joblist is created and the first job is send out to test whether the pipeline
runs successfully.
'''


# 1) Create joblist
# Run the 'jt joblist' CLI tool and capture both output streams so the
# failure check below can inspect them.
process = Popen(['jt', 'joblist'], stdin=PIPE, stdout=PIPE, stderr=PIPE)

(stdoutdata, stderrdata) = process.communicate()
# Python 2 print statements: echo the tool's output for the operator.
print stdoutdata
print stderrdata

# Abort on a non-zero exit code, or when the tool reports 'Failed' on
# STDERR despite exiting cleanly.
if process.returncode > 0 or re.search('Failed', stderrdata):
    raise Exception('\n--> Building joblist failed!')

# Ensure the 'lsf' output directory exists before submission.
# NOTE(review): 'os' and 'time' are used below but are not imported in the
# visible part of this snippet - presumably imported earlier; verify.
if not os.path.exists('lsf'):
    os.mkdir('lsf')

# 2) Run 'PreCluster'
# Build a timestamped log-file path such as
# lsf/00001_2020-01-01_12-00-00.precluster (index is hard-coded to 1 here).
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')
lsf = os.path.abspath(os.path.join('lsf', '%.5d_%s.precluster' % (1, st)))
if not os.path.exists(lsf):
    print('jt - PreCluster submission:')
示例#53
0
    def submit(self, job):
        """
        Submit a job to PBS queue. The "pbs.sh" script should be already
        present in the pbs_work_path/job directory.

        :param job: :py:class:`Job` instance
        :return: True on success and False otherwise.
        """

        # Check that maximum job limit is not exceeded
        # Use exclusive session to be thread safe, no need to pass the session
        # from JobManager
        _job_count = -1
        try:
            _session = G.STATE_MANAGER.new_session()
            _job_count = G.STATE_MANAGER.get_job_count(scheduler=self.name,
                                                       session=_session)
            _session.close()
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are not swallowed while probing the DB.
            logger.error("@PBS - Unable to connect to DB.", exc_info=True)
        if _job_count < 0:
            # DB query failed - refuse the submission rather than risk
            # exceeding max_jobs.
            return False
        if _job_count >= self.max_jobs:
            return False

        # Path names
        _work_dir = os.path.join(self.work_path, job.id())
        _run_script = os.path.join(_work_dir, "pbs.sh")
        _output_log = os.path.join(_work_dir, "output.log")
        # Select queue: service default, overridable per job via CIS_QUEUE
        _queue = G.SERVICE_STORE[job.status.service].config['queue']
        if 'CIS_QUEUE' in job.data.data:
            _queue = job.data.data['CIS_QUEUE']

        try:
            # Submit
            logger.debug("@PBS - Submitting new job")
            # Run qsub with proper user permissions
            #
            # @TODO
            # Implementation of dedicated users for each service
            # - The apprunner user currently does not allow to log in via ssh
            # - either this limitation is lifted or the server will be run as
            #    another user that is allowed to perform "sudo su apprunner"
            #  - to execute sudo su a TTY is required
            #  - this can be cirumvented using "ssh -t -t"
            #   - A passwordless key is setup:
            #    - .ssh/authorized_keys entry contains from="127.0.0.1" to
            #      limit the scope of the key
            #    - .ssh/config contains following entries to auto select the
            #      correct key when connecting to the "local" host:
            #     Host local
            #         HostName 127.0.0.1
            #         Port 22
            #         IdentityFile ~/.ssh/id_dsa_local
            #  - using this will generate TTY warning messages that are
            #    suppressed only for OpenSSH 5.4 or newer. Hence the STDERR
            #    should not be mixed with STDIN in this case
            # - There still is a problem of file permisions:
            #  - Data management is run with service users permisions
            #  - Data is set to be group writable
            #  - We use ACLs
            # @TODO END
            #
            # _user = self.jm.services[job.service].config['username']
            # _comm = "/usr/bin/qsub -q %s -d %s -j oe -o %s " \
            # "-l epilogue=epilogue.sh %s" % \
            # (_queue, _work_dir, _output_log, _run_script)
            # _sudo = "/usr/bin/sudo /bin/su -c \"%s\" %s" % (_comm, _user)
            # _opts = ['/usr/bin/ssh', '-t', '-t', 'local', _sudo]
            _opts = ['/usr/bin/qsub', '-q', _queue, '-d', _work_dir, '-j',
                     'oe', '-o', _output_log, '-b', str(conf.pbs_timeout),
                     '-l', 'epilogue=epilogue.sh',
                     _run_script]
            logger.log(VERBOSE, "@PBS - Running command: %s", _opts)
            _proc = Popen(_opts, stdout=PIPE, stderr=PIPE)
            try:
                # Bound the wait like update() does for qstat - otherwise a
                # wedged qsub blocks the scheduler indefinitely.
                _out = _proc.communicate(timeout=conf.pbs_timeout)
            except TimeoutExpired:
                _proc.kill()
                _proc.communicate()  # reap the killed process
                raise
            _output = _out[0]
            logger.log(VERBOSE, _out)
            # Hopefully qsub returned meaningful job ID on STDOUT
            _pbs_id = _output.strip()
            # Check return code. If qsub was not killed by signal Popen will
            # not rise an exception
            if _proc.returncode != 0:
                raise OSError((
                    _proc.returncode,
                    "/usr/bin/qsub returned non zero exit code.\n%s" %
                    str(_out)
                ))
        except Exception:
            # Narrowed from a bare "except:"; also catches the re-raised
            # TimeoutExpired above.
            job.die("@PBS - Unable to submit job %s." % job.id(), exc_info=True)
            return False

        # @TODO Do I need DB session here???
        # Store the PBS job ID
        job.scheduler = Jobs.SchedulerQueue(
            scheduler=self.name, id=_pbs_id, queue=_queue)
        # Reduce memory footprint
        job.compact()

        logger.info("Job successfully submitted: %s", job.id())
        return True
示例#54
0
    def update(self, jobs):
        """
        Update job states to match their current state in PBS queue.

        :param jobs: A list of Job instances for jobs to be updated.
        """
        # Extract list of user names associated to the jobs
        _users = []
        for _service in G.SERVICE_STORE.values():
            if _service.config['username'] not in _users:
                _users.append(_service.config['username'])

        # We agregate the jobs by user. This way one qstat call per user is
        # required instead of on call per job
        _job_states = {}
        for _usr in _users:
            try:
                # Run qstat ("-x" requests XML output, parsed below)
                logger.log(VERBOSE, "@PBS - Check jobs state for user %s", _usr)
                _opts = ["/usr/bin/qstat", "-f", "-x", "-u", _usr]
                logger.log(VERBOSE, "@PBS - Running command: %s", _opts)
                _proc = Popen(_opts, stdout=PIPE, stderr=STDOUT)
                # Requires subprocess32 module from pip (timeout support)
                _output = _proc.communicate(timeout=conf.pbs_timeout)[0]
                logger.log(VERBOSE, _output)
                # Check return code. If qstat was not killed by signal Popen
                # will not rise an exception
                if _proc.returncode != 0:
                    raise OSError((
                        _proc.returncode,
                        "/usr/bin/qstat returned non zero exit code.\n%s" %
                        str(_output)
                    ))
            except TimeoutExpired:
                # qstat is stuck - kill it before giving up on this cycle
                _proc.kill()
                logger.error("@PBS - Unable to check jobs state.",
                             exc_info=True)
                return
            except Exception:
                # Narrowed from a bare "except:" so SystemExit and
                # KeyboardInterrupt are not swallowed here.
                logger.error("@PBS - Unable to check jobs state.",
                             exc_info=True)
                return

            # Parse the XML output of qstat
            try:
                _xroot = ET.fromstring(_output)
                for _xjob in _xroot.iter('Job'):
                    _xjid = _xjob.find('Job_Id').text
                    _xstate = _xjob.find('job_state').text
                    _xexit = _xjob.find('exit_status')
                    if _xexit is not None:
                        _xexit = _xexit.text
                    _job_states[_xjid] = (_xstate, _xexit)
            except Exception:
                logger.error("@PBS - Unable to parse qstat output.",
                             exc_info=True)
                return
            logger.log(VERBOSE, _job_states)

        # Iterate through jobs
        for _job in jobs:
            # TODO rewrite to get an array JID -> queue from SchedulerQueue table with single SELECT
            _pbs_id = str(_job.scheduler.id)
            # Check if the job exists in the PBS
            if _pbs_id not in _job_states:
                # BUGFIX: die() is not a lazy %-formatting logger call -
                # format the message explicitly, as every other call site
                # in this module does.
                _job.die('@PBS - Job %s does not exist in the PBS' %
                         _job.id())
            else:
                # Update job progress output
                self.progress(_job)
                _new_state = 'queued'
                _exit_code = 0
                _state = _job_states[_pbs_id]
                logger.log(VERBOSE, "@PBS - Current job state: '%s' (%s)",
                           _state[0], _job.id())
                # Job has finished. Check the exit code.
                if _state[0] == 'C':
                    _new_state = 'done'
                    _msg = 'Job finished succesfully'

                    _exit_code = _state[1]
                    if _exit_code is None:
                        # qstat reported no exit_status at all
                        _new_state = 'killed'
                        _msg = 'Job was killed by the scheduler'
                        _exit_code = ExitCodes.SchedulerKill

                    # Map the numeric exit code onto a final job state:
                    # >256 scheduler kill, >128 killed (by signal),
                    # >0 plain failure, 0 success.
                    _exit_code = int(_exit_code)
                    if _exit_code > 256:
                        _new_state = 'killed'
                        _msg = 'Job was killed by the scheduler'
                    elif _exit_code > 128:
                        _new_state = 'killed'
                        _msg = 'Job was killed'
                    elif _exit_code > 0:
                        _new_state = 'failed'
                        _msg = 'Job finished with error code'

                    try:
                        _job.finish(_msg, _new_state, _exit_code)
                    except Exception:
                        _job.die('@PBS - Unable to set job state (%s : %s)' %
                                 (_new_state, _job.id()), exc_info=True)
                # Job is running
                elif _state[0] == 'R' or _state[0] == 'E':
                    if _job.get_state() != 'running':
                        try:
                            _job.run()
                        except Exception:
                            _job.die("@PBS - Unable to set job state "
                                     "(running : %s)" % _job.id(), exc_info=True)
                # Treat all other states as queued
                else:
                    if _job.get_state() != 'queued':
                        try:
                            _job.queue()
                        except Exception:
                            _job.die("@PBS - Unable to set job state "
                                     "(queued : %s)" % _job.id(), exc_info=True)