Example #1
def check_upgrade():
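        # download the server's copy, strip the PIFM_HOST line from both copies,
        # and compare md5 sums to decide whether a self-update is needed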
        server_file = curl(PIFM_HOST + '/client_agent/pifm_agent.py')
        server_sum = awk(
                        md5sum(
                            grep(server_file, '-v', '^PIFM_HOST')
                        ), '{print $1}'
        )

        local_sum = awk(
                        md5sum(
                            grep('-v', '^PIFM_HOST', OUR_SCRIPT)
                        ), '{print $1}'
        )

        if str(server_sum) != str(local_sum):
            logging.info(
                "server: {server}, local: {local}, should update.".format(
                    server=server_sum,
                    local=local_sum
                )
            )
            with open(TMP_SCRIPT, 'w') as f:
                f.write(str(server_file))
            sed('-i',
                "0,/def/ s#http://pi_director#{myhost}#".format(myhost=PIFM_HOST),
                TMP_SCRIPT  # patch the downloaded copy before compiling and installing it
                )
            status = python('-m', 'py_compile', TMP_SCRIPT)
            if status.exit_code == 0:
                shutil.copy(TMP_SCRIPT, OUR_SCRIPT)
                os.chmod(OUR_SCRIPT, 0o755)
                sys.exit(0)
Example #2
def make_stats(self, rerun=False):
    self.stats = {}
    if os.path.exists(self.out_dir + "mapper/"):
        if os.path.exists(self.out_dir + self.name + "_stats.json") and not rerun:
            print "Loading stats for", self.name
            with open(self.out_dir + self.name + "_stats.json") as handle:
                self.stats = json.load(handle)
        else:
            print "Computing stats for", self.name
            self.stats['raw_reads'] = self.pairs
            self.stats['output'] = self.out_dir
            print "Computing raw number of paired-reads"
            self.stats['read_count'] = int(sh.zgrep("-Ec", "$", self.reads)) / 4
            print "Computing clean number of paired-reads"
            self.stats['clean_read_count'] = int(sh.grep("-Ec", "$", self.reads.replace("fastq.gz", "clean.fastq"))) / 4
            self.stats['ratios'] = float(self.stats['clean_read_count']) / float(self.stats['read_count'])
            for k in tqdm(self.success_ks):
                self.stats[k] = {}
                self.stats[k]['Success'] = os.path.exists(self.out_dir + "ray_" + str(k) + "/Contigs.fasta")
                if self.stats[k]['Success']:
                    t = sh.assemstats(0, self.out_dir + "ray_" + str(k) + "/Contigs.fasta")
                    self.stats[k]['Assembly'] = {a: b for a, b in zip(*[[w.strip() for w in l.split("\t")][1:] for l in str(t).split("\n")[0:2]])}
                    self.stats[k]['mapped_frac'] = float(sh.grep("overall", self.out_dir + "map_" + str(k) + ".err").split(" ")[0][0:-1]) / 100
                    self.stats[k]['dupli_frac'] = float(sh.grep("Unknown", self.out_dir + "mapper/ray_" + str(k) + "_" + self.name + "-smd.metrics").split("\t")[7])

    with open(self.out_dir + self.name + "_stats.json", 'w') as handle:
        json.dump(self.stats, handle)
Example #3
def assert_hg_count(count, rev=None):
    '''Assuming you are in an hg repository, assert that ``count`` commits
    have been made to that repo.'''
    if rev:
        assert sh.grep(sh.hg.log(rev=rev), 'changeset:').stdout.count(b'\n') == count
    else:
        assert sh.grep(sh.hg.log(), 'changeset:').stdout.count(b'\n') == count
Example #5
def check_if_running_polkit_auth():
    """
    check if polkit authentication agent is running
    and launch it if it is not
    """
    try:
        grep(ps('aux'), '[p]olkit-gnome-authentication-agent-1')
    except sh.ErrorReturnCode_1:
        logger.debug('polkit auth agent not found, trying to launch it...')
        run_polkit_auth_agent()
Example #6
    def parse_rdf(self):
        """ cat|grep's the rdf file for minimum metadata
        """
        # FIXME: make this an rdf parser if I can
        _title = sh.grep(sh.cat(self.rdf_path), 'dcterms:title', _tty_out=False)
        try:
            _author = sh.grep(sh.cat(self.rdf_path), 'name', _tty_out=False)
            self.author = self._clean_properties(_author)
        except sh.ErrorReturnCode_1:
            self.author = "Various"

        self.title = self._clean_properties(_title)
Example #7
    def _info_celery(self):
        d = {}

        try:
            lines = sh.grep(
                sh.grep(self._ps(), "celery"), "worker").split("\n")[:-1]
            for line in lines:
                (pid, command) = line.lstrip().split(" ", 1)
                d[pid] = line
        except:
            pass
        return d
Example #8
def find_process_pid(process_line, child_process=False):
    ps_opts = 'auxww' if not child_process else 'auxfww'
    try:
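        # equivalent of: ps <opts> | grep -ie <process_line> | grep -v grep | awk '{print $2}'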
        pid = sh.awk(
            sh.grep(
                sh.grep(sh.ps(ps_opts, _piped=True, _tty_out=False), "-ie", process_line),
                '-v', 'grep'),
            "{print $2}",
        )
    except sh.ErrorReturnCode:
        raise AssertionError("Cannot find process pid")

    return pid.strip()
Example #9
File: ssh.py Project: baremetal/debgit
def cache_ssh_key(hostname):
    """
    Request the host's key and cache it in .ssh/known_hosts

    http://serverfault.com/questions/321167/add-correct-host-key-in-known-hosts-multiple-ssh-host-keys-per-hostname
    """

    try:
        sh.grep('-q', hostname, SSH_KNOWN_HOSTS)
    except sh.ErrorReturnCode:
        with open(SSH_KNOWN_HOSTS, 'ab') as known_hosts_file:
            known_hosts_file.write('\n')

            keyscan = sh.Command('ssh-keyscan')
            keyscan('-t', 'rsa', hostname, _out=known_hosts_file)
Example #10
def findContainer(name):
    container=""
    try:
        container = sh.awk("{print $1}", _in=sh.head("-n 1", _in=sh.grep(name, _in=sh.docker.ps())))
    except:
        print "container not available"
    return container.rstrip()
Example #11
    def get(self):
        query = unquote(self.get_argument('q'))
        try:
            results = str(grep('-R', '--exclude-dir', '.git', query,
                               self.settings.repo))
        except ErrorReturnCode_1 as e:
            results = ''

        try:
            results += str(find(self.settings.repo, '-type', 'f', '-name',
                                '*' + query + '*', '-not', '(', '-path',
                                '%s/%s/*' % (self.settings.repo, '.git') ))
        except ErrorReturnCode_1 as e:
            pass

        results = results.replace(self.settings.repo, '').split('\n')[:-1]
        formatted_results = []
        for result in results:
            if 'Binary file' in result or result == '':
                continue

            # TODO this doesn't play well with colons in filenames
            stuff = result.split(':')
            filename = stuff[0]
            if path.basename(filename).startswith('.'):
                filename = path.join(path.dirname(filename),
                                     path.basename(filename)[1:])
            string = ''.join(stuff[1:])
            string = self._highlight(string, query)
            formatted_results.append({'filename': filename, 'string': string})
        self.render('search.html', query=query, results=formatted_results)
Example #12
def ip_adresses():
    """
    Read out or set the host IP and device
    """
    # pipe ifconfig's output through grep for the inet lines
    check = sh.grep(sh.ifconfig(), "inet")
    print check
Example #13
def get_free_mb(folder):
    df_result = grep(df("-k", folder), "/")

    kb_sizes = map(int, filter(is_int, df_result.split(" ")))
    available_mb = kb_sizes[2] / 1024

    return available_mb
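
For comparison, a minimal sketch of the same lookup that avoids parsing df output; the helper name is illustrative and os.statvfs is POSIX-only:

import os

def get_free_mb_statvfs(folder):
    # f_bavail counts blocks available to unprivileged users; f_frsize is the block size
    st = os.statvfs(folder)
    return st.f_bavail * st.f_frsize // (1024 * 1024)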
Example #14
def findImage(repository, tag):
    container=""
    try:
        container = sh.awk("{print $3}", _in=sh.head("-n 1", _in=sh.grep(tag, _in=sh.docker("images", repository))))
    except:
        print "container not available"
    return container.rstrip()
Example #15
def check_if_needed(path):
    if path[-6:] == '_r.MOV':
        return False

    file_name, ext = os.path.splitext(path)

    conv_name = os.path.join(WORKDIR, file_name + '_r' + ext)

    if os.path.isfile(conv_name):
        return False

    try:
        result = grep(grep(mediainfo(path), i='Rotation'), '180')
    except sh.ErrorReturnCode_1 as e:
        return False

    return True
Example #16
def _get_last_two_deploys(environment):
    import sh
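    # _tty_out=False makes git see a plain pipe instead of a terminal,
    # so its output stays uncolored and unpaginated for the grep/sort/head below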
    git = sh.git.bake(_tty_out=False)
    pipe = git('tag')
    pipe = sh.grep(pipe, environment)
    pipe = sh.sort(pipe, '-rn')
    pipe = sh.head(pipe, '-n2')
    return pipe.strip().split('\n')
Example #17
def force_push(git, branch):
    try:
        git.push('origin', branch, '--force')
    except sh.ErrorReturnCode_128 as e:
        # oops we're using a read-only URL, so change to the suggested url
        line = sh.grep('  Use ', _in=e.stderr)
        edit_url = line.strip().split()[1]
        git.remote('set-url', 'origin', edit_url)
        git.push('origin', branch, '--force')
Example #18
def get_vendor(mac):
    if not mac:
        return None
    grepped = grep(
        mac.upper().replace(':','')[:6],
        OUI_FILE,
        _ok_code=[0,1]
    )
    return str(grepped)[7:] or '<unknown vendor>'
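
The snippets above handle "no match" from grep in two ways; a minimal side-by-side sketch (the pattern and file are illustrative):

from sh import grep, ErrorReturnCode_1

# style 1: grep's exit code 1 raises, so catch the exception
try:
    grep('needle', '/etc/hosts')
except ErrorReturnCode_1:
    pass  # no match

# style 2: declare exit code 1 acceptable up front and test it afterwards
if grep('needle', '/etc/hosts', _ok_code=[0, 1]).exit_code == 1:
    pass  # no match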
Example #19
def get_node_ip():
        """Returns the node's IP, for the interface
        that is being used as the default gateway.
        This should work on both MacOS X and Linux."""
        try:
            default_interface = grep(netstat('-nr'), '-e', '^default', '-e', '^0.0.0.0').split()[-1]
            my_ip = ifaddresses(default_interface)[2][0]['addr']
            return my_ip
        except:
            raise Exception("Unable to resolve local IP address.")
Example #20
def refresh(self):
    self.vms = {}
    lines = grep(nova("list"), "|").split("\n")[1:-1]
    for line in lines:
        line = line[2:-2]
        (id, name, status, networks) = line.split("|")
        self.vms[id] = {"id": id.strip(),
                        "name": name.strip(),
                        "status": status.strip(),
                        "networks": networks.strip()}
def get_remote_branches(origin, git=None):
    git = git or get_git()
    branches = [
        line.strip().replace('origin/HEAD -> ', '')[len(origin) + 1:]
        for line in
        sh.grep(
            git.branch('--remote'), r'^  {}'.format(origin)
        ).strip().split('\n')
    ]
    return branches
Example #22
def has_line(path, line):
    """
    Check if a certain file has a given line.
    """
    from sh import grep

    # Check if we have the option already in /etc/sudoers
    # http://man.cx/grep#heading14 -> 1 == no lines
    grep_status = grep(line, path, _ok_code=[0, 1]).exit_code

    return grep_status == 0
Example #23
def get_unmerged_remote_branches(git=None):
    git = git or get_git()
    try:
        lines = sh.grep(
            git.branch('--remote', '--no-merged', 'origin/master'),
            '^  origin',
        ).strip().split('\n')
    except sh.ErrorReturnCode_1:
        lines = []
    branches = [line.strip()[len('origin/'):] for line in lines]
    return branches
Example #24
def dhclient(iface, enable, script=None):
    # Disable the dhcp client and flush interface
    try:
        dhclients = sh.awk(
            sh.grep(
                sh.grep(sh.ps("ax"), iface, _timeout=5),
                "dhclient", _timeout=5),
            "{print $1}")
        dhclients = dhclients.split()
        for dhclient in dhclients:
            sh.kill(dhclient)
    except Exception as e:
        _logger.info("Failed to stop dhclient: %s" % e)
        pass

    if enable:
        if script:
            sh.dhclient("-nw", "-sf", script, iface, _bg=True)
        else:
            sh.dhclient("-nw", iface, _bg=True)
Example #25
File: create_vm.py Project: bmoar/vm
    def __get_host_count(self, host_type=""):
        """
            Get the current number of VMs running that match
            host_type string
        """
        hosts = 0
        if host_type:
            hosts = sh.wc(sh.awk(sh.grep(sh.virsh('list', '--all'), '%s' % host_type), '{print $2}'), '-l')
        else:
            sys.exit("Can't count non-existent host_type")

        return str(hosts).rstrip()
Example #26
File: device.py Project: Stupeflix/fixxd
def device_list():
    """
    Get udid from iPhone and iPad plugged into the computer
    Returns a list
    """

    # TODO: separate iPhone and iPad
    raw = sh.sed(sh.system_profiler("SPUSBDataType"), "-n", "-e",
                 '/iPad/,/Serial/p', "-e", '/iPhone/,/Serial/p')
    devices = sh.awk(
        sh.grep(raw, "Serial Number:"), "-F", ": ", "{print $2}").split('\n')
    return [device for device in devices if device]
Example #27
def monitor_publisher_size():
    #ps aux | grep publish | awk '{print $6}'
    from sh import ps, grep, pkill
    try:
        psinfo = grep(ps('aux'), 'publish')
        mem_used = int(psinfo.strip().split()[5])
        print 'mem_publish:', mem_used
        if mem_used >= 1 * 1024 * 1024: #(1G = 1*1024*1024K)
            pkill('-f', 'publish.py')
    except Exception:
        import traceback
        traceback.print_exc()
Example #28
def get_cmd_from_ps(needle):
    result = sh.grep(sh.cat(sh.ps('-wwaeopid,cmd')), needle)
    if result.exit_code == 0:
        for line in result.stdout.split('\n'):
            line = line.strip()
            if not line:
                continue
            match = re.search(r'^(\d*)\s*(.*)', line)
            if match.group(2).startswith('grep'):
                continue
            return match.group(2)
    raise KeyError('Failed to find: %s' % needle)
Example #29
def get_commit_files(file_type):
  system("git diff --cached --name-status > /tmp/git_hook")

  files = awk(
    grep(
     cat("/tmp/git_hook"), "-P", "A|M.*.%s$" % file_type,
     _ok_code = [0, 1]
    ), "{print $2}", _iter = True
  )

  exten = ".%s" % file_type
  return [path[:path.rindex(exten) + len(exten)] for path in files]
Example #30
File: ir.py Project: netdingo/cbplayer
def ir_get_dev(self):
    """
        get real ir device.
    """
    try:
        out = sh.grep(sh.dmesg(), IR_MODULE, _iter=True)
        count = len(out)
        if count > 0 and out.exit_code == 0:
            lines = filter(lambda x: x, out.stdout.split("\n"))
            last_line = lines[-1].split(IR_MODULE + " as")
            if len(last_line) > 1:
                path = "/sys" + last_line[-1].strip() + "/event1/uevent"
                out = sh.grep("DEVNAME", path, _iter=True)
                if out > 0 and out.exit_code == 0:
                    lines = filter(lambda x: x, out.stdout.split("\n"))
                    last_line = lines[-1].split("=")
                    if len(last_line) > 1:
                        dev = "/dev/" + last_line[-1].strip()
                        return dev
        return None
    except Exception as e:
        return None
Example #31
def remote_url(git, remote, original="origin"):
    origin_url = sh.grep(git.remote("-v"), original).split()[1]
    repo_name = origin_url.rsplit("/", 1)[1]
    return "https://github.com/{}/{}".format(remote, repo_name)
Example #32
out = 'csv/time.csv'
t0 = 0
with open(out, 'w+') as f:

    f.write("P\tmean\trel-err\tspeedup\tefficiency\n")

    for P in P_list:

        name = 'P{}'.format(P)
        out = 'out/' + name

        try:
            #sh.tail('-1', sh.grep('^stats', out), _out=buf)
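            # the grep result is passed to tail as its input: sh feeds one
            # command's output to the next command's stdin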
            buf = StringIO()
            sh.tail(sh.grep('^stats', out), '-1', _out=buf)
            line = buf.getvalue()
        except:
            line = ""

        if line == "": continue

        mean = float(get_field(line, "mean"))
        sem = float(get_field(line, "sem"))
        rel_error = sem * 1.96 / mean
        if P == 1:
            speedup = 1
            t0 = mean
        else:
            speedup = t0 / mean
Example #33
def get_subclasses(name):
    try:
        res = sh.grep("-r", str("@interface \w* : " + name), ".")
        return len(res.splitlines())
    except:
        return 0
Example #34
        def __init__(self):
            """Loads dynamic libraries"""

            try:
                if 'LIBCASM' in os.environ:
                    libcasm_path = os.environ['LIBCASM']
                else:
                    casm_path = find_executable('ccasm')
                    if platform == 'darwin':
                        libcasm_path = sh.grep(sh.otool('-L', casm_path),
                                               'libcasm').split()[0]
                        libcasm_path = libcasm_path.replace(
                            '@loader_path', dirname(casm_path))
                    else:
                        libcasm_path = sh.grep(sh.ldd(casm_path),
                                               'libcasm').split()[2]
                libccasm_path = libcasm_path.replace('libcasm', 'libccasm')

                self.lib_casm = ctypes.CDLL(libcasm_path,
                                            mode=ctypes.RTLD_GLOBAL)
                self.lib_ccasm = ctypes.CDLL(libccasm_path,
                                             mode=ctypes.RTLD_GLOBAL)
            except Exception as e:
                print("\n~~~ Error loading casm libraries ~~~")
                if 'LIBCASM' in os.environ:
                    libcasm_path = os.environ['LIBCASM']
                    print("Looking for libcasm at LIBCASM:", libcasm_path)
                    if not os.path.exists(libcasm_path):
                        print("File does not exist")
                        print(
                            "Install CASM if it is not installed, or update your PATH, or set LIBCASM to the location of libcasm."
                        )
                    else:
                        print(
                            "File exists, but for unknown reason could not be loaded."
                        )
                else:
                    casm_path = find_executable('ccasm')
                    print("find_executable('ccasm'):", casm_path)
                    if casm_path is None:
                        print(
                            "Could not find 'ccasm' executable. CASM is not installed on your PATH."
                        )
                        print(
                            "Install CASM if it is not installed, or update your PATH, or set LIBCASM to the location of libcasm."
                        )
                    elif basename(dirname(casm_path)) == ".libs":
                        print(
                            "Found 'ccasm' executable in a '.libs' directory. Are you running tests?"
                        )
                        if platform == 'darwin':
                            check = join(dirname(casm_path), "libcasm.dylib")
                        else:
                            check = join(dirname(casm_path), "libcasm.so")
                        if os.path.exists(check):
                            print("You probably need to set LIBCASM=" + check)
                        else:
                            print(
                                "You probably need to re-make or update your PATH"
                            )
                    else:
                        print(
                            "Found 'ccasm', but for unknown reason could not determine libcasm location."
                        )
                        if platform == 'darwin':
                            print("otool -L:")
                            res = sh.otool('-L', casm_path)
                            for val in res:
                                print(val.strip())
                        else:
                            print("ldd:")
                            res = sh.ldd(casm_path)
                            for val in res:
                                print(val.strip())
                print("")
                raise e

            #### Argument types

            self.lib_ccasm.casm_STDOUT.restype = ctypes.c_void_p

            self.lib_ccasm.casm_STDERR.restype = ctypes.c_void_p

            self.lib_ccasm.casm_nullstream.restype = ctypes.c_void_p

            self.lib_ccasm.casm_ostringstream_new.restype = ctypes.c_void_p

            self.lib_ccasm.casm_ostringstream_delete.argtypes = [
                ctypes.c_void_p
            ]
            self.lib_ccasm.casm_ostringstream_delete.restype = None

            self.lib_ccasm.casm_ostringstream_size.argtypes = [ctypes.c_void_p]
            self.lib_ccasm.casm_ostringstream_size.restype = ctypes.c_ulong

            self.lib_ccasm.casm_ostringstream_strcpy.argtypes = [
                ctypes.c_void_p,
                ctypes.POINTER(ctypes.c_char)
            ]
            self.lib_ccasm.casm_ostringstream_strcpy.restype = ctypes.POINTER(
                ctypes.c_char)

            self.lib_ccasm.casm_primclex_null.argtypes = None
            self.lib_ccasm.casm_primclex_null.restype = ctypes.c_void_p

            self.lib_ccasm.casm_primclex_new.argtypes = [
                ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p
            ]
            self.lib_ccasm.casm_primclex_new.restype = ctypes.c_void_p

            self.lib_ccasm.casm_primclex_delete.argtypes = [ctypes.c_void_p]
            self.lib_ccasm.casm_primclex_delete.restype = None

            self.lib_ccasm.casm_primclex_refresh.argtypes = [
                ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p,
                ctypes.c_bool, ctypes.c_bool, ctypes.c_bool, ctypes.c_bool,
                ctypes.c_bool
            ]
            self.lib_ccasm.casm_primclex_refresh.restype = None

            self.lib_ccasm.casm_command_list.argtypes = [ctypes.c_void_p]
            self.lib_ccasm.casm_command_list.restype = None

            self.lib_ccasm.casm_capi.argtypes = [
                ctypes.c_char_p, ctypes.c_void_p, ctypes.c_char_p,
                ctypes.c_void_p, ctypes.c_void_p
            ]
            self.lib_ccasm.casm_capi.restype = ctypes.c_int
Example #35
def count(self):
    if self.gziped: return int(sh.zgrep('-c', "^+$", self.fwd_path, _ok_code=[0,1]))
    else: return int(sh.grep('-c', "^+$", self.fwd_path, _ok_code=[0,1]))
Example #36
          sep=" ",
          file=sys.stderr)
elif args.Extract == True:
    print("Detected",
          len(Samples),
          "individuals for EXTRACTION.",
          sep=" ",
          file=sys.stderr)

#Get sample index in geno and ind files.
Index = {}
Sex = {}
Pop = {}
for i in Samples:
    for line in sh.grep(sh.cat("-n", args.indFn),
                        "{}".format(i),
                        _ok_code=[0, 1]):
        fields = line.strip().split()
        if fields[1] == i:
            Index[fields[1]] = (int(fields[0]) - 1)
            Sex[fields[1]] = fields[2]
            Pop[fields[1]] = fields[3]

print("Indexed", len(Index), "individuals.", sep=" ", file=sys.stderr)

IndOutFile = open(args.Output + ".ind", "w")
GenoOutFile = open(args.Output + ".geno", "w")
SnpOutFile = open(args.Output + ".snp", "w")

#Extract function
if args.Extract == True:
Example #37
print('-' * 50)

import sh
from sh import ls, glob
print(ls('-ld', glob('/etc/pr*')))
print('-' * 50)

w = sh.who()
print(w)
print('-' * 50)

disk_usage = sh.df('-h')
print(disk_usage)
print('-' * 50)

from sh import uname
print(uname())
print(uname('-a'))
print(uname(a=True))
print('-' * 50)

from sh import grep, wc

# grep 'sh' /etc/passwd | wc -l
print(grep('sh', '/etc/passwd'))
print(wc(grep('sh', '/etc/passwd'), l=True))
print('-' * 50)

from sh import tr
fruits = 'apple banana mango orange'.split()
print(tr("[:lower:]", "[:upper:]", _in=fruits))
Example #38
 def searchUser(self):
     # a newly hired teacher may not have an email account yet; skip when the incoming field is empty
     if self.username != 'None' and self.username != '' and self.username is not None:
         # write the ldif file with the department attributes
         add_DEPART_LDAP_TPL = DEPART_LDAP_TPL.format_map(
             {'depart_name': self.departName})
         with open('/tmp/temp_depart.ldif', 'w',
                   encoding='utf-8') as file_handler:
             file_handler.write(add_DEPART_LDAP_TPL)
         try:
             departadd_res = sh.ldapadd('-x', '-D',
                                        'cn=root,dc=limikeji,dc=com', '-w',
                                        'limikeji', '-f',
                                        '/tmp/temp_depart.ldif')
             print(departadd_res)
         except sh.ErrorReturnCode_68:
             # when the department already exists, ldapadd exits with code 68; re-adding is harmless, so ignore it
             pass
         # search LDAP and filter the result
         try:
             ldap_res = sh.grep(
                 sh.ldapsearch('-x', '-D', 'cn=root,dc=limikeji,dc=com',
                               '-w', 'limikeji', 'uid=%s' % self.username),
                 'mail').stdout.decode('utf-8')
         except sh.ErrorReturnCode_1:
             new_USER_LDAP_TPL = USER_LDAP_TPL.format_map({
                 'username':
                 self.username,
                 'userid':
                 str(self.userid).lstrip('0').replace(
                     '-', '9'
                  ),  # the userid comes from DingTalk, but an LDAP uid cannot start with 0, and some userids contain '-'
                 'useralias':
                 self.useralias,
                 'SSHA':
                 '{SSHA}'
             })
             with open(
                     '/alidata/scripts/ldap/log/users/new_user/new_user_%s'
                     % time.strftime('%Y-%m-%d', time.localtime()),
                     'a',
                     encoding='utf-8') as file_handler:
                 file_handler.write(
                      'new user name:{},userid:{},useralias:{},departname:{}'.
                     format(self.username, self.userid, self.useralias,
                            self.departName) + '\n')
             with open('/tmp/temp_user.ldif', 'w',
                       encoding='utf-8') as file_handler:
                 file_handler.write(new_USER_LDAP_TPL)
             try:
                 adduser_res = sh.ldapadd('-x', '-D',
                                          'cn=root,dc=limikeji,dc=com',
                                          '-w', 'limikeji', '-f',
                                          '/tmp/temp_user.ldif')
                 # print(adduser_res)
                  # add and update the user's department attribute info
                 new_ATTR_MODIFY = ATTR_MODIFY.format_map({
                     'depart_name':
                     self.departName,
                     'username':
                     self.username
                 })
                 add_ATTR_MODIFY_ALL = ATTR_MODIFY_ALL.format_map(
                     {'username': self.username})
                 with open('/tmp/temp_user_attr_modify.ldif',
                           'w',
                           encoding='utf-8') as file_handler:
                     file_handler.write(new_ATTR_MODIFY)
                 with open('/tmp/temp_user_Azu.ldif', 'w',
                           encoding='utf-8') as file:  # add the new user to one big group
                     file.write(add_ATTR_MODIFY_ALL)
                 try:
                     addzu_res = sh.ldapadd(
                         '-x', '-D', 'cn=root,dc=limikeji,dc=com', '-w',
                         'limikeji', '-f',
                         '/tmp/temp_user_attr_modify.ldif')
                     # print(addzu_res)
                     except sh.ErrorReturnCode_20 as error:
                     # ldap_modify: Type or value exists (20)
                     print(
                          '\033[0;36;40mldap add %s failed: Invalid syntax (21)\033[0m'
                         % self.username)
                     print(
                         '\033[0;36;40mldap_modify: Type or value exists (20)\033[0m'
                     )
                 try:
                     azu_res = sh.ldapadd('-x', '-D',
                                          'cn=root,dc=limikeji,dc=com',
                                          '-w', 'limikeji', '-f',
                                          '/tmp/temp_user_Azu.ldif')
                     # print(azu_res)
                 except sh.ErrorReturnCode_20 as error:
                     # ldap_modify: Type or value exists (20)
                     print(
                          '\033[0;36;40mldap_modify: Type or value exists (20), failed to add to group A \033[0m'
                     )
             except sh.ErrorReturnCode_21 as error:
                 # ldap_add: Invalid syntax (21)
                 print(str(error))
     else:
         with open('/alidata/scripts/ldap/log/users/new_user/new_user_%s' %
                   time.strftime('%Y-%m-%d', time.localtime()),
                   'a',
                   encoding='utf-8') as file_handler:
             file_handler.write(
                  'failed to add user name:{},userid:{},useralias:{},departname:{}'.format(
                     self.username, self.userid, self.useralias,
                     self.departName) + '\n')
Example #39
    def delete_Depart_User(self):
        try:
            ldap_res = sh.cut(
                sh.cut(
                    sh.cut(
                        sh.grep(
                            sh.ldapsearch('-x', '-D',
                                          'cn=root,dc=limikeji,dc=com', '-w',
                                          'limikeji',
                                          'cn=%s' % self.departName), 'uid'),
                        '-d', ' ', '-f2'), '-d', ',', '-f1'), '-d', '=',
                '-f2').stdout.decode('utf-8')
            ldap_resList = str(ldap_res).split('\n')[:-1]  # all user names in the department
            if 'test' in ldap_resList:
                ldap_resList.remove('test')
                # print('deleting group %s' % self.departName)
                # then delete the users inside the department; deleting only the user leaves it visible in the department with an unusable dn
                for user in ldap_resList:
                    try:
                        ldap_res = sh.grep(
                            sh.ldapsearch('-x', '-D',
                                          'cn=root,dc=limikeji,dc=com', '-w',
                                          'limikeji', 'uid=%s' % user),
                            'mail').stdout.decode('utf-8')
                        # print(ldap_res)
                    except sh.ErrorReturnCode_1:
                        with open('/tmp/temp_user_delete_attr.ldif',
                                  'w',
                                  encoding='utf-8') as file_handler:
                            file_handler.write(
                                ATTR_DELETE_MODIFY.format_map({
                                    'depart_name':
                                    self.departName,
                                    'username':
                                    user
                                }))
                        sh.ldapadd('-x', '-D', 'cn=root,dc=limikeji,dc=com',
                                   '-w', 'limikeji', '-f',
                                   '/tmp/temp_user_delete_attr.ldif')
            else:
                # print('deleting group %s' % self.departName)
                # then delete the users inside the department; deleting only the user leaves it visible with an unusable dn, shown as a red cross
                for user in ldap_resList:
                    try:
                        ldap_res = sh.grep(
                            sh.ldapsearch('-x', '-D',
                                          'cn=root,dc=limikeji,dc=com', '-w',
                                          'limikeji', 'uid=%s' % user),
                            'mail').stdout.decode('utf-8')
                        # print(ldap_res)
                    except sh.ErrorReturnCode_1:
                        with open('/tmp/temp_user_delete_attr.ldif',
                                  'w',
                                  encoding='utf-8') as file_handler:
                            file_handler.write(
                                ATTR_DELETE_MODIFY.format_map({
                                    'depart_name':
                                    self.departName,
                                    'username':
                                    user
                                }))
                        sh.ldapadd('-x', '-D', 'cn=root,dc=limikeji,dc=com',
                                   '-w', 'limikeji', '-f',
                                   '/tmp/temp_user_delete_attr.ldif')

        except sh.ErrorReturnCode_1:
            pass
        time.sleep(1)

Example #40

## Step 4
## Configure radvd to use that prefix
print('Creating radvd.conf')
with open(HOMEDIR + '/radvd.conf.in') as fin:
    conf = fin.read()
    confout = conf.replace('%PREFIX%', prefix)
    confout = confout.replace('%INTERFACE%', BORDERROUTER_INTERFACE)
    with open(HOMEDIR + '/radvd.conf', 'w') as fout:
        fout.write(confout)

## Step 5
## Run the BorderRouter
try:
    out = grep(ps('-A'), 'BorderRouterC')
    print('BorderRouter already running.')
    print('Killing BorderRouter...')
    sudo.pkill('-9', 'BorderRouterC')
except Exception:
    pass
print('Starting Border Router.')
subprocess.call([
    "screen", "-d", "-m", "-S", "border-router", "sudo",
    HOMEDIR + "/BorderRouterC", '-i', BORDERROUTER_INTERFACE
])

time.sleep(5)

## Step 6
## Run radvd
Example #41
import sh
print(sh.wpa_cli("-i", "wlan0", "list_networks"))

search1 = "SmashCam01"
networkNum = sh.cut(sh.grep(sh.wpa_cli("-i", "wlan0", "list_networks"), search1), "-f", "1")
sh.wpa_cli("-i", "wlan0", "select_network", networkNum)
print(sh.wpa_cli("-i", "wlan0", "list_networks"))
Example #42
def getErrorLogs(project):
	"""
		Récupération des logs d"erreurs
		===============================

		Author: Fabien Bellanger

		:param project: Nom du projet
		:type project: string

		:return: Une chaîne de caractères contenant les erreurs
		:rtype:  string
	"""

	filePath    = getFileName(project)
	oldFilePath = filePath + ".1"
	error       = {}
	filesToRead = []

	# The log files must exist and must not be empty
	# ----------------------------------------------
	if path.isfile(filePath) and path.getsize(filePath) != 0:
		filesToRead.append(filePath)
	if path.isfile(oldFilePath) and path.getsize(oldFilePath) != 0:
		filesToRead.append(oldFilePath)
	
	if len(filesToRead) > 0:
		
		buffer       = StringIO()
		linesArray   = []
		lineArray    = {}
		linesNumber  = 0

		for fileToRead in filesToRead:
			try:
				for line in sh.grep(config.GREP_PATTERN, fileToRead):
					# Parse the line to extract the date, the time and the message
					# -------------------------------------------------------------
					matchObject = re.match(config.GREP_REGEX, line, re.M|re.I)

					if matchObject:
						date     = matchObject.group(1)
						time     = matchObject.group(2)
						client   = matchObject.group(4) if (matchObject.group(4)) else ""
						server   = matchObject.group(5) if (matchObject.group(5)) else ""
						request  = matchObject.group(6) if (matchObject.group(6)) else ""
						upstream = matchObject.group(7) if (matchObject.group(7)) else ""
						host     = matchObject.group(8) if (matchObject.group(8)) else ""

						if matchObject.group(3):
							message = matchObject.group(3)
						elif matchObject.group(9):
							message = matchObject.group(9)

						# Is this message already in the array?
						currentIndex     = 0
						linesArrayNumber = len(linesArray)

						while not (currentIndex == linesArrayNumber or linesArray[currentIndex]["message"] == message):
							currentIndex += 1
						
						if currentIndex == linesArrayNumber:
							# No matching message found; append it to the array
							lineArray = {
								"date":         date,
								"timeStart":    time,
								"message":      message,
								"timeEnd":      time,
								"number":       1,
							}
							linesArray.append(lineArray)
						else:
							# Update the end time and the counter
							linesArray[currentIndex]["timeEnd"] = time
							linesArray[currentIndex]["number"]  += 1

						# Total number of lines
						linesNumber += 1

			except (sh.ErrorReturnCode_1, sh.ErrorReturnCode_2):
				print(Fore.RED + "[Error]",
					  Fore.GREEN + "{:<30s}" . format("[" + project + "]"),
					  Style.RESET_ALL + " : File not found, empty, or no matches")
		
		# Append to the global error string
		# ---------------------------------
		error = displayProject(project, linesNumber, linesArray)
	else:
		print(Fore.RED + "[Error]",
			  Fore.GREEN + "{:<30s}" . format("[" + project + "]"),
			  Style.RESET_ALL + " : Files not found or empty")
		
	return error
Example #43
    def register_error_in_database(self, session: Session):
        """
        This methode create database object associated to the statification with the result of the log
        that scrapy has generated.
        :param session
        :raise NoResultFound if there is no statification with empty commit sha
        """

        # finalization of the statification by removing unwanted files and directories and empty directories
        self.delete_files()
        self.delete_directories()
        self.delete_empty_directories()

        # get the statification with empty commit
        statification = Statification.get_statification(session, '')

        # open the log file that contain scrapy errors
        f_file = open(self.s_log_file)

        expecting_other_line_for_error_message = False
        s_error_message = ''

        # for each line will look for information that will be used to fill object of the database
        for line in f_file:

            # check if the line contain a warning or a info
            if re.match('(.*)WARNING(.*)', line) or re.match(
                    '(.*)INFO(.*)', line) or re.match('(.*) ERROR:(.*)', line):
                expecting_other_line_for_error_message = False

            if expecting_other_line_for_error_message:
                s_error_message += line

            if (not expecting_other_line_for_error_message
                ) and s_error_message != '':
                statification.add_object_to_statification(
                    ScrapyError, session, s_error_message)
                s_error_message = ''

            # in the case the line match an External link
            if re.match('(.*) INFO: External link detected(.*)', line):
                # we get the second part of the line there are also [] in the first part
                s_trunked_line = line[line.index('INFO: External link detected'
                                                 ):len(line)]

                # we get the position of the beginning of the URL
                i_start_url = s_trunked_line.index('[')
                # we get the position of the end of the URL
                i_end_url = s_trunked_line.index(']')
                # we get the position of the beginning of the source url
                i_start_source = s_trunked_line.index(' in ') + 4

                try:
                    # we create and add a new ExtenalLink to our statification
                    statification.add_object_to_statification(
                        ExternalLink, session,
                        s_trunked_line[i_start_source:len(s_trunked_line)],
                        s_trunked_line[i_start_url + 1:i_end_url])
                except ValueError as e:
                    self.logger.info(e)
            # in the case the line match a Scrapy Error
            elif re.match('(.*) ERROR:(.*)', line):
                expecting_other_line_for_error_message = True
                # retrieve the Scrapy Error
                s_trunked_line = line[line.index('ERROR: ') + 7:len(line)]
                s_error_message += s_trunked_line

            # in the case the line match an error for type MIME
            elif re.match('(.*) WARNING: Forbidden content (.*)', line):

                # we get the second part of the line where begin the information that interest us
                s_trunked_line = line[line.index('WARNING: Forbidden content '
                                                 ):len(line)]

                # get the starting position of the Error type MIME
                i_start_error_mime = s_trunked_line.index('[')
                # get the end position of the error type MIME
                i_end_error_mime = s_trunked_line.index(']')
                # get the error type MIME
                s_error_mime = s_trunked_line[i_start_error_mime +
                                              1:i_end_error_mime]
                # get the source of the error
                s_source_link = s_trunked_line[s_trunked_line.
                                               index('detected in') +
                                               12:len(s_trunked_line)]

                try:
                    # create an ErrorTypeMIME associated to the statification
                    statification.add_object_to_statification(
                        ErrorTypeMIME, session, s_error_mime, s_source_link)
                except ValueError as e:
                    self.logger.info(e)
            # in the case the line match an HTTP error
            elif re.match('(.*) WARNING: HTTP error (.*)', line):

                # we get the second part of the line where begin the information that interest us
                s_trunked_line = line[line.index('WARNING: HTTP error '
                                                 ):len(line)]

                # we get the starting position of the Error Code
                i_start_error_code = s_trunked_line.index('[')
                # we get the end position of the Error Code
                i_end_error_code = s_trunked_line.index(']')
                # we get the start position of the url source of the error
                i_start_url = s_trunked_line.index(' for ')
                # we get the end position of the url source of the error
                i_end_url = s_trunked_line.index(' from ')

                # we retrieve the error code
                s_error_code = s_trunked_line[i_start_error_code +
                                              1:i_end_error_code]

                # we retrieve the url that cause the error
                s_url = s_trunked_line[i_start_url + 5:i_end_url]

                # we retrieve the url of the source where was found the url that caused the error
                s_url_source = s_trunked_line[i_end_url +
                                              6:len(s_trunked_line) - 1]

                try:
                    # we create a new HtmlError associated to the statification
                    statification.add_object_to_statification(
                        HtmlError, session, s_error_code, s_url, s_url_source)
                except ValueError as e:
                    self.logger.info(e)
            elif re.match('(.*)response_received_count(.*)', line):

                # we get the second part of the line where begin the information that interest us
                s_value_item_scraped_count = line[line.index(': ') +
                                                  2:line.index(',')]

                try:
                    # set the number of crawled item into the statification object
                    statification.upd_nb_item(session, statification.commit,
                                              int(s_value_item_scraped_count))
                except ValueError as e:
                    self.logger.info(e)
        try:
            # retrieve the list of file types with a file count for each type
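            # shell equivalent: find <repo>/* -type f | grep -oE '\.[a-zA-Z0-9]+$' | sort | uniq -c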
            s_result_type_files = sh.uniq(
                sh.sort(
                    sh.grep(
                        sh.find(sh.glob(self.s_repository_path + '/*'),
                                '-type', 'f'), '-o', '-E', '\.[a-zA-Z0-9]+$')),
                '-c')
            # the result is a string so we need to get a table,
            # here we get a table made of each line returned, we remove all space
            a_table_result_type_files = s_result_type_files.replace(
                ' ', '').split('\n')

            # browse the line of result
            for row in a_table_result_type_files:
                if row:
                    # a line is composed of a number followed by a type like "42.png",
                    # we separate the number and the type
                    s_type_file = row.split('.')

                    try:
                        # create a new ScannedFile associated to the statification
                        statification.add_object_to_statification(
                            ScannedFile, session, s_type_file[1],
                            int(s_type_file[0]))
                    except ValueError as e:
                        self.logger.info(e)
        except sh.ErrorReturnCode_1:
            self.logger.info('There is no folder in the static repository')
        finally:
            # in all case we need to close the file
            f_file.close()

        # change the status of the statification (NEED TO BE DONE AT THE END !!)
        statification.upd_status(session, '', Status.STATIFIED)
Example #44
from sh import uname, sed, dpkg, awk, grep

# Get our current kernel version
# $(uname -r | sed -r 's/-[a-z]+//')

kernel_version = sed(uname('-r'), '-r', 's/-[a-z]+//').strip()

print('kernel version:', kernel_version)

# get & filter package list
packages = \
    grep(
        awk(
            dpkg('-l', 'linux-image-[0-9]*', 'linux-headers-[0-9]*', _env={'COLUMNS': '200'}),
            '/ii/{print $2}'
        ),
        '-ve',
        kernel_version
    )

print(packages.strip())
Example #45
    def _on_poll(self, param):

        action_result = self.add_action_result(phantom.ActionResult(dict(param)))

        ret_val, resp_json = self._make_slack_rest_call(action_result, consts.SLACK_AUTH_TEST, {})

        if not ret_val:
            return ret_val

        bot_id = resp_json.get('user_id')

        if not bot_id:
            return action_result.set_status(phantom.APP_ERROR, "Could not get bot ID from Slack")

        pid = self._state.get('pid', '')

        if pid:

            try:
                if 'slack_bot.pyc' in sh.ps('ww', pid):  # pylint: disable=E1101
                    self.save_progress("Detected SlackBot running with pid {0}".format(pid))
                    return action_result.set_status(phantom.APP_SUCCESS, "SlackBot already running")
            except:
                pass

        config = self.get_config()
        bot_token = config.get('bot_token', '')
        ph_auth_token = config.get('ph_auth_token', None)

        if not ph_auth_token:
            return action_result.set_status(phantom.APP_ERROR, "The ph_auth_token asset configuration parameter is required to run the on_poll action.")

        app_version = self.get_app_json().get('app_version', '')

        try:
            ps_out = sh.grep(sh.ps('ww', 'aux'), 'slack_bot.pyc')  # pylint: disable=E1101

            if app_version not in ps_out:

                old_pid = shlex.split(str(ps_out))[1]

                self.save_progress("Found an old version of slackbot running with pid {}, going to kill it".format(old_pid))

                sh.kill(old_pid)  # pylint: disable=E1101

        except:
            pass

        try:
            if bot_token in sh.grep(sh.ps('ww', 'aux'), 'slack_bot.pyc'):  # pylint: disable=E1101
                return action_result.set_status(phantom.APP_ERROR, "Detected an instance of SlackBot running with the same bot token. Not going to start new instance.")

        except:
            pass

        self.save_progress("Starting SlackBot")

        ret_val, base_url = self._get_phantom_base_url_slack(action_result)

        if phantom.is_fail(ret_val):
            return ret_val

        slack_bot_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'slack_bot.pyc')

        base_url += '/' if not base_url.endswith('/') else ''

        proc = subprocess.Popen(['phenv', 'python2.7', slack_bot_filename, bot_token, bot_id, base_url, app_version, ph_auth_token])

        self._state['pid'] = proc.pid

        self.save_progress("Started SlackBot with pid: {0}".format(proc.pid))

        return action_result.set_status(phantom.APP_SUCCESS, "SlackBot started")
Example #46
def svnurl_for_path(path):
    cd(path)
    url = grep(svn.info(), "URL: ")
    url = url.replace("URL: ", "").replace("\n", "")
    return url
Example #47
#!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
import sys
from sh import grep, cat, awk

log_file = sys.argv[1]
file_name = sys.argv[2]
total_topology = ['3_clique','4_circle','4_clique','7_circle','7_clique','7_bridge','7_star']
txn_num = [5000,10000,15000,20000]

total_num = []
oret = awk(grep(grep(grep(cat('%s'%log_file),5000),'-v','configure'),'-v',15000),'{print $7}').split()
i = 0
for tps in oret:
    tps_num = tps.replace('/s','')
    total_num.append([5000,tps_num,total_topology[i]])
    i+=1

for tx_nm in txn_num[1:]:
    oret = awk(grep(grep(cat('%s'%log_file),tx_nm),'-v','configure'),'{print $7}').split()
    i = 0
    for tps in oret:
        tps_num = tps.replace('/s', '')
        total_num.append([tx_nm, tps_num, total_topology[i]])
        i += 1

with open(file_name,"w") as csvfile:
      writer = csv.writer(csvfile)
      writer.writerow(['num_txn','TPS','cluster_size'])
      writer.writerows(sorted(total_num,key=lambda stu:stu[2]))
Example #48
def parse(filename, **kwargs):
    # cat outfile | grep ip | cut -d '|' -f 2 | cut -d ' ' -f 3 | cut -d '.' -f 4 | sort -n | wc -l
    return sh.sort(cut(cut(cut(grep(cat(filename), "ip"), d="|", f=2), d=" ", f=3), d=".", f=4), "-n", _out=kwargs.get("_out"))
Example #49
File: grepopen.py Project: UPBGE/tools
from sh import grep, pwd, kate
import sys

print(pwd())
found = grep("-Frne", sys.argv[-1])

files = {occurence.split(":")[0] for occurence in found}

for file in files:
    print("\t", file)

askopenall = input("Open all files ? (y/n)")
if askopenall == "y":
    for file in files:
        p = kate(file, _bg=True)
        p.wait()

for file in files:
    askopen = input("Open file %s ? (y/n)" % file) or "y"
    if askopen == "y":
        print("open")
Example #50
File: setup.py Project: tmoon/python-sgx
try:
    import sh
except ImportError:
    import pip
    pip.main(["install", "sh>=1.12"])
    import sh

GRAPHENE_DIR = os.path.abspath("./graphene/")

# Copy config file
sh.cp("config/config.py", "sgx/config.py")

# Check if swig >= 3.0.10 is installed
try:
    out = sh.grep(sh.dpkg("-s", "swig", "swig3.0"), '^Version:')
    version = out.split(" ")[-1].split("-")[0]
except sh.ErrorReturnCode_1:
    version = None
if not version:
    exit("Error: Couldn't find swig")
if LooseVersion(version) < LooseVersion("3.0.10"):
    exit(
        "Error: swig version is lower than 3.0.10. Install swig 3.0.10 or higher."
    )

# Check if apport is installed (it produces error messages when executing Python with Graphene)
try:
    sh.dpkg("-s", "python3-apport")
    exit("Error: python3-apport is installed. Please uninstall it.")
except sh.ErrorReturnCode_1:
Example #51
inheritable_methods = get_inheritable_methods(superclass_headers)
superclass_methods = get_superclass_methods(superclass_headers)

nrOvMethods = 0
for method in methods:
    if (method in inheritable_methods) or (method in superclass_methods):
        nrOvMethods = nrOvMethods + 1

effCoupling = 0
with open(file_path, 'r') as f:
    s = f.read()
    matches = re.findall('#import \".*\.h\"', s)
    effCoupling = len(matches) - 1

import_regexp = str("#import " + '"' + header_file_name + '"')
affCoupling = len(sh.grep("-r", import_regexp, ".").splitlines()) - 1

rfc = 0
with open(file_path, 'r') as f:
    s = f.read()
    matches = re.findall('\[', s)
    rfc = len(matches) - 1

nsb = get_nesting_level(file_path)

lcom = get_lcom(file_path, header_file_path)

sub = get_subclasses(file_name[:-2])

print(
    str(loc - nrComments) + "\t" + str(nrMethods) + "\t" + str(nrOvMethods) +
Example #52
        # Remove the duplicate case
        if i > 1 and char == input_string[i - 1]:
            continue
        res.append(char)
        get_jumble(input_string[:i] + input_string[i + 1:], dict_set, ret, res)
        res.pop()


if __name__ == '__main__':
    file_list = glob.glob('./dict/*')
    dict_set = get_dict(file_list)
    ret = []
    input_string = raw_input("Enter a jumble:").lower()

    # Sort the string to avoid duplicate
    input_string = sorted(input_string)
    get_jumble(input_string, dict_set, ret, [])
    print ret

    # The work below is irrelevant to the answer.
    print '-' * 20
    print 'This dictionary has so many single character that making me doubt my solution'
    print "So here's the proof:"

    import sh  # You may need to sudo pip install sh
    import os

    cur_path = os.getcwd()
    for result in ret:
        print sh.grep('^%s$' % result, os.getcwd(), '-r')
Example #53
#_*_ coding: utf-8 _*_
__version__ = '0.1'
import sh

# pipe ifconfig's output through grep for the inet lines
check = sh.grep(sh.ifconfig(), "inet")
print check
Example #54
def get_list_of_running_process():
    process_name = sys.argv[1]
    list_of_process = sh.grep(sh.ps('-aux', _piped=True), '-v', 'python')
    return list_of_process
Example #55
import sh as bada

x = bada.curl("http://ndtv.com", "-v")
print x

print("the number of files in my currenyt directory  is : \n")

#print ( bada.wc(bada.find("/home/amardeep/"),  "-l"))

print(bada.grep(bada.dmidecode(), "AMD"))

print("printing my current processor and memory information: \n")

print(bada.tail(bada.head("-n", 130, "dmi.txt"), "-n", 20))

print "getting current directory \n", bada.pwd()
Example #56
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sh,os,sys

try:
    sh.grep(sh.grep(sh.ps('ux'),'sslocal'),v='grep')
except Exception, e:
    os.system("sslocal -c $HOME/.outwall/sslocal_config.json &")
finally:

    command_string=' '.join(sys.argv[1:])
    print command_string
    os.system("$HOME/.outwall/proxychains-4.8.1/proxychains4  -f $HOME/.outwall/proxychains.conf "+command_string )

Example #57
        usage()
    i = None
    k = None
    o = None
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
        elif opt in ("-i", "--input"):
            i = str(arg)
        elif opt in ("-k", "--keyword"):
            k = str(arg)
        elif opt in ("-o", "--output"):
            o = str(arg)
    if not any(var is None for var in [i, k, o]):
        return i, k, o
    print("The parameters are not yet specified!")
    usage()


def var_to_file(var_to_write, file_to_write):
    file = open(file_to_write, 'w')
    file.write(var_to_write)
    file.close()


###################################################
inputFile, inputKeyWord, outputFile = main()
output = str(sh.head("-n 1", inputFile)) + str(sh.grep(inputKeyWord,
                                                       inputFile))
var_to_file(output, outputFile)
Example #58
from sh import nova
from sh import grep
from pprint import pprint

print 70 * "-"
a = nova("list")
print a
print 70 * "-"
b = grep(a, "|")
print b
print 70 * "-"
c = b.split("\n")
print c
print 70 * "-"

lines = grep(nova("list"), "|").split("\n")

pprint(lines)
print " I am there"

lines = lines[1:-1]

pprint(lines)

print " I am here"
vms = {}

print 70 * "="
pprint(lines)
print 70 * "="
Example #59
#!/usr/bin/env python
from sh import grep, netstat

print(grep(netstat('-an'), 'LISTEN'))  # <1>
Example #60
# loop over the downloaded files
for filename in os.listdir(temp_dir):
    if filename.endswith(".mp3"):
        cnt_f = cnt_f + 1
        # re-encode to save space
        subprocess.call([
            'lame', '--mp3input', '-b', '32', temp_dir + "/" + filename,
            temp_dir + "/B_" + filename
        ],
                        shell=False)
        os.rename(temp_dir + "/B_" + filename, temp_dir + "/" + filename)
        # build the items to append to feed.xml
        titre = sh.cut(
            sh.grep(
                sh.ffprobe("-v", "error", "-show_format",
                           temp_dir + "/" + filename), "title"), "-d", "=",
            "-f2")
        titre = str(titre)
        titre = titre.replace('\n', '')
        fichier = open(temp_dir + "/" + "items.xml", "a")
        fichier.write("<item>\n")
        fichier.write("<title><![CDATA[" + titre + "]]></title>\n")
        fichier.write(
            "<link>https://raw.githubusercontent.com/tcaenen/tt_pod/master/media/"
            + filename + "</link>\n")
        fichier.write(
            '<enclosure url="https://raw.githubusercontent.com/tcaenen/tt_pod/master/media/'
            + filename +
            '" length="30066338" type="audio/mpeg"></enclosure>\n')
        fichier.write(