def _is_dir_in_repo(path):
    '''Does the directory path exist in the subversion repository?

    :param path: relative (to ASKAP_ROOT) path to directory.
    :return: True when the path is in the repository directly or is brought
             in via an svn:externals property on its parent, else False.
    :raises BuildError: if the initial 'svn info' on ASKAP_ROOT fails.
    '''
    # Discover the repository URL of the checkout root (Python 2 module).
    comm = 'svn info %s | grep ^URL:' % ASKAP_ROOT
    (stdout, stderr, returncode) = runcmd(comm, shell=True)
    if returncode:
        raise BuildError(stderr)
    url = stdout.split(': ')[1].strip()
    # svn 1.5 bug, returncode is 0 on failure, so grep for known label.
    comm = 'svn info %s/%s 2>&1 | grep "Node Kind:"' % (url, path)
    (stdout, stderr, returncode) = runcmd(comm, shell=True)
    if not returncode:
        # i.e. grep found "Node Kind:" so is in repository.
        return True
    # check if there is an svn:external on parent dir.
    parentpath = os.path.split(path)[0]
    comm = "svn propget svn:externals %s/%s" % (url, parentpath)
    (stdout, stderr, returncode) = runcmd(comm, shell=True)
    if stdout:
        # should be local directory name and remote repository path
        return True
    # Nothing found so must be local
    print 'warn: %s is a local (uncommitted) directory' % path
    return False
def number_of_cpus():
    '''Return the number of CPUs the current machine has.

    :return: CPU count as an int; falls back to 1 when the platform is
             unrecognised or any probe fails.
    '''
    try:
        if sys.platform.startswith("linux"):
            # Count 'processor' entries in /proc/cpuinfo.
            output = runcmd("cat /proc/cpuinfo")[0]
            ncpu = 0
            for line in output.split('\n'):
                if line.startswith('processor'):
                    ncpu += 1
        elif (sys.platform.startswith("darwin") or
              sys.platform.startswith("freebsd")):
            ncpu = runcmd("sysctl -n hw.ncpu")[0]
            ncpu = int(ncpu)
        else:
            q_print(
                "warn: Do not know how to find number of CPUs for %s platform"
                % sys.platform)
            ncpu = 1
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; probing stays best-effort.
        q_print("warn: Exception in finding number of CPUs, setting to 1")
        ncpu = 1
    if not isinstance(ncpu, int):
        # Paranoia - want to guarantee an int return.
        q_print("warn: Problem in finding number of CPUs, setting to 1")
        ncpu = 1
    return ncpu
def __k_wrex__(args):
    # Write-over-existing probe: dd a single 4k direct-I/O block onto an
    # existing per-OST file and report how long the write took.
    # args: [fsname, ost_id, pb_id, ost_file]
    fsname = args[0]
    ost_id = args[1]
    pb_id = args[2]
    ost_file = args[3]
    t_start = time.time()
    #if not os.path.isfile(ost_file):
    #    return (time.time(), fsname, ost_id, pb_id, 0, -1)
    try:
        cmd = ["dd"]
        opts = {
            "if": "/dev/zero",
            "of": ost_file,
            "bs": "4k",
            "oflag": "direct",
            "count": "1"
        }
        args = None
        ret = None
        (output, errput, ret) = runcmd(cmd, opts, args)
        if ret == 0:
            # dd prints its timing summary on stderr; token -4 from the end
            # appears to be the elapsed seconds (same field that the shell
            # pipeline in the comment below extracts) - confirm for the
            # local dd version.
            duration = errput.decode('utf8').split()[-4]
            return (t_start, fsname, ost_id, pb_id, duration, 0)
        else:
            # -2 status: dd itself exited non-zero.
            t_end = time.time()
            return (t_start, fsname, ost_id, pb_id, t_end - t_start, -2)
        # dd if=/dev/zero of=./test bs=4k oflag=direct count=1 2>&1 | tail -1 | awk {'print $6'}
    except Exception as e:
        t_end = time.time()
        # NOTE(review): on exception the status slot carries str(e) rather
        # than an int code like the other branches - callers must cope.
        return (t_start, fsname, ost_id, pb_id, t_end - t_start, str(e))
def get_svn_branch_info():
    '''Return the branch of the repository we are using.
    e.g. ['trunk'], ['releases', '0.3'], ['features', 'TOS', 'JC'] etc
    '''
    ASKAP_ROOT = os.environ['ASKAP_ROOT']
    # In future layout will find Code in src subdirectory.
    SRC_DIR = os.sep.join((ASKAP_ROOT, 'src'))
    if os.path.isdir(SRC_DIR):
        CODE_DIR = os.sep.join((SRC_DIR, 'Code'))
    else:
        # current (old) layout
        CODE_DIR = os.sep.join((ASKAP_ROOT, 'Code'))
    bi = ['Unknown']  # Define here to handle svn runtime failures or
                      # svn output format changes.
    try:
        for line in runcmd('svn info %s' % CODE_DIR)[0].split('\n'):
            if line.startswith('URL:'):
                # e.g. 'URL: https://host/repo/...'; element 3 of a '/'
                # split is the repository name.
                repo = line.split(os.sep)[3]
                # handle differences in layout of SDP and TOS.
                if repo == "askapsoft":
                    topdir = 'Src'
                elif repo == "askapsdp":
                    topdir = 'askapsdp'
                else:
                    # just split on the toplevel directory and down.
                    # https://foo.com/topdir
                    groups = line.split(os.sep)
                    topdir = os.sep.join(groups[:4])
                # Everything between topdir and the trailing path element
                # is taken as the branch description.
                bi = line.split(topdir)[1].split(os.sep)[1:-1]
                break
    except:
        pass  # bi already defined.
    return bi
def query(self, jobidL):
    """
    Determine the state of the given job ids.  Returns (cmd, out, err,
    stateD) where stateD is dictionary mapping the job ids to a string equal
    to 'pending', 'running', or '' (empty) and empty means either the job
    was not listed or it was listed but not pending or running.  The err
    value contains an error message if an error occurred when getting the
    states.
    """
    cmdL = ['showq']
    cmd = ' '.join(cmdL)
    x, out = runcmd(cmdL)
    stateD = {}
    for jid in jobidL:
        stateD[jid] = ''  # default to done
    err = ''
    for line in out.strip().split(os.linesep):
        try:
            L = line.strip().split()
            if len(L) >= 4:
                jid = L[0]
                st = L[2]
                if jid in stateD:
                    # Map Moab state names onto the generic ones.
                    if st in ['Running']:
                        st = 'running'
                    elif st in ['Deferred', 'Idle']:
                        st = 'pending'
                    else:
                        st = ''
                    stateD[jid] = st
        except Exception as e:
            # Bug fix: the message previously blamed "squeue" (SLURM), but
            # this method parses Moab's "showq" output.  Also modernised
            # the legacy sys.exc_info() idiom to 'except ... as e'.
            err = "failed to parse showq output: " + str(e)
    return cmd, out, err, stateD
def syncdir(src_path, tgt_path, syncowner=False, syncgroup=False,
            syncperms=False, synctimes=False):
    """ lustre stripe aware directory sync
        syncs the directory inode only, does not recurse
        :param src_path FSItem:
        :param tgt_path FSItem:
        :param syncowner bool: sync file owner (default=False)
        :param syncgroup bool: sync file group (default=False)
        :param syncperms bool: sync file permissions (default=False)
        :param synctimes bool: sync file times (default=False)
        :return str: full path to tmpfile (even if keeptmp=False)
    """
    cmd = [env['PYLUTRSYNCPATH']]
    opts = None
    # -d restricts rsync to the directory entry itself (no recursion);
    # -X/-A/--super carry xattrs, ACLs and privileged attribute changes.
    rsync_args = ['-X', '-A', '--super', '-d']
    # Translate each requested attribute sync into its rsync flag.
    for enabled, flag in ((synctimes, '-t'), (syncperms, '-p'),
                          (syncowner, '-o'), (syncgroup, '-g')):
        if enabled:
            rsync_args.append(flag)
    rsync_args.append(src_path)
    # Trailing separator on the parent makes rsync sync the leaf into it.
    rsync_args.append("{0}{1}".format(str(tgt_path.parent), os.sep))
    return runcmd(cmd, opts, rsync_args)
def update_command(dirpath, recursive=False, extraopts=""):
    '''execute an svn up command

    :param dirpath: The path to update
    :param recursive: Do a recursive update
    :param extraopts: extra options to svn command
    :return: True if the update pulled in changes, False when already
             up to date (svn prints "At revision N.").
    :raises BuildError: if svn exits with a non-zero status.
    '''
    ropt = "-N"
    if recursive:
        ropt = ""
    comm = "svn up --non-interactive %s %s %s" % (ropt, extraopts, dirpath)
    cdir = os.path.abspath(os.curdir)
    os.chdir(ASKAP_ROOT)  # Do update in root directory XXX Why?
    (stdout, stderr, returncode) = runcmd(comm, shell=True)
    # NOTE(review): if BuildError is raised here the working directory is
    # left at ASKAP_ROOT - there is no try/finally restoring cdir.
    if returncode:
        raise BuildError(stderr)
    # First line "At revision N." means nothing changed.
    if stdout.startswith("At "):
        got_updates = False
    else:
        got_updates = True
    q_print(stdout.rstrip())
    os.chdir(cdir)
    return got_updates
def in_sync(src, tgt):
    """ Return True if rsync finds no differences, False otherwise

        :param src: source directory path
        :param tgt: target directory path
    """
    rv = False
    cmd = [os.environ['PYLUTRSYNCPATH']]
    opts = {'--timeout': 30}
    # Dry-run (-n) itemized (-i) compare; any itemized line is a difference.
    args = ['-nirlHAtpog', '--specials']
    args.append('{0}/'.format(src))
    args.append('{0}/'.format(tgt))
    (output, errput) = runcmd(cmd, opts, args)
    # the top level dirs will never match, this will always be the first line
    lines = output.splitlines()
    # Bug fix: guard the empty-output case before indexing lines[0],
    # which previously raised IndexError when rsync printed nothing.
    if lines and lines[0] == '.d..t...... ./':
        lines.pop(0)
    output = lines
    # any output from rsync indicates incorrect sync behavior
    if len(errput) < 1 and len(output) < 1:
        rv = True
    else:
        print('RSYNC OUTPUT')
        pprint.pprint(output)
        print('RSYNC ERRORS')
        pprint.pprint(errput)
    return rv
def syncdir( src_path, tgt_path, syncowner=False, syncgroup=False,
             syncperms=False, synctimes=False ):
    """ lustre stripe aware directory sync
        syncs the directory inode only, does not recurse
        :param src_path FSItem:
        :param tgt_path FSItem:
        :param syncowner bool: sync file owner (default=False)
        :param syncgroup bool: sync file group (default=False)
        :param syncperms bool: sync file permissions (default=False)
        :param synctimes bool: sync file times (default=False)
        :return str: full path to tmpfile (even if keeptmp=False)
    """
    # Base rsync invocation: xattrs, ACLs, super, dir entry only (no recurse).
    rsync_argv = [ '-X', '-A', '--super', '-d' ]
    rsync_argv += [ '-t' ] if synctimes else []
    rsync_argv += [ '-p' ] if syncperms else []
    rsync_argv += [ '-o' ] if syncowner else []
    rsync_argv += [ '-g' ] if syncgroup else []
    rsync_argv.append( src_path )
    # Target is the parent directory with a trailing separator so rsync
    # creates/updates the leaf inside it.
    rsync_argv.append( '{0}{1}'.format( str( tgt_path.parent ), os.sep ) )
    return runcmd( [ env[ 'PYLUTRSYNCPATH' ] ], None, rsync_argv )
def __k_crwr__(args):
    # Create-and-write probe: remove any existing per-OST file, optionally
    # restripe it onto a specific OST, then time a single 4k direct-I/O write.
    # args: [fsname, ost_id, pb_id, ost_file]; ost_id == -1 skips striping.
    fsname = args[0]
    ost_id = args[1]
    pb_id = args[2]
    ost_file = args[3]
    if os.path.isfile(ost_file):
        os.remove(ost_file)
    t_start = time.time()
    try:
        if ost_id != -1:
            __setstripe__([ost_file, ost_id])
        cmd = ["dd"]
        opts = {
            "if": "/dev/zero",
            "of": ost_file,
            "bs": "4k",
            "oflag": "direct",
            "count": "1"
        }
        args = None
        ret = None
        (output, errput, ret) = runcmd(cmd, opts, args)
        if ret == 0:
            # dd reports its timing summary on stderr; token -4 from the end
            # appears to be the elapsed seconds - confirm for local dd.
            duration = errput.decode('utf8').split()[-4]
            return (t_start, fsname, ost_id, pb_id, duration, 0)
            #TODO
        else:
            # -2 status: dd exited non-zero.
            t_end = time.time()
            return (t_start, fsname, ost_id, pb_id, t_end - t_start, -2)
    except Exception as e:
        t_end = time.time()
        # NOTE(review): status slot is str(e) here but an int elsewhere.
        return (t_start, fsname, ost_id, pb_id, t_end - t_start, str(e))
def set_rand_stripeinfo(path):
    """Apply a randomly chosen stripe count and size to path.

    Invokes 'lfs setstripe' and returns the (count, size) pair applied.
    """
    stripe_count = random.choice(config.FILE_STRIPE_COUNTS)
    stripe_size = random.choice(config.FILE_STRIPE_SIZES)
    setstripe_args = ['-c', stripe_count, '-S', stripe_size, path]
    (out, err) = runcmd(['lfs', 'setstripe'], None, setstripe_args)
    return (stripe_count, stripe_size)
def set_rand_stripeinfo( path ):
    """Stripe *path* with a count and size drawn at random from the
       configured lists; return the ( count, size ) pair used.
    """
    chosen_count = random.choice( config.FILE_STRIPE_COUNTS )
    chosen_size = random.choice( config.FILE_STRIPE_SIZES )
    ( out, err ) = runcmd( [ 'lfs', 'setstripe' ],
                           None,
                           [ '-c', chosen_count, '-S', chosen_size, path ] )
    return ( chosen_count, chosen_size )
def submit(self, fname, workdir, outfile, queue=None, account=None,
           confirm=False, **kwargs):
    """
    Creates and executes a command to submit the given filename as a batch
    job to the resource manager.  Returns (cmd, out, job id, error message)
    where 'cmd' is the submit command executed, 'out' is the output from
    running the command.  The job id is None if an error occured, and error
    message is a string containing the error.

    If 'confirm' is True, the job is submitted then the queue is queried
    until the job id shows up.  If it does not show up in about 20 seconds,
    an error is returned.
    """
    cmdL = ['msub']
    # Idiom fix: compare against None with 'is (not)' rather than (in)equality.
    if queue is not None:
        cmdL.extend(['-q', queue])
    if account is not None:
        cmdL.extend(['-A', account])
    cmdL.extend(['-o', outfile])
    cmdL.extend(['-j', 'oe'])
    cmdL.extend(['-N', os.path.basename(fname)])
    cmdL.append(fname)
    cmd = ' '.join(cmdL)
    x, out = runcmd(cmdL, workdir)
    # output should contain something like the following
    #    12345.ladmin1  or  12345.sdb
    jobid = None
    s = out.strip()
    if s:
        L = s.split()
        if len(L) == 1:
            jobid = s
    # NOTE(review): jobid is the raw string token, not an int.
    if jobid is None:
        return cmd, out, None, "batch submission failed or could not parse " + \
                               "output to obtain the job id"
    if confirm:
        time.sleep(1)
        ok = 0
        # Poll the queue roughly once a second for ~20 seconds.
        for i in range(20):
            c, o, e, stateD = self.query([jobid])
            if stateD.get(jobid, ''):
                ok = 1
                break
            time.sleep(1)
        if not ok:
            return cmd, out, None, "could not confirm that the job entered " + \
                   "the queue after 20 seconds (job id " + str(jobid) + ")"
    return cmd, out, jobid, ""
def get_git_revision():
    '''Return the output of "git describe --tags --always" for the current
    checkout, or the string "unknown" if the command fails or raises.'''
    try:
        (stdout, stderr, returncode) = runcmd('git describe --tags --always',
                                              shell=True)
        if returncode == 0:
            return stdout.rstrip()
        else:
            return "unknown"
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # propagate; any ordinary failure still yields "unknown".
        return "unknown"
def get_svn_revision():
    '''Return the svnversion revision string for the current checkout.

    Falls back to the git revision when the tree is a git checkout, and to
    "unknown" on any failure.
    '''
    try:
        (stdout, stderr, returncode) = runcmd('svnversion', shell=True)
        # A real working copy prints a revision starting with a digit;
        # 'Unversioned directory' etc. fall through to the git check.
        if returncode == 0 and stdout and stdout[0].isdigit():
            return stdout.rstrip()
        else:
            if is_git():
                return get_git_revision()
            return "unknown"
    except Exception:
        # Narrowed from a bare 'except:'; failures still mean "unknown".
        return "unknown"
def fid2path(fsname, fid):
    """ get all paths for a single fid

        :param fsname: lustre filesystem name
        :param fid: lustre file identifier string
        :return list: whitespace-split paths reported by 'lfs fid2path'
    """
    cmd = [env['PYLUTLFSPATH'], 'fid2path']
    opts = None
    args = [fsname, fid]
    # Dead 'retval = None' assignment removed - it was never used.
    (output, errput) = runcmd(cmd, opts, args)
    paths = output.split()
    return paths
def fid2path( fsname, fid ):
    """ get all paths for a single fid """
    lfs_cmd = [ env[ 'PYLUTLFSPATH' ], 'fid2path' ]
    retval = None
    # 'lfs fid2path' prints one path per line; split on whitespace.
    ( output, errput ) = runcmd( lfs_cmd, None, [ fsname, fid ] )
    retval = output.split()
    return retval
def _touch(f, mtime=None):
    """ Interface to Linux 'touch'

        -h touches the link itself rather than its target; mtime, when
        supplied, is a datetime used to set the timestamp explicitly.
    """
    flags = ['-h']
    if mtime:
        flags.extend(['-t', mtime.strftime('%Y%m%d%H%M.%S')])
    flags.append(str(f))
    output, errput = runcmd(['touch'], None, flags)
def _touch( f, mtime=None ):
    """ Interface to Linux 'touch' (no-dereference form) """
    touch_argv = [ '-h' ]
    if mtime:
        # touch -t expects [[CC]YY]MMDDhhmm[.ss]
        touch_argv = touch_argv + [ '-t', mtime.strftime( '%Y%m%d%H%M.%S' ) ]
    touch_argv.append( str( f ) )
    output, errput = runcmd( [ 'touch' ], None, touch_argv )
def is_vcs_check(testdir, probecmd):
    '''Return True if this checkout appears to belong to the VCS identified
    by testdir/probecmd, False otherwise (including on any error).

    :param testdir: directory name expected under ASKAP_ROOT for this VCS
    :param probecmd: command probing for the VCS; exit status 0 means yes
    '''
    try:
        # Fast path
        ASKAP_ROOT = os.environ["ASKAP_ROOT"]
        if os.path.isdir(ASKAP_ROOT + '/' + testdir):
            return True
        # Fallback to slower check. ASKAP_ROOT might not be the repository root
        (stdout, stderr, returncode) = runcmd(probecmd, shell=False)
        # Idiom fix: 'True if x else False' collapsed to the boolean itself.
        return returncode == 0
    except Exception:
        # Narrowed from a bare 'except:'; any failure means "not this VCS".
        return False
def path2fid( path ):
    """ get fid for a single path
        return FID as string
    """
    retval = None
    # 'lfs path2fid' prints the FID followed by a newline; strip it.
    ( output, errput ) = runcmd( [ env[ 'PYLUTLFSPATH' ], 'path2fid' ],
                                 None,
                                 [ path ] )
    retval = output.rstrip()
    return retval
def path2fid(path):
    """Return the lustre FID of *path* as a string via 'lfs path2fid'."""
    lfs = env['PYLUTLFSPATH']
    retval = None
    output, errput = runcmd([lfs, 'path2fid'], None, [path])
    # Trailing newline stripped from lfs output.
    retval = output.rstrip()
    return retval
def queues_are_empty():
    """ Return True when every rabbitmq psync queue reports zero length. """
    cmd = [ os.environ[ 'PSYNCBASEDIR' ] + '/bin/rabbitmq_psync', 'lq' ]
    ( output, errput ) = runcmd( cmd )
    # Each data line is '<queue-name> <len> [<len> ...]'; header lines
    # start with 'Listing queues' and are skipped.
    all_lens = sum(
        sum( map( int, ln.split()[1:] ) )
        for ln in output.splitlines()
        if not ln.startswith( 'Listing queues' )
    )
    return all_lens == 0
def number_of_cpus():
    '''Return the number of CPUs the current machine has.

    :return: CPU count as an int, defaulting to 1 on unknown platforms
             or probe failure.
    '''
    try:
        if sys.platform.startswith("linux"):
            # One 'processor' line per CPU in /proc/cpuinfo.
            output = runcmd("cat /proc/cpuinfo")[0]
            ncpu = 0
            for line in output.split('\n'):
                if line.startswith('processor'):
                    ncpu += 1
        elif (sys.platform.startswith("darwin") or
              sys.platform.startswith("freebsd")):
            ncpu = runcmd("sysctl -n hw.ncpu")[0]
            ncpu = int(ncpu)
        else:
            q_print("warn: Do not know how to find number of CPUs for %s platform"
                    % sys.platform)
            ncpu = 1
    except Exception:
        # Narrowed from a bare 'except:' - keeps best-effort behaviour but
        # no longer swallows KeyboardInterrupt/SystemExit.
        q_print("warn: Exception in finding number of CPUs, setting to 1")
        ncpu = 1
    if not isinstance(ncpu, int):
        # Paranoia - want to guarantee an int return.
        q_print("warn: Problem in finding number of CPUs, setting to 1")
        ncpu = 1
    return ncpu
def _files_match( f1, f2, incoming_syncopts ):
    """ Files f1 and f2 match according to rsync
        Return True if rsync finds no differences, False otherwise
        f1 and f2 must be instances of FSItem
    """
    rv = True
    # Defaults, overridden by whatever the caller passes in.
    syncopts = dict( keeptmp = False,
                     synctimes = False,
                     syncperms = False,
                     syncowner = False,
                     syncgroup = False,
                     pre_checksums = False,
                     post_checksums = True )
    syncopts.update( incoming_syncopts )
    # Refresh cached metadata before comparing.
    f1.update()
    f2.update()
    cmd = [ os.environ[ 'PYLUTRSYNCPATH' ] ]
    #opts = { '--timeout': 2 }
    opts = None
    # Dry-run itemized compare (links, hardlinks, ACLs, specials).
    args = [ '-nilHA', '--specials' ]
    if syncopts[ 'synctimes' ]:
        args.append( '-t' )
    if syncopts[ 'syncperms' ]:
        args.append( '-p' )
    if syncopts[ 'syncowner' ]:
        args.append( '-o' )
    if syncopts[ 'syncgroup' ]:
        args.append( '-g' )
    args.extend( [ f1.absname, f2.absname ] )
    ( output, errput ) = runcmd( cmd, opts, args )
    # NOTE(review): a mismatch is flagged only when BOTH stdout and stderr
    # are non-empty; itemized differences with a clean stderr pass silently.
    # Possibly this was meant to be 'or' (compare in_sync()) - confirm.
    if len( errput ) > 0 and len( output ) > 0:
        rv = False
        pprint.pprint( output )
        pprint.pprint( errput )
    # check stripecount and size
    if f1.stripecount != f2.stripecount:
        rv = False
        print( 'stripecount mismatch' )
    if f1.stripesize != f2.stripesize:
        rv = False
        print( 'stripesize mismatch' )
    # verify checksums
    if f1.checksum() != f2.checksum():
        rv = False
        print( 'checksum mismatch' )
    return rv
def main():
    ################################
    #Searches for Active Devices
    ################################
    # max number of devices to poll
    max_dev = 1000
    c = pyhop.make_client()
    #result = c.search_devices_by_activity(-180000,0, "activity" )
    # Devices active within the last 1800000 ms (30 minutes); presumably the
    # arguments are (offset, limit, from_ms, until_ms) - TODO confirm against
    # the pyhop API.
    result = c.get_active_devices(0,max_dev,-1800000, 0)
    for d in result.devices:
        if d.ipaddr4 != None:
            print d.ipaddr4
            # Look up the matching EC2 instance by private IP and hand the
            # result to the metadata handler (Python 2 module).
            json_data = runcmd('aws ec2 describe-instances --filter \''+
                '[{"Name":"private-ip-address","Values":["' + d.ipaddr4 + '"]}]\'')
            AWS_Instances_Metadata(c, d, json_data)
def get_svn_branch_info():
    '''Return the branch of the repository we are using.
    e.g. ['trunk'], ['releases', '0.3'], ['features', 'TOS', 'JC'] etc
    '''
    ASKAP_ROOT = os.environ['ASKAP_ROOT']
    bi = ['Unknown']  # Define here to handle svn runtime failures or
                      # svn output format changes.
    try:
        for line in runcmd('svn info %s' % ASKAP_ROOT)[0].split('\n'):
            if line.startswith('URL:'):
                # SRCDIR is a module-level constant not visible here;
                # presumably the URL component that precedes the branch
                # path - confirm against the defining module.
                bi = line.split(SRCDIR)[1].split(os.sep)
                break
    except:
        pass  # bi already defined.
    return bi
def _files_match(f1, f2, incoming_syncopts):
    """ Files f1 and f2 match according to rsync
        Return True if rsync finds no differences, False otherwise
        f1 and f2 must be instances of FSItem
    """
    rv = True
    # Defaults, overridden by the caller-supplied options.
    syncopts = dict(keeptmp=False,
                    synctimes=False,
                    syncperms=False,
                    syncowner=False,
                    syncgroup=False,
                    pre_checksums=False,
                    post_checksums=True)
    syncopts.update(incoming_syncopts)
    # Refresh cached metadata before comparing.
    f1.update()
    f2.update()
    cmd = [os.environ['PYLUTRSYNCPATH']]
    #opts = { '--timeout': 2 }
    opts = None
    # Dry-run itemized compare (links, hardlinks, ACLs, specials).
    args = ['-nilHA', '--specials']
    if syncopts['synctimes']:
        args.append('-t')
    if syncopts['syncperms']:
        args.append('-p')
    if syncopts['syncowner']:
        args.append('-o')
    if syncopts['syncgroup']:
        args.append('-g')
    args.extend([f1.absname, f2.absname])
    (output, errput) = runcmd(cmd, opts, args)
    # NOTE(review): mismatch is only flagged when BOTH streams are non-empty;
    # itemized differences with a clean stderr pass silently.  Possibly this
    # was meant to be 'or' (compare in_sync()) - confirm intent.
    if len(errput) > 0 and len(output) > 0:
        rv = False
        pprint.pprint(output)
        pprint.pprint(errput)
    # check stripecount and size
    if f1.stripecount != f2.stripecount:
        rv = False
        print('stripecount mismatch')
    if f1.stripesize != f2.stripesize:
        rv = False
        print('stripesize mismatch')
    # verify checksums
    if f1.checksum() != f2.checksum():
        rv = False
        print('checksum mismatch')
    return rv
def getstripeinfo(path):
    """ get lustre stripe information for path
        INPUT: path to file or dir
        OUTPUT: LustreStripeInfo instance
        NOTE: file type is NOT checked, ie: if called on a pipe i/o will
              block, if called on a socket or softlink an error will be
              thrown
    """
    cmd = [env['PYLUTLFSPATH'], 'getstripe']
    opts = None
    args = [path]
    # if os.path.isdir( path ):
    #     args.insert( 0, '-d' )
    (output, errput) = runcmd(cmd, opts, args)
    # Idiom fix: any() replaces the 'True in [list comprehension]' pattern.
    # 'has no stripe info' on either stream means the path is unstriped.
    if any('has no stripe info' in x for x in (output, errput)):
        sinfo = LustreStripeInfo()
    else:
        sinfo = LustreStripeInfo.from_lfs_getstripe(output.splitlines())
    return sinfo
def getstripeinfo( path ):
    """ get lustre stripe information for path
        INPUT: path to file or dir
        OUTPUT: LustreStripeInfo instance
        NOTE: file type is NOT checked, ie: if called on a pipe i/o will
              block, if called on a socket or softlink an error will be
              thrown
    """
    ( output, errput ) = runcmd( [ env[ 'PYLUTLFSPATH' ], 'getstripe' ],
                                 None,
                                 [ path ] )
    # 'has no stripe info' on either stream means the path is unstriped.
    unstriped = any( 'has no stripe info' in s for s in ( output, errput ) )
    if unstriped:
        return LustreStripeInfo()
    return LustreStripeInfo.from_lfs_getstripe( output.splitlines() )
def update_tree(dirpath, quiet=False):
    '''svn update a (directory) path in the repository.
    This will checkout parent directories as required.

    :param dirpath: The repository dirpath to update.
    :param quiet: suppress stdout output
    :return: True if updates were pulled in, False otherwise; returns None
             (implicitly) when the tree is not an svn checkout.
    '''
    if not is_svn():
        return
    rpath = None
    if not quiet:
        if dirpath.find(ASKAP_ROOT) == 0:  # start of string
            rpath = os.path.relpath(dirpath, ASKAP_ROOT)
        else:
            rpath = dirpath
        print "info: Updating '%s'" % rpath
    # NOTE(review): when quiet=True, rpath stays None and is passed to
    # _is_dir_in_repo below - confirm that is intended.
    if os.path.exists(dirpath):
        tree_updated = update_command(dirpath, recursive=True)
    elif _is_dir_in_repo(rpath):
        pathelems = dirpath.split(os.path.sep)
        # get the directory to check out recursively
        pkgdir = pathelems.pop(-1)
        pathvisited = ""
        # Walk down from the root, checking out each missing parent
        # directory non-recursively as we go.
        for pth in pathelems:
            pathvisited += pth + os.path.sep
            fullpath = os.path.join(ASKAP_ROOT, pathvisited)
            if not os.path.exists(fullpath):
                tree_updated = update_command(pathvisited)
                # This is a quick way of seeing if svn:external is defined
                # i.e. svn propget svn:externals
                # These directories need to be svn updated with --depth=infinity
                comm = "svn propget svn:externals %s" % fullpath
                (stdout, stderr, returncode) = runcmd(comm, shell=True)
                if len(stdout) > 0:
                    update_command(pathvisited, extraopts="--depth=infinity",
                                   recursive=True)
        tree_updated = update_command(dirpath, recursive=True)
    else:
        tree_updated = False
    return tree_updated
def __setstripe__(args):
    # Stripe a (new) file onto a single OST via 'lfs setstripe -c 1 -i<ost>'.
    # args: [fname, ost_index]
    # Returns True on success, None otherwise.
    fname = args[0]
    ost = args[1]
    cmd = ["lfs", "setstripe"]
    opts = None
    args = ["-c", "1", "-i" + str(ost), fname]
    status = None
    output = None
    errput = None
    ret = None
    try:
        (output, errput, ret) = runcmd(cmd, opts, args)
    except Exception as e:
        # NOTE(review): assumes the exception carries .code/.reason
        # (Run_Cmd_Error style); a plain Exception would AttributeError here.
        print(e.code, e.reason)
    if ret == 0:
        status = True
    #if __checkstripe__([fname, ost]):
    #    status = True
    return status
def __checkstripe__(args):
    # Verify a file is striped over exactly one OST and that it is the
    # expected one.  args: [fname, expected_ost_index]
    # Returns True when the check passes, None otherwise.
    fname = args[0]
    ost = args[1]
    cmd = ['lfs', 'getstripe']
    opts = None
    args = [fname]
    ret = None
    (output, errput, ret) = runcmd(cmd, opts, args)
    stripe_count = 0
    stripe_index = -1
    status = None
    if ret == 0:
        # Parse the lmm_* key: value lines from 'lfs getstripe'.
        for line in output.decode('utf8').split('\n'):
            if "lmm_stripe_offset" in line:
                stripe_index = int(line.split(":")[1])
            if "lmm_stripe_count" in line:
                stripe_count = int(line.split(":")[1])
    if stripe_count == 1 and stripe_index == ost:
        status = True
    return status
def get_svn_files_list():
    '''Return the list of subversion files likely to be used in the build
    process.

    Any files in the 'files*' directories are added via the package build.py
    files or the common builder.py file, and these rely on builder.py to add
    these when generating package signature.

    :return: paths from 'svn info' that match the module-level 'valid'
             pattern and do not live under a 'files' directory.
    '''
    cmd = 'svn info --depth infinity'
    ilist = []
    for line in runcmd(cmd)[0].split('\n'):
        if line.startswith('Path:'):
            # Bug fix: split on the first ':' only, so paths that themselves
            # contain a colon are no longer truncated.
            ilist.append(line.split(':', 1)[1].strip())
    # Filter with the module-level 'valid' regex, dropping files/* entries.
    flist = [item for item in ilist
             if valid.match(item) and not item.startswith('files')]
    return flist
def setstripeinfo(path, count=None, size=None, offset=None):
    """ set lustre stripe info for path
        path must be either an existing directory or non-existing file
        For efficiency reasons, no checks are done (it is assumed that the
        calling code already has "stat" information and will perform any
        necessary checks).
        If path is an existing socket or link, an error will be thrown
        If path is an existing fifo, i/o will block (forever?)
        Output: (no return value)
    """
    cmd = [env['PYLUTLFSPATH'], 'setstripe']
    opts = None
    args = [path]
    # Bug fix: test 'is not None' so legitimate zero values are honoured -
    # offset=0 (first OST) and count=0 (filesystem default) were silently
    # dropped by the previous truthiness tests.
    if count is not None:
        args[0:0] = ['-c', int(count)]
    if size is not None:
        args[0:0] = ['-S', int(size)]
    if offset is not None:
        args[0:0] = ['-i', int(offset)]
    (output, errput) = runcmd(cmd, opts, args)
def setstripeinfo( path, count=None, size=None, offset=None ):
    """ set lustre stripe info for path
        path must be either an existing directory or non-existing file
        For efficiency reasons, no checks are done (it is assumed that the
        calling code already has "stat" information and will perform any
        necessary checks).
        If path is an existing socket or link, an error will be thrown
        If path is an existing fifo, i/o will block (forever?)
        Output: (no return value)
    """
    cmd = [ env[ 'PYLUTLFSPATH' ], 'setstripe' ]
    opts = None
    args = [ path ]
    # Bug fix: compare against None so valid zero values (offset=0 is the
    # first OST, count=0 means filesystem default) are not silently dropped.
    if count is not None:
        args[0:0] = [ '-c', int( count ) ]
    if size is not None:
        args[0:0] = [ '-S', int( size ) ]
    if offset is not None:
        args[0:0] = [ '-i', int( offset ) ]
    ( output, errput ) = runcmd( cmd, opts, args )
def __set_health__(self):
    """Populate self.layout[fsname]['mdts_health'/'osts_health'] from the
    per-target lines of 'lfs check servers'.

    A target is healthy (True) when its line contains 'active'.
    """
    cmd = ["lfs", "check", "servers"]
    opts = None
    args = None
    ret = None
    (output, errput, ret) = runcmd(cmd, opts, args)
    if ret == 0:
        # Bug fix: iterate over LINES of the decoded output.  The previous
        # code iterated the string itself, yielding single characters, so
        # no line ever had >= 3 '-'-separated columns and nothing was parsed.
        for line in output.decode('utf8').splitlines():
            cols = line.rstrip().split("-")
            if len(cols) < 3:
                continue
            fsname = cols[0]
            # Greedy '.*' means group(1) is the LAST digit of e.g. 'OST0001'.
            re_result = re.search(r".*(\d)", cols[1], re.IGNORECASE)
            target = None
            if re_result:
                target = int(re_result.group(1)[0])
            status = True if "active" in line else False
            if fsname in self.layout:
                if "mdt" in cols[1].lower():
                    self.layout[fsname]["mdts_health"][target] = status
                elif "ost" in cols[1].lower():
                    self.layout[fsname]["osts_health"][target] = status
def syncfile(src_path, tgt_path, tmpbase=None, keeptmp=False, synctimes=False,
             syncperms=False, syncowner=False, syncgroup=False,
             pre_checksums=False, post_checksums=True):
    """ Lustre stripe aware file sync
        Copies a file to temporary location, then creates a hardlink for the
        target.
        If either the tmp or the target file already exist, that existing
        file will be checked for accuracy by checking size and mtime (and
        checksums if pre_checksum=True).  If synctimes=False, tgt is assumed
        to be equal if tgt_mtime >= src_mtime; otherwise, if syntimes=True,
        tgt_mtime must be exactly equal to src_mtime or tgt will be assumed
        to be out of sync.
        If a valid tmp or tgt exist and one or more of synctimes, syncperms,
        syncowner, syncgroup are specified, the specified metadata
        attributes of tmp and/or tgt file will be checked and updated.
        If both tmp and tgt already exist, both will be checked for accuracy
        against src.  If both tmp and tgt are valid (accurate matches),
        nothing happens.  If at least one of tmp or tgt are found to exist
        and be valid, the invalid file will be removed and a hardlink
        created to point to the valid file, thus avoiding a full file copy.
        If keeptmp=False, the tmp file hardlink will be removed.  When
        copying a file with multiple hard links, set keeptmp=True to keep
        the tempfile around so the other hard links will not result in
        additional file copies.  It is up to the user of this function to
        remove the tmp files at a later time.
        The tmpbase parameter cannot be None (this requirement may be
        removed in a future version).  tmpbase will be created if necessary.
        The tmpbase directory structure will not be removed and therefore
        must be cleaned up manually.
        If post_checksums=True (default), the checksums for src and tgt
        should be immediately available on the same parameters that were
        passed in (ie: src_path.checksum() and tgt_path.checksum() )
        :param src_path FSItem:
        :param tgt_path FSItem:
        :param tmpbase str: absolute path to directory where tmp files will
            be created
        :param keeptmp bool: if True, do not delete tmpfile (default=False)
        :param synctimes bool: sync file times (default=False)
        :param syncperms bool: sync file permissions (default=False)
        :param syncowner bool: sync file owner (default=False)
        :param syncgroup bool: sync file group (default=False)
        :param pre_checksums bool: use checksum to determine if src and tgt
            differ (default=False)
        :param post_checksums bool: if source was copied to target, compare
            checksums to verify target was written correctly (default=True)
        :return two-tuple:
            1. fsitem.FSItem: full path to tmpfile (even if keeptmp=False)
            2. action_taken: dict with keys of 'data_copy' and 'meta_update'
               and values of True or False depending on the action taken
        (The earlier docstring also promised a third 'sync_results' element;
        the function returns only the two-tuple above.)
    """
    if tmpbase is None:
        #TODO - If tmpbase is None, create one at the mountpoint
        # tmpbase = _pathjoin(
        #     fsitem.getmountpoint( tgt_path ),
        #     '.pylutsyncfiletmpbase' )
        raise UserWarning('Default tmpbase not yet implemented')
    # Construct full path to tmpfile: base + <5-char hex value> + <INODE>
    try:
        srcfid = src_path.inode()
    except (Run_Cmd_Error) as e:
        raise SyncError(reason=e.reason, origin=e)
    tmpdir = _pathjoin(tmpbase, hex(hash(srcfid))[-5:])
    tmp_path = fsitem.FSItem(os.path.join(tmpdir, srcfid))
    log.debug('tmp_path:{0}'.format(tmp_path))
    # rsync logic: what already exists on the tgt FS and what needs to be updated
    do_mktmpdir = False
    do_setstripe = False
    setstripe_tgt = None
    setstripe_stripeinfo = None
    do_rsync = False
    rsync_src = None
    rsync_tgt = None
    do_hardlink = False
    hardlink_src = None
    hardlink_tgt = None
    do_checksums = False
    sync_action = {'data_copy': False, 'meta_update': False}
    syncopts = {
        'synctimes': synctimes,
        'syncperms': syncperms,
        'syncowner': syncowner,
        'syncgroup': syncgroup,
        'pre_checksums': pre_checksums,
        'post_checksums': post_checksums,
    }
    # (exists, data ok, metadata ok) flags for the tmp and tgt files.
    tmp_exists, tmp_data_ok, tmp_meta_ok = (False, ) * 3
    tgt_exists, tgt_data_ok, tgt_meta_ok = (False, ) * 3
    tmp_exists = tmp_path.exists()
    if tmp_exists:
        log.debug('tmp exists, comparing tmp to src')
        tmp_data_ok, tmp_meta_ok = _compare_files(src_path, tmp_path, syncopts)
    tgt_exists = tgt_path.exists()
    if tgt_exists:
        log.debug('tgt exists, comparing tgt to src')
        tgt_data_ok, tgt_meta_ok = _compare_files(src_path, tgt_path, syncopts)
    if tmp_exists and tgt_exists:
        log.debug('tmp and tgt exist')
        if tmp_path.inode() == tgt_path.inode():
            log.debug('tmp and tgt are same file')
            if tmp_data_ok:
                if not tmp_meta_ok:
                    log.debug('tmp needs metadata update')
                    sync_action['meta_update'] = True
                    do_rsync = True
                    rsync_src = src_path
                    rsync_tgt = tmp_path
            else:
                log.debug('tmp not ok, unset all')
                os.unlink(str(tmp_path))
                tmp_path.update()
                os.unlink(str(tgt_path))
                tgt_path.update()
                tmp_exists, tmp_data_ok, tmp_meta_ok = (False, ) * 3
                tgt_exists, tgt_data_ok, tgt_meta_ok = (False, ) * 3
        else:
            log.debug('tmp and tgt are different files')
            # check if one of tmp or tgt are ok, to avoid unnecessary data transfer
            if tmp_data_ok:
                log.debug('tmp data ok, unset tgt vars')
                os.unlink(str(tgt_path))
                tgt_path.update()
                tgt_exists, tgt_data_ok, tgt_meta_ok = (False, ) * 3
            elif tgt_data_ok:
                log.debug('tgt data ok, unset tmp vars')
                os.unlink(str(tmp_path))
                tmp_path.update()
                tmp_exists, tmp_data_ok, tmp_meta_ok = (False, ) * 3
            else:
                log.debug('neither tmp nor tgt are ok, unset both')
                os.unlink(str(tmp_path))
                tmp_path.update()
                os.unlink(str(tgt_path))
                tgt_path.update()
                tmp_exists, tmp_data_ok, tmp_meta_ok = (False, ) * 3
                tgt_exists, tgt_data_ok, tgt_meta_ok = (False, ) * 3
    if tmp_exists != tgt_exists:
        # only one file exists
        if tmp_exists:
            log.debug('tmp exists, tgt doesnt')
            if tmp_data_ok:
                log.debug('tmp data ok, tgt needs hardlink')
                do_hardlink = True
                hardlink_src = tmp_path
                hardlink_tgt = tgt_path
                if not tmp_meta_ok:
                    log.debug('tmp needs meta update')
                    sync_action['meta_update'] = True
                    do_rsync = True
                    rsync_src = src_path
                    rsync_tgt = tmp_path
            else:
                log.debug('tmp not ok, unset tmp vars')
                os.unlink(str(tmp_path))
                tmp_path.update()
                tmp_exists, tmp_data_ok, tmp_meta_ok = (False, ) * 3
        else:
            log.debug('tgt exists, tmp doesnt')
            if tgt_data_ok:
                log.debug('tgt data ok')
                if keeptmp:
                    log.debug('keeptmp=True, tmp needs hardlink')
                    do_mktmpdir = True
                    do_hardlink = True
                    hardlink_src = tgt_path
                    hardlink_tgt = tmp_path
                else:
                    log.debug('keeptmp=False, no action needed')
                if not tgt_meta_ok:
                    log.debug('tgt needs metadata update')
                    sync_action['meta_update'] = True
                    do_rsync = True
                    rsync_src = src_path
                    rsync_tgt = tgt_path
            else:
                log.debug('tgt not ok, unset tgt vars')
                os.unlink(str(tgt_path))
                tgt_path.update()
                tgt_exists, tgt_data_ok, tgt_meta_ok = (False, ) * 3
    if not (tmp_exists or tgt_exists):
        log.debug('neither tmp nor tgt exist')
        sync_action.update(data_copy=True, meta_update=True)
        if src_path.is_regular():
            do_setstripe = True
            setstripe_stripeinfo = src_path.stripeinfo()
        if keeptmp:
            do_mktmpdir = True
            setstripe_tgt = tmp_path  #will be ignored if do_setstripe is False
            do_rsync = True
            rsync_src = src_path
            rsync_tgt = tmp_path
            do_hardlink = True
            hardlink_src = tmp_path
            hardlink_tgt = tgt_path
            do_checksums = True
        else:
            log.debug('keeptmp is false, skipping tmpfile creation')
            setstripe_tgt = tgt_path  #will be ignored if do_setstripe is False
            do_rsync = True
            rsync_src = src_path
            rsync_tgt = tgt_path
            do_checksums = True
    if do_mktmpdir:
        # Ensure tmpdir exists
        log.debug('create tmpdir {0}'.format(tmpdir))
        try:
            os.makedirs(tmpdir)
        except (OSError) as e:
            # OSError: [Errno 17] File exists
            if e.errno != 17:
                raise SyncError('Unable to create tmpdir {0}'.format(tmpdir), e)
    if do_setstripe:
        # Set stripe to create the new file with the expected stripe information
        log.debug('setstripe (create) {0}'.format(setstripe_tgt))
        try:
            setstripeinfo(setstripe_tgt,
                          count=setstripe_stripeinfo.count,
                          size=setstripe_stripeinfo.size)
        except (Run_Cmd_Error) as e:
            msg = 'Setstripe failed for {0}'.format(setstripe_tgt)
            raise SyncError(msg, e)
        # NOTE(review): the dd fast-path below is nested under do_setstripe
        # in this reconstruction so rsync_src/rsync_tgt are guaranteed set;
        # the rsync below still runs afterwards (--inplace) to carry
        # metadata.  Confirm against the original layout.
        if rsync_src.size > env['PYLUTRSYNCMAXSIZE']:
            # DD for large files
            # TODO - replace dd with ddrescue (for efficient handling of
            # sparse files)
            cmd = ['/bin/dd']
            opts = {
                'bs': 4194304,
                'if': rsync_src,
                'of': rsync_tgt,
                'status': 'noxfer',
            }
            args = None
            (output, errput) = runcmd(cmd, opts, args)
            if len(errput.splitlines()) > 2:
                #TODO - it is hackish to ignore errors based on line count,
                # better is to use a dd that supports "status=none"
                raise UserWarning(
                    "errors during dd of '{0}' -> '{1}': output='{2}' errors='{3}'"
                    .format(rsync_src, rsync_tgt, output, errput))
    if do_rsync:
        # Do the rsync
        cmd = [env['PYLUTRSYNCPATH']]
        opts = {'--compress-level': 0}
        args = ['-l', '-A', '-X', '--super', '--inplace', '--specials']
        if synctimes:
            args.append('-t')
        if syncperms:
            args.append('-p')
        if syncowner:
            args.append('-o')
        if syncgroup:
            args.append('-g')
        args.extend([rsync_src, rsync_tgt])
        try:
            (output, errput) = runcmd(cmd, opts, args)
        except (Run_Cmd_Error) as e:
            raise SyncError(reason=e.reason, origin=e)
        if len(errput) > 0:
            raise SyncError(
                reason="errors during sync of '{0}' -> '{1}'".format(
                    rsync_src, rsync_tgt),
                origin="output='{0}' errors='{1}'".format(output, errput))
    if do_hardlink:
        log.debug('hardlink {0} <- {1}'.format(hardlink_src, hardlink_tgt))
        try:
            os.link(str(hardlink_src), str(hardlink_tgt))
        except (OSError) as e:
            raise SyncError(
                reason='Caught exception for link {0} -> {1}'.format(
                    hardlink_src, hardlink_tgt),
                origin=e)
    # Delete tmp
    if keeptmp is False:
        log.debug('unlink tmpfile {0}'.format(tmp_path))
        try:
            os.unlink(str(tmp_path))
        except (OSError) as e:
            # OSError: [Errno 2] No such file or directory
            if e.errno != 2:
                raise SyncError(
                    'Error attempting to delete tmp {0}'.format(tmp_path), e)
        #tmp_path.update()
        # TODO - replace rmtree with safer alternative
        # walk dirs backwards and rmdir each
        #shutil.rmtree( tmpbase ) #this will force delete everything, careful
    if do_checksums and post_checksums:
        # Compare checksums to verify target file was written accurately
        src_checksum = src_path.checksum()
        tgt_checksum = tgt_path.checksum()
        if src_checksum != tgt_checksum:
            reason = 'Checksum mismatch'
            origin = 'src_file={sf}, tgt_file={tf}, '\
                     'src_checksum={sc}, tgt_checksum={tc}'.format(
                         sf=src_path, tf=tgt_path,
                         sc=src_checksum, tc=tgt_checksum
                     )
            raise SyncError(reason, origin)
    return (tmp_path, sync_action)
def _path2fid( path ):
    '''Return the Lustre FID of *path* using "lfs path2fid".

    :param path: filesystem path to query
    :return: FID string with trailing whitespace stripped
    :raises UserWarning: if "lfs path2fid" wrote anything to stderr
    '''
    output, errput = runcmd( [ 'lfs', 'path2fid' ], opts=None, args=[ path ] )
    if len( errput ) > 0:
        # Include stderr in the exception; the original raised a bare
        # UserWarning() which made failures impossible to diagnose.
        raise UserWarning( 'lfs path2fid failed for {0}: {1}'.format( path, errput ) )
    return output.rstrip()
# Health-snapshot web handler for a Huawei-style device (reads board status and
# active alarms over SSH via runcmd.runcmd). Presumably a Bottle route handler
# (uses response.set_header / request.query) -- confirm against the framework setup.
# Python 2 code: print statements, str.decode('base64'), e.message.
#
# NOTE(review): in the first `except` (password-file failure), `device` and
# `result` are referenced before they are assigned (both are set only after the
# try block), so that path raises NameError instead of reporting the real error.
# NOTE(review): in the "display board 0" loop, `error += ...` runs without
# `error` ever being initialized -> NameError the first time a board status is
# not normal.
# NOTE(review): if the very first session.run_cmd() raises, the `except` block
# references `sessionoutput` before it was assigned -- another NameError.
# Returns a dict with keys 'result' (ok/nok HTML summary), 'raw' (full session
# output) and 'error'; missing keys are backfilled with the string 'null'.
def device_snapshot(): passwords = {} try: with open("/data/ops/python/wsgi/.runcmd.passwd.json", "r") as f: passwords = json.load(f) username = passwords["autouser"] password = passwords["autopass"].decode('base64') enable = passwords["accessenable"].decode('base64') except Exception as e: print device, " open password file failed!\n", e.message result['error'] = device + " open password file failed!\n" + e.message return result response.set_header('Cache-Control', 'no-cache, no-store, max-age=0, must-revalidate') response.set_header('Pragma', 'no-cache') #return request.query_string device = request.query.device result = {} #result['result']={} try: session = runcmd.runcmd(device, username, password) except Exception as e: print device, " open ssh session failed!\n", e.message result['error'] = device + " open ssh session failed!\n" + e.message return result print device, " open ssh session successful!\n" try: sessionoutput = session.run_cmd('undo smart') sessionoutput += session.run_cmd('scroll') sessionoutput += session.run_cmd('undo interactive') sessionoutput += session.run_cmd('enable') sessionoutput += session.enable_cmd(enable, 'super') sessionoutput += session.run_cmd('display time') sessionoutput += session.run_cmd('display sysuptime') output = session.run_cmd('display board 0') sessionoutput += output outputs = output.splitlines() check = True for line in outputs: searchObj = re.search(r'^ +(\d+) +(\w+) +(\S+)', line) if searchObj: slot = searchObj.group(1) boardName = searchObj.group(2) status = searchObj.group(3) if 'ormal' in status: pass else: error += " slot " + slot + " BoardName " + boardName + ": " + status + "\n" check = False if check: result['result'] = "board 0 : ok\n" else: result['result'] = "board 0 : <em style='color: red;'>nok</em>\n" result['result'] += error output = session.run_cmd('display alarm active alarmlevel critical') sessionoutput += output outputs = output.splitlines() check = False for line in outputs: searchObj = 
re.search(r'No alarm record', line) if searchObj: check = True if check: result['result'] += "alarm active critical : ok\n" else: result[ 'result'] += "alarm active critical : <em style='color: red;'>nok</em>\n" output = session.run_cmd('display alarm active alarmlevel major') sessionoutput += output outputs = output.splitlines() check = False for line in outputs: searchObj = re.search(r'No alarm record', line) if searchObj: check = True if check: result['result'] += "alarm active major : ok\n" else: result[ 'result'] += "alarm active major : <em style='color: red;'>nok</em>\n" output = session.run_cmd('display alarm active alarmlevel minor') sessionoutput += output outputs = output.splitlines() check = False for line in outputs: searchObj = re.search(r'No alarm record', line) if searchObj: check = True if check: result['result'] += "alarm active minor : ok\n" else: result[ 'result'] += "alarm active minor : <em style='color: red;'>nok</em>\n" output = session.run_cmd('display alarm active alarmlevel warning') sessionoutput += output outputs = output.splitlines() check = False for line in outputs: searchObj = re.search(r'No alarm record', line) if searchObj: check = True if check: result['result'] += "alarm active warning : ok\n" else: result[ 'result'] += "alarm active warning : <em style='color: red;'>nok</em>\n" except Exception as e: session.close() print device, " ssh session running command failed!\n", str(e) result[ 'error'] = device + " ssh session running command failed!\n" + e.message + sessionoutput result['raw'] = sessionoutput return result session.close() print device, " ssh session running command successful.\n" result['raw'] = sessionoutput if 'result' in result: pass else: result['result'] = 'null' if 'raw' in result: pass else: result['raw'] = 'null' if 'error' in result: pass else: result['error'] = 'null' return result
# Health-snapshot web handler for a Juniper MX device: checks routing-engines
# 0/1, chassis alarms, FPC/PIC status, ambient temperature and fabric summary
# over SSH. Python 2 code (print statements, str.decode('base64'), e.message).
# NOTE(review): this redefines device_snapshot() -- if this really lives in the
# same module as the Huawei variant above, the later definition silently
# shadows the earlier one; confirm these are separate route modules.
#
# NOTE(review): first `except` references `device` and `result` before they are
# assigned (both are set after the try block) -> NameError masks the real error.
# NOTE(review): routing-engine 1 branch uses `error = " state: ..."` (overwrite)
# where routing-engine 0 uses `error += ...` -- inconsistent; earlier messages
# for RE 1 would be lost.
# NOTE(review): the "show chassis alarms" section sets `check` from
# "No alarms currently active" but never appends an ok/nok line to
# result['result']; `check` is then reset by the fpc section, so the alarm
# outcome is discarded entirely.
# Returns a dict with keys 'result', 'raw', 'error'; missing keys backfilled
# with the string 'null'.
def device_snapshot(): passwords = {} try: with open("/data/ops/python/wsgi/.runcmd.passwd.json", "r") as f: passwords = json.load(f) username = passwords["autouser"] password = passwords["autopass"].decode('base64') except Exception as e: print device, " open password file failed!\n", e.message result['error'] = device + " open password file failed!\n" + e.message return result response.set_header('Cache-Control', 'no-cache, no-store, max-age=0, must-revalidate') response.set_header('Pragma', 'no-cache') #return request.query_string device = request.query.device result = {} result['result']='' try: session = runcmd.runcmd(device, username, password) except Exception as e: print device, " open ssh session failed!\n" , e.message result['error'] = device + " open ssh session failed!\n" + e.message return result print device, " open ssh session successful!\n" try: sessionoutput = session.run_cmd('set cli screen-width 300') sessionoutput += session.run_cmd('set cli screen-length 0') sessionoutput += session.run_cmd('show system uptime') output = session.run_cmd ('show chassis routing-engine 0') sessionoutput += output outputs = output.splitlines() check = True info = '' error = '' count = 0 for line in outputs: searchObj = re.search(r'^ +Current state +(\S+)', line) if searchObj: count += 1 state = searchObj.group(1) if 'Backup' in state or 'Master' in state: info += " state: " + state + "\n" else: error += " state: " + state + "\n" check = False searchObj = re.search(r'^ +Memory utilization +(\d+) +percent', line) if searchObj: count += 1 memorypercent = searchObj.group(1) if int(memorypercent) <= 70: info += " Memory utilization %: " + memorypercent + "\n" else: error += " Memory utilization %: " + memorypercent + "\n" check = False if count < 2: check = False error += " error: regex match line count is only: " + str(count) +"\n" if check: result['result'] += "routing-engine 0: ok\n" result['result'] += info else: result['result'] += "routing-engine 0 : <em 
style='color: red;'>nok</em>\n" result['result'] += error output = session.run_cmd ('show chassis routing-engine 1') sessionoutput += output outputs = output.splitlines() check = True info = '' error = '' count = 0 for line in outputs: searchObj = re.search(r'^ +Current state +(\S+)', line) if searchObj: count += 1 state = searchObj.group(1) if 'Backup' in state or 'Master' in state: info += " state: " + state + "\n" else: error = " state: " + state + "\n" check = False searchObj = re.search(r'^ +Memory utilization +(\d+) +percent', line) if searchObj: count += 1 memorypercent = searchObj.group(1) if int(memorypercent) <= 70: info += " Memory utilization %: " + memorypercent + "\n" else: error += " Memory utilization %: " + memorypercent + "\n" check = False if count < 2: check = False error += " error: regex match line count is only: " + str(count) +"\n" if check: result['result'] += "routing-engine 1: ok\n" result['result'] += info else: result['result'] += "routing-engine 1 : <em style='color: red;'>nok</em>\n" result['result'] += error output = session.run_cmd ('show chassis alarms') sessionoutput += output outputs = output.splitlines() check = False info = '' error = '' for line in outputs: searchObj = re.search(r'No alarms currently active', line) if searchObj: check = True continue else: pass output = session.run_cmd ('show chassis fpc') sessionoutput += output outputs = output.splitlines() check = True info = '' error = '' count = 0 for line in outputs: searchObj = re.search(r'^ +(\d+) +(\S+)', line) if searchObj: count += 1 slot = searchObj.group(1) state = searchObj.group(2) if state not in ['Online', 'Empty']: check = False error += " FPC " + slot + " state: " + state +"\n" searchObj = re.search(r'^ +(\d+) +(\S+) +(\d+) +(\d+)', line) if searchObj: count += 1 slot = searchObj.group(1) state = searchObj.group(2) temp = searchObj.group(3) cpu = searchObj.group(4) if int(temp) > 50: check = False error += " FPC " + slot + " temperature: " + temp +"\n" if 
int(cpu) > 60: check = False error += " FPC " + slot + " cpu %: " + cpu +"\n" if count < 6: check = False error += " error: regex match line count is only: " + str(count) +"\n" if check: result['result'] += "chassis fpc: ok\n" else: result['result'] += "chassis fpc : <em style='color: red;'>nok</em>\n" result['result'] += error output = session.run_cmd ('show chassis fpc pic-status') sessionoutput += output outputs = output.splitlines() check = True info = '' error = '' count = 0 for line in outputs: searchObj = re.search(r'^(Slot) +(\d+)', line) if searchObj: count += 1 slot = searchObj.group(1) searchObj = re.search(r'^ +PIC +(\d+) +(\S+)', line) if searchObj: count += 1 pic = searchObj.group(1) state = searchObj.group(2) if state != 'Online': check = False error += " FPC " + slot + " PIC " + pic + " state: "+ state +"\n" if count < 8: check = False error += " error: regex match line count is only: " + str(count) +"\n" if check: result['result'] += "chassis pic: ok\n" else: result['result'] += "chassis pic : <em style='color: red;'>nok</em>\n" result['result'] += error output = session.run_cmd ('show chassis ambient-temperature') sessionoutput += output outputs = output.splitlines() check = True info = '' error = '' count = 0 for line in outputs: searchObj = re.search(r'^Ambient Temperature: +(\d+)', line) if searchObj: count += 1 temp = searchObj.group(1) if int(temp) > 50: check = False error += " Chassis Ambient Temperature C: " + temp +"\n" else: check = True if count < 1: check = False error += " error: regex match line count is only: " + str(count) +"\n" if check: result['result'] += "chassis Ambient Temperature ok\n" result['result'] += info else: result['result'] += "chassis Ambient Temperature : <em style='color: red;'>nok</em>\n" result['result'] += error output = session.run_cmd ('show chassis fabric summary extended') sessionoutput += output outputs = output.splitlines() check = True info = '' error = '' count = 0 for line in outputs: searchObj = 
re.search(r'^ +(\d+) +(\w+) +(\S.+?\S) +\d', line) if searchObj: count += 1 plane = searchObj.group(1) state = searchObj.group(2) errors = searchObj.group(3) if state not in ['Online', 'Spare'] or errors != "NO NO NO/ NO": check = False error += " chassis fabric summary: " + plane + " " + state + " " + errors + "\n" if count < 6: check = False error += " error: regex match line count is only: " + str(count) +"\n" if check: result['result'] += "chassis fabric summary ok\n" result['result'] += info else: result['result'] += "chassis fabric summary : <em style='color: red;'>nok</em>\n" result['result'] += error except Exception as e: session.close() print device, " ssh session running command failed!\n" , str(e) result['error'] = device + " ssh session running command failed!\n" + e.message + sessionoutput result['raw']=sessionoutput return result session.close() print device, " ssh session running command successful.\n" result['raw']=sessionoutput if 'result' in result: pass else: result['result'] = 'null' if 'raw' in result: pass else: result['raw'] = 'null' if 'error' in result: pass else: result['error'] = 'null' return result
# Web handler: fetches "show subscribers summary" from a Juniper MX over SSH
# and extracts the DHCP and VLAN subscriber counts into result['result'].
# Python 2 code (print statements, str.decode('base64'), e.message).
#
# NOTE(review): same defect as the snapshot handlers -- the first `except`
# (password-file failure) references `device` and `result` before they are
# assigned, so that path raises NameError instead of returning the error dict.
# NOTE(review): if the very first session.run_cmd() raises, the `except` block
# references `sessionoutput` before assignment -> NameError.
# Returns a dict with keys 'result', 'raw', 'error'; missing keys backfilled
# with the string 'null'.
def mx_re_sub_summary(): passwords = {} try: with open("/data/ops/python/wsgi/.runcmd.passwd.json", "r") as f: passwords = json.load(f) username = passwords["autouser"] password = passwords["autopass"].decode('base64') except Exception as e: print device, " open password file failed!\n", e.message result['error'] = device + " open password file failed!\n" + e.message return result response.set_header('Cache-Control', 'no-cache, no-store, max-age=0, must-revalidate') response.set_header('Pragma', 'no-cache') #return request.query_string device = request.query.device result = {} try: session = runcmd.runcmd(device, username, password) except Exception as e: print device, " open ssh session failed!\n", e.message result['error'] = device + " open ssh session failed!\n" + e.message return result print device, " open ssh session successful!\n" try: sessionoutput = session.run_cmd('set cli screen-width 300') sessionoutput += session.run_cmd('set cli screen-length 0') sessionoutput += session.run_cmd('show system uptime') output = session.run_cmd('show subscribers summary') sessionoutput += output outputs = output.splitlines() result['result'] = "Subscribers Summary:\n" for line in outputs: searchObj = re.search(r'DHCP: +(\d+)', line) if searchObj: result['result'] += " DHCP: " + searchObj.group(1) + "\n" searchObj = re.search(r'VLAN: +(\d+)', line) if searchObj: result['result'] += " VLAN: " + searchObj.group(1) + "\n" except Exception as e: session.close() print device, " ssh session running command failed!\n", str(e) result[ 'error'] = device + " ssh session running command failed!\n" + e.message + sessionoutput result['raw'] = sessionoutput return result session.close() print device, " ssh session running command successful.\n" print device, " result is:\n" print json.dumps(result, indent=2) result['raw'] = sessionoutput if 'result' in result: pass else: 
result['error'] = 'null' return result
# Top-level driver loop: reads a CSV of simulation parameters and, for rows
# whose output image file does not yet exist, launches ./sprj_cv.py via
# runcmd(..., maxruns=32, ...). Rows are partitioned by row[0] into
# defect-free (<= numNoDefUrs), "up"-defect and "lat"-defect ranges, which
# differ only in the numeric flag passed to sprj_cv.py (1, 2 or 4).
# Python 2: uses reader.next() to skip the header row.
#
# NOTE(review): depends on names defined earlier in the file and not visible
# here (csvread, kliv, kspl, kwashout, percent_inj, numNoDefUrs, numUpDefUrs,
# numLatDefUrs, runcmd) -- confirm against the full script.
# NOTE(review): the final elif builds `latcmd` but its runcmd(...) call falls
# outside this excerpt -- presumably it follows immediately; verify.
# NOTE(review): `file` shadows the builtin; `kbkg` is computed each row but not
# used within the visible portion -- confirm whether it is dead code.
reader=csv.reader(csvread,delimiter=',',lineterminator='\n') reader.next() for row in reader: kbkg=1-kliv-kspl-kwashout-float(row[8]) cwd = os.getcwd() #print cwd RD=[cwd,'/',int(percent_inj*100),'/',row[2],'/',row[4],'/'] rd="".join([str(i) for i in RD]) if int(row[0]) < numNoDefUrs+1: #sorting based on numNoDefUrs, here use the first half urs for defect-free urs FILE=[rd,'sprj.','ur',int(row[0]),'.',row[1],'.im'] file= "".join([str(i) for i in FILE]) if os.path.exists(file) is not True: #print file CMD=['./sprj_cv.py',' ',row[1],' ',round(float(row[7]),3),' ',round(float(row[8]),3),' ',round(kliv,3),' ',round(kspl,3),' ',round(float(row[9]),3),' ',1920,' ',1,' ',rd,'sprj.','ur',int(row[0]),'.',row[1],'.im','>>',rd,row[1],'.log',' ','2>&1'] cmd="".join([str(i) for i in CMD]) runcmd(cmd,maxruns=32,rundir=cwd,waittime=2,debug=True) elif int(row[0]) > numNoDefUrs and int(row[0]) < numUpDefUrs+1: FILE=[rd,'sprj.','ur',int(row[0]),'.',row[1],'.im'] file= "".join([str(i) for i in FILE]) if os.path.exists(file) is not True: #print file UPCMD=['./sprj_cv.py',' ',row[1],' ',round(float(row[7]),3),' ',round(float(row[8]),3),' ',round(kliv,3),' ',round(kspl,3),' ',round(float(row[9]),3),' ',1920,' ',2,' ',rd,'sprj.','ur',int(row[0]),'.',row[1],'.im','>>',rd,row[1],'.log',' ','2>&1'] upcmd="".join([str(i) for i in UPCMD]) runcmd(upcmd,maxruns=32,rundir=cwd,waittime=2,debug=True) elif int(row[0]) > numUpDefUrs and int(row[0]) < numLatDefUrs+1: FILE=[rd,'sprj.','ur',int(row[0]),'.',row[1],'.im'] file= "".join([str(i) for i in FILE]) if os.path.exists(file) is not True: #print file LATCMD=['./sprj_cv.py',' ',row[1],' ',round(float(row[7]),3),' ',round(float(row[8]),3),' ',round(kliv,3),' ',round(kspl,3),' ',round(float(row[9]),3),' ',1920,' ',4,' ',rd,'sprj.','ur',int(row[0]),'.',row[1],'.im','>>',rd,row[1],'.log',' ','2>&1'] latcmd="".join([str(i) for i in LATCMD])
# Lustre-aware file sync. Flow: validate any existing tmp/tgt copies against
# src (size/mtime, optional checksums), then as needed: mktmpdir -> setstripe
# (regular files only) -> copy via dd (files larger than PYLUTRSYNCMAXSIZE) or
# rsync -> hardlink tmp<->tgt -> unlink tmp (when keeptmp=False) -> verify
# checksums (when post_checksums=True).
#
# NOTE(review): the function returns a 2-tuple ( tmp_path, sync_action ); the
# third ":return" item in the docstring ("sync_results: output from rsync
# --itemize-changes") appears stale -- nothing in the body captures itemized
# rsync output; confirm and drop from the docstring.
# NOTE(review): dd errors are detected by counting stderr lines (> 2), which
# the inline TODO itself calls hackish; errno values 17 (EEXIST) and 2 (ENOENT)
# are hard-coded -- errno.EEXIST / errno.ENOENT would be clearer.
def syncfile( src_path, tgt_path, tmpbase=None, keeptmp=False, synctimes=False, syncperms=False, syncowner=False, syncgroup=False, pre_checksums=False, post_checksums=True ): """ Lustre stripe aware file sync Copies a file to temporary location, then creates a hardlink for the target. If either the tmp or the target file already exist, that existing file will be checked for accuracy by checking size and mtime (and checksums if pre_checksum=True). If synctimes=False, tgt is assumed to be equal if tgt_mtime >= src_mtime; otherwise, if syntimes=True, tgt_mtime must be exactly equal to src_mtime or tgt will be assumed to be out of sync. If a valid tmp or tgt exist and one or more of synctimes, syncperms, syncowner, syncgroup are specified, the specified metadata attributes of tmp and/or tgt file will be checked and updated. If both tmp and tgt already exist, both will be checked for accuracy against src. If both tmp and tgt are valid (accurate matches), nothing happens. If at least one of tmp or tgt are found to exist and be valid, the invalid file will be removed and a hardlink created to point to the valid file, thus avoiding a full file copy. If keeptmp=False, the tmp file hardlink will be removed. When copying a file with multiple hard links, set keeptmp=True to keep the tempfile around so the other hard links will not result in additional file copies. It is up to the user of this function to remove the tmp files at a later time. The tmpbase parameter cannot be None (this requirement may be removed in a future version). tmpbase will be created if necessary. The tmpbase directory structure will not be removed and therefore must be cleaned up manually. 
If post_checksums=True (default), the checksums for src and tgt should be immediately available on the same parameters that were passed in (ie: src_path.checksum() and tgt_path.checksum() ) :param src_path FSItem: :param tgt_path FSItem: :param tmpbase str: absolute path to directory where tmp files will be created :param keeptmp bool: if True, do not delete tmpfile (default=False) :param synctimes bool: sync file times (default=False) :param syncperms bool: sync file permissions (default=False) :param syncowner bool: sync file owner (default=False) :param syncgroup bool: sync file group (default=False) :param pre_checksums bool: use checksum to determine if src and tgt differ (default=False) :param post_checksums bool: if source was copied to target, compare checksums to verify target was written correctly (default=True) :return two-tuple: 1. fsitem.FSItem: full path to tmpfile (even if keeptmp=False) 2. action_taken: dict with keys of 'data_copy' and 'meta_update' and values of True or False depending on the action taken 2. 
sync_results: output from rsync --itemize-changes """ if tmpbase is None: #TODO - If tmpbase is None, create one at the mountpoint # tmpbase = _pathjoin( # fsitem.getmountpoint( tgt_path ), # '.pylutsyncfiletmpbase' ) raise UserWarning( 'Default tmpbase not yet implemented' ) # Construct full path to tmpfile: base + <5-char hex value> + <INODE> try: srcfid = src_path.inode() except ( Run_Cmd_Error ) as e: raise SyncError( reason=e.reason, origin=e ) tmpdir = _pathjoin( tmpbase, hex( hash( srcfid ) )[-5:] ) tmp_path = fsitem.FSItem( os.path.join( tmpdir, srcfid ) ) log.debug( 'tmp_path:{0}'.format( tmp_path ) ) # rsync logic: what already exists on the tgt FS and what needs to be updated do_mktmpdir = False do_setstripe = False setstripe_tgt = None setstripe_stripeinfo = None do_rsync = False rsync_src = None rsync_tgt = None do_hardlink = False hardlink_src = None hardlink_tgt = None do_checksums = False sync_action = { 'data_copy': False, 'meta_update': False } syncopts = { 'synctimes': synctimes, 'syncperms': syncperms, 'syncowner': syncowner, 'syncgroup': syncgroup, 'pre_checksums': pre_checksums, 'post_checksums': post_checksums, } tmp_exists, tmp_data_ok, tmp_meta_ok = ( False, ) * 3 tgt_exists, tgt_data_ok, tgt_meta_ok = ( False, ) * 3 tmp_exists = tmp_path.exists() if tmp_exists: log.debug( 'tmp exists, comparing tmp to src' ) tmp_data_ok, tmp_meta_ok = _compare_files( src_path, tmp_path, syncopts ) tgt_exists = tgt_path.exists() if tgt_exists: log.debug( 'tgt exists, comparing tgt to src' ) tgt_data_ok, tgt_meta_ok = _compare_files( src_path, tgt_path, syncopts ) if tmp_exists and tgt_exists: log.debug( 'tmp and tgt exist' ) if tmp_path.inode() == tgt_path.inode(): log.debug( 'tmp and tgt are same file' ) if tmp_data_ok: if not tmp_meta_ok: log.debug( 'tmp needs metadata update' ) sync_action[ 'meta_update' ] = True do_rsync = True rsync_src = src_path rsync_tgt = tmp_path else: log.debug( 'tmp not ok, unset all' ) os.unlink( str( tmp_path ) ) 
tmp_path.update() os.unlink( str( tgt_path ) ) tgt_path.update() tmp_exists, tmp_data_ok, tmp_meta_ok = ( False, ) * 3 tgt_exists, tgt_data_ok, tgt_meta_ok = ( False, ) * 3 else: log.debug( 'tmp and tgt are different files' ) # check if one of tmp or tgt are ok, to avoid unnecessary data transfer if tmp_data_ok: log.debug( 'tmp data ok, unset tgt vars' ) os.unlink( str( tgt_path ) ) tgt_path.update() tgt_exists, tgt_data_ok, tgt_meta_ok = ( False, ) * 3 elif tgt_data_ok: log.debug( 'tgt data ok, unset tmp vars' ) os.unlink( str( tmp_path ) ) tmp_path.update() tmp_exists, tmp_data_ok, tmp_meta_ok = ( False, ) * 3 else: log.debug( 'neither tmp nor tgt are ok, unset both' ) os.unlink( str( tmp_path ) ) tmp_path.update() os.unlink( str( tgt_path ) ) tgt_path.update() tmp_exists, tmp_data_ok, tmp_meta_ok = ( False, ) * 3 tgt_exists, tgt_data_ok, tgt_meta_ok = ( False, ) * 3 if tmp_exists != tgt_exists: # only one file exists if tmp_exists: log.debug( 'tmp exists, tgt doesnt' ) if tmp_data_ok: log.debug( 'tmp data ok, tgt needs hardlink' ) do_hardlink = True hardlink_src = tmp_path hardlink_tgt = tgt_path if not tmp_meta_ok: log.debug( 'tmp needs meta update' ) sync_action[ 'meta_update' ] = True do_rsync = True rsync_src = src_path rsync_tgt = tmp_path else: log.debug( 'tmp not ok, unset tmp vars' ) os.unlink( str( tmp_path ) ) tmp_path.update() tmp_exists, tmp_data_ok, tmp_meta_ok = ( False, ) * 3 else: log.debug( 'tgt exists, tmp doesnt' ) if tgt_data_ok: log.debug( 'tgt data ok' ) if keeptmp: log.debug( 'keeptmp=True, tmp needs hardlink' ) do_mktmpdir = True do_hardlink = True hardlink_src = tgt_path hardlink_tgt = tmp_path else: log.debug( 'keeptmp=False, no action needed' ) if not tgt_meta_ok: log.debug( 'tgt needs metadata update' ) sync_action[ 'meta_update' ] = True do_rsync = True rsync_src = src_path rsync_tgt = tgt_path else: log.debug( 'tgt not ok, unset tgt vars' ) os.unlink( str( tgt_path ) ) tgt_path.update() tgt_exists, tgt_data_ok, tgt_meta_ok = ( 
False, ) * 3 if not ( tmp_exists or tgt_exists ): log.debug( 'neither tmp nor tgt exist' ) sync_action.update( data_copy = True, meta_update = True ) if src_path.is_regular(): do_setstripe = True setstripe_stripeinfo = src_path.stripeinfo() if keeptmp: do_mktmpdir = True setstripe_tgt = tmp_path #will be ignored if do_setstripe is False do_rsync = True rsync_src = src_path rsync_tgt = tmp_path do_hardlink = True hardlink_src = tmp_path hardlink_tgt = tgt_path do_checksums = True else: log.debug( 'keeptmp is false, skipping tmpfile creation' ) setstripe_tgt = tgt_path #will be ignored if do_setstripe is False do_rsync = True rsync_src = src_path rsync_tgt = tgt_path do_checksums = True if do_mktmpdir: # Ensure tmpdir exists log.debug( 'create tmpdir {0}'.format( tmpdir ) ) try: os.makedirs( tmpdir ) except ( OSError ) as e: # OSError: [Errno 17] File exists if e.errno != 17: raise SyncError( 'Unable to create tmpdir {0}'.format( tmpdir ), e ) if do_setstripe: # Set stripe to create the new file with the expected stripe information log.debug( 'setstripe (create) {0}'.format( setstripe_tgt ) ) try: setstripeinfo( setstripe_tgt, count=setstripe_stripeinfo.count, size=setstripe_stripeinfo.size ) except ( Run_Cmd_Error ) as e: msg = 'Setstripe failed for {0}'.format( setstripe_tgt ) raise SyncError( msg, e ) if rsync_src.size > env[ 'PYLUTRSYNCMAXSIZE' ]: # DD for large files # TODO - replace dd with ddrescue (for efficient handling of sparse files) cmd = [ '/bin/dd' ] opts = { 'bs': 4194304, 'if': rsync_src, 'of': rsync_tgt, 'status': 'noxfer', } args = None ( output, errput ) = runcmd( cmd, opts, args ) if len( errput.splitlines() ) > 2: #TODO - it is hackish to ignore errors based on line count, better is to # use a dd that supports "status=none" raise UserWarning( "errors during dd of '{0}' -> '{1}': output='{2}' errors='{3}'".format( rsync_src, rsync_tgt, output, errput ) ) if do_rsync: # Do the rsync cmd = [ env[ 'PYLUTRSYNCPATH' ] ] opts = { '--compress-level': 0 
} args = [ '-l', '-A', '-X', '--super', '--inplace', '--specials' ] if synctimes: args.append( '-t' ) if syncperms: args.append( '-p' ) if syncowner: args.append( '-o' ) if syncgroup: args.append( '-g' ) args.extend( [ rsync_src, rsync_tgt ] ) try: ( output, errput ) = runcmd( cmd, opts, args ) except ( Run_Cmd_Error ) as e: raise SyncError( reason=e.reason, origin=e ) if len( errput ) > 0: raise SyncError( reason="errors during sync of '{0}' -> '{1}'".format( rsync_src, rsync_tgt), origin="output='{0}' errors='{1}'".format( output, errput ) ) if do_hardlink: log.debug( 'hardlink {0} <- {1}'.format( hardlink_src, hardlink_tgt ) ) try: os.link( str( hardlink_src ), str( hardlink_tgt ) ) except ( OSError ) as e: raise SyncError( reason='Caught exception for link {0} -> {1}'.format( hardlink_src, hardlink_tgt ), origin=e ) # Delete tmp if keeptmp is False: log.debug( 'unlink tmpfile {0}'.format( tmp_path ) ) try: os.unlink( str( tmp_path ) ) except ( OSError ) as e: # OSError: [Errno 2] No such file or directory if e.errno != 2: raise SyncError( 'Error attempting to delete tmp {0}'.format( tmp_path ), e ) #tmp_path.update() # TODO - replace rmtree with safer alternative # walk dirs backwards and rmdir each #shutil.rmtree( tmpbase ) #this will force delete everything, careful if do_checksums and post_checksums: # Compare checksums to verify target file was written accurately src_checksum = src_path.checksum() tgt_checksum = tgt_path.checksum() if src_checksum != tgt_checksum: reason = 'Checksum mismatch' origin = 'src_file={sf}, tgt_file={tf}, '\ 'src_checksum={sc}, tgt_checksum={tc}'.format( sf=src_path, tf=tgt_path, sc=src_checksum, tc=tgt_checksum ) raise SyncError( reason, origin ) return ( tmp_path, sync_action )
def remove_docker_containers(n):
    """Remove docker containers 1..n whose names follow container_name_format.

    :param n: number of containers to remove (inclusive upper bound)
    """
    for i in range(1, n + 1):
        cname = container_name_format.format(i)
        # Pass an argv list (shell=False) so the container name can never be
        # interpreted by a shell; the original interpolated it into a shell
        # command string with shell=True.
        subprocess.run(["docker", "container", "rm", cname])


if __name__ == "__main__":
    n = int(sys.argv[1])
    # Delete existing raw logs. shell=True is required here for the * glob.
    subprocess.run("rm {}/*.log".format(log_directory), shell=True)
    if sys.argv[2] == '-c':
        # Client mode: start containers, collect their logs, then remove them.
        host = sys.argv[3]
        runcmd(n, client_getcmd)
        # Copy logfiles.
        copy_logs(n, client_logfile_format)
        # Remove Docker containers.
        remove_docker_containers(n)
    elif sys.argv[2] == '-s':
        # Server mode: containers are started and left running.
        runcmd(n, server_getcmd)
    else:
        raise ValueError("The second argument must be either -c or -s.")