def worker_dsh_cp(addr):
    '''do remote copy to node

    addr: address of the target node (resolved to a nodename)
    Skips the local node; files are already in place there
    '''
    nodename = NODESET.get_nodename_from_address(addr)
    if nodename == synctool.param.NODENAME:
        # do not copy to local node; files are already here
        return

    # the fileset already has been added to DSH_CP_CMD_ARR

    # create local copy of DSH_CP_CMD_ARR
    # or parallelism may screw things up
    dsh_cp_cmd_arr = DSH_CP_CMD_ARR[:]
    dsh_cp_cmd_arr.append('%s:%s' % (addr, DESTDIR))

    # build user-facing message; optionally prefixed with the nodename
    msg = 'copy %s to %s' % (FILES_STR, DESTDIR)
    if synctool.lib.DRY_RUN:
        msg += ' (dry run)'
    if synctool.lib.OPT_NODENAME:
        msg = ('%s: ' % nodename) + msg
    stdout(msg)
    unix_out(' '.join(dsh_cp_cmd_arr))

    # only execute the copy when not doing a dry run
    if not synctool.lib.DRY_RUN:
        synctool.lib.run_with_nodename(dsh_cp_cmd_arr, nodename)
def compare(self, _src_path, dest_stat):
    # type: (str, SyncStat) -> bool
    '''see if devs are the same

    Returns True when the device major,minor numbers match the source,
    False when they differ (or the destination can not be stat()ed)
    '''
    if not self.exists:
        return False

    # dest_stat is a SyncStat object and it's useless here
    # I need a real, fresh statbuf that includes st_rdev field
    try:
        dest_stat = os.lstat(self.name)
    except OSError as err:
        error('error checking %s : %s' % (self.name, err.strerror))
        return False

    # Note: mypy triggers false errors here
    # Also, no luck with Union[SyncStat, posix.stat_result]
    # In any case, for VNodeChrDev and VNodeBlkDev,
    # the self.src_stat is of type posix.stat_result
    src_major = os.major(self.src_stat.st_rdev)     # type: ignore
    src_minor = os.minor(self.src_stat.st_rdev)     # type: ignore
    dest_major = os.major(dest_stat.st_rdev)        # type: ignore
    dest_minor = os.minor(dest_stat.st_rdev)        # type: ignore
    if src_major != dest_major or src_minor != dest_minor:
        # device numbers differ; report and signal "needs update"
        stdout('%s should have major,minor %d,%d but has %d,%d' %
               (self.name, src_major, src_minor, dest_major, dest_minor))
        unix_out('# updating major,minor %s' % self.name)
        terse(synctool.lib.TERSE_SYNC, self.name)
        return False

    return True
def _single_overlay_callback(obj, post_dict, updated, *args):
    '''do overlay function for single files

    Returns tuple: (continue treewalk?, was file updated?)
    NOTE(review): the incoming 'updated' parameter is overwritten before
    use when a single file matches — confirm callers rely on pass-through
    '''
    if obj.ov_type == synctool.overlay.OV_TEMPLATE:
        # templates are generated, not overlaid
        return generate_template(obj, post_dict), False

    go_on = True

    if _match_single(obj.dest_path):
        _, updated = _overlay_callback(obj, post_dict, False, *args)
        if not updated:
            stdout('%s is up to date' % obj.dest_path)
            terse(synctool.lib.TERSE_OK, obj.dest_path)
            unix_out('# %s is up to date\n' % obj.dest_path)
        else:
            # register .post on the parent dir, if it has a .post script
            obj.dest_path = os.path.dirname(obj.dest_path)
            obj.dest_stat = synctool.syncstat.SyncStat(obj.dest_path)
            if obj.dest_path in post_dict:
                # args[0] is presumably the dict of changed entries
                # (supplied by the treewalk caller) — TODO confirm
                changed_dict = args[0]
                changed_dict[obj.dest_path] = (obj, post_dict[obj.dest_path])

        # when all --single files have been handled, stop the treewalk
        if not SINGLE_FILES:
            return False, updated

    return go_on, updated
def _compare_checksums(self, src_path):
    # type: (str) -> bool
    '''compare checksum of src_path and dest: self.name
    Return True if the same

    Both files are read in lockstep, IO_SIZE bytes at a time, and the
    loop bails out early as soon as the running MD5 digests diverge
    '''
    try:
        f1 = open(src_path, 'rb')
    except IOError as err:
        error('failed to open %s : %s' % (src_path, err.strerror))
        # return True because we can't fix an error in src_path
        return True

    sum1 = hashlib.md5()
    sum2 = hashlib.md5()

    with f1:
        try:
            f2 = open(self.name, 'rb')
        except IOError as err:
            error('failed to open %s : %s' % (self.name, err.strerror))
            return False

        with f2:
            ended = False
            # stop when either file is exhausted or digests already differ
            while not ended and (sum1.digest() == sum2.digest()):
                try:
                    data1 = f1.read(IO_SIZE)
                except IOError as err:
                    error('failed to read file %s: %s' %
                          (src_path, err.strerror))
                    return False

                if not data1:
                    ended = True
                else:
                    sum1.update(data1)

                try:
                    data2 = f2.read(IO_SIZE)
                except IOError as err:
                    error('failed to read file %s: %s' %
                          (self.name, err.strerror))
                    return False

                if not data2:
                    ended = True
                else:
                    sum2.update(data2)

    if sum1.digest() != sum2.digest():
        # report mismatch; wording depends on dry-run mode
        if synctool.lib.DRY_RUN:
            stdout('%s mismatch (MD5 checksum)' % self.name)
        else:
            stdout('%s updated (MD5 mismatch)' % self.name)

        unix_out('# updating file %s' % self.name)
        terse(synctool.lib.TERSE_SYNC, self.name)
        return False

    return True
def run_command_in_dir(dest_dir, cmd):
    '''change directory to dest_dir, and run the shell command

    Restores the original working directory afterwards.
    In dry-run mode the chdir is skipped entirely because the target
    directory may not have been created for real
    '''
    verbose('  os.chdir(%s)' % dest_dir)
    unix_out('cd %s' % dest_dir)

    cwd = os.getcwd()

    # if dry run, the target directory may not exist yet
    # (mkdir has not been called for real, for a dry run)
    if synctool.lib.DRY_RUN:
        run_command(cmd)

        verbose('  os.chdir(%s)' % cwd)
        unix_out('cd %s' % cwd)
        unix_out('')
        return

    try:
        os.chdir(dest_dir)
    except OSError as err:
        stderr('error changing directory to %s: %s' % (dest_dir,
                                                       err.strerror))
    else:
        run_command(cmd)

        verbose('  os.chdir(%s)' % cwd)
        unix_out('cd %s' % cwd)
        unix_out('')

        # best-effort return to the previous working directory
        try:
            os.chdir(cwd)
        except OSError as err:
            stderr('error changing directory to %s: %s' % (cwd,
                                                           err.strerror))
def ping_node(addr):
    # type: (str) -> None
    '''ping a single node

    Runs the configured ping command and parses its output to decide
    whether the node is up; prints "<node>: up" or "<node>: not responding"
    '''
    node = NODESET.get_nodename_from_address(addr)
    verbose('pinging %s' % node)
    unix_out('%s %s' % (param.PING_CMD, addr))

    packets_received = 0

    # execute ping command and show output with the nodename
    cmd = '%s %s' % (param.PING_CMD, addr)
    cmd_arr = shlex.split(cmd)

    try:
        # stderr is folded into stdout so all ping output is parsed
        f = subprocess.Popen(cmd_arr, shell=False, bufsize=4096,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT).stdout
    except OSError as err:
        error('failed to run command %s: %s' % (cmd_arr[0], err.strerror))
        return

    with f:
        for line in f:
            line = line.strip()

            # argh, we have to parse output here
            #
            # on BSD, ping says something like:
            # "2 packets transmitted, 0 packets received, 100.0% packet loss"
            #
            # on Linux, ping says something like:
            # "2 packets transmitted, 0 received, 100.0% packet loss, " \
            # "time 1001ms"
            arr = line.split()
            if len(arr) > 3 and (arr[1] == 'packets' and
                                 arr[2] == 'transmitted,'):
                try:
                    packets_received = int(arr[3])
                except ValueError:
                    pass

                break

            # some ping implementations say "hostname is alive"
            # or "hostname is unreachable"
            elif len(arr) == 3 and arr[1] == 'is':
                if arr[2] == 'alive':
                    # treat as fully reachable
                    packets_received = 100

                elif arr[2] == 'unreachable':
                    packets_received = -1

    if packets_received > 0:
        print '%s: up' % node
    else:
        print '%s: not responding' % node
def compare(self, src_path, dest_stat):
    '''see if devs are the same

    Returns True when device major,minor match the source device file,
    False otherwise (including when the destination can not be stat()ed)
    '''
    if not self.exists:
        return False

    # dest_stat is a SyncStat object and it's useless here
    # I need a real, fresh statbuf that includes st_rdev field
    try:
        dest_stat = os.lstat(self.name)
    except OSError as err:
        error('error checking %s : %s' % (self.name, err.strerror))
        return False

    src_major = os.major(self.src_stat.st_rdev)
    src_minor = os.minor(self.src_stat.st_rdev)
    dest_major = os.major(dest_stat.st_rdev)
    dest_minor = os.minor(dest_stat.st_rdev)
    if src_major != dest_major or src_minor != dest_minor:
        # device numbers differ; report and signal "needs update"
        stdout('%s should have major,minor %d,%d but has %d,%d' %
               (self.name, src_major, src_minor, dest_major, dest_minor))
        unix_out('# updating major,minor %s' % self.name)
        terse(synctool.lib.TERSE_SYNC, self.name)
        return False

    return True
def _single_purge_callback(obj, pre_dict, post_dict):
    # type: (SyncObject, Dict[str, str], Dict[str, str]) -> Tuple[bool, bool]
    '''do purge function for single files

    Returns tuple: (continue treewalk?, was file updated?)
    '''
    # The same as _single_overlay_callback(), except that
    # purge entries may differ in timestamp. synctool has to report
    # this because pure rsync will as well (which is bloody annoying)
    #
    # For normal synctool overlay/, it's regarded as not important
    # and synctool will not complain about it
    #
    # This actually leaves a final wart; synctool --single may create
    # purge entries that rsync will complain about and sync again
    # Anyway, I don't think it's a big deal, and that's what you get
    # when you mix up synctool and rsync

    go_on = True
    updated = False

    if _match_single(obj.dest_path):
        _, updated = _overlay_callback(obj, pre_dict, post_dict)
        if not updated:
            # for purge entries, "same content" still requires the
            # timestamps to match before reporting "up to date"
            if obj.check_purge_timestamp():
                stdout('%s is up to date' % obj.dest_path)
                terse(synctool.lib.TERSE_OK, obj.dest_path)
                unix_out('# %s is up to date\n' % obj.dest_path)
            # else: pass

        # when all --single files have been handled, stop the treewalk
        if not SINGLE_FILES:
            return False, updated

    return go_on, updated
def compare(self, _src_path, dest_stat):
    # type: (str, SyncStat) -> bool
    '''see if devs are the same

    Returns True when device major,minor match the source device file
    '''
    if not self.exists:
        return False

    # dest_stat is a SyncStat object and it's useless here
    # I need a real, fresh statbuf that includes st_rdev field
    try:
        dest_stat = os.lstat(self.name)
    except OSError as err:
        error('error checking %s : %s' % (self.name, err.strerror))
        return False

    # self.src_stat is a posix.stat_result here, hence the ignores
    src_major = os.major(self.src_stat.st_rdev)     # type: ignore
    src_minor = os.minor(self.src_stat.st_rdev)     # type: ignore
    dest_major = os.major(dest_stat.st_rdev)        # type: ignore
    dest_minor = os.minor(dest_stat.st_rdev)        # type: ignore
    if src_major != dest_major or src_minor != dest_minor:
        stdout('%s should have major,minor %d,%d but has %d,%d' %
               (self.name, src_major, src_minor, dest_major, dest_minor))
        unix_out('# updating major,minor %s' % self.name)
        terse(synctool.lib.TERSE_SYNC, self.name)
        return False

    return True
def run_local_synctool():
    '''run synctool on the master node itself'''

    # assemble the synctool command plus any pass-through arguments
    command = shlex.split(synctool.param.SYNCTOOL_CMD)
    command += PASS_ARGS

    verbose('running synctool on node %s' % synctool.param.NODENAME)
    unix_out(' '.join(command))

    synctool.lib.run_with_nodename(command, synctool.param.NODENAME)
def _remote_stat(up): '''Get stat info of the remote object Returns array of RemoteStat data, or None on error ''' # use ssh connection multiplexing (if possible) cmd_arr = shlex.split(synctool.param.SSH_CMD) use_multiplex = synctool.multiplex.use_mux(up.node) if use_multiplex: synctool.multiplex.ssh_args(cmd_arr, up.node) list_cmd = os.path.join(synctool.param.ROOTDIR, 'sbin', 'synctool_list.py') cmd_arr.extend(['--', up.address, list_cmd, up.filename]) verbose('running synctool_list %s:%s' % (up.node, up.filename)) unix_out(' '.join(cmd_arr)) try: proc = subprocess.Popen(cmd_arr, shell=False, bufsize=4096, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as err: error('failed to run command %s: %s' % (cmd_arr[0], err.strerror)) return None out, err = proc.communicate() if proc.returncode == 255: error('ssh connection to %s failed' % up.node) return None elif proc.returncode == 127: error('remote list command failed') return None # parse synctool_list output into array of RemoteStat info data = [] for line in out.split('\n'): if not line: continue arr = line.split() if arr[0] == 'error:': # relay error message error(' '.join(arr[1:])) return None try: remote_stat = RemoteStat(arr) except ValueError: error('unexpected output from synctool_list %s:%s' % (up.node, up.filename)) return None verbose('remote: %r' % remote_stat) data.append(remote_stat) return data
def worker_synctool(addr):
    '''run rsync of ROOTDIR to the nodes and ssh+synctool, in parallel

    addr: address of the target node
    On the local node this degrades to run_local_synctool()
    '''
    nodename = NODESET.get_nodename_from_address(addr)

    if nodename == synctool.param.NODENAME:
        run_local_synctool()
        return

    # rsync ROOTDIR/dirs/ to the node
    # if "it wants it"
    if not (OPT_SKIP_RSYNC or nodename in synctool.param.NO_RSYNC):
        verbose('running rsync $SYNCTOOL/ to node %s' % nodename)
        unix_out('%s %s %s:%s/' % (synctool.param.RSYNC_CMD,
                                   synctool.param.ROOTDIR, addr,
                                   synctool.param.ROOTDIR))

        # make rsync filter to include the correct dirs
        tmp_filename = rsync_include_filter(nodename)

        cmd_arr = shlex.split(synctool.param.RSYNC_CMD)
        cmd_arr.append('--filter=. %s' % tmp_filename)
        cmd_arr.append('--')
        cmd_arr.append('%s/' % synctool.param.ROOTDIR)
        cmd_arr.append('%s:%s/' % (addr, synctool.param.ROOTDIR))

        # double check the rsync destination
        # our filters are like playing with fire
        if not synctool.param.ROOTDIR or (
            synctool.param.ROOTDIR == os.sep):
            stderr('cowardly refusing to rsync with rootdir == %s' %
                   synctool.param.ROOTDIR)
            sys.exit(-1)

        synctool.lib.run_with_nodename(cmd_arr, nodename)

        # delete temp file
        try:
            os.unlink(tmp_filename)
        except OSError:
            # silently ignore unlink error
            pass

    # run 'ssh node synctool_cmd'
    cmd_arr = shlex.split(synctool.param.SSH_CMD)
    cmd_arr.append('--')
    cmd_arr.append(addr)
    cmd_arr.extend(shlex.split(synctool.param.SYNCTOOL_CMD))
    cmd_arr.append('--nodename=%s' % nodename)
    cmd_arr.extend(PASS_ARGS)

    verbose('running synctool on node %s' % nodename)
    unix_out(' '.join(cmd_arr))
    synctool.lib.run_with_nodename(cmd_arr, nodename)
def _exec_diff(src, dest):
    '''execute diff_cmd to display diff between dest and src'''

    diff_cmd = synctool.param.DIFF_CMD
    verbose('%s %s %s' % (diff_cmd, dest, prettypath(src)))
    unix_out('%s %s %s' % (diff_cmd, dest, src))

    # build argument vector: diff command followed by both paths
    arr = shlex.split(diff_cmd)
    arr.extend([dest, src])

    synctool.lib.exec_command(arr)
def _run_rsync_purge(cmd_arr):
    '''run rsync for purging

    cmd_arr holds already prepared rsync command + arguments
    cmd_arr[-1] is expected to be the destination path
    Returns: None
    '''
    unix_out(' '.join(cmd_arr))

    sys.stdout.flush()
    sys.stderr.flush()
    try:
        # run rsync
        proc = subprocess.Popen(cmd_arr, shell=False, bufsize=4096,
                                stdout=subprocess.PIPE)
    except OSError as err:
        error('failed to run command %s: %s' % (cmd_arr[0], err.strerror))
        return

    out, _ = proc.communicate()

    if synctool.lib.VERBOSE:
        print out

    # parse rsync itemized output: "<code> <filename>" per line
    out = out.split('\n')
    for line in out:
        line = line.strip()
        if not line:
            continue

        code, filename = line.split(' ', 1)

        if code[:6] == 'ERROR:' or code[:8] == 'WARNING:':
            # output rsync errors and warnings
            stderr(line)
            continue

        if filename == './':
            # rsync has a habit of displaying ugly "./" path
            # cmd_arr[-1] is the destination path
            path = cmd_arr[-1]
        else:
            # cmd_arr[-1] is the destination path
            path = os.path.join(cmd_arr[-1], filename)

        if code[0] == '*':
            # rsync has a message for us
            # most likely "deleting"
            msg = code[1:]
            msg = msg.strip()
            stdout('%s %s (purge)' % (msg, prettypath(path)))
        else:
            stdout('%s mismatch (purge)' % prettypath(path))
def _remote_isdir(up):
    '''See if the remote rsync source is a directory or a file
    Parameter 'up' is an instance of UploadFile
    Returns: tuple of booleans: (exists, isdir)'''

    # keep only the rsync binary path; then ask it to list the source
    cmd_arr = shlex.split(synctool.param.RSYNC_CMD)[:1]
    cmd_arr.append('--list-only')
    cmd_arr.append(up.address + ':' + up.filename)

    verbose('running rsync --list-only %s:%s' % (up.node, up.filename))
    unix_out(' '.join(cmd_arr))

    try:
        proc = subprocess.Popen(cmd_arr, shell=False, bufsize=4096,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    except OSError as err:
        stderr('failed to run command %s: %s' % (cmd_arr[0], err.strerror))
        return False, False

    out, err = proc.communicate()

    if proc.returncode != 0:
        # 255: ssh transport failure; 23: rsync partial transfer /
        # file not found
        if proc.returncode == 255:
            stderr('failed to connect to %s' % up.node)
        elif proc.returncode == 23:
            stderr('error: no such file or directory')
        else:
            stderr('failed rsync %s:%s' % (up.node, up.filename))

        return False, False

    # output should be an 'ls -l' like line, with first a mode string
    for line in out.split('\n'):
        arr = line.split()
        mode = arr[0]
        if len(mode) == 10:
            # crude test
            if mode[0] == 'd':
                # it's a directory
                verbose('remote rsync source is a directory')
                return True, True

            if mode[0] in '-lpcbs':
                # accept it as a file entry
                verbose('remote rsync source is a file entry')
                return True, False

        # some other line on stdout; just ignore it

    # got no good output
    stderr('failed to parse rsync --list-only output')
    return False, False
def _run_rsync_purge(cmd_arr):
    # type: (List[str]) -> None
    '''run rsync for purging

    cmd_arr holds already prepared rsync command + arguments
    cmd_arr[-1] is expected to be the destination path
    '''
    unix_out(' '.join(cmd_arr))

    sys.stdout.flush()
    sys.stderr.flush()
    try:
        # run rsync
        proc = subprocess.Popen(cmd_arr, shell=False, bufsize=4096,
                                stdout=subprocess.PIPE)
    except OSError as err:
        error('failed to run command %s: %s' % (cmd_arr[0], err.strerror))
        return

    out, _ = proc.communicate()

    if synctool.lib.VERBOSE:
        print out

    # parse rsync itemized output: "<code> <filename>" per line
    out = out.split('\n')
    for line in out:
        line = line.strip()
        if not line:
            continue

        code, filename = line.split(' ', 1)

        if code[:6] == 'ERROR:' or code[:8] == 'WARNING:':
            # output rsync errors and warnings
            stderr(line)
            continue

        if filename == './':
            # rsync has a habit of displaying ugly "./" path
            # cmd_arr[-1] is the destination path
            path = cmd_arr[-1]
        else:
            # cmd_arr[-1] is the destination path
            path = os.path.join(cmd_arr[-1], filename)

        if code[0] == '*':
            # rsync has a message for us
            # most likely "deleting"
            msg = code[1:]
            msg = msg.strip()
            stdout('%s %s (purge)' % (msg, prettypath(path)))
        else:
            stdout('%s mismatch (purge)' % prettypath(path))
def create(self):
    '''make a fifo

    Creates the named pipe with permission bits from the source entry
    '''
    verbose(dryrun_msg('  os.mkfifo(%s)' % self.name))
    unix_out('mkfifo %s' % self.name)
    terse(synctool.lib.TERSE_NEW, self.name)
    if not synctool.lib.DRY_RUN:
        try:
            # 0777 mask: keep only the rwx permission bits
            os.mkfifo(self.name, self.stat.mode & 0777)
        except OSError as err:
            error('failed to create fifo %s : %s' % (self.name,
                                                     err.strerror))
            terse(TERSE_FAIL, 'fifo %s' % self.name)
def set_permissions(self):
    '''set access permission bits equal to source

    07777 mask keeps setuid/setgid/sticky plus the rwx bits
    '''
    verbose(dryrun_msg('  os.chmod(%s, %04o)' %
                       (self.name, self.stat.mode & 07777)))
    unix_out('chmod 0%o %s' % (self.stat.mode & 07777, self.name))
    if not synctool.lib.DRY_RUN:
        try:
            os.chmod(self.name, self.stat.mode & 07777)
        except OSError as err:
            error('failed to chmod %04o %s : %s' %
                  (self.stat.mode & 07777, self.name, err.strerror))
            terse(synctool.lib.TERSE_FAIL, 'mode %s' % self.name)
def detect_ssh():
    # type: () -> int
    '''detect ssh version
    Set global SSH_VERSION to 2-digit int number:
    eg. version "5.6p1" -> SSH_VERSION = 56
    Returns: SSH_VERSION
    This routine only works for OpenSSH; otherwise return -1
    '''
    global SSH_VERSION

    # cached result from an earlier call
    if SSH_VERSION is not None:
        return SSH_VERSION

    cmd_arr = shlex.split(synctool.param.SSH_CMD)
    # only use first item: the path to the ssh command
    cmd_arr = cmd_arr[:1]
    cmd_arr.append('-V')
    unix_out(' '.join(cmd_arr))
    try:
        # OpenSSH may print version information on stderr
        proc = subprocess.Popen(cmd_arr, shell=False,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
    except OSError as err:
        error('failed to execute %s: %s' % (cmd_arr[0], err.strerror))
        SSH_VERSION = -1
        return SSH_VERSION

    # stderr was redirected to stdout
    data, _ = proc.communicate()
    if not data:
        SSH_VERSION = -1
        return SSH_VERSION

    data = data.strip()
    verbose('ssh version string: ' + data)

    # data should be a single line matching "OpenSSH_... SSL ... date\n"
    m = MATCH_SSH_VERSION.match(data)
    if not m:
        SSH_VERSION = -1
        return SSH_VERSION

    # combine major and minor version digits into one 2-digit number
    groups = m.groups()
    SSH_VERSION = int(groups[0]) * 10 + int(groups[1])
    verbose('SSH_VERSION: %d' % SSH_VERSION)
    return SSH_VERSION
def mkdir_basepath(self):
    '''call mkdir -p to create leading path'''

    # nothing to create during a dry run
    if synctool.lib.DRY_RUN:
        return

    parent_dir = os.path.dirname(self.name)

    # only announce the mkdir when the user asked for output
    if synctool.lib.VERBOSE or synctool.lib.UNIX_CMD:
        verbose('making directory %s' % prettypath(parent_dir))

    unix_out('mkdir -p %s' % parent_dir)
    synctool.lib.mkdir_p(parent_dir)
def compare(self, src_path, dest_stat):
    '''see if files are the same
    Return True if the same'''

    # sizes equal: fall through to the (more expensive) checksum test
    if self.stat.size == dest_stat.size:
        return self._compare_checksums(src_path)

    # size differs: definitely not the same file
    if synctool.lib.DRY_RUN:
        stdout('%s mismatch (file size)' % self.name)
    else:
        stdout('%s updated (file size mismatch)' % self.name)

    terse(synctool.lib.TERSE_SYNC, self.name)
    unix_out('# updating file %s' % self.name)
    return False
def create(self):
    '''create symbolic link

    Makes self.name a symlink pointing at self.oldpath
    '''
    verbose(dryrun_msg('  os.symlink(%s, %s)' % (self.oldpath,
                                                 self.name)))
    unix_out('ln -s %s %s' % (self.oldpath, self.name))
    terse(synctool.lib.TERSE_LINK, self.name)
    if not synctool.lib.DRY_RUN:
        try:
            os.symlink(self.oldpath, self.name)
        except OSError as err:
            error('failed to create symlink %s -> %s : %s' %
                  (self.name, self.oldpath, err.strerror))
            terse(synctool.lib.TERSE_FAIL, 'link %s' % self.name)
def create(self):
    # type: () -> None
    '''make a fifo

    Creates the named pipe with permission bits from the source entry
    '''
    verbose(dryrun_msg('  os.mkfifo(%s)' % self.name))
    unix_out('mkfifo %s' % self.name)
    terse(synctool.lib.TERSE_NEW, self.name)
    if not synctool.lib.DRY_RUN:
        try:
            # 0777 mask: keep only the rwx permission bits
            os.mkfifo(self.name, self.stat.mode & 0777)
        except OSError as err:
            error('failed to create fifo %s : %s' % (self.name,
                                                     err.strerror))
            terse(TERSE_FAIL, 'fifo %s' % self.name)
def create(self):
    '''create symbolic link

    Makes self.name a symlink pointing at self.oldpath
    '''
    verbose(dryrun_msg('  os.symlink(%s, %s)' % (self.oldpath,
                                                 self.name)))
    unix_out('ln -s %s %s' % (self.oldpath, self.name))
    terse(synctool.lib.TERSE_LINK, self.name)
    if not synctool.lib.DRY_RUN:
        try:
            os.symlink(self.oldpath, self.name)
        except OSError as err:
            error('failed to create symlink %s -> %s : %s' %
                  (self.name, self.oldpath, err.strerror))
            terse(TERSE_FAIL, 'link %s' % self.name)
def purge_files():
    '''run the purge function

    Walks the purge/ group dirs that apply to this node, collects
    (source, destination) directory pairs, and rsyncs each pair
    '''
    paths = []
    purge_groups = os.listdir(synctool.param.PURGE_DIR)

    # find the source purge paths that we need to copy
    # scan only the group dirs that apply
    for g in synctool.param.MY_GROUPS:
        if g in purge_groups:
            purge_root = os.path.join(synctool.param.PURGE_DIR, g)
            if not os.path.isdir(purge_root):
                continue

            for path, subdirs, files in os.walk(purge_root):
                # rsync only purge dirs that actually contain files
                # otherwise rsync --delete would wreak havoc
                if not files:
                    continue

                if path == purge_root:
                    # root contains files; guard against user mistakes
                    # rsync --delete would destroy the whole filesystem
                    stderr('cowardly refusing to purge the root '
                           'directory')
                    stderr('please remove any files directly under %s/' %
                           prettypath(purge_root))
                    return

                # paths has (src_dir, dest_dir)
                paths.append((path, path[len(purge_root):]))

                # do not recurse into this dir any deeper
                del subdirs[:]

    cmd_rsync, opts_string = _make_rsync_purge_cmd()

    # call rsync to copy the purge dirs
    for src, dest in paths:
        # trailing slash on source path is important for rsync
        src += os.sep
        dest += os.sep
        cmd_arr = cmd_rsync[:]
        cmd_arr.append(src)
        cmd_arr.append(dest)
        verbose('running rsync%s%s %s' % (opts_string, prettypath(src),
                                          dest))
        unix_out(' '.join(cmd_arr))
        _run_rsync_purge(cmd_arr)
def move_saved(self):
    '''move existing entry to .saved'''

    saved_name = '%s.saved' % self.name

    verbose(dryrun_msg('saving %s as %s.saved' % (self.name, self.name)))
    unix_out('mv %s %s.saved' % (self.name, self.name))

    if synctool.lib.DRY_RUN:
        return

    verbose('  os.rename(%s, %s.saved)' % (self.name, self.name))
    try:
        os.rename(self.name, saved_name)
    except OSError as err:
        # report failure, but do not raise; caller continues
        stderr('failed to save %s as %s.saved : %s' %
               (self.name, self.name, err.strerror))
        terse(synctool.lib.TERSE_FAIL, 'save %s.saved' % self.name)
def compare(self, src_path, dest_stat):
    '''see if files are the same
    Return True if the same
    '''
    # equal sizes: decide by checksum comparison
    if self.stat.size == dest_stat.size:
        return self._compare_checksums(src_path)

    # unequal size means the files can not be identical
    msg = ('%s mismatch (file size)' if synctool.lib.DRY_RUN
           else '%s updated (file size mismatch)')
    stdout(msg % self.name)
    terse(synctool.lib.TERSE_SYNC, self.name)
    unix_out('# updating file %s' % self.name)
    return False
def set_owner(self):
    '''set ownership equal to source

    Applies both uid and gid from the source entry
    '''
    verbose(dryrun_msg('  os.chown(%s, %d, %d)' %
                       (self.name, self.stat.uid, self.stat.gid)))
    unix_out('chown %s.%s %s' % (self.stat.ascii_uid(),
                                 self.stat.ascii_gid(), self.name))
    if not synctool.lib.DRY_RUN:
        try:
            os.chown(self.name, self.stat.uid, self.stat.gid)
        except OSError as err:
            error('failed to chown %s.%s %s : %s' %
                  (self.stat.ascii_uid(), self.stat.ascii_gid(),
                   self.name, err.strerror))
            terse(synctool.lib.TERSE_FAIL, 'owner %s' % self.name)
def set_owner(self):
    # type: () -> None
    '''set ownership equal to source

    Applies both uid and gid from the source entry
    '''
    verbose(dryrun_msg('  os.chown(%s, %d, %d)' %
                       (self.name, self.stat.uid, self.stat.gid)))
    unix_out('chown %s.%s %s' % (self.stat.ascii_uid(),
                                 self.stat.ascii_gid(), self.name))
    if not synctool.lib.DRY_RUN:
        try:
            os.chown(self.name, self.stat.uid, self.stat.gid)
        except OSError as err:
            error('failed to chown %s.%s %s : %s' %
                  (self.stat.ascii_uid(), self.stat.ascii_gid(),
                   self.name, err.strerror))
            terse(TERSE_FAIL, 'owner %s' % self.name)
def worker_pkg(addr):
    '''runs ssh + synctool-pkg to the nodes in parallel'''

    nodename = NODESET.get_nodename_from_address(addr)

    # assemble: ssh -- <addr> <pkg command> <pass-through args>
    ssh_cmd_arr = shlex.split(synctool.param.SSH_CMD)
    ssh_cmd_arr += ['--', addr]
    ssh_cmd_arr += shlex.split(synctool.param.PKG_CMD)
    ssh_cmd_arr += PASS_ARGS

    verbose('running synctool-pkg on node %s' % nodename)
    unix_out(' '.join(ssh_cmd_arr))
    synctool.lib.run_with_nodename(ssh_cmd_arr, nodename)
def create(self):
    '''copy file

    Copies the source file to the destination; reports a new entry
    when the destination did not exist yet
    '''
    if not self.exists:
        terse(synctool.lib.TERSE_NEW, self.name)

    verbose(dryrun_msg('  copy %s %s' % (self.src_path, self.name)))
    unix_out('cp %s %s' % (self.src_path, self.name))
    if not synctool.lib.DRY_RUN:
        try:
            # copy file
            shutil.copy(self.src_path, self.name)
        except (OSError, IOError) as err:
            error('failed to copy %s to %s: %s' %
                  (prettypath(self.src_path), self.name, err.strerror))
            terse(TERSE_FAIL, self.name)
def create(self):
    # type: () -> None
    '''copy file

    Copies the source file to the destination; reports a new entry
    when the destination did not exist yet
    '''
    if not self.exists:
        terse(synctool.lib.TERSE_NEW, self.name)

    verbose(dryrun_msg('  copy %s %s' % (self.src_path, self.name)))
    unix_out('cp %s %s' % (self.src_path, self.name))
    if not synctool.lib.DRY_RUN:
        try:
            # copy file
            shutil.copy(self.src_path, self.name)
        except (OSError, IOError) as err:
            error('failed to copy %s to %s: %s' %
                  (prettypath(self.src_path), self.name, err.strerror))
            terse(TERSE_FAIL, self.name)
def set_times(self): '''set access and modification times''' # only mtime is shown verbose(dryrun_msg(' os.utime(%s, %s)' % (self.name, print_timestamp(self.stat.mtime)))) # print timestamp in other format dt = datetime.datetime.fromtimestamp(self.stat.mtime) time_str = dt.strftime('%Y%m%d%H%M.%S') unix_out('touch -t %s %s' % (time_str, self.name)) if not synctool.lib.DRY_RUN: try: os.utime(self.name, (self.stat.atime, self.stat.mtime)) except OSError as err: error('failed to set utime on %s : %s' % (self.name, err.strerror)) terse(TERSE_FAIL, 'utime %s' % self.name)
def check_purge_timestamp(self): '''check timestamp between src and dest Returns True if same, False if not ''' # This is only used for purge/ # check() has already determined that the files are the same # Now only check the timestamp ... # FIXME have SyncStat time fields # Note that SyncStat objects do not know the timestamps; # they are not cached only to save memory # So now we have to os.stat() again to get the times; it is # not a big problem because this func is used for purge_single only # src_path is under $purge/ # dest_path is in the filesystem try: src_stat = os.lstat(self.src_path) except OSError as err: error('stat(%s) failed: %s' % (self.src_path, err.strerror)) return False try: dest_stat = os.lstat(self.dest_path) except OSError as err: error('stat(%s) failed: %s' % (self.dest_path, err.strerror)) return False # FIXME set_times() should not be called for symlinks if src_stat.st_mtime > dest_stat.st_mtime: stdout('%s mismatch (only timestamp)' % self.dest_path) terse(synctool.lib.TERSE_WARNING, '%s (only timestamp)' % self.dest_path) verbose(dryrun_msg(' os.utime(%s, %s)' '' % (self.dest_path, time.ctime(src_stat.st_mtime)))) unix_out('touch -r %s %s' % (self.src_path, self.dest_path)) vnode = self.vnode_obj() vnode.set_times(src_stat.st_atime, src_stat.st_mtime) return False return True
def set_permissions(self): '''set permissions of symlink (if possible)''' # check if this platform supports lchmod() # Linux does not have lchmod: its symlinks are always mode 0777 if not hasattr(os, 'lchmod'): return verbose(dryrun_msg(' os.lchmod(%s, %04o)' % (self.name, self.stat.mode & 07777))) unix_out('lchmod 0%o %s' % (self.stat.mode & 07777, self.name)) if not synctool.lib.DRY_RUN: try: os.lchmod(self.name, self.stat.mode & 07777) except OSError as err: error('failed to lchmod %04o %s : %s' % (self.stat.mode & 07777, self.name, err.strerror)) terse(synctool.lib.TERSE_FAIL, 'mode %s' % self.name)
def set_times(self):
    # type: () -> None
    '''set access and modification times

    Uses atime/mtime from the source entry's stat info
    '''
    # only mtime is shown
    verbose(dryrun_msg('  os.utime(%s, %s)' %
                       (self.name, print_timestamp(self.stat.mtime))))
    # print timestamp in other format
    dt = datetime.datetime.fromtimestamp(self.stat.mtime)
    time_str = dt.strftime('%Y%m%d%H%M.%S')
    unix_out('touch -t %s %s' % (time_str, self.name))
    if not synctool.lib.DRY_RUN:
        try:
            os.utime(self.name, (self.stat.atime, self.stat.mtime))
        except OSError as err:
            error('failed to set utime on %s : %s' % (self.name,
                                                      err.strerror))
            terse(TERSE_FAIL, 'utime %s' % self.name)
def create(self):
    '''make a block device file

    Recreates the device node with major,minor taken from the source
    '''
    major = os.major(self.src_stat.st_rdev)
    minor = os.minor(self.src_stat.st_rdev)
    verbose(dryrun_msg('  os.mknod(%s, BLK %d,%d)' %
                       (self.name, major, minor)))
    unix_out('mknod %s b %d %d' % (self.name, major, minor))
    terse(synctool.lib.TERSE_NEW, self.name)
    if not synctool.lib.DRY_RUN:
        try:
            # 0777 mask keeps the rwx bits; S_IFBLK marks a block device
            os.mknod(self.name,
                     (self.src_stat.st_mode & 0777) | stat.S_IFBLK,
                     os.makedev(major, minor))
        except OSError as err:
            error('failed to create device %s : %s' % (self.name,
                                                       err.strerror))
            terse(TERSE_FAIL, 'device %s' % self.name)
def check_purge_timestamp(self): '''check timestamp between src and dest Returns True if same, False if not ''' # This is only used for purge/ # check() has already determined that the files are the same # Now only check the timestamp ... # Note that SyncStat objects do not know the timestamps; # they are not cached only to save memory # So now we have to os.stat() again to get the times; it is # not a big problem because this func is used for purge_single only # src_path is under $purge/ # dest_path is in the filesystem try: src_stat = os.lstat(self.src_path) except OSError as err: error('stat(%s) failed: %s' % (self.src_path, err.strerror)) return False try: dest_stat = os.lstat(self.dest_path) except OSError as err: error('stat(%s) failed: %s' % (self.dest_path, err.strerror)) return False if src_stat.st_mtime > dest_stat.st_mtime: stdout('%s mismatch (only timestamp)' % self.dest_path) terse(synctool.lib.TERSE_WARNING, '%s (only timestamp)' % self.dest_path) verbose( dryrun_msg(' os.utime(%s, %s)' '' % (self.dest_path, time.ctime(src_stat.st_mtime)))) unix_out('touch -r %s %s' % (self.src_path, self.dest_path)) vnode = self.vnode_obj() vnode.set_times(src_stat.st_atime, src_stat.st_mtime) return False return True
def set_owner(self):
    '''set ownership of symlink

    No-op on platforms without os.lchown()
    '''
    if not hasattr(os, 'lchown'):
        # you never know
        return

    verbose(dryrun_msg('  os.lchown(%s, %d, %d)' %
                       (self.name, self.stat.uid, self.stat.gid)))
    unix_out('lchown %s.%s %s' % (self.stat.ascii_uid(),
                                  self.stat.ascii_gid(), self.name))
    if not synctool.lib.DRY_RUN:
        try:
            os.lchown(self.name, self.stat.uid, self.stat.gid)
        except OSError as err:
            error('failed to lchown %s.%s %s : %s' %
                  (self.stat.ascii_uid(), self.stat.ascii_gid(),
                   self.name, err.strerror))
            terse(synctool.lib.TERSE_FAIL, 'owner %s' % self.name)
def create(self):
    # type: () -> None
    '''make a block device file

    Recreates the device node with major,minor taken from the source
    '''
    # self.src_stat is a posix.stat_result here, hence the ignores
    major = os.major(self.src_stat.st_rdev)     # type: ignore
    minor = os.minor(self.src_stat.st_rdev)     # type: ignore
    verbose(dryrun_msg('  os.mknod(%s, BLK %d,%d)' %
                       (self.name, major, minor)))
    unix_out('mknod %s b %d %d' % (self.name, major, minor))
    terse(synctool.lib.TERSE_NEW, self.name)
    if not synctool.lib.DRY_RUN:
        try:
            # 0777 mask keeps the rwx bits; S_IFBLK marks a block device
            os.mknod(self.name,
                     (self.src_stat.st_mode & 0777) | stat.S_IFBLK,
                     os.makedev(major, minor))
        except OSError as err:
            error('failed to create device %s : %s' % (self.name,
                                                       err.strerror))
            terse(TERSE_FAIL, 'device %s' % self.name)
def move_saved(self):
    '''move existing entry to .saved'''

    # never save a file that already is a .saved copy
    _, extension = os.path.splitext(self.name)
    if extension == '.saved':
        return

    verbose(dryrun_msg('saving %s as %s.saved' % (self.name, self.name)))
    unix_out('mv %s %s.saved' % (self.name, self.name))

    if synctool.lib.DRY_RUN:
        return

    verbose('  os.rename(%s, %s.saved)' % (self.name, self.name))
    try:
        os.rename(self.name, '%s.saved' % self.name)
    except OSError as err:
        error('failed to save %s as %s.saved : %s' %
              (self.name, self.name, err.strerror))
        terse(synctool.lib.TERSE_FAIL, 'save %s.saved' % self.name)
def create(self):
    # type: () -> None
    '''create directory'''

    if synctool.lib.path_exists(self.name):
        # it can happen that the dir already exists
        # due to recursion in visit() + VNode.mkdir_basepath()
        # So this is double checked for dirs that did not exist
        return

    verbose(dryrun_msg('  os.mkdir(%s)' % self.name))
    unix_out('mkdir %s' % self.name)
    terse(synctool.lib.TERSE_MKDIR, self.name)
    if not synctool.lib.DRY_RUN:
        try:
            # 07777 mask keeps setuid/setgid/sticky plus rwx bits
            os.mkdir(self.name, self.stat.mode & 07777)
        except OSError as err:
            error('failed to make directory %s : %s' % (self.name,
                                                        err.strerror))
            terse(TERSE_FAIL, 'mkdir %s' % self.name)
def set_owner(self):
    # type: () -> None
    '''set ownership of symlink

    No-op on platforms without os.lchown()
    '''
    if not hasattr(os, 'lchown'):
        # you never know
        return

    verbose(dryrun_msg('  os.lchown(%s, %d, %d)' %
                       (self.name, self.stat.uid, self.stat.gid)))
    unix_out('lchown %s.%s %s' % (self.stat.ascii_uid(),
                                  self.stat.ascii_gid(), self.name))
    if not synctool.lib.DRY_RUN:
        try:
            os.lchown(self.name, self.stat.uid, self.stat.gid)
        except OSError as err:
            error('failed to lchown %s.%s %s : %s' %
                  (self.stat.ascii_uid(), self.stat.ascii_gid(),
                   self.name, err.strerror))
            terse(TERSE_FAIL, 'owner %s' % self.name)
def create(self): '''make a character device file''' major = os.major(self.src_stat.st_rdev) minor = os.minor(self.src_stat.st_rdev) verbose( dryrun_msg(' os.mknod(%s, CHR %d,%d)' % (self.name, major, minor))) unix_out('mknod %s c %d %d' % (self.name, major, minor)) terse(synctool.lib.TERSE_NEW, self.name) if not synctool.lib.DRY_RUN: try: os.mknod(self.name, (self.src_stat.st_mode & 0777) | stat.S_IFCHR, os.makedev(major, minor)) except OSError as err: error('failed to create device %s : %s' % (self.name, err.strerror)) terse(synctool.lib.TERSE_FAIL, 'device %s' % self.name)
def harddelete(self):
    # type: () -> None
    '''remove the directory; falls back to move_saved() when not empty'''

    not_str = 'not ' if synctool.lib.DRY_RUN else ''
    stdout('%sremoving %s' % (not_str, self.name + os.sep))
    unix_out('rmdir %s' % self.name)
    terse(synctool.lib.TERSE_DELETE, self.name + os.sep)

    if synctool.lib.DRY_RUN:
        return

    verbose(' os.rmdir(%s)' % self.name)
    try:
        os.rmdir(self.name)
    except OSError:
        # probably directory not empty
        # refuse to delete dir, just move it aside
        verbose('refusing to delete directory %s' % self.name)
        self.move_saved()
def harddelete(self):
    '''unlink the existing entry from the filesystem'''

    not_str = 'not ' if synctool.lib.DRY_RUN else ''
    stdout('%sdeleting %s' % (not_str, self.name))
    unix_out('rm %s' % self.name)
    terse(synctool.lib.TERSE_DELETE, self.name)

    if synctool.lib.DRY_RUN:
        return

    verbose(' os.unlink(%s)' % self.name)
    try:
        os.unlink(self.name)
    except OSError as err:
        error('failed to delete %s : %s' % (self.name, err.strerror))
        terse(synctool.lib.TERSE_FAIL, 'delete %s' % self.name)
    else:
        # only log when the unlink actually succeeded
        log('deleted %s' % self.name)
def _single_overlay_callback(obj, pre_dict, post_dict):
    '''overlay callback used when operating on single files

    Returns a tuple: (keep walking the tree, destination was updated)
    '''

    if not SINGLE_FILES:
        # proceed quickly
        return True, False

    if obj.ov_type == synctool.overlay.OV_TEMPLATE:
        return generate_template(obj, post_dict), False

    if not _match_single(obj.dest_path):
        return True, False

    _, changed = _overlay_callback(obj, pre_dict, post_dict)
    if not changed:
        stdout('%s is up to date' % obj.dest_path)
        terse(synctool.lib.TERSE_OK, obj.dest_path)
        unix_out('# %s is up to date\n' % obj.dest_path)
    return True, changed
def create(self):
    '''copy the source file to its destination'''

    # only announce "new" when the destination did not exist yet
    if not self.exists:
        terse(synctool.lib.TERSE_NEW, self.name)

    verbose(dryrun_msg(' copy %s %s' % (self.src_path, self.name)))
    unix_out('cp %s %s' % (self.src_path, self.name))

    if synctool.lib.DRY_RUN:
        return

    try:
        # copy file
        shutil.copy(self.src_path, self.name)
        if synctool.param.SYNC_TIMES:
            # also carry over mode bits and timestamps
            shutil.copystat(self.src_path, self.name)
    except IOError as err:
        error('failed to copy %s to %s: %s' %
              (prettypath(self.src_path), self.name, err.strerror))
        terse(synctool.lib.TERSE_FAIL, self.name)
def worker_dsh_cp(addr):
    # type: (str) -> None
    '''perform the remote copy to a single node (runs as parallel worker)'''

    nodename = NODESET.get_nodename_from_address(addr)
    if nodename == param.NODENAME:
        # do not copy to local node; files are already here
        return

    # the fileset already has been added to DSH_CP_CMD_ARR

    # work on a private copy of DSH_CP_CMD_ARR;
    # parallel workers must not mutate the shared list
    cmd_arr = DSH_CP_CMD_ARR[:]

    # build the ssh command, enabling connection multiplexing if possible
    ssh_cmd_arr = shlex.split(param.SSH_CMD)
    if synctool.multiplex.use_mux(nodename):
        synctool.multiplex.ssh_args(ssh_cmd_arr, nodename)
    cmd_arr.extend(['-e', ' '.join(ssh_cmd_arr)])
    cmd_arr.append('--')
    cmd_arr.extend(SOURCE_LIST)
    cmd_arr.append('%s:%s' % (addr, DESTDIR))

    msg = 'copy %s to %s' % (FILES_STR, DESTDIR)
    if synctool.lib.DRY_RUN:
        msg += ' (dry run)'
    if synctool.lib.OPT_NODENAME:
        msg = ('%s: ' % nodename) + msg
    stdout(msg)

    if synctool.lib.DRY_RUN:
        # just show what would have been run
        unix_out(' '.join(cmd_arr) + ' # dry run')
    else:
        synctool.lib.run_with_nodename(cmd_arr, nodename)
def main():
    # type: () -> None
    '''run the program: initialize, sanity-check the node config,
    print the run banner, set the environment and dispatch the action
    '''

    param.init()
    action = get_options()
    config.init_mynodename()

    # sanity checks: we must know who we are before doing anything
    if not param.NODENAME:
        error('unable to determine my nodename (hostname: %s)' %
              param.HOSTNAME)
        stderr('please check %s' % param.CONF_FILE)
        sys.exit(-1)

    if param.NODENAME not in param.NODES:
        error("unknown node '%s'" % param.NODENAME)
        stderr('please check %s' % param.CONF_FILE)
        sys.exit(-1)

    if param.NODENAME in param.IGNORE_GROUPS:
        # this is only a warning ...
        # you can still run synctool-pkg on the client by hand
        warning('node %s is disabled in %s' %
                (param.NODENAME, param.CONF_FILE))

    if synctool.lib.UNIX_CMD:
        # emit a shell-script style header on the unix-command output
        t = time.localtime(time.time())
        unix_out('#')
        unix_out('# script generated by synctool on '
                 '%04d/%02d/%02d %02d:%02d:%02d' %
                 (t[0], t[1], t[2], t[3], t[4], t[5]))
        unix_out('#')
        unix_out('# my hostname: %s' % param.HOSTNAME)
        unix_out('# SYNCTOOL_NODE=%s' % param.NODENAME)
        unix_out('# SYNCTOOL_ROOT=%s' % param.ROOTDIR)
        unix_out('#')
        if not synctool.lib.DRY_RUN:
            unix_out('# NOTE: --fix specified, applying updates')
            unix_out('#')
        unix_out('')
    else:
        if not synctool.lib.MASTERLOG:
            # only print this when running stand-alone
            if not synctool.lib.QUIET:
                if synctool.lib.DRY_RUN:
                    stdout('DRY RUN, not doing any updates')
                    terse(synctool.lib.TERSE_DRYRUN, 'not doing any updates')
                else:
                    stdout('--fix specified, applying changes')
                    terse(synctool.lib.TERSE_FIXING, ' applying changes')
            else:
                # quiet mode: only say it when --verbose is also given
                if synctool.lib.DRY_RUN:
                    verbose('DRY RUN, not doing any updates')
                else:
                    verbose('--fix specified, applying changes')

    verbose('my nodename: %s' % param.NODENAME)
    verbose('my hostname: %s' % param.HOSTNAME)
    verbose('rootdir: %s' % param.ROOTDIR)

    # export node identity for .post scripts and other children
    os.environ['SYNCTOOL_NODE'] = param.NODENAME
    os.environ['SYNCTOOL_ROOT'] = param.ROOTDIR

    # run with a restrictive umask; created files are private by default
    unix_out('umask 077')
    unix_out('')
    os.umask(077)

    # dispatch the requested action
    if action == ACTION_DIFF:
        diff_files()
    elif action == ACTION_REFERENCE:
        reference_files()
    elif action == ACTION_ERASE_SAVED:
        if SINGLE_FILES:
            single_erase_saved()
        else:
            erase_saved()
    elif SINGLE_FILES:
        single_files()
    else:
        # default action: full run over the overlay tree
        purge_files()
        overlay_files()
        delete_files()

    unix_out('# EOB')
def generate_template(obj, post_dict):
    # type: (SyncObject, Dict[str, str]) -> bool
    '''run the template .post script, generating a new file

    The script runs in the source dir (overlay tree)
    and it runs even in dry-run mode.

    Returns True on success, False on error
    '''

    # Note: this func modifies input parameter 'obj'
    # when it successfully generates output, it will change obj's paths
    # and it will be picked up again in overlay._walk_subtree()

    if synctool.lib.NO_POST:
        # --no-post given: do not run any scripts, ignore this entry
        verbose('skipping template generation of %s' % obj.src_path)
        obj.ov_type = synctool.overlay.OV_IGNORE
        return True

    if SINGLE_FILES and obj.dest_path not in SINGLE_FILES:
        # running on single files and this is not one of them
        verbose('skipping template generation of %s' % obj.src_path)
        obj.ov_type = synctool.overlay.OV_IGNORE
        return True

    verbose('generating template %s' % obj.print_src())

    src_dir = os.path.dirname(obj.src_path)
    newname = os.path.join(src_dir, os.path.basename(obj.dest_path))
    template = newname + '._template'
    # add most important extension
    newname += '._' + param.NODENAME
    verbose('generating template as %s' % newname)

    statbuf = synctool.syncstat.SyncStat(newname)
    if statbuf.exists():
        verbose('template destination %s already exists' % newname)

        if param.SYNC_TIMES and statbuf.mtime != obj.src_stat.mtime:
            # force the mtime of the template onto the existing output
            verbose('forcing mtime %s => %s' % (obj.src_path, newname))
            synctool.lib.set_filetimes(newname, statbuf.atime,
                                       obj.src_stat.mtime)

        # modify the object; set new src and dest filenames
        # later, visit() will call obj.make(), which will make full paths
        obj.src_path = newname
        obj.dest_path = os.path.basename(obj.dest_path)
        return True

    # get the .post script for the template file
    if template not in post_dict:
        if param.TERSE:
            terse(synctool.lib.TERSE_ERROR, 'no .post %s' % obj.src_path)
        else:
            error('template generator for %s not found' % obj.src_path)
        return False

    generator = post_dict[template]

    # chdir to source directory
    # Note: the change dir is not really needed
    # but the documentation promises that .post scripts run in
    # the dir where the new file will be put
    verbose(' os.chdir(%s)' % src_dir)
    unix_out('cd %s' % src_dir)
    cwd = os.getcwd()
    try:
        os.chdir(src_dir)
    except OSError as err:
        if param.TERSE:
            terse(synctool.lib.TERSE_ERROR, 'chdir %s' % src_dir)
        else:
            error('failed to change directory to %s: %s' % (src_dir,
                                                            err.strerror))
        return False

    # temporarily restore original umask
    # so the script runs with the umask set by the sysadmin
    os.umask(param.ORIG_UMASK)

    # run the script
    # pass template and newname as "$1" and "$2"
    cmd_arr = [generator, obj.src_path, newname]
    verbose(' os.system(%s, %s, %s)' % (prettypath(cmd_arr[0]),
                                        cmd_arr[1], cmd_arr[2]))
    unix_out('# run command %s' % os.path.basename(cmd_arr[0]))

    have_error = False
    if synctool.lib.exec_command(cmd_arr) == -1:
        have_error = True

    # verify that the script actually produced the expected output file
    statbuf = synctool.syncstat.SyncStat(newname)
    if not statbuf.exists():
        if not have_error:
            # script "succeeded" but produced nothing; ignore this entry
            if param.TERSE:
                terse(synctool.lib.TERSE_WARNING, 'no output %s' % newname)
            else:
                warning('expected output %s was not generated' % newname)
            obj.ov_type = synctool.overlay.OV_IGNORE
        else:
            # an error message was already printed when exec() failed earlier
            # so, only when --verbose is used, print additional debug info
            verbose('error: expected output %s was not generated' % newname)
    else:
        verbose('found generated output %s' % newname)

        if param.SYNC_TIMES:
            # force the mtime of the template onto the generated output
            verbose('forcing mtime %s => %s' % (obj.src_path, newname))
            synctool.lib.set_filetimes(newname, statbuf.atime,
                                       obj.src_stat.mtime)

    # back to synctool's own restrictive umask
    os.umask(077)

    # chdir back to original location
    verbose(' os.chdir(%s)' % cwd)
    unix_out('cd %s' % cwd)
    try:
        os.chdir(cwd)
    except OSError as err:
        if param.TERSE:
            terse(synctool.lib.TERSE_ERROR, 'chdir %s' % src_dir)
        else:
            error('failed to change directory to %s: %s' % (cwd,
                                                            err.strerror))
        return False

    if have_error:
        return False

    # modify the object; set new src and dest filenames
    # later, visit() will call obj.make(), which will make full paths
    obj.src_path = newname
    obj.dest_path = os.path.basename(obj.dest_path)
    return True
def rsync_upload(up):
    '''upload a file/dir to $overlay/group/ or $purge/group/

    'up' carries the node, address, remote filename and repository
    destination for the upload (presumably an UploadFile-like object
    -- confirm against caller)
    '''

    up.make_repos_path()

    # check whether the remote entry exists
    remote_stats = _remote_stat(up)
    if remote_stats is None:
        # error message was already printed
        return

    # first element in array is our 'target'
    isdir = remote_stats[0].is_dir()
    if isdir and synctool.param.REQUIRE_EXTENSION and not up.purge:
        error('remote is a directory')
        stderr('synctool can not upload directories to $overlay '
               'when require_extension is set')
        return

    if isdir:
        # trailing slash makes rsync copy the dir contents
        up.filename += os.sep
        up.repos_path += os.sep

    # make command: rsync [-n] [-v] node:/path/ $overlay/group/path/
    cmd_arr = shlex.split(synctool.param.RSYNC_CMD)

    # opts is just for the 'visual aspect'; it is displayed when --verbose
    opts = ' '
    if synctool.lib.DRY_RUN:
        # cmd_arr.append('-n')
        opts += '-n '

    if synctool.lib.VERBOSE:
        cmd_arr.append('-v')
        opts += '-v '
        # verbose and quiet are mutually exclusive; strip any quiet flags
        if '-q' in cmd_arr:
            cmd_arr.remove('-q')
        if '--quiet' in cmd_arr:
            cmd_arr.remove('--quiet')

    # use ssh connection multiplexing (if possible)
    ssh_cmd_arr = shlex.split(synctool.param.SSH_CMD)
    use_multiplex = synctool.multiplex.use_mux(up.node)
    if use_multiplex:
        synctool.multiplex.ssh_args(ssh_cmd_arr, up.node)

    cmd_arr.extend(['-e', ' '.join(ssh_cmd_arr)])
    cmd_arr.extend(['--', up.address + ':' + up.filename, up.repos_path])

    verbose_path = prettypath(up.repos_path)
    if synctool.lib.DRY_RUN:
        stdout('would be uploaded as %s' % verbose_path)
    else:
        # ensure the destination directory exists in the repository,
        # mirroring mode/ownership from the remote side
        dest_dir = os.path.dirname(up.repos_path)
        _makedir(dest_dir, remote_stats[1:])
        if not synctool.lib.path_exists(dest_dir):
            error('failed to create %s/' % dest_dir)
            return

    # for $overlay, never do rsync --delete / --delete-excluded
    # for $purge, don't use rsync --delete on single files
    # because it would (inadvertently) delete all existing files in the repos
    if not up.purge or not isdir:
        if '--delete' in cmd_arr:
            cmd_arr.remove('--delete')
        if '--delete-excluded' in cmd_arr:
            cmd_arr.remove('--delete-excluded')

    verbose('running rsync%s%s:%s to %s' % (opts, up.node, up.filename,
                                            verbose_path))

    if not synctool.lib.DRY_RUN:
        synctool.lib.run_with_nodename(cmd_arr, up.node)

        # check the upload actually arrived in the repository
        if not synctool.lib.path_exists(up.repos_path):
            error('upload failed')
        else:
            stdout('uploaded %s' % verbose_path)
    else:
        # in dry-run mode, show the command anyway
        unix_out('# dry run, rsync not performed')
        unix_out(' '.join(cmd_arr))
def _makedir(path, remote_stats): '''make directory in repository, copying over mode and ownership of the directories as they are on the remote side remote_stats is array holding stat info of the remote side Returns True on success, False on error Note that this function creates directories even if the remote path component may be a symbolic link ''' if not path or not remote_stats: error("recursion too deep") return False if synctool.lib.path_exists(path): return True verbose('_makedir %s %r' % (path, remote_stats)) # recursively make parent directory if not _makedir(os.path.dirname(path), remote_stats[1:]): return False # do a simple check against the names of the dir # (are we still 'in sync' with remote_stats?) basename = os.path.basename(path) remote_basename = os.path.basename(remote_stats[0].filename) if remote_basename and basename != remote_basename: error("out of sync with remote stat information, I'm lost") return False # temporarily restore admin's umask mask = os.umask(synctool.param.ORIG_UMASK) mode = remote_stats[0].mode & 0777 try: os.mkdir(path, mode) except OSError as err: error('failed to create directory %s: %s' % (path, err.strerror)) os.umask(mask) return False else: unix_out('mkdir -p -m %04o %s' % (mode, path)) os.umask(mask) # the mkdir mode is affected by umask # so set the mode the way we want it try: os.chmod(path, mode) except OSError as err: warning('failed to chmod %04o %s: %s' % (mode, path, err.strerror)) # also set the owner & group # uid/gid are translated from remote owner/group, # unless --numeric-ids is wanted rsync_cmd_arr = shlex.split(synctool.param.RSYNC_CMD) if '--numeric-ids' in rsync_cmd_arr: uid = remote_stats[0].uid gid = remote_stats[0].gid else: uid = remote_stats[0].translate_uid() gid = remote_stats[0].translate_gid() try: os.lchown(path, uid, gid) except OSError as err: warning('failed to chown %s.%s %s: %s' % (synctool.pwdgrp.pw_name(uid), synctool.pwdgrp.grp_name(gid), path, err.strerror)) return True
def setup_master(node_list, persist): '''setup master connections to all nodes in node_list node_list is a list of pairs: (addr, nodename) Argument 'persist' is the SSH ControlPersist parameter Returns True on success, False on error ''' detect_ssh() if SSH_VERSION < 39: error('unsupported version of ssh') return False if persist == 'none': persist = None procs = [] ssh_cmd_arr = shlex.split(synctool.param.SSH_CMD) ssh_cmd_arr.extend(['-M', '-N', '-n']) if SSH_VERSION >= 56 and persist is not None: ssh_cmd_arr.extend(['-o', 'ControlPersist=' + persist]) verbose('spawning ssh master connections') errors = 0 for addr, nodename in node_list: control_path = _make_control_path(nodename) if not control_path: # error message already printed return False # see if the control path already exists statbuf = synctool.syncstat.SyncStat(control_path) if statbuf.exists(): if not statbuf.is_sock(): warning('control path %s: not a socket file' % control_path) errors += 1 continue if statbuf.uid != os.getuid(): warning('control path: %s: incorrect owner uid %u' % (control_path, statbuf.uid)) errors += 1 continue if statbuf.mode & 077 != 0: warning('control path %s: suspicious file mode %04o' % (control_path, statbuf.mode & 0777)) errors += 1 continue verbose('control path %s already exists' % control_path) continue # start ssh in master mode to create a new control path verbose('creating master control path to %s' % nodename) cmd_arr = ssh_cmd_arr[:] cmd_arr.extend(['-o', 'ControlPath=' + control_path, '--', addr]) # start in background unix_out(' '.join(cmd_arr)) try: proc = subprocess.Popen(cmd_arr, shell=False) except OSError as err: error('failed to execute %s: %s' % (cmd_arr[0], err.strerror)) errors += 1 continue procs.append(proc)