Example #1
def _write_purge_filter(f):
    '''write rsync filter rules for purge/ tree
    Returns False on error
    '''

    f.write('+ /var/purge/\n')

    purge_groups = os.listdir(synctool.param.PURGE_DIR)

    # add only the group dirs that apply
    for g in synctool.param.MY_GROUPS:
        if g in purge_groups:
            purge_root = os.path.join(synctool.param.PURGE_DIR, g)
            if not os.path.isdir(purge_root):
                continue

            for path, _, files in os.walk(purge_root):
                if path == purge_root:
                    # guard against user mistakes;
                    # danger of destroying the entire filesystem
                    # if it would rsync --delete the root
                    if len(files) > 0:
                        warning('cowardly refusing to purge the root '
                                'directory')
                        stderr('please remove any files directly '
                               'under %s/' % prettypath(purge_root))
                        return False
                else:
                    f.write('+ /var/purge/%s/\n' % g)
                    break

    f.write('- /var/purge/*\n')
    return True
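
A self-contained sketch of the filter rules the function above emits, with
hypothetical group names; rsync reads such a file top-down via its
'--filter=. FILE' option, so the applicable group dirs are included before
everything else under purge/ is excluded:

# hypothetical stand-ins for synctool.param.MY_GROUPS and the purge/ tree
MY_GROUPS = ['node1', 'webservers', 'all']
PURGE_GROUPS = ['webservers', 'dbservers']

rules = ['+ /var/purge/\n']
for g in MY_GROUPS:
    if g in PURGE_GROUPS:
        rules.append('+ /var/purge/%s/\n' % g)
rules.append('- /var/purge/*\n')
print(''.join(rules))
# + /var/purge/
# + /var/purge/webservers/
# - /var/purge/*
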
Example #2
def worker_synctool(addr):
    '''run rsync of ROOTDIR to the nodes and ssh+synctool, in parallel'''

    nodename = NODESET.get_nodename_from_address(addr)

    if nodename == param.NODENAME:
        run_local_synctool()
        return

    # use ssh connection multiplexing (if possible)
    use_multiplex = synctool.multiplex.use_mux(nodename)

    ssh_cmd_arr = shlex.split(param.SSH_CMD)
    if use_multiplex:
        synctool.multiplex.ssh_args(ssh_cmd_arr, nodename)

    # rsync ROOTDIR/dirs/ to the node
    # if "it wants it"
    if not (OPT_SKIP_RSYNC or nodename in param.NO_RSYNC):
        verbose('running rsync $SYNCTOOL/ to node %s' % nodename)

        # make rsync filter to include the correct dirs
        tmp_filename = rsync_include_filter(nodename)

        cmd_arr = shlex.split(param.RSYNC_CMD)
        cmd_arr.append('--filter=. %s' % tmp_filename)

        # add "-e ssh_cmd" to rsync command
        cmd_arr.extend(['-e', ' '.join(ssh_cmd_arr)])

        cmd_arr.append('--')
        cmd_arr.append('%s/' % param.ROOTDIR)
        cmd_arr.append('%s:%s/' % (addr, param.ROOTDIR))

        # double check the rsync destination
        # our filters are like playing with fire
        if not param.ROOTDIR or (param.ROOTDIR == os.sep):
            warning('cowardly refusing to rsync with rootdir == %s' %
                    param.ROOTDIR)
            sys.exit(-1)

        synctool.lib.run_with_nodename(cmd_arr, nodename)

        # delete temp file
        try:
            os.unlink(tmp_filename)
        except OSError:
            # silently ignore unlink error
            pass

    # run 'ssh node synctool_cmd'
    cmd_arr = ssh_cmd_arr[:]
    cmd_arr.append('--')
    cmd_arr.append(addr)
    cmd_arr.extend(shlex.split(param.SYNCTOOL_CMD))
    cmd_arr.append('--nodename=%s' % nodename)
    cmd_arr.extend(PASS_ARGS)

    verbose('running synctool on node %s' % nodename)
    synctool.lib.run_with_nodename(cmd_arr, nodename)
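
To make the command construction concrete, this is the rsync invocation the
code above would build for one node; the parameter values are hypothetical,
the real ones come from synctool.param and the config file:

import shlex

RSYNC_CMD = 'rsync -ar --delete --delete-excluded -q'    # hypothetical
ROOTDIR = '/opt/synctool'                                # hypothetical
addr = '10.0.0.1'
tmp_filename = '/tmp/synctool.filter.12345'
ssh_cmd_arr = ['ssh', '-o', 'ConnectTimeout=10', '-x', '-q']

cmd_arr = shlex.split(RSYNC_CMD)
cmd_arr.append('--filter=. %s' % tmp_filename)
cmd_arr.extend(['-e', ' '.join(ssh_cmd_arr)])
cmd_arr.extend(['--', '%s/' % ROOTDIR, '%s:%s/' % (addr, ROOTDIR)])
print(' '.join(cmd_arr))
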
Example #3
def use_mux(nodename):
    '''Returns True if it's OK to use a master connection to node
    Otherwise returns False -> don't use multiplexing
    '''

    control_path = _make_control_path(nodename)
    if not control_path:
        # error message already printed
        return False

    # see if the control path already exists
    statbuf = synctool.syncstat.SyncStat(control_path)
    if statbuf.exists():
        if not statbuf.is_sock():
            warning('control path %s: not a socket file' %
                    control_path)
            return False

        if statbuf.uid != os.getuid():
            warning('control path: %s: incorrect owner uid %u' %
                    (control_path, statbuf.uid))
            return False

        if statbuf.mode & 0o77 != 0:
            warning('control path %s: suspicious file mode %04o' %
                    (control_path, statbuf.mode & 0o777))
            return False

        verbose('control path %s already exists' % control_path)
        return True

    # there is no existing master connection
    return False
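
The ssh_args() call used by the workers is not shown here; a minimal sketch
of the idea, assuming it appends a ControlPath option pointing at the
per-node socket (the path shown is hypothetical):

ssh_cmd_arr = ['ssh']
control_path = '/home/admin/.synctool/mux/node1'    # hypothetical location
ssh_cmd_arr.extend(['-o', 'ControlPath=' + control_path])
print(' '.join(ssh_cmd_arr))
# ssh -o ControlPath=/home/admin/.synctool/mux/node1
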
Example #4
def _delete_callback(obj, _pre_dict, post_dict):
    # type: (SyncObject, Dict[str, str], Dict[str, str]) -> Tuple[bool, bool]
    '''delete files'''

    if obj.ov_type == synctool.overlay.OV_TEMPLATE:
        return generate_template(obj, post_dict), False

    # don't delete directories
    if obj.src_stat.is_dir():
#       verbose('refusing to delete directory %s' % (obj.dest_path + os.sep))
        return True, False

    if obj.dest_stat.is_dir():
        warning('destination is a directory: %s, skipped' % obj.print_src())
        return True, False

    verbose('checking %s' % obj.print_src())

    if obj.dest_stat.exists():
        vnode = obj.vnode_dest_obj()
        vnode.harddelete()
        obj.run_script(post_dict)
        return True, True

    return True, False
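
For context: this is one of several callbacks handed to synctool's overlay
walker. The contract it must honor, sketched with a do-nothing callback:

def noop_callback(obj, pre_dict, post_dict):
    # type: (object, dict, dict) -> tuple
    '''returns (ok, updated): ok=False aborts the tree walk;
    updated=True tells the walker to run the matching .post script
    '''
    return True, False
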
Example #5
def worker_synctool(addr):
    '''run rsync of ROOTDIR to the nodes and ssh+synctool, in parallel'''

    nodename = NODESET.get_nodename_from_address(addr)

    if nodename == synctool.param.NODENAME:
        run_local_synctool()
        return

    # use ssh connection multiplexing (if possible)
    use_multiplex = synctool.multiplex.use_mux(nodename, addr)

    ssh_cmd_arr = shlex.split(synctool.param.SSH_CMD)
    if use_multiplex:
        synctool.multiplex.ssh_args(ssh_cmd_arr, nodename)

    # rsync ROOTDIR/dirs/ to the node
    # if "it wants it"
    if not (OPT_SKIP_RSYNC or nodename in synctool.param.NO_RSYNC):
        verbose('running rsync $SYNCTOOL/ to node %s' % nodename)

        # make rsync filter to include the correct dirs
        tmp_filename = rsync_include_filter(nodename)

        cmd_arr = shlex.split(synctool.param.RSYNC_CMD)
        cmd_arr.append('--filter=. %s' % tmp_filename)

        # add "-e ssh_cmd" to rsync command
        cmd_arr.extend(['-e', ' '.join(ssh_cmd_arr)])

        cmd_arr.append('--')
        cmd_arr.append('%s/' % synctool.param.ROOTDIR)
        cmd_arr.append('%s:%s/' % (addr, synctool.param.ROOTDIR))

        # double check the rsync destination
        # our filters are like playing with fire
        if not synctool.param.ROOTDIR or (synctool.param.ROOTDIR == os.sep):
            warning('cowardly refusing to rsync with rootdir == %s' %
                    synctool.param.ROOTDIR)
            sys.exit(-1)

        synctool.lib.run_with_nodename(cmd_arr, nodename)

        # delete temp file
        try:
            os.unlink(tmp_filename)
        except OSError:
            # silently ignore unlink error
            pass

    # run 'ssh node synctool_cmd'
    cmd_arr = ssh_cmd_arr[:]
    cmd_arr.append('--')
    cmd_arr.append(addr)
    cmd_arr.extend(shlex.split(synctool.param.SYNCTOOL_CMD))
    cmd_arr.append('--nodename=%s' % nodename)
    cmd_arr.extend(PASS_ARGS)

    verbose('running synctool on node %s' % nodename)
    synctool.lib.run_with_nodename(cmd_arr, nodename)
Example #6
def purge_files():
    # type: () -> None
    '''run the purge function'''

    paths = []
    purge_groups = os.listdir(param.PURGE_DIR)

    # find the source purge paths that we need to copy
    # scan only the group dirs that apply
    for g in param.MY_GROUPS:
        if g in purge_groups:
            purge_root = os.path.join(param.PURGE_DIR, g)
            if not os.path.isdir(purge_root):
                continue

            for path, subdirs, files in os.walk(purge_root):
                # rsync only purge dirs that actually contain files
                # otherwise rsync --delete would wreak havoc
                if not files:
                    continue

                if path == purge_root:
                    # root contains files; guard against user mistakes
                    # rsync --delete would destroy the whole filesystem
                    warning('cowardly refusing to purge the root directory')
                    stderr('please remove any files directly under %s/' %
                           prettypath(purge_root))
                    return

                # paths has (src_dir, dest_dir)
                paths.append((path, path[len(purge_root):]))

                # do not recurse into this dir any deeper
                del subdirs[:]

    cmd_rsync, opts_string = _make_rsync_purge_cmd()

    # call rsync to copy the purge dirs
    for src, dest in paths:
        # trailing slash on source path is important for rsync
        src += os.sep
        dest += os.sep

        cmd_arr = cmd_rsync[:]
        cmd_arr.append(src)
        cmd_arr.append(dest)

        verbose('running rsync%s%s %s' % (opts_string, prettypath(src), dest))
        _run_rsync_purge(cmd_arr)
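
The (src_dir, dest_dir) pairs come from slicing the purge root off the
walked path; with hypothetical paths:

purge_root = '/opt/synctool/var/purge/webservers'
path = purge_root + '/etc/cron.d'
print((path, path[len(purge_root):]))
# ('/opt/synctool/var/purge/webservers/etc/cron.d', '/etc/cron.d')
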
Example #7
def main():
    # type: () -> None
    '''run the program'''

    synctool.param.init()

    get_options()

    synctool.lib.QUIET = not synctool.lib.VERBOSE

    if synctool.param.NODENAME in synctool.param.IGNORE_GROUPS:
        # this is only a warning ...
        # you can still run synctool-pkg on the client by hand
        warning('node %s is disabled in the config file' %
                synctool.param.NODENAME)

    pkg = package_manager()

    if ACTION == ACTION_LIST:
        pkg.list(PKG_LIST)

    elif ACTION == ACTION_INSTALL:
        pkg.install(PKG_LIST)

    elif ACTION == ACTION_REMOVE:
        pkg.remove(PKG_LIST)

    elif ACTION == ACTION_UPDATE:
        pkg.update()

    elif ACTION == ACTION_UPGRADE:
        pkg.upgrade()

    elif ACTION == ACTION_CLEAN:
        pkg.clean()

    else:
        raise RuntimeError('BUG: unknown ACTION code %d' % ACTION)
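
The ACTION_* values are assumed to be distinct integer codes set by
get_options(); a plausible, purely hypothetical definition:

(ACTION_LIST, ACTION_INSTALL, ACTION_REMOVE,
 ACTION_UPDATE, ACTION_UPGRADE, ACTION_CLEAN) = range(1, 7)
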
Example #8
def main():
    # type: () -> None
    '''run the program'''

    param.init()

    action = get_options()

    config.init_mynodename()

    if not param.NODENAME:
        error('unable to determine my nodename (hostname: %s)' %
              param.HOSTNAME)
        stderr('please check %s' % param.CONF_FILE)
        sys.exit(-1)

    if param.NODENAME not in param.NODES:
        error("unknown node '%s'" % param.NODENAME)
        stderr('please check %s' % param.CONF_FILE)
        sys.exit(-1)

    if param.NODENAME in param.IGNORE_GROUPS:
        # this is only a warning ...
        # you can still run synctool-pkg on the client by hand
        warning('node %s is disabled in %s' %
                (param.NODENAME, param.CONF_FILE))

    if synctool.lib.UNIX_CMD:
        t = time.localtime(time.time())

        unix_out('#')
        unix_out('# script generated by synctool on '
                 '%04d/%02d/%02d %02d:%02d:%02d' %
                 (t[0], t[1], t[2], t[3], t[4], t[5]))
        unix_out('#')
        unix_out('# my hostname: %s' % param.HOSTNAME)
        unix_out('# SYNCTOOL_NODE=%s' % param.NODENAME)
        unix_out('# SYNCTOOL_ROOT=%s' % param.ROOTDIR)
        unix_out('#')

        if not synctool.lib.DRY_RUN:
            unix_out('# NOTE: --fix specified, applying updates')
            unix_out('#')

        unix_out('')
    else:
        if not synctool.lib.MASTERLOG:
            # only print this when running stand-alone
            if not synctool.lib.QUIET:
                if synctool.lib.DRY_RUN:
                    stdout('DRY RUN, not doing any updates')
                    terse(synctool.lib.TERSE_DRYRUN, 'not doing any updates')
                else:
                    stdout('--fix specified, applying changes')
                    terse(synctool.lib.TERSE_FIXING, ' applying changes')

            else:
                if synctool.lib.DRY_RUN:
                    verbose('DRY RUN, not doing any updates')
                else:
                    verbose('--fix specified, applying changes')

        verbose('my nodename: %s' % param.NODENAME)
        verbose('my hostname: %s' % param.HOSTNAME)
        verbose('rootdir: %s' % param.ROOTDIR)

    os.environ['SYNCTOOL_NODE'] = param.NODENAME
    os.environ['SYNCTOOL_ROOT'] = param.ROOTDIR

    unix_out('umask 077')
    unix_out('')
    os.umask(0o77)

    if action == ACTION_DIFF:
        diff_files()

    elif action == ACTION_REFERENCE:
        reference_files()

    elif action == ACTION_ERASE_SAVED:
        if SINGLE_FILES:
            single_erase_saved()
        else:
            erase_saved()

    elif SINGLE_FILES:
        single_files()

    else:
        purge_files()
        overlay_files()
        delete_files()

    unix_out('# EOB')
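
When --unix output is active (synctool.lib.UNIX_CMD), the unix_out() calls
above produce a script header along these lines; hostname and paths are
hypothetical:

    #
    # script generated by synctool on 2015/06/01 14:30:00
    #
    # my hostname: node1.mycluster.org
    # SYNCTOOL_NODE=node1
    # SYNCTOOL_ROOT=/opt/synctool
    #

    umask 077
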
Example #9
def generate_template(obj, post_dict):
    # type: (SyncObject, Dict[str, str]) -> bool
    '''run template .post script, generating a new file
    The script will run in the source dir (overlay tree) and
    it will run even in dry-run mode
    Returns: True or False on error
    '''

    # Note: this func modifies input parameter 'obj'
    # when it successfully generates output, it will change obj's paths
    # and it will be picked up again in overlay._walk_subtree()

    if synctool.lib.NO_POST:
        verbose('skipping template generation of %s' % obj.src_path)
        obj.ov_type = synctool.overlay.OV_IGNORE
        return True

    if SINGLE_FILES and obj.dest_path not in SINGLE_FILES:
        verbose('skipping template generation of %s' % obj.src_path)
        obj.ov_type = synctool.overlay.OV_IGNORE
        return True

    verbose('generating template %s' % obj.print_src())

    src_dir = os.path.dirname(obj.src_path)
    newname = os.path.join(src_dir, os.path.basename(obj.dest_path))
    template = newname + '._template'
    # add most important extension
    newname += '._' + param.NODENAME

    verbose('generating template as %s' % newname)

    statbuf = synctool.syncstat.SyncStat(newname)
    if statbuf.exists():
        verbose('template destination %s already exists' % newname)

        if param.SYNC_TIMES and statbuf.mtime != obj.src_stat.mtime:
            # force the mtime of the template onto the existing output
            verbose('forcing mtime %s => %s' % (obj.src_path, newname))
            synctool.lib.set_filetimes(newname, statbuf.atime,
                                       obj.src_stat.mtime)

        # modify the object; set new src and dest filenames
        # later, visit() will call obj.make(), which will make full paths
        obj.src_path = newname
        obj.dest_path = os.path.basename(obj.dest_path)
        return True

    # get the .post script for the template file
    if template not in post_dict:
        if param.TERSE:
            terse(synctool.lib.TERSE_ERROR, 'no .post %s' % obj.src_path)
        else:
            error('template generator for %s not found' % obj.src_path)
        return False

    generator = post_dict[template]

    # chdir to source directory
    # Note: the change dir is not really needed
    # but the documentation promises that .post scripts run in
    # the dir where the new file will be put
    verbose('  os.chdir(%s)' % src_dir)
    unix_out('cd %s' % src_dir)
    cwd = os.getcwd()
    try:
        os.chdir(src_dir)
    except OSError as err:
        if param.TERSE:
            terse(synctool.lib.TERSE_ERROR, 'chdir %s' % src_dir)
        else:
            error('failed to change directory to %s: %s' % (src_dir,
                                                            err.strerror))
        return False

    # temporarily restore original umask
    # so the script runs with the umask set by the sysadmin
    os.umask(param.ORIG_UMASK)

    # run the script
    # pass template and newname as "$1" and "$2"
    cmd_arr = [generator, obj.src_path, newname]
    verbose('  os.system(%s, %s, %s)' % (prettypath(cmd_arr[0]),
                                         cmd_arr[1], cmd_arr[2]))
    unix_out('# run command %s' % os.path.basename(cmd_arr[0]))

    have_error = False
    if synctool.lib.exec_command(cmd_arr) == -1:
        have_error = True

    statbuf = synctool.syncstat.SyncStat(newname)
    if not statbuf.exists():
        if not have_error:
            if param.TERSE:
                terse(synctool.lib.TERSE_WARNING, 'no output %s' % newname)
            else:
                warning('expected output %s was not generated' % newname)
            obj.ov_type = synctool.overlay.OV_IGNORE
        else:
            # an error message was already printed when exec() failed earlier
            # so, only when --verbose is used, print additional debug info
            verbose('error: expected output %s was not generated' % newname)
    else:
        verbose('found generated output %s' % newname)
        if param.SYNC_TIMES:
            # force the mtime of the template onto the generated output
            verbose('forcing mtime %s => %s' % (obj.src_path, newname))
            synctool.lib.set_filetimes(newname, statbuf.atime,
                                       obj.src_stat.mtime)

    os.umask(0o77)

    # chdir back to original location
    verbose('  os.chdir(%s)' % cwd)
    unix_out('cd %s' % cwd)
    try:
        os.chdir(cwd)
    except OSError as err:
        if param.TERSE:
            terse(synctool.lib.TERSE_ERROR, 'chdir %s' % cwd)
        else:
            error('failed to change directory to %s: %s' % (cwd,
                                                            err.strerror))
        return False

    if have_error:
        return False

    # modify the object; set new src and dest filenames
    # later, visit() will call obj.make(), which will make full paths
    obj.src_path = newname
    obj.dest_path = os.path.basename(obj.dest_path)
    return True
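
How the names fit together: for a hypothetical template in the overlay tree,
the generator script is looked up under the '._template' name and its output
lands next to it, tagged with the node's own extension:

import os

src_path = '/opt/synctool/var/overlay/all/etc/hosts._template'
dest_path = '/etc/hosts'            # NODENAME assumed to be 'node1'

src_dir = os.path.dirname(src_path)
newname = os.path.join(src_dir, os.path.basename(dest_path))
template = newname + '._template'   # key for the .post generator script
newname += '._node1'                # output picked up by the overlay walk
print(template)   # /opt/synctool/var/overlay/all/etc/hosts._template
print(newname)    # /opt/synctool/var/overlay/all/etc/hosts._node1
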
Example #10
    def addresses(self, silent=False):
        '''return list of addresses of relevant nodes, or None on error'''

        # by default, work on default_nodeset
        if not self.nodelist and not self.grouplist:
            if not param.DEFAULT_NODESET:
                return []

            self.nodelist = param.DEFAULT_NODESET

        # check if the nodes exist at all
        # the user may have given bogus names
        all_nodes = set(config.get_all_nodes())
        unknown = (self.nodelist | self.exclude_nodes) - all_nodes
        if len(unknown) > 0:
            # it's nice to display "the first" unknown node
            # (at least, for numbered nodes)
            arr = list(unknown)
            arr.sort()
            stderr("no such node '%s'" % arr[0])
            return None

        # check if the groups exist at all
        unknown = ((self.grouplist | self.exclude_groups) - param.ALL_GROUPS)
        for group in unknown:
            stderr("no such group '%s'" % group)
            return None

        self.nodelist |= config.get_nodes_in_groups(self.grouplist)
        self.exclude_nodes |= config.get_nodes_in_groups(self.exclude_groups)
        # remove excluded nodes from nodelist
        self.nodelist -= self.exclude_nodes

        if not self.nodelist:
            return []

        addrs = []

        ignored_nodes = self.nodelist & param.IGNORE_GROUPS
        self.nodelist -= ignored_nodes

        for node in self.nodelist:
            # ignoring a group results in also ignoring the node
            my_groups = set(config.get_groups(node))
            my_groups &= param.IGNORE_GROUPS
            if len(my_groups) > 0:
                verbose('node %s is ignored due to an ignored group' % node)
                ignored_nodes.add(node)
                continue

            addr = config.get_node_ipaddress(node)
            self.namemap[addr] = node

            # make sure we do not have duplicates
            if addr not in addrs:
                addrs.append(addr)

        # print message about ignored nodes
        if not silent and len(ignored_nodes) > 0 and not synctool.lib.QUIET:
            if param.TERSE:
                synctool.lib.terse(synctool.lib.TERSE_WARNING, 'ignored nodes')
            else:
                arr = list(ignored_nodes)
                arr.sort()
                ignored_str = ('ignored: ' + synctool.range.compress(arr))
                if len(ignored_str) < 70:
                    warning(ignored_str)
                else:
                    warning('some nodes are ignored')
                    if synctool.lib.VERBOSE:
                        for node in ignored_nodes:
                            verbose('ignored: %s' % node)

        return addrs
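
The heart of the method is plain set algebra; a self-contained rerun of the
selection steps with hypothetical node data:

nodelist = set(['node1', 'node2'])
exclude_nodes = set(['node2'])
nodes_in_groups = set(['node3', 'node4'])   # expanded from grouplist
ignore_groups = set(['node4'])              # disabled in the config

nodelist |= nodes_in_groups
nodelist -= exclude_nodes
ignored_nodes = nodelist & ignore_groups
nodelist -= ignored_nodes
print(sorted(nodelist))   # ['node1', 'node3']
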
Example #11
    def addresses(self, silent=False):
        '''return list of addresses of relevant nodes, or None on error'''

        # by default, work on default_nodeset
        if not self.nodelist and not self.grouplist:
            if not param.DEFAULT_NODESET:
                return []

            self.nodelist = param.DEFAULT_NODESET

        # check if the nodes exist at all
        # the user may have given bogus names
        all_nodes = set(config.get_all_nodes())
        unknown = (self.nodelist | self.exclude_nodes) - all_nodes
        if len(unknown) > 0:
            # it's nice to display "the first" unknown node
            # (at least, for numbered nodes)
            arr = list(unknown)
            arr.sort()
            stderr("no such node '%s'" % arr[0])
            return None

        # check if the groups exist at all
        unknown = ((self.grouplist | self.exclude_groups) -
                   param.ALL_GROUPS)
        for group in unknown:
            stderr("no such group '%s'" % group)
            return None

        self.nodelist |= config.get_nodes_in_groups(self.grouplist)
        self.exclude_nodes |= config.get_nodes_in_groups(self.exclude_groups)
        # remove excluded nodes from nodelist
        self.nodelist -= self.exclude_nodes

        if not self.nodelist:
            return []

        addrs = []

        ignored_nodes = self.nodelist & param.IGNORE_GROUPS
        self.nodelist -= ignored_nodes

        for node in self.nodelist:
            # ignoring a group results in also ignoring the node
            my_groups = set(config.get_groups(node))
            my_groups &= param.IGNORE_GROUPS
            if len(my_groups) > 0:
                verbose('node %s is ignored due to an ignored group' % node)
                ignored_nodes.add(node)
                continue

            addr = config.get_node_ipaddress(node)
            self.namemap[addr] = node

            # make sure we do not have duplicates
            if addr not in addrs:
                addrs.append(addr)

        # print message about ignored nodes
        if not silent and len(ignored_nodes) > 0 and not synctool.lib.QUIET:
            if param.TERSE:
                synctool.lib.terse(synctool.lib.TERSE_WARNING,
                                   'ignored nodes')
            else:
                arr = list(ignored_nodes)
                arr.sort()
                ignored_str = ('ignored: ' + synctool.range.compress(arr))
                if len(ignored_str) < 70:
                    warning(ignored_str)
                else:
                    warning('some nodes are ignored')
                    if synctool.lib.VERBOSE:
                        for node in ignored_nodes:
                            verbose('ignored: %s' % node)

        return addrs
Example #12
def _makedir(path, remote_stats):
    '''make directory in repository, copying over mode and ownership
    of the directories as they are on the remote side
    remote_stats is array holding stat info of the remote side
    Returns True on success, False on error

    Note that this function creates directories even if the remote
    path component may be a symbolic link
    '''

    if not path or not remote_stats:
        error("recursion too deep")
        return False

    if synctool.lib.path_exists(path):
        return True

    verbose('_makedir %s %r' % (path, remote_stats))

    # recursively make parent directory
    if not _makedir(os.path.dirname(path), remote_stats[1:]):
        return False

    # do a simple check against the names of the dir
    # (are we still 'in sync' with remote_stats?)
    basename = os.path.basename(path)
    remote_basename = os.path.basename(remote_stats[0].filename)
    if remote_basename and basename != remote_basename:
        error("out of sync with remote stat information, I'm lost")
        return False

    # temporarily restore admin's umask
    mask = os.umask(synctool.param.ORIG_UMASK)
    mode = remote_stats[0].mode & 0o777
    try:
        os.mkdir(path, mode)
    except OSError as err:
        error('failed to create directory %s: %s' % (path, err.strerror))
        os.umask(mask)
        return False
    else:
        unix_out('mkdir -p -m %04o %s' % (mode, path))

    os.umask(mask)

    # the mkdir mode is affected by umask
    # so set the mode the way we want it
    try:
        os.chmod(path, mode)
    except OSError as err:
        warning('failed to chmod %04o %s: %s' % (mode, path, err.strerror))

    # also set the owner & group
    # uid/gid are translated from remote owner/group,
    # unless --numeric-ids is wanted
    rsync_cmd_arr = shlex.split(synctool.param.RSYNC_CMD)
    if '--numeric-ids' in rsync_cmd_arr:
        uid = remote_stats[0].uid
        gid = remote_stats[0].gid
    else:
        uid = remote_stats[0].translate_uid()
        gid = remote_stats[0].translate_gid()
    try:
        os.lchown(path, uid, gid)
    except OSError as err:
        warning('failed to chown %s.%s %s: %s' %
                (synctool.pwdgrp.pw_name(uid), synctool.pwdgrp.grp_name(gid),
                 path, err.strerror))

    return True
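
The remote_stats argument is expected leaf-first, parallel to the path
components; sketched with hypothetical names:

# remote_stats[0] describes 'path' itself, remote_stats[1] its parent, etc.
#
#   path         = '/srv/data/www/logs'
#   remote_stats = [<stat /srv/data/www/logs>,
#                   <stat /srv/data/www>,
#                   <stat /srv/data>, ...]
#
# the recursive call _makedir(os.path.dirname(path), remote_stats[1:])
# keeps the two in step; the basename comparison detects when they diverge
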
Example #13
def detect_installer():
    """Attempt to detect the operating system and package system
    Returns instance of a SyncPkg installer class
    """

    # attempt a best effort at detecting OSes for the purpose of
    # choosing a package manager
    # It's probably not 100% fool-proof, but as said, it's a best effort
    #
    # Problems:
    # - there are too many platforms and too many Linux distros
    # - there are too many different packaging systems
    # - there are RedHat variants that all have /etc/redhat-release but
    #   use different package managers
    # - SuSE has three (!) package managers that are all in use
    #   and it seems to be by design (!?)
    # - I've seen apt-get work with dpkg, and/or with rpm
    # - OS X has no 'standard' software packaging (the App store??)
    #   There are ports, fink, brew. I prefer 'brew'
    # - The *BSDs have both pkg_add and ports
    # - FreeBSD has freebsd-update to upgrade packages

    platform = os.uname()[0]

    if platform == "Linux":
        verbose("detected platform Linux")

        stat = synctool.syncstat.SyncStat()

        # use release file to detect Linux distro,
        # and choose package manager based on that

        for (release_file, pkgmgr) in LINUX_PACKAGE_MANAGERS:
            stat.stat(release_file)
            if stat.exists():
                verbose("detected %s" % release_file)
                verbose("choosing package manager %s" % pkgmgr)
                synctool.param.PACKAGE_MANAGER = pkgmgr
                return

        warning("unknown Linux distribution")

    elif platform == "Darwin":  # assume OS X
        verbose("detected platform OS X")
        # some people like port
        # some people like fink
        # I like homebrew
        verbose("choosing package manager brew")
        synctool.param.PACKAGE_MANAGER = "brew"

    elif platform in ("NetBSD", "OpenBSD", "FreeBSD"):
        verbose("detected platform %s" % platform)

        # choose bsdpkg
        # I know there are ports, but you can 'make' those easily in *BSD
        # or maybe ports will be a separate module in the future

        verbose("choosing package manager bsdpkg")
        synctool.param.PACKAGE_MANAGER = "bsdpkg"

    # platforms that are not supported yet, but I would like to support
    # or well, most of them
    # Want to know more OSes? See the source of autoconf's config.guess

    elif platform in (
        "4.4BSD",
        "4.3bsd",
        "BSD/OS",
        "SunOS",
        "AIX",
        "OSF1",
        "HP-UX",
        "HI-UX",
        "IRIX",
        "UNICOS",
        "UNICOS/mp",
        "ConvexOS",
        "Minix",
        "Windows_95",
        "Windows_NT",
        "CYGWIN",
        "MinGW",
        "LynxOS",
        "UNIX_System_V",
        "BeOS",
        "TOPS-10",
        "TOPS-20",
    ):
        verbose("detected platform %s" % platform)
        warning("synctool package management under %s is not yet supported" % platform)

    else:
        warning("unknown platform '%s'" % platform)
Example #14
def detect_installer():
    '''Attempt to detect the operating system and package system
    Sets synctool.param.PACKAGE_MANAGER accordingly
    '''

    # attempt a best effort at detecting OSes for the purpose of
    # choosing a package manager
    # It's probably not 100% fool-proof, but as said, it's a best effort
    #
    # Problems:
    # - there are too many platforms and too many Linux distros
    # - there are too many different packaging systems
    # - there are RedHat variants that all have /etc/redhat-release but
    #   use different package managers
    # - SuSE has three (!) package managers that are all in use
    #   and it seems to be by design (!?)
    # - I've seen apt-get work with dpkg, and/or with rpm
    # - OS X has no 'standard' software packaging (the App store??)
    #   There are ports, fink, brew. I prefer 'brew'
    # - The *BSDs have both pkg_add and ports
    # - FreeBSD has freebsd-update to upgrade packages

    platform = os.uname()[0]

    if platform == 'Linux':
        verbose('detected platform Linux')

        stat = synctool.syncstat.SyncStat()

        # use release file to detect Linux distro,
        # and choose package manager based on that

        for (release_file, pkgmgr) in LINUX_PACKAGE_MANAGERS:
            stat.stat(release_file)
            if stat.exists():
                verbose('detected %s' % release_file)
                verbose('choosing package manager %s' % pkgmgr)
                synctool.param.PACKAGE_MANAGER = pkgmgr
                return

        warning('unknown Linux distribution')

    elif platform == 'Darwin':            # assume OS X
        verbose('detected platform OS X')
        # some people like port
        # some people like fink
        # I like homebrew
        verbose('choosing package manager brew')
        synctool.param.PACKAGE_MANAGER = 'brew'

    elif platform in ('NetBSD', 'OpenBSD', 'FreeBSD'):
        verbose('detected platform %s' % platform)

        # choose bsdpkg
        # I know there are ports, but you can 'make' those easily in *BSD
        # or maybe ports will be a separate module in the future

        verbose('choosing package manager bsdpkg')
        synctool.param.PACKAGE_MANAGER = 'bsdpkg'

    # platforms that are not supported yet, but I would like to support
    # or well, most of them
    # Want to know more OSes? See the source of autoconf's config.guess

    elif platform in ('4.4BSD', '4.3bsd', 'BSD/OS', 'SunOS', 'AIX', 'OSF1',
        'HP-UX', 'HI-UX', 'IRIX', 'UNICOS', 'UNICOS/mp', 'ConvexOS', 'Minix',
        'Windows_95', 'Windows_NT', 'CYGWIN', 'MinGW',
        'LynxOS', 'UNIX_System_V', 'BeOS', 'TOPS-10', 'TOPS-20'):
        verbose('detected platform %s' % platform)
        warning('synctool package management under %s is not yet supported' %
                platform)

    else:
        warning("unknown platform '%s'" % platform)
Example #15
def _walk_subtree(src_dir, dest_dir, duplicates, callback):
    '''walk subtree under overlay/group/
    duplicates is a set that keeps us from selecting any duplicate matches
    Returns pair of booleans: ok, dir was updated
    '''

#    verbose('_walk_subtree(%s)' % src_dir)

    arr = []
    for entry in os.listdir(src_dir):
        if entry in synctool.param.IGNORE_FILES:
            verbose('ignoring %s' % prettypath(os.path.join(src_dir, entry)))
            continue

        # check any ignored files with wildcards
        # before any group extension is examined
        wildcard_match = False
        for wildcard_entry in synctool.param.IGNORE_FILES_WITH_WILDCARDS:
            if fnmatch.fnmatchcase(entry, wildcard_entry):
                wildcard_match = True
                verbose('ignoring %s (pattern match)' %
                        prettypath(os.path.join(src_dir, entry)))
                break

        if wildcard_match:
            continue

        obj, importance = _split_extension(entry, src_dir)
        if not obj:
            continue

        arr.append((obj, importance))

    # sort with .pre and .post scripts first
    # this ensures that post_dict will have the required script when needed
    arr.sort(_sort_by_importance_post_first)

    pre_dict = {}
    post_dict = {}
    dir_changed = False

    for obj, importance in arr:
        obj.make(src_dir, dest_dir)

        if obj.ov_type == OV_PRE:
            # register the .pre script and continue
            if obj.dest_path in pre_dict:
                continue

            pre_dict[obj.dest_path] = obj.src_path
            continue

        if obj.ov_type == OV_POST:
            # register the .post script and continue
            if obj.dest_path in post_dict:
                continue

            post_dict[obj.dest_path] = obj.src_path
            continue

        if obj.ov_type == OV_TEMPLATE_POST:
            # register the template generator and continue
            # put the dest for the template in the overlay (source) dir
            obj.dest_path = os.path.join(os.path.dirname(obj.src_path),
                                         os.path.basename(obj.dest_path))
            if obj.dest_path in post_dict:
                continue

            post_dict[obj.dest_path] = obj.src_path
            continue

        if obj.src_stat.is_dir():
            if synctool.param.IGNORE_DOTDIRS:
                name = os.path.basename(obj.src_path)
                if name[0] == '.':
                    verbose('ignoring dotdir %s' % obj.print_src())
                    continue

            updated = False
            if obj.dest_path not in duplicates:
                # this is the most important source for this dir
                duplicates.add(obj.dest_path)

                # run callback on the directory itself
                # this will create or fix directory entry if needed
                # a .pre script may be run
                # a .post script should not be run
                ok, updated = callback(obj, pre_dict, {})
                if not ok:
                    # quick exit
                    return False, dir_changed

            # recurse down into the directory
            # with empty pre_dict and post_dict parameters
            ok, updated2 = _walk_subtree(obj.src_path, obj.dest_path,
                                         duplicates, callback)
            if not ok:
                # quick exit
                return False, dir_changed

            # we still need to run the .post script on the dir (if any)
            if updated or updated2:
                obj.run_script(post_dict)

            # finished checking directory
            continue

        if synctool.param.IGNORE_DOTFILES:
            name = os.path.basename(obj.src_path)
            if name[0] == '.':
                verbose('ignoring dotfile %s' % obj.print_src())
                continue

        if synctool.param.REQUIRE_EXTENSION and obj.ov_type == OV_NO_EXT:
            if synctool.param.TERSE:
                terse(synctool.lib.TERSE_ERROR, ('no group on %s' %
                                                 obj.src_path))
            else:
                warning('no group extension on %s, skipped' % obj.print_src())
            continue

        if obj.dest_path in duplicates:
            # there already was a more important source for this destination
            continue

        duplicates.add(obj.dest_path)

        ok, updated = callback(obj, pre_dict, post_dict)
        if not ok:
            # quick exit
            return False, dir_changed

        if obj.ov_type == OV_IGNORE:
            # OV_IGNORE may be set by templates that didn't finish
            continue

        if obj.ov_type == OV_TEMPLATE:
            # a new file was generated
            # call callback on the generated file
            obj.ov_type = OV_REG
            obj.make(src_dir, dest_dir)

            ok, updated = callback(obj, pre_dict, post_dict)
            if not ok:
                # quick exit
                return False, dir_changed

        dir_changed |= updated

    return True, dir_changed
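
To see why the .pre/.post-first sort matters, consider a hypothetical
overlay directory; the scripts are registered first, keyed by destination
path, so both dicts are complete before the regular entries are visited:

#   entry in overlay/all/etc/     registered as
#   'hosts._web'             ->   regular file for destination /etc/hosts
#   'hosts.pre'              ->   pre_dict['/etc/hosts']
#   'hosts.post._web'        ->   post_dict['/etc/hosts']
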
Example #16
def _walk_subtree(src_dir, dest_dir, duplicates, callback):
    '''walk subtree under overlay/group/
    duplicates is a set that keeps us from selecting any duplicate matches
    Returns pair of booleans: ok, dir was updated
    '''

    #    verbose('_walk_subtree(%s)' % src_dir)

    arr = []
    for entry in os.listdir(src_dir):
        if entry in synctool.param.IGNORE_FILES:
            verbose('ignoring %s' % prettypath(os.path.join(src_dir, entry)))
            continue

        # check any ignored files with wildcards
        # before any group extension is examined
        wildcard_match = False
        for wildcard_entry in synctool.param.IGNORE_FILES_WITH_WILDCARDS:
            if fnmatch.fnmatchcase(entry, wildcard_entry):
                wildcard_match = True
                verbose('ignoring %s (pattern match)' %
                        prettypath(os.path.join(src_dir, entry)))
                break

        if wildcard_match:
            continue

        obj, importance = _split_extension(entry, src_dir)
        if not obj:
            continue

        arr.append((obj, importance))

    # sort with .pre and .post scripts first
    # this ensures that post_dict will have the required script when needed
    arr.sort(_sort_by_importance_post_first)

    pre_dict = {}
    post_dict = {}
    dir_changed = False

    for obj, importance in arr:
        obj.make(src_dir, dest_dir)

        if obj.ov_type == OV_PRE:
            # register the .pre script and continue
            if obj.dest_path in pre_dict:
                continue

            pre_dict[obj.dest_path] = obj.src_path
            continue

        if obj.ov_type == OV_POST:
            # register the .post script and continue
            if obj.dest_path in post_dict:
                continue

            post_dict[obj.dest_path] = obj.src_path
            continue

        if obj.ov_type == OV_TEMPLATE_POST:
            # register the template generator and continue
            # put the dest for the template in the overlay (source) dir
            obj.dest_path = os.path.join(os.path.dirname(obj.src_path),
                                         os.path.basename(obj.dest_path))
            if obj.dest_path in post_dict:
                continue

            post_dict[obj.dest_path] = obj.src_path
            continue

        if obj.src_stat.is_dir():
            if synctool.param.IGNORE_DOTDIRS:
                name = os.path.basename(obj.src_path)
                if name[0] == '.':
                    verbose('ignoring dotdir %s' % obj.print_src())
                    continue

            updated = False
            if obj.dest_path not in duplicates:
                # this is the most important source for this dir
                duplicates.add(obj.dest_path)

                # run callback on the directory itself
                # this will create or fix directory entry if needed
                # a .pre script may be run
                # a .post script should not be run
                ok, updated = callback(obj, pre_dict, {})
                if not ok:
                    # quick exit
                    return False, dir_changed

            # recurse down into the directory
            # with empty pre_dict and post_dict parameters
            ok, updated2 = _walk_subtree(obj.src_path, obj.dest_path,
                                         duplicates, callback)
            if not ok:
                # quick exit
                return False, dir_changed

            # we still need to run the .post script on the dir (if any)
            if updated or updated2:
                obj.run_script(post_dict)

            # finished checking directory
            continue

        if synctool.param.IGNORE_DOTFILES:
            name = os.path.basename(obj.src_path)
            if name[0] == '.':
                verbose('ignoring dotfile %s' % obj.print_src())
                continue

        if synctool.param.REQUIRE_EXTENSION and obj.ov_type == OV_NO_EXT:
            if synctool.param.TERSE:
                terse(synctool.lib.TERSE_ERROR,
                      ('no group on %s' % obj.src_path))
            else:
                warning('no group extension on %s, skipped' % obj.print_src())
            continue

        if obj.dest_path in duplicates:
            # there already was a more important source for this destination
            continue

        duplicates.add(obj.dest_path)

        ok, updated = callback(obj, pre_dict, post_dict)
        if not ok:
            # quick exit
            return False, dir_changed

        if obj.ov_type == OV_IGNORE:
            # OV_IGNORE may be set by templates that didn't finish
            continue

        if obj.ov_type == OV_TEMPLATE:
            # a new file was generated
            # call callback on the generated file
            obj.ov_type = OV_REG
            obj.make(src_dir, dest_dir)

            ok, updated = callback(obj, pre_dict, post_dict)
            if not ok:
                # quick exit
                return False, dir_changed

        dir_changed |= updated

    return True, dir_changed
Example #17
def setup_master(node_list, persist):
    '''setup master connections to all nodes in node_list
    node_list is a list of pairs: (addr, nodename)
    Argument 'persist' is the SSH ControlPersist parameter
    Returns True on success, False on error
    '''

    detect_ssh()
    if SSH_VERSION < 39:
        error('unsupported version of ssh')
        return False

    if persist == 'none':
        persist = None

    procs = []

    ssh_cmd_arr = shlex.split(synctool.param.SSH_CMD)
    ssh_cmd_arr.extend(['-M', '-N', '-n'])
    if SSH_VERSION >= 56 and persist is not None:
        ssh_cmd_arr.extend(['-o', 'ControlPersist=' + persist])

    verbose('spawning ssh master connections')
    errors = 0
    for addr, nodename in node_list:
        control_path = _make_control_path(nodename)
        if not control_path:
            # error message already printed
            return False

        # see if the control path already exists
        statbuf = synctool.syncstat.SyncStat(control_path)
        if statbuf.exists():
            if not statbuf.is_sock():
                warning('control path %s: not a socket file' % control_path)
                errors += 1
                continue

            if statbuf.uid != os.getuid():
                warning('control path: %s: incorrect owner uid %u' %
                        (control_path, statbuf.uid))
                errors += 1
                continue

            if statbuf.mode & 0o77 != 0:
                warning('control path %s: suspicious file mode %04o' %
                        (control_path, statbuf.mode & 0o777))
                errors += 1
                continue

            verbose('control path %s already exists' % control_path)
            continue

        # start ssh in master mode to create a new control path
        verbose('creating master control path to %s' % nodename)

        cmd_arr = ssh_cmd_arr[:]
        cmd_arr.extend(['-o', 'ControlPath=' + control_path, '--', addr])

        # start in background
        unix_out(' '.join(cmd_arr))
        try:
            proc = subprocess.Popen(cmd_arr, shell=False)
        except OSError as err:
            error('failed to execute %s: %s' % (cmd_arr[0], err.strerror))
            errors += 1
            continue

        procs.append(proc)

    # the spawned ssh masters keep running in the background
    return errors == 0
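
With hypothetical values filled in, the spawned master command resembles
the following; later ssh sessions that pass the same ControlPath option
reuse the established connection:

# ssh -M -N -n -o ControlPersist=1h \
#     -o ControlPath=/home/admin/.synctool/mux/node1 -- 10.0.0.1
#
# -M marks this client as the master, -N runs no remote command,
# -n redirects stdin from /dev/null
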
Example #18
def _split_extension(filename, src_dir):
    '''filename in the overlay tree, without leading path
    src_dir is passed for the purpose of printing error messages
    Returns tuple: SyncObject, importance
    '''

    (name, ext) = os.path.splitext(filename)
    if not ext:
        return SyncObject(filename, name, OV_NO_EXT), _group_all()

    if ext == '.pre':
        # it's a generic .pre script
        return SyncObject(filename, name, OV_PRE), _group_all()

    if ext == '.post':
        (name2, ext) = os.path.splitext(name)
        if ext == '._template':
            # it's a generic template generator
            return SyncObject(filename, name, OV_TEMPLATE_POST), _group_all()

        # it's a generic .post script
        return SyncObject(filename, name, OV_POST), _group_all()

    if ext[:2] != '._':
        return SyncObject(filename, filename, OV_NO_EXT), _group_all()

    ext = ext[2:]
    if not ext:
        return SyncObject(filename, filename, OV_NO_EXT), _group_all()

    if ext == 'template':
        return SyncObject(filename, name, OV_TEMPLATE), _group_all()

    try:
        importance = synctool.param.MY_GROUPS.index(ext)
    except ValueError:
        if ext not in synctool.param.ALL_GROUPS:
            src_path = os.path.join(src_dir, filename)
            if synctool.param.TERSE:
                terse(synctool.lib.TERSE_ERROR,
                      ('invalid group on %s' % src_path))
            else:
                warning('unknown group on %s, skipped' % prettypath(src_path))
            return None, -1

        # it is not one of my groups
        verbose('skipping %s, it is not one of my groups' %
                prettypath(os.path.join(src_dir, filename)))
        return None, -1

    (name2, ext) = os.path.splitext(name)

    if ext == '.pre':
        # register group-specific .pre script
        return SyncObject(filename, name2, OV_PRE), importance

    elif ext == '.post':
        _, ext = os.path.splitext(name2)
        if ext == '._template':
            # it's a group-specific template generator
            return (SyncObject(filename, name2, OV_TEMPLATE_POST), importance)

        # register group-specific .post script
        return SyncObject(filename, name2, OV_POST), importance

    elif ext == '._template':
        return SyncObject(filename, name2, OV_TEMPLATE), importance

    return SyncObject(filename, name), importance
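
Some hypothetical classifications, assuming MY_GROUPS = ['node1', 'web',
'all'] (a lower importance index is more important):

#   'hosts'                -> OV_NO_EXT,         importance of 'all'
#   'hosts._web'           -> regular entry,     importance 1
#   'hosts.pre'            -> OV_PRE (generic),  importance of 'all'
#   'hosts.post._web'      -> OV_POST for 'web', importance 1
#   'hosts._template'      -> OV_TEMPLATE,       importance of 'all'
#   'hosts._template.post' -> OV_TEMPLATE_POST,  importance of 'all'
#   'hosts._backup'        -> skipped when 'backup' is not one of my groups
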
Example #19
def _split_extension(filename, src_dir):
    '''filename in the overlay tree, without leading path
    src_dir is passed for the purpose of printing error messages
    Returns tuple: SyncObject, importance
    '''

    (name, ext) = os.path.splitext(filename)
    if not ext:
        return SyncObject(filename, name, OV_NO_EXT), _group_all()

    if ext == '.pre':
        # it's a generic .pre script
        return SyncObject(filename, name, OV_PRE), _group_all()

    if ext == '.post':
        (name2, ext) = os.path.splitext(name)
        if ext == '._template':
            # it's a generic template generator
            return SyncObject(filename, name, OV_TEMPLATE_POST), _group_all()

        # it's a generic .post script
        return SyncObject(filename, name, OV_POST), _group_all()

    if ext[:2] != '._':
        return SyncObject(filename, filename, OV_NO_EXT), _group_all()

    ext = ext[2:]
    if not ext:
        return SyncObject(filename, filename, OV_NO_EXT), _group_all()

    if ext == 'template':
        return SyncObject(filename, name, OV_TEMPLATE), _group_all()

    try:
        importance = synctool.param.MY_GROUPS.index(ext)
    except ValueError:
        if ext not in synctool.param.ALL_GROUPS:
            src_path = os.path.join(src_dir, filename)
            if synctool.param.TERSE:
                terse(synctool.lib.TERSE_ERROR, ('invalid group on %s' %
                                                 src_path))
            else:
                warning('unknown group on %s, skipped' % prettypath(src_path))
            return None, -1

        # it is not one of my groups
        verbose('skipping %s, it is not one of my groups' %
                prettypath(os.path.join(src_dir, filename)))
        return None, -1

    (name2, ext) = os.path.splitext(name)

    if ext == '.pre':
        # register group-specific .pre script
        return SyncObject(filename, name2, OV_PRE), importance

    elif ext == '.post':
        _, ext = os.path.splitext(name2)
        if ext == '._template':
            # it's a group-specific template generator
            return (SyncObject(filename, name2, OV_TEMPLATE_POST), importance)

        # register group-specific .post script
        return SyncObject(filename, name2, OV_POST), importance

    elif ext == '._template':
        return SyncObject(filename, name2, OV_TEMPLATE), importance

    return SyncObject(filename, name), importance
Example #20
def detect_installer():
    # type: () -> None
    '''Attempt to detect the operating system and package system'''

    # attempt a best effort at detecting OSes for the purpose of
    # choosing a package manager
    # It's probably not 100% fool-proof, but as said, it's a best effort
    #
    # Problems:
    # - there are too many platforms and too many Linux distros
    # - there are too many different packaging systems
    # - there are RedHat variants that all have /etc/redhat-release but
    #   use different package managers
    # - SuSE has three (!) package managers that are all in use
    #   and it seems to be by design (!?)
    # - I've seen apt-get work with dpkg, and/or with rpm
    # - OS X has no 'standard' software packaging (the App store??)
    #   There are ports, fink, brew. I prefer 'brew'
    # - FreeBSD has pkg and ports
    # - Most other BSDs have pkg_add and ports

    platform = os.uname()[0]

    if platform == 'Linux':
        verbose('detected platform Linux')

        stat = synctool.syncstat.SyncStat()

        # use release file to detect Linux distro,
        # and choose package manager based on that

        for (release_file, pkgmgr) in LINUX_PACKAGE_MANAGERS:
            stat.stat(release_file)
            if stat.exists():
                verbose('detected %s' % release_file)
                verbose('choosing package manager %s' % pkgmgr)
                synctool.param.PACKAGE_MANAGER = pkgmgr
                return

        warning('unknown Linux distribution')

    elif platform == 'Darwin':            # assume OS X
        verbose('detected platform OS X')
        # some people like port
        # some people like fink
        # I like homebrew
        verbose('choosing package manager brew')
        synctool.param.PACKAGE_MANAGER = 'brew'

    elif platform == 'FreeBSD':
        verbose('detected platform FreeBSD')
        synctool.param.PACKAGE_MANAGER = 'pkg'

    elif platform in ('NetBSD', 'OpenBSD'):
        verbose('detected platform %s' % platform)

        # choose bsdpkg
        # I know there are ports, but you can 'make' those easily in *BSD
        # or maybe ports will be a separate module in the future

        verbose('choosing package manager bsdpkg')
        synctool.param.PACKAGE_MANAGER = 'bsdpkg'

    # platforms that are not supported yet, but I would like to support
    # or well, most of them
    # Want to know more OSes? See the source of autoconf's config.guess

    elif platform in ('4.4BSD', '4.3bsd', 'BSD/OS', 'SunOS', 'AIX', 'OSF1',
                      'HP-UX', 'HI-UX', 'IRIX', 'UNICOS', 'UNICOS/mp',
                      'ConvexOS', 'Minix', 'Windows_95', 'Windows_NT',
                      'CYGWIN', 'MinGW', 'LynxOS', 'UNIX_System_V', 'BeOS',
                      'TOPS-10', 'TOPS-20'):
        verbose('detected platform %s' % platform)
        warning('synctool package management under %s is not yet supported' %
                platform)

    else:
        warning("unknown platform '%s'" % platform)