def clear_lock(remote=None):
    '''
    Clear update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will have their update locks cleared.
    '''
    def _do_clear_lock(repo):
        # Remove the lockfile for a single repo and return a
        # (success_messages, error_messages) pair.
        def _add_error(errlist, repo, exc):
            msg = ('Unable to remove update lock for {0} ({1}): {2} '.format(
                repo['url'], repo['lockfile'], exc))
            log.debug(msg)
            errlist.append(msg)
        success = []
        failed = []
        if os.path.exists(repo['lockfile']):
            try:
                os.remove(repo['lockfile'])
            except OSError as exc:
                if exc.errno == errno.EISDIR:
                    # Somehow this path is a directory. Should never happen
                    # unless some wiseguy manually creates a directory at this
                    # path, but just in case, handle it.
                    try:
                        shutil.rmtree(repo['lockfile'])
                    except OSError as exc:
                        _add_error(failed, repo, exc)
                else:
                    _add_error(failed, repo, exc)
            else:
                msg = 'Removed lock for {0}'.format(repo['url'])
                log.debug(msg)
                success.append(msg)
        return success, failed

    if isinstance(remote, dict):
        return _do_clear_lock(remote)

    cleared = []
    errors = []
    for repo in init():
        if remote:
            # Match the remote URL as a glob pattern, consistent with lock()
            # and with the documented semantics. The previous substring check
            # ('remote not in repo["url"]') contradicted both.
            try:
                if not fnmatch.fnmatch(repo['url'], remote):
                    continue
            except TypeError:
                # remote was non-string, try again
                if not fnmatch.fnmatch(repo['url'], _text_type(remote)):
                    continue
        success, failed = _do_clear_lock(repo)
        cleared.extend(success)
        errors.extend(failed)
    return cleared, errors
def clear_lock(remote=None):
    '''
    Clear update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will have their update locks cleared.
    '''
    def _do_clear_lock(repo):
        # Remove a single repo's lockfile, collecting human-readable
        # success/failure messages.
        def _add_error(errlist, repo, exc):
            msg = ('Unable to remove update lock for {0} ({1}): {2} '
                   .format(repo['url'], repo['lockfile'], exc))
            log.debug(msg)
            errlist.append(msg)
        success = []
        failed = []
        if os.path.exists(repo['lockfile']):
            try:
                os.remove(repo['lockfile'])
            except OSError as exc:
                if exc.errno == errno.EISDIR:
                    # Somehow this path is a directory. Should never happen
                    # unless some wiseguy manually creates a directory at this
                    # path, but just in case, handle it.
                    try:
                        shutil.rmtree(repo['lockfile'])
                    except OSError as exc:
                        _add_error(failed, repo, exc)
                else:
                    _add_error(failed, repo, exc)
            else:
                msg = 'Removed lock for {0}'.format(repo['url'])
                log.debug(msg)
                success.append(msg)
        return success, failed

    if isinstance(remote, dict):
        return _do_clear_lock(remote)

    cleared = []
    errors = []
    for repo in init():
        if remote:
            # Glob-match the URL against the pattern, mirroring lock().
            # A bare substring test was used here before, which did not
            # implement the documented pattern semantics.
            try:
                if not fnmatch.fnmatch(repo['url'], remote):
                    continue
            except TypeError:
                # remote was non-string, try again
                if not fnmatch.fnmatch(repo['url'], _text_type(remote)):
                    continue
        success, failed = _do_clear_lock(repo)
        cleared.extend(success)
        errors.extend(failed)
    return cleared, errors
def lock(remote=None):
    '''
    Place an update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will be locked.
    '''
    def _do_lock(repo):
        # Create the repo's lockfile if absent; report what happened as
        # (success_messages, error_messages).
        success = []
        failed = []
        if os.path.exists(repo['lockfile']):
            # Lock already present; nothing to report.
            return success, failed
        try:
            with salt.utils.fopen(repo['lockfile'], 'w+') as fp_:
                fp_.write('')
        except (IOError, OSError) as exc:
            msg = ('Unable to set update lock for {0} ({1}): {2} '.format(
                repo['url'], repo['lockfile'], exc))
            log.debug(msg)
            failed.append(msg)
        else:
            msg = 'Set lock for {0}'.format(repo['url'])
            log.debug(msg)
            success.append(msg)
        return success, failed

    # Dict argument: lock exactly that repo.
    if isinstance(remote, dict):
        return _do_lock(remote)

    locked = []
    errors = []
    for repo_conf in init():
        if remote:
            try:
                matched = fnmatch.fnmatch(repo_conf['url'], remote)
            except TypeError:
                # remote was non-string; coerce and retry
                matched = fnmatch.fnmatch(repo_conf['url'],
                                          _text_type(remote))
            if not matched:
                continue
        ok_msgs, err_msgs = _do_lock(repo_conf)
        locked.extend(ok_msgs)
        errors.extend(err_msgs)
    return locked, errors
def lock(remote=None):
    '''
    Place an update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will be locked.
    '''
    def _place_lock(repo):
        # Write an empty lockfile for this repo unless one already exists.
        done, problems = [], []
        if not os.path.exists(repo['lockfile']):
            try:
                with salt.utils.fopen(repo['lockfile'], 'w+') as fp_:
                    fp_.write('')
            except (IOError, OSError) as exc:
                msg = ('Unable to set update lock for {0} ({1}): {2} '
                       .format(repo['url'], repo['lockfile'], exc))
                log.debug(msg)
                problems.append(msg)
            else:
                msg = 'Set lock for {0}'.format(repo['url'])
                log.debug(msg)
                done.append(msg)
        return done, problems

    if isinstance(remote, dict):
        # Explicit repo configuration supplied; lock it directly.
        return _place_lock(remote)

    locked = []
    errors = []
    for conf in init():
        if remote:
            try:
                is_match = fnmatch.fnmatch(conf['url'], remote)
            except TypeError:
                # remote was non-string; convert to text and retry
                is_match = fnmatch.fnmatch(conf['url'], _text_type(remote))
            if not is_match:
                continue
        new_locks, new_errors = _place_lock(conf)
        locked.extend(new_locks)
        errors.extend(new_errors)
    return locked, errors
def init():
    '''
    Return the list of svn remotes and their configuration information

    Each entry in the returned list is a dict carrying the per-remote
    configuration plus 'repo', 'url', 'hash', 'cachedir' and 'lockfile' keys.
    '''
    bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
    new_remote = False
    repos = []

    # Global svnfs_* options act as defaults for every remote.
    per_remote_defaults = {}
    for param in PER_REMOTE_PARAMS:
        per_remote_defaults[param] = \
            _text_type(__opts__['svnfs_{0}'.format(param)])

    for remote in __opts__['svnfs_remotes']:
        repo_conf = copy.deepcopy(per_remote_defaults)
        if isinstance(remote, dict):
            # Remote configured as {url: [{param: value}, ...]}
            repo_url = next(iter(remote))
            per_remote_conf = dict(
                [(key, _text_type(val)) for key, val in
                 salt.utils.repack_dictlist(remote[repo_url]).items()]
            )
            if not per_remote_conf:
                log.error(
                    'Invalid per-remote configuration for remote {0}. If no '
                    'per-remote parameters are being specified, there may be '
                    'a trailing colon after the URL, which should be removed. '
                    'Check the master configuration file.'.format(repo_url)
                )
                _failhard()

            per_remote_errors = False
            for param in (x for x in per_remote_conf
                          if x not in PER_REMOTE_PARAMS):
                log.error(
                    'Invalid configuration parameter {0!r} for remote {1}. '
                    'Valid parameters are: {2}. See the documentation for '
                    'further information.'.format(
                        param, repo_url, ', '.join(PER_REMOTE_PARAMS)
                    )
                )
                per_remote_errors = True
            if per_remote_errors:
                _failhard()

            repo_conf.update(per_remote_conf)
        else:
            repo_url = remote

        if not isinstance(repo_url, string_types):
            log.error(
                'Invalid svnfs remote {0}. Remotes must be strings, you may '
                'need to enclose the URL in quotes'.format(repo_url)
            )
            _failhard()

        try:
            repo_conf['mountpoint'] = salt.utils.strip_proto(
                repo_conf['mountpoint']
            )
        except TypeError:
            # mountpoint not specified
            pass

        # Cache directory name is a hash of the remote URL.
        hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
        repo_hash = hash_type(repo_url).hexdigest()
        rp_ = os.path.join(bp_, repo_hash)
        if not os.path.isdir(rp_):
            os.makedirs(rp_)

        if not os.listdir(rp_):
            # Only attempt a new checkout if the directory is empty.
            try:
                CLIENT.checkout(repo_url, rp_)
                # NOTE: do not append rp_ to repos here. The old code did,
                # which mixed bare path strings into a list of config dicts
                # and made the remote-map writer below blow up on
                # repo_conf['hash']. The repo_conf dict for this remote is
                # appended at the bottom of the loop.
                new_remote = True
            except pysvn._pysvn.ClientError as exc:
                log.error(
                    'Failed to initialize svnfs remote {0!r}: {1}'.format(
                        repo_url, exc
                    )
                )
                _failhard()
        else:
            # Confirm that there is an svn checkout at the necessary path by
            # running pysvn.Client().status()
            try:
                CLIENT.status(rp_)
            except pysvn._pysvn.ClientError as exc:
                log.error(
                    'Cache path {0} (corresponding remote: {1}) exists but is '
                    'not a valid subversion checkout. You will need to '
                    'manually delete this directory on the master to continue '
                    'to use this svnfs remote.'.format(rp_, repo_url)
                )
                _failhard()

        repo_conf.update({
            'repo': rp_,
            'url': repo_url,
            'hash': repo_hash,
            'cachedir': rp_,
            'lockfile': os.path.join(rp_, 'update.lk')
        })
        repos.append(repo_conf)

    if new_remote:
        # Persist a hash -> URL map so admins can identify cache dirs.
        remote_map = os.path.join(__opts__['cachedir'], 'svnfs/remote_map.txt')
        try:
            with salt.utils.fopen(remote_map, 'w+') as fp_:
                timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
                fp_.write('# svnfs_remote map as of {0}\n'.format(timestamp))
                for repo_conf in repos:
                    fp_.write('{0} = {1}\n'.format(repo_conf['hash'],
                                                   repo_conf['url']))
        except OSError:
            # Best-effort only; the map is informational.
            pass
        else:
            log.info('Wrote new svnfs_remote map to {0}'.format(remote_map))

    return repos
def init():
    '''
    Return the list of svn remotes and their configuration information

    Each list entry is a configuration dict with 'repo', 'url', 'hash',
    'cachedir' and 'lockfile' keys added.
    '''
    bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
    new_remote = False
    repos = []

    # Global svnfs_* options serve as per-remote defaults.
    per_remote_defaults = {}
    for param in PER_REMOTE_PARAMS:
        per_remote_defaults[param] = \
            _text_type(__opts__['svnfs_{0}'.format(param)])

    for remote in __opts__['svnfs_remotes']:
        repo_conf = copy.deepcopy(per_remote_defaults)
        if isinstance(remote, dict):
            repo_url = next(iter(remote))
            per_remote_conf = dict(
                [(key, _text_type(val)) for key, val in
                 salt.utils.repack_dictlist(remote[repo_url]).items()]
            )
            if not per_remote_conf:
                log.error(
                    'Invalid per-remote configuration for remote {0}. If no '
                    'per-remote parameters are being specified, there may be '
                    'a trailing colon after the URL, which should be removed. '
                    'Check the master configuration file.'.format(repo_url)
                )
                _failhard()

            per_remote_errors = False
            for param in (x for x in per_remote_conf
                          if x not in PER_REMOTE_PARAMS):
                log.error(
                    'Invalid configuration parameter {0!r} for remote {1}. '
                    'Valid parameters are: {2}. See the documentation for '
                    'further information.'.format(
                        param, repo_url, ', '.join(PER_REMOTE_PARAMS)
                    )
                )
                per_remote_errors = True
            if per_remote_errors:
                _failhard()

            repo_conf.update(per_remote_conf)
        else:
            repo_url = remote

        if not isinstance(repo_url, string_types):
            log.error(
                'Invalid svnfs remote {0}. Remotes must be strings, you may '
                'need to enclose the URL in quotes'.format(repo_url)
            )
            _failhard()

        try:
            repo_conf['mountpoint'] = salt.utils.strip_proto(
                repo_conf['mountpoint']
            )
        except TypeError:
            # mountpoint not specified
            pass

        # The cache dir for this remote is named after a hash of its URL.
        hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
        repo_hash = hash_type(repo_url).hexdigest()
        rp_ = os.path.join(bp_, repo_hash)
        if not os.path.isdir(rp_):
            os.makedirs(rp_)

        if not os.listdir(rp_):
            # Only attempt a new checkout if the directory is empty.
            try:
                CLIENT.checkout(repo_url, rp_)
                # Fixed: the previous 'repos.append(rp_)' here inserted a raw
                # path string into repos; the remote-map loop below indexes
                # entries with ['hash'] and would raise TypeError. The proper
                # repo_conf dict is appended after this block.
                new_remote = True
            except pysvn._pysvn.ClientError as exc:
                log.error(
                    'Failed to initialize svnfs remote {0!r}: {1}'
                    .format(repo_url, exc)
                )
                _failhard()
        else:
            # Confirm that there is an svn checkout at the necessary path by
            # running pysvn.Client().status()
            try:
                CLIENT.status(rp_)
            except pysvn._pysvn.ClientError as exc:
                log.error(
                    'Cache path {0} (corresponding remote: {1}) exists but is '
                    'not a valid subversion checkout. You will need to '
                    'manually delete this directory on the master to continue '
                    'to use this svnfs remote.'.format(rp_, repo_url)
                )
                _failhard()

        repo_conf.update({
            'repo': rp_,
            'url': repo_url,
            'hash': repo_hash,
            'cachedir': rp_,
            'lockfile': os.path.join(rp_, 'update.lk')
        })
        repos.append(repo_conf)

    if new_remote:
        # Record which hash corresponds to which remote for admins.
        remote_map = os.path.join(__opts__['cachedir'], 'svnfs/remote_map.txt')
        try:
            with salt.utils.fopen(remote_map, 'w+') as fp_:
                timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
                fp_.write('# svnfs_remote map as of {0}\n'.format(timestamp))
                for repo_conf in repos:
                    fp_.write(
                        '{0} = {1}\n'.format(
                            repo_conf['hash'], repo_conf['url']
                        )
                    )
        except OSError:
            # Map writing is best-effort.
            pass
        else:
            log.info('Wrote new svnfs_remote map to {0}'.format(remote_map))

    return repos
def init():
    '''
    Return a list of hglib objects for the various hgfs remotes
    '''
    # Base cache path for all hgfs remotes.
    bp_ = os.path.join(__opts__['cachedir'], 'hgfs')
    new_remote = False
    repos = []
    # Seed per-remote defaults from the global hgfs_* master options.
    per_remote_defaults = {}
    for param in PER_REMOTE_PARAMS:
        per_remote_defaults[param] = \
            _text_type(__opts__['hgfs_{0}'.format(param)])

    for remote in __opts__['hgfs_remotes']:
        repo_conf = copy.deepcopy(per_remote_defaults)
        if isinstance(remote, dict):
            # Remote given as {url: [{param: value}, ...]}; unpack the
            # per-remote overrides.
            repo_url = next(iter(remote))
            per_remote_conf = dict(
                [(key, _text_type(val)) for key, val in
                 salt.utils.repack_dictlist(remote[repo_url]).items()]
            )
            if not per_remote_conf:
                log.error(
                    'Invalid per-remote configuration for hgfs remote {0}. If '
                    'no per-remote parameters are being specified, there may '
                    'be a trailing colon after the URL, which should be '
                    'removed. Check the master configuration file.'
                    .format(repo_url)
                )
                _failhard()

            # Validate branch_method (override or default) before use.
            branch_method = \
                per_remote_conf.get('branch_method',
                                    per_remote_defaults['branch_method'])
            if branch_method not in VALID_BRANCH_METHODS:
                log.error(
                    'Invalid branch_method {0!r} for remote {1}. Valid '
                    'branch methods are: {2}. This remote will be ignored.'
                    .format(branch_method, repo_url,
                            ', '.join(VALID_BRANCH_METHODS))
                )
                _failhard()

            # Reject unknown per-remote parameter names; report all of them
            # before bailing out.
            per_remote_errors = False
            for param in (x for x in per_remote_conf
                          if x not in PER_REMOTE_PARAMS):
                log.error(
                    'Invalid configuration parameter {0!r} for remote {1}. '
                    'Valid parameters are: {2}. See the documentation for '
                    'further information.'.format(
                        param, repo_url, ', '.join(PER_REMOTE_PARAMS)
                    )
                )
                per_remote_errors = True
            if per_remote_errors:
                _failhard()

            repo_conf.update(per_remote_conf)
        else:
            repo_url = remote

        if not isinstance(repo_url, string_types):
            log.error(
                'Invalid hgfs remote {0}. Remotes must be strings, you may '
                'need to enclose the URL in quotes'.format(repo_url)
            )
            _failhard()

        try:
            repo_conf['mountpoint'] = salt.utils.strip_proto(
                repo_conf['mountpoint']
            )
        except TypeError:
            # mountpoint not specified
            pass

        # Cache directory is named after the hash of the remote URL.
        hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
        repo_hash = hash_type(repo_url).hexdigest()
        rp_ = os.path.join(bp_, repo_hash)
        if not os.path.isdir(rp_):
            os.makedirs(rp_)
        if not os.listdir(rp_):
            # Only init if the directory is empty.
            hglib.init(rp_)
            new_remote = True
        try:
            repo = hglib.open(rp_)
        except hglib.error.ServerError:
            log.error(
                'Cache path {0} (corresponding remote: {1}) exists but is not '
                'a valid mercurial repository. You will need to manually '
                'delete this directory on the master to continue to use this '
                'hgfs remote.'.format(rp_, repo_url)
            )
            _failhard()
        except Exception as exc:
            log.error(
                'Exception \'{0}\' encountered while initializing hgfs remote '
                '{1}'.format(exc, repo_url)
            )
            _failhard()

        try:
            refs = repo.config(names='paths')
        except hglib.error.CommandError:
            refs = None

        # Do NOT put this if statement inside the except block above. Earlier
        # versions of hglib did not raise an exception, so we need to do it
        # this way to support both older and newer hglib.
        if not refs:
            # Write an hgrc defining the remote URL
            hgconfpath = os.path.join(rp_, '.hg', 'hgrc')
            with salt.utils.fopen(hgconfpath, 'w+') as hgconfig:
                hgconfig.write('[paths]\n')
                hgconfig.write('default = {0}\n'.format(repo_url))

        repo_conf.update({
            'repo': repo,
            'url': repo_url,
            'hash': repo_hash,
            'cachedir': rp_,
            'lockfile': os.path.join(__opts__['cachedir'],
                                     'hgfs',
                                     '{0}.update.lk'.format(repo_hash))
        })
        repos.append(repo_conf)
        # Close the client connection; it is reopened on demand later.
        repo.close()

    if new_remote:
        # Persist a hash -> URL map so cache dirs can be identified.
        remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt')
        try:
            with salt.utils.fopen(remote_map, 'w+') as fp_:
                timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
                fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp))
                for repo in repos:
                    fp_.write('{0} = {1}\n'.format(repo['hash'], repo['url']))
        except OSError:
            # Best-effort; failure to write the map is non-fatal.
            pass
        else:
            log.info('Wrote new hgfs_remote map to {0}'.format(remote_map))
    return repos
def init():
    '''
    Return a list of hglib objects for the various hgfs remotes
    '''
    bp_ = os.path.join(__opts__['cachedir'], 'hgfs')
    new_remote = False
    repos = []
    # Global hgfs_* options are the defaults for every remote.
    per_remote_defaults = {}
    for param in PER_REMOTE_PARAMS:
        per_remote_defaults[param] = \
            _text_type(__opts__['hgfs_{0}'.format(param)])

    for remote in __opts__['hgfs_remotes']:
        repo_conf = copy.deepcopy(per_remote_defaults)
        if isinstance(remote, dict):
            repo_url = next(iter(remote))
            per_remote_conf = dict([(key, _text_type(val)) for key, val in
                                    salt.utils.repack_dictlist(
                                        remote[repo_url]).items()])
            if not per_remote_conf:
                # NOTE(review): this only logs and then proceeds with the
                # defaults; unlike other error paths it does not skip the
                # remote. Preserved as-is — confirm whether a 'continue' was
                # intended here.
                log.error(
                    'Invalid per-remote configuration for remote {0}. If no '
                    'per-remote parameters are being specified, there may be '
                    'a trailing colon after the URI, which should be removed. '
                    'Check the master configuration file.'.format(repo_url))

            branch_method = \
                per_remote_conf.get('branch_method',
                                    per_remote_defaults['branch_method'])
            if branch_method not in VALID_BRANCH_METHODS:
                log.error(
                    'Invalid branch_method {0!r} for remote {1}. Valid '
                    'branch methods are: {2}. This remote will be ignored.'.
                    format(branch_method, repo_url,
                           ', '.join(VALID_BRANCH_METHODS)))
                continue

            # Materialize the invalid keys into a list before popping:
            # removing entries while a generator is still iterating the dict
            # raises "dictionary changed size during iteration".
            for param in [x for x in per_remote_conf
                          if x not in PER_REMOTE_PARAMS]:
                log.error(
                    'Invalid configuration parameter {0!r} for remote {1}. '
                    'Valid parameters are: {2}. See the documentation for '
                    'further information.'.format(
                        param, repo_url, ', '.join(PER_REMOTE_PARAMS)))
                per_remote_conf.pop(param)
            repo_conf.update(per_remote_conf)
        else:
            repo_url = remote

        if not isinstance(repo_url, string_types):
            # Fixed copy-paste typo: this message previously said 'gitfs'
            # even though this is the hgfs backend.
            log.error(
                'Invalid hgfs remote {0}. Remotes must be strings, you may '
                'need to enclose the URI in quotes'.format(repo_url))
            continue

        try:
            repo_conf['mountpoint'] = salt.utils.strip_proto(
                repo_conf['mountpoint'])
        except TypeError:
            # mountpoint not specified
            pass

        # Cache dir for this remote is keyed by a hash of its URI.
        hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
        repo_hash = hash_type(repo_url).hexdigest()
        rp_ = os.path.join(bp_, repo_hash)
        if not os.path.isdir(rp_):
            os.makedirs(rp_)
        if not os.listdir(rp_):
            # Only init if the directory is empty.
            hglib.init(rp_)
            new_remote = True
        try:
            repo = hglib.open(rp_)
        except hglib.error.ServerError:
            log.error(
                'Cache path {0} (corresponding remote: {1}) exists but is not '
                'a valid mercurial repository. You will need to manually '
                'delete this directory on the master to continue to use this '
                'hgfs remote.'.format(rp_, repo_url))
            continue

        refs = repo.config(names='paths')
        if not refs:
            # Write an hgrc defining the remote URI
            hgconfpath = os.path.join(rp_, '.hg', 'hgrc')
            with salt.utils.fopen(hgconfpath, 'w+') as hgconfig:
                hgconfig.write('[paths]\n')
                hgconfig.write('default = {0}\n'.format(repo_url))

        repo_conf.update({
            'repo': repo,
            'url': repo_url,
            'hash': repo_hash,
            'cachedir': rp_
        })
        repos.append(repo_conf)
        repo.close()

    if new_remote:
        # Write a hash -> URI map so admins can identify cache dirs.
        remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt')
        try:
            with salt.utils.fopen(remote_map, 'w+') as fp_:
                timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
                fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp))
                for repo in repos:
                    fp_.write('{0} = {1}\n'.format(repo['hash'], repo['url']))
        except OSError:
            # Best-effort; failure to write the map is non-fatal.
            pass
        else:
            log.info('Wrote new hgfs_remote map to {0}'.format(remote_map))
    return repos