def __check_version(self):
    """Validate the requested upgrade target and refresh version-derived paths.

    Raises:
        exceptions.TiOPSArgumentError: target equals the running version.
        exceptions.TiOPSRuntimeError: target is older than the running
            version (downgrade is not supported).
    """
    target = self.arg_ver.lstrip('v')
    running = self.topology.version.lstrip('v')

    cmp_result = semver.compare(running, target)
    if cmp_result == 0:
        raise exceptions.TiOPSArgumentError(
            'Already running version {}.'.format(running))
    if cmp_result > 0:
        raise exceptions.TiOPSRuntimeError(
            'Downgrade is not supported, keep running {}.'.format(running),
            operation='upgrade')

    # update version and related variables
    self.old_ver = running
    self.new_ver = target
    self.topology.version = 'v{}'.format(target)
    self.topology.tiversion_dir = os.path.join(
        self.topology.tidown_dir, '{}'.format(self.topology.version))
    # the per-version download sub-directories all live under
    # downloads/<version>/<subdir>
    for attr, subdir in (('resource_dir', 'resources'),
                         ('dashboard_dir', 'dashboards'),
                         ('package_dir', 'packages'),
                         ('config_dir', 'configs')):
        setattr(
            self.topology, attr,
            utils.profile_path(
                'downloads', '{}/{}'.format(self.topology.version, subdir)))
def __common_args_bootstrap(self, bootstrap_parser):
    """Register the command line options shared by the bootstrap subcommands."""
    add_arg = bootstrap_parser.add_argument
    add_arg(
        '-H',
        '--host',
        dest='hosts',
        required=True,
        help='Initialized host list: file or ip list (example: 10.0.1.10,10.0.1.11,10.0.1.12)'
    )
    add_arg(
        '-s',
        '--ssh-port',
        dest='port',
        default=22,
        help='the port for ssh. (default: 22)')
    add_arg(
        '-f',
        '--forks',
        dest='forks',
        default=5,
        type=int,
        help='Concurrency number (default: 5)')
    add_arg(
        '--private-key',
        dest='private_key',
        default='{}/id_rsa'.format(utils.profile_path('.ssh')),
        help='Specify the private key, usually no need to specify and the default private key will be used'
    )
def _post(self, component=None, pattern=None, node=None, role=None):
    """Remove the cluster's profile directory after a destroy operation.

    Removal failures are logged as warnings (best effort) rather than raised.
    """
    try:
        cluster_dir = utils.profile_path(self.topology.cluster_dir)
        utils.remove_dir(cluster_dir)
    except Exception as err:
        logging.warning(err)
    term.notice('TiDB cluster destroyed.')
def _display_cluster(self):
    """Print the version and node status table of the selected cluster.

    Silently does nothing when the cluster directory or its meta.yaml is
    missing.
    """
    try:
        show_status = self.args.show_status
    except AttributeError:
        show_status = self.status

    cluster_home = utils.profile_path('clusters/{}'.format(
        self.args.cluster_name))
    # NOTE(review): cluster_home is passed through profile_path twice here;
    # presumably profile_path tolerates an already-resolved path — confirm
    meta_file = utils.profile_path(cluster_home, 'meta.yaml')

    # nothing to display unless both the cluster dir and its metadata exist
    if not (os.path.exists(cluster_home) and os.path.exists(meta_file)):
        return

    cluster_info = utils.read_yaml(meta_file)
    self.term.info('TiDB cluster {}, version {}\nNode list:'.format(
        self.args.cluster_name, cluster_info['tidb_version']))
    for section in self._format_cluster(show_status):
        for row in self.format_columns(section):
            self.term.normal(row)
def init_network(self, demo=False):
    """Set up password-less SSH from the control machine to all target hosts.

    Creates the deploy user remotely, installs the local public key into its
    authorized_keys, and grants it password-less sudo, all via ansible.
    """
    term.notice(
        'Start creating no-password ssh connections between the management machine and other machines'
    )
    self._check_ip_list()

    # use the password given on the command line, otherwise prompt for it
    password = self._args.password
    if not password:
        term.info(
            'Please enter the password of init user on deployment server ({} password)'
            .format(self.init_user))
        password = term.getpass()

    # one ansible runner, bound to the init user, drives every remote step
    runner = ansibleapi.ANSRunner(ips=self.hosts,
                                  user=self.init_user,
                                  tiargs=self._args,
                                  passwd=password)

    term.info('Create {} user on remote machines.'.format(self.user))
    runner.run_model('user',
                     'name=%s shell=/bin/bash createhome=yes' % (self.user),
                     become=True)

    term.info('Set authorized_keys for {} on cluster machine.'.format(
        self.user))
    runner.run_model('authorized_key',
                     'user=%s key={{ lookup("file", "%s/id_rsa.pub") }}' %
                     (self.user, utils.profile_path('.ssh')),
                     become=True)

    term.info('Add sudo permissions for {} on cluster machine.'.format(
        self.user))
    runner.run_model('lineinfile',
                     'path=/etc/sudoers '
                     'line="{} ALL=(ALL) NOPASSWD: ALL" '
                     'regexp="^{} .*" '
                     'insertafter=EOF '
                     'state=present'.format(self.user, self.user),
                     become=True)

    term.notice('Finished setting up SSH keys.' if demo else 'Done!!!')
def init(self, demo=False):
    """Prepare the management (control) machine.

    Ensures the profile-level .ssh directory exists with mode 0700 and that
    an RSA key pair (id_rsa / id_rsa.pub) is present, generating one with
    ssh-keygen when it is not.

    :param demo: when True, print the detailed finish notice instead of the
        generic 'Done!!!' message.
    """
    term.notice('Start init management machine.')
    key_home = utils.profile_path('.ssh')
    if not os.path.exists(key_home):
        utils.create_dir(key_home)
        # private-key directory must not be group/other accessible
        # (fixed: os.path.join(key_home) with a single arg was a no-op wrapper)
        os.chmod(key_home, 0o700)
    if not os.path.isfile(os.path.join(key_home, 'id_rsa')) or \
            not os.path.isfile(os.path.join(key_home, 'id_rsa.pub')):
        # fixed: dropped a dead `.format(getpass.getuser())` call — the
        # message has no placeholder, so the call had no effect
        term.info('There is not SSH key. Start generating.')
        # -N '' -> empty passphrase, -q -> quiet; key_home comes from the
        # profile path helper, not user input, so shell interpolation here
        # is not attacker-controlled
        os.system(
            '/usr/bin/ssh-keygen -t rsa -N \'\' -f {}/id_rsa -q'.format(
                key_home))
    else:
        # fixed: same dead `.format(...)` call removed here as well
        term.normal('Already have SSH key, skip create.')
    if demo:
        term.notice('Finished init management machine.')
    else:
        term.notice('Done!!!')
def _list_clusters(self):
    """List every cluster found under the profile directory with its version.

    Entries whose meta.yaml is missing are reported with a warning and
    skipped; metadata without a tidb_version field is shown as '-'.
    """
    profile_dir = utils.profile_path('clusters')
    _srv_list = [['Cluster', 'Version']]
    for _file in utils.list_dir(profile_dir):
        # skip non-cluster entries; a plain membership test replaces the
        # former raise-RuntimeError-as-control-flow pattern
        if any(x in _file for x in ('downloads', 'host_vars', 'tiops.log')):
            continue
        if not os.path.isdir(_file):
            continue
        _cluster_name = os.path.split(_file)[1]
        try:
            _meta = utils.read_yaml(
                os.path.join(profile_dir, _file, 'meta.yaml'))
        except EnvironmentError as e:
            import errno
            # only pass when the error is file not found
            if e.errno != errno.ENOENT:
                raise
            self.term.warn(
                'Metadata file of cluster {} not found, did the deploy process finished?'
                .format(_cluster_name))
            # skip this cluster
            continue
        # older metadata may predate the tidb_version field
        _version = _meta.get('tidb_version', '-')
        _srv_list.append([_cluster_name, _version])
    self.term.info('Available TiDB clusters:')
    for row in self.format_columns(_srv_list):
        self.term.normal(row)
def __common_args_cluster(self, tidb_parser):
    """Register the command line options shared by the cluster subcommands.

    The set of options added depends on which subcommand the parser
    belongs to (deploy, upgrade, display, scale-in, ...).
    """
    _subcmd = self.__get_last_subcmd(tidb_parser)
    # `display` can list all clusters, so the name is optional only there
    if _subcmd == 'display':
        tidb_parser.add_argument('-c',
                                 '--cluster-name',
                                 dest='cluster_name',
                                 required=False,
                                 help='Cluster name')
    else:
        tidb_parser.add_argument('-c',
                                 '--cluster-name',
                                 dest='cluster_name',
                                 required=True,
                                 help='Cluster name')
    tidb_parser.add_argument(
        '--private-key',
        dest='private_key',
        # bugfix: the path separator was missing ('{}id_rsa'), producing a
        # broken default like '<profile>/.sshid_rsa'; now consistent with
        # the bootstrap argument defaults
        default='{}/id_rsa'.format(utils.profile_path('.ssh')),
        help=
        'Specify the private key, usually no need to specify and the default private key will be used'
    )
    if _subcmd in ['deploy', 'upgrade', 'quickdeploy']:
        tidb_parser.add_argument(
            '-t', '--tidb-version',
            dest='tidb_version',
            default='3.0.9',
            help='TiDB cluster version (default: 3.0.9)')
        tidb_parser.add_argument(
            '--enable-check-config',
            dest='enable_check_config',
            default=False,
            action='store_true',
            help=
            'Check if the configuration of the tidb component is reasonable (default: disable)'
        )
    if _subcmd in ['deploy', 'upgrade', 'scale-out', 'quickdeploy']:
        tidb_parser.add_argument(
            '--local-pkg',
            dest='local_pkg',
            default=None,
            help=
            'Specify a local path to pre-download packages instead of download them during the process.'
        )
    if _subcmd not in ['edit-config', 'display']:
        tidb_parser.add_argument(
            '-f',
            '--forks',
            dest='forks',
            default=5,
            type=int,
            help='Concurrency when deploy TiDB cluster (default: 5)')
    if _subcmd in [
            'start', 'stop', 'restart', 'reload', 'upgrade', 'display',
            'scale-in'
    ]:
        tidb_parser.add_argument('-n',
                                 '--node-id',
                                 dest='node_id',
                                 default=None,
                                 help='specified node id')
    if _subcmd in [
            'start', 'stop', 'restart', 'reload', 'upgrade', 'display'
    ]:
        tidb_parser.add_argument(
            '-r',
            '--role',
            dest='role',
            default=None,
            help=
            'specified roles (items: ["pd", "tikv", "pump", "tidb", "drainer", "monitoring", "monitored", "grafana", "alertmanager"])'
        )
# NOTE(review): these `except` clauses belong to a `try:` that begins before
# this chunk — the matching try body is not visible here.
except TiOPSRuntimeError as e:
    # recoverable operational error: delegate formatting to tierror
    tierror(e)
except TiOPSException as e:
    # unrecoverable: dump the traceback at debug level, show the message,
    # and exit with a failure status
    term.debug(traceback.format_exc())
    term.fatal(str(e))
    sys.exit(1)


if __name__ == '__main__':
    # parse command line arguments via the project's argparse wrapper
    _parser = cmd.TiOPSParser()
    args = _parser()
    # add logging facilities, but outputs are not modified to use it yet
    if args.verbose:
        # verbose mode: DEBUG level with file/line/function in each record
        logging.basicConfig(
            filename=utils.profile_path("tiops.log"),
            format=
            '[%(asctime)s] [%(levelname)s] %(message)s (at %(filename)s:%(lineno)d in %(funcName)s).',
            datefmt='%Y-%m-%d %T %z',
            level=logging.DEBUG)
        logging.info("Using logging level: DEBUG.")
        logging.debug("Debug logging enabled.")
        logging.debug("Input arguments are: %s" % args)
    else:
        # default mode: INFO level with a compact record format
        logging.basicConfig(
            filename=utils.profile_path("tiops.log"),
            format='[%(asctime)s] [%(levelname)s] %(message)s.',
            datefmt='%Y-%m-%d %T %z',
            level=logging.INFO)
        logging.info("Using logging level: INFO.")
def __init__(self, args=None):
    """Capture parsed CLI arguments as instance attributes.

    Most attributes are optional on ``args``: when missing they are simply
    not set on the instance (``enable_checks`` defaults to False).

    :param args: parsed argparse namespace; must not be empty.
    :raises ValueError: when ``args`` is falsy.
    """
    if not args:
        logging.error("Argument can't be empty.")
        raise ValueError
    self._args = args

    # copy optional argparse attributes onto self when present;
    # (self_attr, args_attr) pairs cover the renamed ones
    for _attr, _src in (('version', 'version'),
                        ('port', 'port'),
                        ('cluster_name', 'cluster_name'),
                        ('init_user', 'user'),
                        ('user', 'deploy_user')):
        try:
            setattr(self, _attr, getattr(args, _src))
        except AttributeError:
            pass
    try:
        self.enable_checks = args.enable_checks
    except AttributeError:
        self.enable_checks = False

    # try to read hosts file from topology if the `-H` is not set
    # this doesn't effect manual `bootstrap-*` commands
    # as they don't have the `-T` argument
    if 'hosts' in args or 'topology' in args:
        try:
            _hosts_file = args.hosts
        except AttributeError:
            try:
                _hosts_file = args.topology
            except AttributeError:
                # bugfix: was assigned to a misspelled `_host_file`,
                # leaving `_hosts_file` unbound (NameError) below
                _hosts_file = None
        # bugfix: guard against None — os.path.isfile(None) raises
        # TypeError when `-H`/`-T` default to None
        if _hosts_file and os.path.isfile(_hosts_file):
            self.__read_host_list(_hosts_file)
        else:
            self.hosts = _hosts_file

    # remaining optional flags share the same name on args and self
    for _attr in ('enable_check_ntp', 'ntp_server', 'enable_swap',
                  'disable_irqbalance', 'timezone'):
        try:
            setattr(self, _attr, getattr(args, _attr))
        except AttributeError:
            pass

    # TIUP_COMPONENT_INSTALL_DIR is expected to be set by the tiup launcher
    self.script_path = '{}/tiops/check/'.format(
        os.environ['TIUP_COMPONENT_INSTALL_DIR'])
    self.check_dir = '/tmp/tidb_check'
    self.host_vars = utils.profile_path('host_vars')