def __init__(self, defaults):
    """Seed location state from *defaults* (distbase, distdefault,
    aliases, servers) and start with an empty location list."""
    # Fresh working state first.
    self.locations = []
    self.scp = SCP()
    # Then copy the configured defaults.
    self.distbase = defaults.distbase
    self.distdefault = defaults.distdefault
    self.aliases = defaults.aliases
    self.servers = defaults.servers
def main(options):
    """Load the node configuration from ``options.conf``, build the SCP
    consensus node, and start the application plus its base server.

    ``options.conf`` must point at an ini-style file with a ``[node]``
    section (``name``, ``port``, ``threshold_percent``,
    ``validator_list``) and an optional ``[faulty]`` section
    (``faulty_percent``).
    """
    # Defaults; each configured value below overrides the matching field.
    config = collections.namedtuple(
        'Config',
        ('node_name', 'port', 'threshold', 'validators', 'faulty_percent'),
    )(uuid.uuid1().hex, 8001, 51, [], 0)

    conf_path = pathlib.Path(options.conf)  # build once, check twice
    if not conf_path.exists():
        # NOTE(review): `parser` is assumed to be the module-level argparse
        # parser — confirm it is defined at module scope.
        parser.error('conf file, `%s` does not exists.' % options.conf)
    if not conf_path.is_file():
        parser.error('conf file, `%s` is not valid file.' % options.conf)

    conf = configparser.ConfigParser()
    conf.read(options.conf)
    log.info('conf file, `%s` was loaded', options.conf)

    # One _replace call for the mandatory [node] values.
    config = config._replace(
        node_name=conf['node']['name'],
        port=int(conf['node']['port']),
        threshold=int(conf['node']['threshold_percent']),
    )
    if conf.has_option('faulty', 'faulty_percent'):
        config = config._replace(
            faulty_percent=int(conf['faulty']['faulty_percent']))
    log.debug('loaded conf: %s', config)

    # Parse the comma-separated validator list, skipping blank entries.
    validator_list = [
        Endpoint.from_uri(uri.strip())
        for uri in conf['node']['validator_list'].split(',')
        if uri.strip()
    ]
    config = config._replace(validators=validator_list)
    # BUG FIX: was `log.debug('Validators: %s' % ...)` — eager string
    # formatting; use lazy %-args like every other log call here.
    log.debug('Validators: %s', config.validators)

    node = node_factory(
        config.node_name,
        Endpoint(NETWORK_MODULE.SCHEME, get_local_ipaddress(), config.port),
        config.faulty_percent)

    transport = NETWORK_MODULE.Transport(bind=('0.0.0.0', config.port))

    consensus = SCP(
        node,
        config.threshold,
        tuple(Node(v.extras['name'], v) for v in config.validators),
        transport,
    )
    log.metric(node=node.name, data=node.to_dict())

    application = Application(consensus, transport)
    application.start()

    base_server = BaseServer(application)
    base_server.start()

    return
class URLParser(object):
    """A minimal URL parser and splitter."""

    scheme_re = re.compile(r'^(\S+?)://|^(file):')
    git_ssh_re = re.compile(r'^(\S+?):(.*)')

    def __init__(self):
        # SCP helper used to detect host parts in scp-style URLs.
        self.scp = SCP()

    def get_scheme(self, url):
        """Return the scheme of *url*, or '' when it has none."""
        match = self.scheme_re.match(url)
        if match is None:
            return ''
        return match.group(1) or match.group(2)

    def is_url(self, url):
        """True when *url* carries a recognizable scheme."""
        return bool(self.get_scheme(url))

    def is_git_ssh_url(self, url):
        """True when *url* is an scp-style git URL (``host:path``)."""
        if self.is_url(url):
            return False
        if self.git_ssh_re.match(url) is None:
            return False
        return self.scp.has_host(url)

    def abspath(self, url):
        """Absolutize the path of a local ``file:`` URL.

        Non-file URLs are returned unchanged; remote file URLs are
        reassembled without path expansion.
        """
        scheme = self.get_scheme(url)
        if scheme != 'file':
            return url
        ignored, user, host, path, qs, frag = self.split(url)
        if host in ('', 'localhost'):
            # Strip leading slash to allow tilde expansion
            if host and path.startswith('/~'):
                path = path[1:]
            path = abspath(expanduser(path))
        netloc = self._hostunsplit(user, host)
        return urlunsplit((scheme, netloc, path, qs, frag))

    def split(self, url):
        """Split *url* into (scheme, user, host, path, qs, frag).

        URLs without a scheme come back with everything in ``path``.
        """
        scheme = self.get_scheme(url)
        if not scheme:
            return '', '', '', url, '', ''
        ignored, netloc, path, qs, frag = urlsplit(url)
        user, host = self._hostsplit(netloc)
        return scheme, user, host, path, qs, frag

    def _hostsplit(self, host):
        """Split ``user@host`` into (user, host); user is '' when absent."""
        if '@' not in host:
            return '', host
        return host.split('@', 1)

    def _hostunsplit(self, user, host):
        """Rejoin (user, host) into ``user@host`` form."""
        return '%s@%s' % (user, host) if user else host
def download_file(self):
    """Download every file listed in ./conf/file_for_download.txt from the
    hosts configured via HostConfigParser, after backing up and clearing
    the previous download, logging results to ./result/*.txt.

    NOTE(review): the success/failure log files are opened without a
    ``with`` block and are only closed on the happy path — an exception
    mid-loop leaks both handles; confirm whether that is acceptable.
    """
    # Ensure the target and backup directories exist.
    self.other_tools.mkdirs_once_many(self.dirpath_for_storage)
    self.other_tools.mkdirs_once_many(self.dirpath_for_storage_bak)
    print('下载文件的目标存储路径为:%s' % self.dirpath_for_storage)
    print('已下载文件的备份存储路径为:%s' % self.dirpath_for_storage_bak)
    # Clear the backup directory of files from the run before last.
    print('清空备份目录下,上次下载的文件')
    if not self.other_tools.delete_file(self.dirpath_for_storage_bak)[0]:
        print('清空备份目录下的文件失败')
        exit()
    # Back up the previous download before clearing the target directory.
    print('正在备份上次下载的文件')
    if not self.other_tools.copy_dir_or_file(self.dirpath_for_storage, self.dirpath_for_storage_bak)[0]:
        print('备份上次下载文件操作失败')
        exit()
    # Clear the target storage directory.
    print('清空目标存储路径下,上次下载的文件')
    if not self.other_tools.delete_file(self.dirpath_for_storage)[0]:
        print('清空目标存储路径下,上次下载的文件')
        exit()
    try:
        hostConfig = HostConfigParser().get_host_config()  # load host configuration
    except Exception as e:
        print('获取主机配置信息失败:%s,提前结束运行\n,烦检查配置文件host_cofig.conf是否配置正确\n' % e)
        exit()
    host_list = hostConfig.sections()  # host sections present in the config
    # Log files recording which downloads succeeded / failed.
    file_for_download_success = open('./result/result_for_success.txt', 'w', encoding='utf-8')
    file_for_download_failure = open('./result/result_for_failure.txt', 'w', encoding='utf-8')
    download_tool = SCP()  # the SCP-based download helper
    print('正在读取file_for_download.txt配置信息')
    encoding = self.other_tools.get_file_encoding('./conf/file_for_download.txt')
    with open('./conf/file_for_download.txt', 'r', encoding=encoding) as file:
        host, port, username, password = '', '', '', ''
        remark = ''  # problems accumulated for the current entry
        remark_for_host = '主机配置信息不存在'  # problem text for the current host
        is_hostconfig_exists = False  # whether the current host has a config section
        line = file.readline()
        while line:
            line = line.rstrip('\n')
            line = line.rstrip('\t')
            line = line.strip()
            if line.startswith('#'):
                # Comment line: skip it.
                line = file.readline()
                continue
            elif re.findall('[[\d]+\.[\d]+\.[\d]+\.[^\D]+]$', line):
                # Host header line, e.g. [192.168.1.21].
                host = line.lstrip('[')
                host = host.rstrip(']')
                print('当前解析行为主机信息,host:%s,正在获取主机信息:' % host)
                if host in host_list:
                    port, username, password = hostConfig[host]['port'], hostConfig[host]['username'], hostConfig[host]['password']
                    print('端口:%s' % port)
                    print('username:%s' % username)
                    print('password:%s\n' % password)
                    is_hostconfig_exists = True
                    remark_for_host = ''
                else:
                    # Host not configured: remember that for subsequent file lines.
                    is_hostconfig_exists = False
                    print('主机信息配置不存在\n')
                    remark_for_host = host + '主机信息配置不存在'
                    host, port, username, password = '', '', '', ''
            elif line:
                # Non-empty, non-comment, non-host line: a file to download.
                remark = remark + remark_for_host
                # Normalize the remote path: trim whitespace and slashes,
                # force a single leading '/', use forward slashes only.
                target_for_download = line.strip(' ')
                target_for_download = target_for_download.strip('\t')
                target_for_download = target_for_download.rstrip('\\')
                target_for_download = target_for_download.strip('/')
                target_for_download = '/' + target_for_download
                target_for_download = target_for_download.replace('\\', '/')
                # NOTE(review): the empty field between the two '|' looks
                # like a missing `port` value — confirm the intended format.
                line = target_for_download + '|' + host + '|' + '|' + username + '|' + password
                run_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())  # execution timestamp
                if is_hostconfig_exists:
                    temp = target_for_download.split('/')
                    temp = temp[len(temp) - 1]  # basename of the remote path
                    # Only download files that were registered up front.
                    if temp in self.targets_for_download:
                        print('正在下载目标文件(%s)到本地目录 %s' % (target_for_download, self.dirpath_for_storage))
                        result = download_tool.download_file(port, password, target_for_download, username, host, self.dirpath_for_storage)
                        if result[0]:
                            print('下载文件成功,正在记录日志到文件\n')
                            file_for_download_success.write('操作类型:下载文件\n')
                            file_for_download_success.write('执行时间:%s\n' % run_time)
                            file_for_download_success.write('执行信息:\n' + line)
                            file_for_download_success.write('\n-----------------------华丽分割线-----------------------\n\n')
                            file_for_download_success.flush()
                        else:
                            # First attempt failed: retry once before logging failure.
                            print('下载文件失败,正在进行第二次尝试\n')
                            result = download_tool.download_file(port, password, target_for_download, username, host, self.dirpath_for_storage)
                            if result[0]:
                                print('下载文件成功,正在记录日志到文件\n')
                                file_for_download_success.write('操作类型:下载文件\n')
                                file_for_download_success.write('执行时间:%s\n' % run_time)
                                file_for_download_success.write('执行信息:\n' + line)
                                file_for_download_success.write('\n-----------------------华丽分割线-----------------------\n\n')
                                file_for_download_success.flush()
                            else:
                                print('下载文件失败,正在记录日志到文件\n')
                                file_for_download_failure.write('操作类型:下载文件\n')
                                file_for_download_failure.write('执行时间:%s\n' % run_time)
                                file_for_download_failure.write('执行信息:\n' + line)
                                file_for_download_failure.write('\n失败原因:' + result[1] + '\n')
                                file_for_download_failure.write('-----------------------华丽分割线-----------------------\n\n')
                                file_for_download_failure.flush()
                else:
                    # No host configuration for this entry: record as failed.
                    print('配置信息有误,正在记录日志到文件\n')
                    file_for_download_failure.write('操作类型:下载文件\n')
                    file_for_download_failure.write('执行时间:%s\n' % run_time)
                    file_for_download_failure.write('执行信息:\n' + line)
                    file_for_download_failure.write('\n失败原因:' + remark.lstrip('&') + '\n')
                    file_for_download_failure.write('-----------------------华丽分割线-----------------------\n\n')
                    file_for_download_failure.flush()
            line = file.readline()
            remark = ''
        file_for_download_failure.close()
        file_for_download_success.close()
        print('--------------------------下载完毕--------------------------')
class Locations(object):
    """Maintain and resolve scp upload locations.

    Aliases are expanded recursively (bounded by MAXALIASDEPTH) and bare
    locations without a host part are joined onto ``distbase``.
    """

    def __init__(self, defaults):
        self.distbase = defaults.distbase
        self.distdefault = defaults.distdefault
        self.aliases = defaults.aliases
        self.servers = defaults.servers
        self.locations = []
        self.scp = SCP()

    def __len__(self):
        """Number of locations currently held."""
        return len(self.locations)

    def __iter__(self):
        """Iterate over the held locations."""
        return iter(self.locations)

    def extend(self, location):
        """Append the given locations to the list."""
        self.locations.extend(location)

    def is_server(self, location):
        """Whether *location* names a configured index server."""
        return location in self.servers

    def has_host(self, location):
        """Whether *location* contains a host part."""
        return self.scp.has_host(location)

    def join(self, distbase, location):
        """Join *distbase* and *location* into a valid scp destination."""
        return self.scp.join(distbase, location)

    def get_location(self, location, depth=0):
        """Resolve aliases and apply distbase; return a list of locations."""
        if not location:
            return []
        if location in self.aliases:
            # Expand each aliased entry recursively, guarding against cycles.
            if depth > MAXALIASDEPTH:
                err_exit('Maximum alias depth exceeded: %(location)s' % locals())
            resolved = []
            for alias in self.aliases[location]:
                resolved.extend(self.get_location(alias, depth+1))
            return resolved
        if self.is_server(location):
            return [location]
        if location == 'pypi':
            err_exit('No configuration found for server: pypi\n'
                     'Please create a ~/.pypirc file')
        if not self.has_host(location) and self.distbase:
            return [self.join(self.distbase, location)]
        return [location]

    def get_default_location(self):
        """Resolve and return the default location."""
        return self.get_location(self.distdefault)

    def check_valid_locations(self, locations=None):
        """Fail if *locations* is empty or contains bad scp destinations."""
        if locations is None:
            locations = self.locations
        if not locations:
            err_exit('mkrelease: option -d is required\n%s' % USAGE)
        for location in locations:
            if self.is_server(location) or self.has_host(location):
                continue
            err_exit('Scp destination must contain a host part: %(location)s' % locals())
def __init__(self):
    """Initialize with a fresh SCP helper instance."""
    self.scp = SCP()
def main():  # pylint: disable=R0915
    """Bootstrap the AWS Deployment Framework.

    Applies SCPs, (re)creates the bootstrap CloudFormation stacks on the
    master account and on the deployment account in every target region,
    bootstraps all remaining accounts in parallel threads, then executes
    the deployment Step Function.
    """
    LOGGER.info("ADF Version %s", ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    scp = SCP()
    config = Config()
    config.store_config()

    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        # Raises ParameterNotFoundError on a first run — handled below.
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id')
        organizations = Organizations(role=boto3, account_id=deployment_account_id)
        # Apply Service Control Policies across the organization.
        scp.apply(organizations, parameter_store, config.config)

        sts = STS()
        deployment_account_role = prepare_deployment_account(
            sts=sts,
            deployment_account_id=deployment_account_id,
            config=config)

        cache = Cache()
        ou_id = organizations.get_parent_info().get("ou_parent_id")
        account_path = organizations.build_account_path(
            ou_id=ou_id,
            account_path=[],
            cache=cache)
        s3 = S3(region=REGION_DEFAULT, bucket=S3_BUCKET_NAME)

        # Updating the stack on the master account in deployment region
        cloudformation = CloudFormation(
            region=config.deployment_account_region,
            deployment_account_region=config.deployment_account_region,
            role=boto3,
            wait=True,
            stack_name=None,
            s3=s3,
            s3_key_path='adf-build',
            account_id=ACCOUNT_ID)
        cloudformation.create_stack()

        # Populated per-region by update_deployment_account_output_parameters.
        kms_dict = {}

        # First Setup/Update the Deployment Account in all regions (KMS Key
        # and S3 Bucket + Parameter Store values)
        for region in list(
                set([config.deployment_account_region] + config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path,
                account_id=deployment_account_id)
            cloudformation.create_stack()

            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                kms_dict=kms_dict,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation)

        # Bootstrap every non-deployment account in parallel worker threads.
        threads = []
        account_ids = organizations.get_account_ids()
        for account_id in [
                account for account in account_ids
                if account != deployment_account_id
        ]:
            thread = PropagatingThread(target=worker_thread, args=(
                account_id,
                sts,
                config,
                s3,
                cache,
                kms_dict))
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()

        LOGGER.info("Executing Step Function on Deployment Account")
        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            account_ids=account_ids,
            update_pipelines_only=0)
        step_functions.execute_statemachine()
    except ParameterNotFoundError:
        # No deployment_account_id parameter yet: deployment account has not
        # been moved into the deployment OU, so there is nothing to do.
        LOGGER.info(
            'You are now ready to bootstrap a deployment account '
            'by moving it into your deployment OU. '
            'Once you have moved it into the deployment OU, '
            'be sure to check out its progress in AWS Step Functions')
        return
def main():
    """Bootstrap the AWS Deployment Framework.

    Applies SCPs, (re)creates the bootstrap CloudFormation stacks on the
    master account and on the deployment account in every target region,
    bootstraps all remaining accounts in parallel threads, then executes
    the deployment Step Function.
    """
    scp = SCP()
    config = Config()
    config.store_config()
    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        # Raises ParameterNotFoundError on a first run — handled below.
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id')
        organizations = Organizations(role=boto3, account_id=deployment_account_id)
        # Apply Service Control Policies across the organization.
        scp.apply(organizations, parameter_store, config.config)
        sts = STS()
        deployment_account_role = prepare_deployment_account(
            sts=sts,
            deployment_account_id=deployment_account_id,
            config=config)
        cache = Cache()
        ou_id = organizations.get_parent_info().get("ou_parent_id")
        account_path = organizations.build_account_path(
            ou_id=ou_id,
            account_path=[],
            cache=cache)
        s3 = S3(region=REGION_DEFAULT, bucket=S3_BUCKET_NAME)
        # Updating the stack on the master account in deployment region
        cloudformation = CloudFormation(
            region=config.deployment_account_region,
            deployment_account_region=config.deployment_account_region,  # pylint: disable=R0801
            role=boto3,
            wait=True,
            stack_name=None,
            s3=s3,
            s3_key_path='adf-build')
        cloudformation.create_stack()
        # First Setup the Deployment Account in all regions (KMS Key and S3 Bucket + Parameter Store values)
        for region in list(
                set([config.deployment_account_region] + config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path)
            cloudformation.create_stack()
            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation)
        # Bootstrap every non-deployment account in parallel worker threads.
        threads = []
        account_ids = organizations.get_account_ids()
        for account_id in [
                account for account in account_ids
                if account != deployment_account_id
        ]:
            thread = PropagatingThread(target=worker_thread, args=(
                account_id,
                sts,
                config,
                s3,
                cache))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            account_ids=account_ids,
            update_pipelines_only=0)
        step_functions.execute_statemachine()
    except ParameterNotFoundError:
        # No deployment_account_id parameter yet: nothing to bootstrap.
        LOGGER.info("Deployment Account has not yet been Bootstrapped.")
        return