def __init__(self, cluster_spec, test_config, verbose, experiment=None):
    """Wire up the helpers shared by a perf test run.

    Builds REST/remote/monitoring helpers, resolves the master node and
    target build, and optionally attaches an experiment helper and a
    worker manager.

    :param cluster_spec: parsed cluster specification
    :param test_config: parsed test configuration
    :param verbose: verbosity flag forwarded to the remote helper
    :param experiment: optional experiment descriptor; when given, an
        ExperimentHelper is attached
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.target_iterator = TargetIterator(cluster_spec, test_config)
    self.memcached = MemcachedHelper(test_config)
    self.monitor = Monitor(cluster_spec)
    self.rest = RestHelper(cluster_spec)
    self.remote = RemoteHelper(cluster_spec, test_config, verbose)
    if experiment:
        self.experiment = ExperimentHelper(experiment,
                                           cluster_spec, test_config)
    # BUG FIX: the Python 2 iterator protocol (`.next()`) does not exist on
    # Python 3; use the builtin next(), as the sibling classes already do.
    self.master_node = next(cluster_spec.yield_masters())
    if self.remote.gateways:
        # Sync Gateway deployment: take the build from the first gateway.
        self.build = SyncGatewayRequestHelper().get_version(
            self.remote.gateways[0]
        )
    else:
        self.build = self.rest.get_version(self.master_node)
    self.cbagent = CbAgent(self)
    self.metric_helper = MetricHelper(self)
    self.reporter = Reporter(self)
    self.reports = {}
    self.snapshots = []
    self.master_events = []
    if self.test_config.test_case.use_workers:
        self.worker_manager = WorkerManager(cluster_spec, test_config)
def __init__(self, cluster_spec: ClusterSpec, test_config: TestConfig,
             verbose: bool):
    """Set up the shared helpers for a perf test.

    Construction order matters: several helpers open connections as a
    side effect (RemoteHelper, Monitor), and the build string is fetched
    from the master node before the reporter is created.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.target_iterator = TargetIterator(cluster_spec, test_config)
    self.cluster = ClusterManager(cluster_spec, test_config)
    self.memcached = MemcachedHelper(test_config)
    self.monitor = Monitor(cluster_spec, test_config, verbose)
    self.rest = RestHelper(cluster_spec)
    self.remote = RemoteHelper(cluster_spec, verbose)
    self.profiler = Profiler(cluster_spec, test_config)
    # First master in the spec drives version lookup and reporting.
    self.master_node = next(cluster_spec.masters)
    self.build = self.rest.get_version(self.master_node)
    self.metrics = MetricHelper(self)
    self.reporter = ShowFastReporter(cluster_spec, test_config, self.build)
    # cbmonitor bookkeeping, populated as snapshots/clusters are created.
    self.cbmonitor_snapshots = []
    self.cbmonitor_clusters = []
    if self.test_config.test_case.use_workers:
        self.worker_manager = WorkerManager(cluster_spec, test_config,
                                            verbose)
def update_infrastructure_spec(self):
    """Rewrite the cluster spec file so that the placeholder addresses it
    contains point at the deployed Kubernetes nodes' external IPs.

    Each host entry in the spec ("address:service1,service2") is matched
    against the live node labels (NodeRoles, EKS nodegroup, per-service
    "<service>_enabled" flags); the node's ExternalIP then replaces the
    placeholder address in the spec file.
    """
    remote = RemoteHelper(self.infra_spec)
    with open(self.generated_cloud_config_path) as f:
        self.deployed_infra = json.load(f)
    # Index live nodes by name, keeping only what matching needs.
    k8_nodes = {
        node_dict['metadata']['name']: {
            "labels": node_dict['metadata']['labels'],
            "addresses": node_dict['status']['addresses']
        }
        for node_dict in remote.get_nodes()
    }
    address_replace_list = []
    clusters = self.infra_spec.infrastructure_clusters
    for cluster, hosts in clusters.items():
        for host in hosts.split():
            address, services = host.split(":")
            # Placeholder convention: the third dotted component of the
            # address encodes the nodegroup name — TODO confirm.
            node_group = address.split(".")[2]
            matching_node = None
            for node_name, node_spec in k8_nodes.items():
                if node_spec['labels']['NodeRoles'] != cluster:
                    continue
                if node_spec['labels'][
                        'eks.amazonaws.com/nodegroup'] != node_group:
                    continue
                # The node must advertise every requested service as
                # enabled ("<service>_enabled" == 'true').
                has_all_services = True
                for service in services.split(","):
                    service_enabled = node_spec['labels'].get(
                        "{}_enabled".format(service), 'false')
                    if service_enabled != 'true':
                        has_all_services = False
                if has_all_services:
                    replace_addr = None
                    for node_addr_dict in node_spec['addresses']:
                        if node_addr_dict['type'] == "ExternalIP":
                            replace_addr = node_addr_dict['address']
                    if not replace_addr:
                        raise Exception("no replace address found")
                    address_replace_list.append((address, replace_addr))
                    # NOTE(review): deleting while iterating k8_nodes is
                    # only safe because of the immediate break below —
                    # do not remove the break without reworking this.
                    del k8_nodes[node_name]
                    matching_node = node_name
                    break
            if not matching_node:
                raise Exception("no matching node found")
        print("cluster: {}, hosts: {}".format(cluster,
                                              str(address_replace_list)))
    # Safely read the spec file using 'with', then rewrite it with every
    # placeholder address substituted by its external IP.
    with open(self.cluster_path) as f:
        s = f.read()
    with open(self.cluster_path, 'w') as f:
        for replace_pair in address_replace_list:
            s = s.replace(replace_pair[0], replace_pair[1])
        f.write(s)
def __init__(self, cluster_spec, test_config):
    """Keep the cluster/test configuration and prepare the remote helper
    used to drive the restore.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    # The snapshot to restore comes straight from the restore settings.
    self.snapshot = test_config.restore_settings.snapshot
    self.remote = RemoteHelper(cluster_spec)
def __init__(self, cluster_spec: ClusterSpec, test_config: TestConfig,
             verbose: bool):
    """Configure Celery and (re)start the remote workers.

    On dynamic (cloud) infrastructure the broker URL is discovered from
    the deployment; otherwise the hard-coded broker below is used.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    # NOTE(review): credentials are masked in this literal ('*****');
    # presumably substituted elsewhere or scrubbed for publication —
    # confirm before relying on this default.
    self.broker_url = 'amqp://*****:*****@172.23.97.73:5672/broker'
    self.remote = RemoteHelper(cluster_spec, verbose)
    self.dynamic_infra = self.cluster_spec.dynamic_infrastructure
    if self.dynamic_infra:
        self.WORKER_HOME = '/opt/perfrunner'
        # Cloud deployments expose their own broker; take the first one.
        self.broker_url = self.remote.get_broker_urls()[0]
        self.worker_template_path = "cloud/worker/worker_template.yaml"
        self.worker_path = "cloud/worker/worker.yaml"
    # Celery transport/result settings: pickle serialization end-to-end,
    # transient results via a dedicated exchange, bounded broker retries.
    celery.conf.update(
        broker_url=self.broker_url,
        broker_pool_limit=None,
        worker_hijack_root_logger=False,
        result_backend="amqp://",
        result_persistent=False,
        result_exchange="perf_results",
        accept_content=['pickle'],
        result_serializer='pickle',
        task_serializer='pickle',
        task_protocol=1,
        broker_connection_timeout=5,
        broker_connection_retry=True,
        broker_connection_max_retries=2)
    # Round-robin over the configured worker hosts.
    self.workers = cycle(self.cluster_spec.workers)
    # Order matters: kill stale workers, start fresh ones, then block
    # until they report ready.
    self.terminate()
    self.start()
    self.wait_until_workers_are_ready()
def main():
    """Collect cluster logs, scan them for failures, and abort loudly if
    panics, crashes, or storage corruption are found.
    """
    args = get_args()
    cluster_spec = ClusterSpec()
    cluster_spec.parse(args.cluster_spec_fname)
    remote = RemoteHelper(cluster_spec, verbose=False)
    # cbcollect on every server; the archives land in per-host dirs.
    remote.collect_info()
    # Flatten "<host>/<file>.zip" into "<host>.zip" in the CWD.
    for hostname in cluster_spec.servers:
        for fname in glob.glob('{}/*.zip'.format(hostname)):
            shutil.move(fname, '{}.zip'.format(hostname))
    # Archive backup-tool logs, if a backup location is configured.
    if cluster_spec.backup is not None:
        logs = os.path.join(cluster_spec.backup, 'logs')
        if os.path.exists(logs):
            shutil.make_archive('tools', 'zip', logs)
    # Scan every collected archive for known failure signatures.
    failures = defaultdict(dict)
    for file_name in glob.iglob('./*.zip'):
        panic_files, crash_files, storage_corrupted = validate_logs(file_name)
        if panic_files:
            failures['panics'][file_name] = panic_files
        if crash_files:
            failures['crashes'][file_name] = crash_files
        if storage_corrupted:
            failures['storage_corrupted'][file_name] = True
    remote.collect_index_datafiles()
    if failures:
        # logger.interrupt terminates the run with a non-zero status.
        logger.interrupt(
            "Following failures found: {}".format(pretty_dict(failures)))
def __init__(self, cluster_spec, test_config, options):
    """Store the configuration and detect the client OS by probing the
    first worker host.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.options = options
    # Expose client settings as a plain dict for keyword-style lookups.
    self.client_settings = test_config.client_settings.__dict__
    self.remote = RemoteHelper(cluster_spec, options.verbose)
    # All workers are assumed to run the same OS; probe the first one.
    first_worker = cluster_spec.workers[0]
    self.client_os = RemoteHelper.detect_client_os(first_worker,
                                                   cluster_spec).lower()
def __init__(self, cluster_spec: ClusterSpec, test_config: TestConfig,
             verbose: bool):
    """Store the configuration and (re)start the remote workers."""
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.remote = RemoteHelper(cluster_spec, test_config, verbose)
    # Round-robin iterator over the configured worker hosts.
    self.workers = cycle(cluster_spec.workers)
    # Kill any stale workers, start fresh ones, then wait for readiness.
    self.terminate()
    self.start()
    self.wait_until_workers_are_ready()
def __init__(self, cluster_spec, test_config, verbose):
    """Resolve the target build and pre-parse it into a comparable
    numeric tuple, e.g. "6.5.0-1234" -> (6, 5, 0, 1234).
    """
    super().__init__(cluster_spec=cluster_spec)
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.remote = RemoteHelper(cluster_spec, verbose)
    self.master_node = next(cluster_spec.masters)
    self.build = self.get_version(self.master_node)
    # Build strings look like "<release>-<build number>".
    release, build_number = self.build.split('-')
    numeric_parts = [int(part) for part in release.split('.')]
    numeric_parts.append(int(build_number))
    self.build_version_number = tuple(numeric_parts)
def __init__(self, cluster_spec, options):
    """Probe the target machines and assemble the build descriptor."""
    self.cluster_spec = cluster_spec
    self.remote_helper = RemoteHelper(cluster_spec)
    # Platform details are detected remotely, not configured.
    detected_arch = self.remote_helper.detect_arch()
    detected_pkg = self.remote_helper.detect_pkg()
    detected_openssl = self.remote_helper.detect_openssl(detected_pkg)
    self.build = Build(detected_arch, detected_pkg, options.version,
                       detected_openssl, options.toy)
    logger.info('Target build info: {}'.format(self.build))
def main():
    """Fetch the build manifest from the cluster and clone the projects
    it lists.
    """
    args = get_args()
    cluster_spec = ClusterSpec()
    cluster_spec.parse(args.cluster)
    # Second positional argument (test_config) is unused here.
    remote = RemoteHelper(cluster_spec, None, args.verbose)
    remote.get_manifest()
    projects = parse_manifest()
    fetch(projects)
def main():
    """Restore every host in the cluster spec to its recorded system
    backup state.
    """
    args = get_args()
    cluster_spec = ClusterSpec()
    cluster_spec.parse(args.cluster_spec_fname)
    remote = RemoteHelper(cluster_spec, verbose=False)
    logger.info('Recovering system state')
    # Each host may have been backed up at a different version.
    for host, version in remote.get_system_backup_version().items():
        remote.start_system_state_recovery(host, version)
def __init__(self, cluster_spec, test_config, options):
    """Store the configuration, detect the client OS, and look up the
    Couchbase Server version on the master node.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.options = options
    # Expose client settings as a plain dict for keyword-style lookups.
    self.client_settings = test_config.client_settings.__dict__
    self.remote = RemoteHelper(cluster_spec, options.verbose)
    # All workers are assumed to run the same OS; probe the first one.
    first_worker = cluster_spec.workers[0]
    self.client_os = RemoteHelper.detect_client_os(first_worker,
                                                   cluster_spec).lower()
    self.rest = RestHelper(cluster_spec, test_config, options.verbose)
    master = next(cluster_spec.masters)
    self.cb_version = version_tuple(self.rest.get_version(host=master))
def __init__(self, cluster_spec, options):
    """Probe the target machines, derive the Sherlock build URL, and
    assemble the build descriptor.
    """
    self.cluster_spec = cluster_spec
    # Second positional argument (test_config) is unused here.
    self.remote = RemoteHelper(cluster_spec, None, options.verbose)
    detected_arch = self.remote.detect_arch()
    detected_pkg = self.remote.detect_pkg()
    # Versions look like "<release>-<build number>".
    release_number, build_number = options.version.split('-')
    self.SHERLOCK_BUILDS = \
        'http://latestbuilds.hq.couchbase.com/couchbase-server/sherlock/{}/'.format(
            build_number)
    self.build = Build(detected_arch, detected_pkg, options.version,
                       release_number, build_number, options.toy)
    logger.info('Target build info: {}'.format(self.build))
def __init__(self, cluster_spec: ClusterSpec, test_config: TestConfig,
             verbose: bool = False):
    """Set up the REST/remote/monitoring helpers used to manage the
    cluster under test.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.rest = RestHelper(cluster_spec)
    self.remote = RemoteHelper(cluster_spec, verbose)
    self.monitor = Monitor(cluster_spec, test_config, verbose)
    self.memcached = MemcachedHelper(test_config)
    # First master in the spec is the entry point for cluster ops.
    self.master_node = next(self.cluster_spec.masters)
    self.initial_nodes = test_config.cluster.initial_nodes
def __init__(self, *args, **kwargs):
    """Parse the CLI options and build the helpers before delegating to
    the unittest base class.
    """
    # NOTE(review): this rebinding of `args` shadows the constructor's
    # *args, so the base-class call below forwards the CLI remainder
    # instead of the original arguments; a sibling version binds to
    # `_args` to avoid exactly this — confirm which is intended.
    options, args = get_options()
    self.cluster_spec = ClusterSpec()
    self.cluster_spec.parse(options.cluster_spec_fname, args)
    self.test_config = TestConfig()
    self.test_config.parse(options.test_config_fname, args)
    self.target_iterator = TargetIterator(self.cluster_spec,
                                          self.test_config)
    self.memcached = MemcachedHelper(self.test_config)
    self.remote = RemoteHelper(self.cluster_spec, self.test_config)
    self.rest = RestHelper(self.cluster_spec)
    super(FunctionalTest, self).__init__(*args, **kwargs)
def __init__(self, cluster_spec: ClusterSpec, test_config: TestConfig,
             verbose: bool = False):
    """Set up the cluster-management helpers and resolve the target
    build from the master node.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    # Dynamic (cloud/Kubernetes) deployments are handled differently by
    # several methods of this class.
    self.dynamic_infra = self.cluster_spec.dynamic_infrastructure
    self.rest = RestHelper(cluster_spec)
    self.remote = RemoteHelper(cluster_spec, verbose)
    self.monitor = Monitor(cluster_spec, test_config, verbose)
    self.memcached = MemcachedHelper(test_config)
    # First master in the spec is the entry point for cluster ops.
    self.master_node = next(self.cluster_spec.masters)
    self.initial_nodes = test_config.cluster.initial_nodes
    self.build = self.rest.get_version(self.master_node)
def __init__(self, cluster_spec, test_config, verbose=False):
    """Set up the cluster-management helpers and cache the per-service
    memory quotas from the test configuration.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.rest = RestHelper(cluster_spec)
    self.remote = RemoteHelper(cluster_spec, test_config, verbose)
    self.monitor = Monitor(cluster_spec, test_config, verbose)
    self.memcached = MemcachedHelper(test_config)
    # First master in the spec is the entry point for cluster ops.
    self.master_node = next(self.cluster_spec.masters)
    self.initial_nodes = test_config.cluster.initial_nodes
    # Memory quotas per service (data, index, FTS, analytics).
    self.mem_quota = test_config.cluster.mem_quota
    self.index_mem_quota = test_config.cluster.index_mem_quota
    self.fts_mem_quota = test_config.cluster.fts_index_mem_quota
    self.analytics_mem_quota = test_config.cluster.analytics_mem_quota
def __init__(self, *args, **kwargs):
    """Parse the CLI options (with optional test-config overrides) and
    build the helpers before delegating to the unittest base class.
    """
    options, _args = get_options()
    # Leftover CLI args encode overrides as "a.b.c,x.y.z" -> generator
    # of ['a','b','c'] paths; falsy (no overrides) when _args is empty.
    override = \
        _args and (arg.split('.') for arg in ' '.join(_args).split(','))
    self.cluster_spec = ClusterSpec()
    self.cluster_spec.parse(options.cluster_spec_fname)
    self.test_config = TestConfig()
    self.test_config.parse(options.test_config_fname, override)
    self.target_iterator = TargetIterator(self.cluster_spec,
                                          self.test_config)
    # NOTE(review): every sibling constructor passes test_config to
    # MemcachedHelper; passing cluster_spec here looks suspicious —
    # confirm against MemcachedHelper's signature.
    self.memcached = MemcachedHelper(self.cluster_spec)
    self.remote = RemoteHelper(self.cluster_spec)
    super(FunctionalTest, self).__init__(*args, **kwargs)
def __init__(self, cluster_spec, test_config, verbose):
    """Set up the cluster-management helpers and cache cluster topology
    accessors from the spec.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.rest = RestHelper(cluster_spec)
    self.remote = RemoteHelper(cluster_spec, test_config, verbose)
    self.monitor = Monitor(cluster_spec)
    self.memcached = MemcachedHelper(test_config)
    # NOTE(review): yield_clusters() is *called* (bound to its result),
    # while yield_servers / yield_masters are stored uncalled as
    # callables — confirm this asymmetry is intentional at the call
    # sites.
    self.clusters = cluster_spec.yield_clusters()
    self.servers = cluster_spec.yield_servers
    self.masters = cluster_spec.yield_masters
    self.initial_nodes = test_config.cluster.initial_nodes
    self.mem_quota = test_config.cluster.mem_quota
    # Default to a single server group when none is configured.
    self.group_number = test_config.cluster.group_number or 1
def main():
    """Collect cluster logs and flatten the per-host archives into the
    current directory.
    """
    args = get_args()
    cluster_spec = ClusterSpec()
    cluster_spec.parse(args.cluster_spec_fname)
    remote = RemoteHelper(cluster_spec, test_config=None, verbose=False)
    # cbcollect on every server; the archives land in per-host dirs.
    remote.collect_info()
    # Flatten "<host>/<file>.zip" into "<host>.zip" in the CWD.
    for hostname in cluster_spec.servers:
        for fname in glob.glob('{}/*.zip'.format(hostname)):
            shutil.move(fname, '{}.zip'.format(hostname))
    # Archive backup-tool logs, if a backup location is configured.
    if cluster_spec.backup is not None:
        logs = os.path.join(cluster_spec.backup, 'logs')
        if os.path.exists(logs):
            shutil.make_archive('tools', 'zip', logs)
def __init__(self, cluster_spec: ClusterSpec, test_config: TestConfig,
             verbose: bool):
    """Set up the helpers for a Sync Gateway perf test.

    The build is read from Sync Gateway (get_sgversion) rather than
    Couchbase Server, and the access settings are extended with the
    Sync Gateway specific section.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.memcached = MemcachedHelper(test_config)
    self.remote = RemoteHelper(cluster_spec, test_config, verbose)
    self.rest = RestHelper(cluster_spec)
    # self.build = os.environ.get('SGBUILD') or "0.0.0-000"
    self.master_node = next(cluster_spec.masters)
    self.build = self.rest.get_sgversion(self.master_node)
    self.metrics = MetricHelper(self)
    self.reporter = ShowFastReporter(cluster_spec, test_config, self.build)
    if self.test_config.test_case.use_workers:
        self.worker_manager = WorkerManager(cluster_spec, test_config,
                                            verbose)
    # Graft the Sync Gateway settings onto the generic access settings
    # so downstream code can reach both through one object.
    self.settings = self.test_config.access_settings
    self.settings.syncgateway_settings = self.test_config.syncgateway_settings
    self.profiler = Profiler(cluster_spec, test_config)
    self.cluster = ClusterManager(cluster_spec, test_config)
    self.target_iterator = TargetIterator(cluster_spec, test_config)
    self.monitor = Monitor(cluster_spec, test_config, verbose)
def __init__(self, cluster_spec, options):
    """Probe the target machines, derive the Sherlock/Watson build URLs,
    and assemble the build descriptor.
    """
    self.options = options
    self.cluster_spec = cluster_spec
    # Second positional argument (test_config) is unused here.
    self.remote = RemoteHelper(cluster_spec, None, options.verbose)
    detected_arch = self.remote.detect_arch()
    detected_pkg = self.remote.detect_pkg()
    # Versions look like "<release>-<build number>"; both stay None when
    # no version was supplied on the command line.
    release = None
    build = None
    if options.version:
        release, build = options.version.split('-')
    self.SHERLOCK_BUILDS = (
        'http://latestbuilds.hq.couchbase.com/couchbase-server/'
        'sherlock/{}/'.format(build))
    self.WATSON_BUILDS = (
        'http://172.23.120.24/builds/latestbuilds/couchbase-server/'
        'watson/{}/'.format(build))
    if options.toy:
        # Toy builds live under a per-owner directory.
        self.SHERLOCK_BUILDS = (
            'http://latestbuilds.hq.couchbase.com/couchbase-server/'
            'toy-{}/{}/'.format(options.toy, build))
    self.build = Build(detected_arch, detected_pkg,
                       options.cluster_edition, options.version,
                       release, build, options.toy, options.url)
    logger.info('Target build info: {}'.format(self.build))
def __init__(self, cluster_spec, options):
    """Record the operator/server versions and precompute every YAML and
    credential path used during operator deployment.
    """
    self.options = options
    self.cluster_spec = cluster_spec
    self.operator_version = self.options.operator_version
    self.couchbase_version = self.options.couchbase_version
    # Operator versions look like "<release>-<build number>".
    self.release = self.operator_version.split("-")[0]
    self.build = self.operator_version.split("-")[1]
    self.node_count = len(
        self.cluster_spec.infrastructure_clusters['couchbase1'].split())
    self.remote = RemoteHelper(cluster_spec)
    self.docker_config_path = \
        os.path.expanduser("~") + "/.docker/config.json"
    # NOTE(review): indexes characters of the release string — assumes
    # single-digit major/minor such as "2.2" ('2' and '2') — confirm.
    base = "cloud/operator/{}/{}".format(self.release[0], self.release[2])
    self.operator_base_path = base
    self.certificate_authority_path = "{}/ca.crt".format(base)
    self.crd_path = "{}/crd.yaml".format(base)
    self.config_path = "{}/config.yaml".format(base)
    self.config_template_path = "{}/config_template.yaml".format(base)
    self.auth_path = "{}/auth_secret.yaml".format(base)
    self.cb_cluster_path = "{}/couchbase-cluster.yaml".format(base)
    self.template_cb_cluster_path = \
        "{}/couchbase-cluster_template.yaml".format(base)
    self.worker_base_path = "cloud/worker"
    self.worker_path = "{}/worker.yaml".format(self.worker_base_path)
    self.rmq_base_path = "cloud/broker/rabbitmq/0.48"
    self.rmq_operator_path = \
        "{}/cluster-operator.yaml".format(self.rmq_base_path)
    self.rmq_cluster_path = "{}/rabbitmq.yaml".format(self.rmq_base_path)
def __init__(self, cluster_spec: ClusterSpec):
    """Initialize the base class and resolve the NAT IP/port mapping
    once up front.
    """
    super().__init__(cluster_spec=cluster_spec)
    self.remote = RemoteHelper(cluster_spec)
    mapping = self.remote.get_ip_port_mapping()
    self.ip_table, self.port_translation = mapping
def __init__(self, cluster_spec, options):
    """Resolve the operator / admission-controller / server / backup
    image tags and precompute every YAML and credential path used during
    operator deployment.

    A dash in a version string marks an internal build, which is pulled
    from the private GitLab registry; otherwise the public Docker Hub
    image is used.
    """
    self.options = options
    self.cluster_spec = cluster_spec
    self.operator_version = self.options.operator_version
    if "-" in self.operator_version:
        # Internal build: pull from the private GitLab registry.
        self.operator_release = self.operator_version.split("-")[0]
        self.operator_tag = 'registry.gitlab.com/cb-vanilla/operator:{}'\
            .format(self.operator_version)
        self.admission_controller_release = self.operator_version.split("-")[0]
        self.admission_controller_tag = \
            'registry.gitlab.com/cb-vanilla/admission-controller:{}' \
            .format(self.operator_version)
    else:
        self.operator_release = self.operator_version
        self.operator_tag = 'couchbase/operator:{}'\
            .format(self.operator_version)
        self.admission_controller_release = self.operator_version
        self.admission_controller_tag = 'couchbase/admission-controller:{}' \
            .format(self.operator_version)
    self.couchbase_version = self.options.couchbase_version
    if "-" in self.couchbase_version:
        self.couchbase_release = self.couchbase_version.split("-")[0]
        self.couchbase_tag = 'registry.gitlab.com/cb-vanilla/server:{}'\
            .format(self.couchbase_version)
    else:
        self.couchbase_release = self.couchbase_version
        self.couchbase_tag = 'couchbase/server:{}'\
            .format(self.couchbase_version)
    self.operator_backup_version = self.options.operator_backup_version
    if self.operator_backup_version:
        if "-" in self.operator_backup_version:
            self.operator_backup_release = \
                self.operator_backup_version.split("-")[0]
            self.operator_backup_tag = \
                'registry.gitlab.com/cb-vanilla/operator-backup:{}'\
                .format(self.operator_backup_version)
        else:
            self.operator_backup_release = self.operator_backup_version
            # BUG FIX: the separator before the version was '/'
            # ('couchbase/operator-backup/<ver>'), which is not a valid
            # image reference; use ':' like every other tag above.
            self.operator_backup_tag = 'couchbase/operator-backup:{}'\
                .format(self.operator_backup_version)
    else:
        # No explicit version: fall back to the latest internal image.
        self.operator_backup_tag = \
            'registry.gitlab.com/cb-vanilla/operator-backup:latest'
    self.node_count = len(
        self.cluster_spec.infrastructure_clusters['couchbase1'].split())
    self.remote = RemoteHelper(cluster_spec)
    self.docker_config_path = \
        os.path.expanduser("~") + "/.docker/config.json"
    # Per-release directory holding the operator deployment YAMLs,
    # keyed by "<major>/<minor>".
    self.operator_base_path = "cloud/operator/{}/{}"\
        .format(self.operator_release.split(".")[0],
                self.operator_release.split(".")[1])
    self.certificate_authority_path = "{}/ca.crt"\
        .format(self.operator_base_path)
    self.crd_path = "{}/crd.yaml"\
        .format(self.operator_base_path)
    self.config_path = "{}/config.yaml"\
        .format(self.operator_base_path)
    self.config_template_path = "{}/config_template.yaml"\
        .format(self.operator_base_path)
    self.auth_path = "{}/auth_secret.yaml"\
        .format(self.operator_base_path)
    self.cb_cluster_path = "{}/couchbase-cluster.yaml"\
        .format(self.operator_base_path)
    self.template_cb_cluster_path = "{}/couchbase-cluster_template.yaml"\
        .format(self.operator_base_path)
    self.worker_base_path = "cloud/worker"
    self.worker_path = "{}/worker.yaml"\
        .format(self.worker_base_path)
    self.rmq_base_path = "cloud/broker/rabbitmq/0.48"
    self.rmq_operator_path = "{}/cluster-operator.yaml"\
        .format(self.rmq_base_path)
    self.rmq_cluster_path = "{}/rabbitmq.yaml"\
        .format(self.rmq_base_path)
def __init__(self, cluster_spec, options):
    """Store the configuration and prepare the remote helper."""
    self.cluster_spec = cluster_spec
    self.options = options
    self.remote = RemoteHelper(cluster_spec, options.verbose)
def __init__(self, cluster_spec, test_config, verbose):
    """Store the configuration and prepare the remote helper."""
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.remote = RemoteHelper(cluster_spec, test_config, verbose)
def __init__(self, cluster_spec, test_config, options):
    """Store the configuration, detect the remote package format, and
    record the target version.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.remote_helper = RemoteHelper(cluster_spec)
    # Package format (rpm/deb/...) is detected remotely, not configured.
    self.pkg = self.remote_helper.detect_pkg()
    self.version = options.version
def __init__(self, cluster_spec, test_config, options):
    """Store the configuration and prepare the helpers used to install
    and query Sync Gateway.
    """
    self.cluster_spec = cluster_spec
    self.test_config = test_config
    self.version = options.version
    self.remote = RemoteHelper(cluster_spec, test_config, options.verbose)
    self.request_helper = SyncGatewayRequestHelper()