Example #1
    def setUp(self):
        self.failover_util = failover_utils()
        self.node_util = node_utils()
        self.views_util = views_utils()

        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.primary_index_created = False
        self.use_sdk_client = self.input.param("use_sdk_client", False)
        self.analytics = self.input.param("analytics", False)
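        # Optional log-level override: file handlers stay pinned at DEBUG,
        # while every other handler uses the level named by the "log_level"
        # param.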
        if self.input.param("log_level", None):
            log.setLevel(level=0)
            for hd in log.handlers:
                if str(hd.__class__).find('FileHandler') != -1:
                    hd.setLevel(level=logging.DEBUG)
                else:
                    hd.setLevel(level=getattr(
                        logging, self.input.param("log_level", None)))
        self.servers = self.input.servers
        if str(self.__class__).find('moxitests') != -1:
            self.moxi_server = self.input.moxis[0]
            self.servers = [
                server for server in self.servers
                if server.ip != self.moxi_server.ip
            ]
        self.buckets = []
        self.bucket_base_params = {}
        self.bucket_base_params['membase'] = {}
        self.master = self.servers[0]
        self.bucket_util = bucket_utils(self.master)
        self.cluster_util = cluster_utils(self.master)
        self.indexManager = self.servers[0]
        if not hasattr(self, 'cluster'):
            self.cluster = Cluster()
        self.pre_warmup_stats = {}
        self.cleanup = False
        self.nonroot = False
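        # Probe the master node over SSH to record its OS type and whether
        # the tests are running as a non-root user.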
        shell = RemoteMachineShellConnection(self.master)
        self.os_info = shell.extract_remote_info().type.lower()
        if self.os_info != 'windows':
            if self.master.ssh_username != "root":
                self.nonroot = True
        shell.disconnect()
        """ some tests need to bypass checking cb server at set up
            to run installation """
        self.skip_init_check_cbserver = \
            self.input.param("skip_init_check_cbserver", False)
        self.data_collector = DataCollector()
        self.data_analyzer = DataAnalyzer()
        self.result_analyzer = DataAnalysisResultAnalyzer()
        #         self.set_testrunner_client()
        self.change_bucket_properties = False
        self.cbas_node = self.input.cbas
        self.cbas_servers = []
        self.kv_servers = []
        self.otpNodes = []
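        # Partition the servers by the services they advertise (cbas vs kv).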
        for server in self.servers:
            if "cbas" in server.services:
                self.cbas_servers.append(server)
            if "kv" in server.services:
                self.kv_servers.append(server)
        if not self.cbas_node and len(self.cbas_servers) >= 1:
            self.cbas_node = self.cbas_servers[0]

        try:
            self.skip_setup_cleanup = self.input.param("skip_setup_cleanup",
                                                       False)
            self.vbuckets = self.input.param("vbuckets", 1024)
            self.upr = self.input.param("upr", None)
            self.index_quota_percent = self.input.param(
                "index_quota_percent", None)
            self.targetIndexManager = self.input.param("targetIndexManager",
                                                       False)
            self.targetMaster = self.input.param("targetMaster", False)
            self.reset_services = self.input.param("reset_services", False)
            self.auth_mech = self.input.param("auth_mech", "PLAIN")
            self.wait_timeout = self.input.param("wait_timeout", 60)
            # number of the test case run by testrunner (incremented each time)
            self.case_number = self.input.param("case_number", 0)
            self.default_bucket = self.input.param("default_bucket", True)
            self.parallelism = self.input.param("parallelism", False)
            if self.default_bucket:
                self.default_bucket_name = "default"
            self.standard_buckets = self.input.param("standard_buckets", 0)
            self.sasl_buckets = self.input.param("sasl_buckets", 0)
            self.num_buckets = self.input.param("num_buckets", 0)
            self.verify_unacked_bytes = self.input.param(
                "verify_unacked_bytes", False)
            self.memcached_buckets = self.input.param("memcached_buckets", 0)
            self.enable_flow_control = self.input.param(
                "enable_flow_control", False)
            self.total_buckets = self.sasl_buckets + self.default_bucket + \
                self.standard_buckets + self.memcached_buckets
            self.num_servers = self.input.param("servers", len(self.servers))
            # initial number of nodes in the cluster
            self.nodes_init = self.input.param("nodes_init", 1)
            self.nodes_in = self.input.param("nodes_in", 1)
            self.nodes_out = self.input.param("nodes_out", 1)
            self.services_init = self.input.param("services_init", None)
            self.services_in = self.input.param("services_in", None)
            self.forceEject = self.input.param("forceEject", False)
            self.force_kill_memcached = TestInputSingleton.input.param(
                'force_kill_memcached', False)
            self.num_items = self.input.param("items", 1000)
            self.value_size = self.input.param("value_size", 512)
            self.dgm_run = self.input.param("dgm_run", False)
            self.active_resident_threshold = int(
                self.input.param("active_resident_threshold", 0))
            # max number of items to verify in ValidateDataTask; None verifies all
            self.max_verify = self.input.param("max_verify", None)
            # we don't change consistent_view on server by default
            self.disabled_consistent_view = self.input.param(
                "disabled_consistent_view", None)
            self.rebalanceIndexWaitingDisabled = self.input.param(
                "rebalanceIndexWaitingDisabled", None)
            self.rebalanceIndexPausingDisabled = self.input.param(
                "rebalanceIndexPausingDisabled", None)
            self.maxParallelIndexers = self.input.param(
                "maxParallelIndexers", None)
            self.maxParallelReplicaIndexers = self.input.param(
                "maxParallelReplicaIndexers", None)
            self.quota_percent = self.input.param("quota_percent", None)
            self.port = None
            self.log_message = self.input.param("log_message", None)
            self.log_info = self.input.param("log_info", None)
            self.log_location = self.input.param("log_location", None)
            self.stat_info = self.input.param("stat_info", None)
            self.port_info = self.input.param("port_info", None)
            if not hasattr(self, 'skip_buckets_handle'):
                self.skip_buckets_handle = self.input.param(
                    "skip_buckets_handle", False)
            self.nodes_out_dist = self.input.param("nodes_out_dist", None)
            self.absolute_path = self.input.param("absolute_path", True)
            self.test_timeout = self.input.param(
                "test_timeout", 3600)  # kill a hung test and jump to the next one
            self.enable_bloom_filter = self.input.param(
                "enable_bloom_filter", False)
            self.enable_time_sync = self.input.param("enable_time_sync", False)
            self.gsi_type = self.input.param("gsi_type", 'plasma')
            # bucket parameters go here
            self.bucket_size = self.input.param("bucket_size", None)
            self.bucket_type = self.input.param("bucket_type", 'membase')
            self.num_replicas = self.input.param("replicas", 1)
            self.enable_replica_index = self.input.param("index_replicas", 1)
            self.eviction_policy = self.input.param(
                "eviction_policy", 'valueOnly')  # or 'fullEviction'
            # for an ephemeral bucket it can be noEviction or nruEviction
            if self.bucket_type == 'ephemeral' and self.eviction_policy == 'valueOnly':
                # use the ephemeral bucket default
                self.eviction_policy = 'noEviction'

            self.sasl_password = self.input.param("sasl_password", 'password')
            # lww only matters for LWW buckets, but it lives here because
            # buckets are created in this base class
            self.lww = self.input.param("lww", False)
            self.maxttl = self.input.param("maxttl", None)
            self.compression_mode = self.input.param("compression_mode",
                                                     'passive')
            self.sdk_compression = self.input.param("sdk_compression", True)
            self.sasl_bucket_name = "bucket"
            self.sasl_bucket_priority = self.input.param(
                "sasl_bucket_priority", None)
            self.standard_bucket_priority = self.input.param(
                "standard_bucket_priority", None)
            # end of bucket parameters (this list is still growing)

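            # Fast path: when setup/cleanup is skipped, just pick up the
            # buckets that already exist and return.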
            if self.skip_setup_cleanup:
                self.buckets = BucketHelper(self.master).get_buckets()
                return
            if not self.skip_init_check_cbserver:
                self.cb_version = None
                if RestHelper(RestConnection(
                        self.master)).is_ns_server_running():
                    """ since every new couchbase version, there will be new features
                        that test code will not work on previous release.  So we need
                        to get couchbase version to filter out those tests. """
                    self.cb_version = RestConnection(
                        self.master).get_nodes_version()
                else:
                    log.info("couchbase server does not run yet")
                self.protocol = self.get_protocol_type()
            self.services_map = None
            if self.sasl_bucket_priority is not None:
                self.sasl_bucket_priority = self.sasl_bucket_priority.split(
                    ":")
            if self.standard_bucket_priority is not None:
                self.standard_bucket_priority = self.standard_bucket_priority.split(
                    ":")

            log.info("==============  basetestcase setup was started for test #{0} {1}==============" \
                          .format(self.case_number, self._testMethodName))
            if not self.skip_buckets_handle and not self.skip_init_check_cbserver:
                self._cluster_cleanup()

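            # Build one shared parameter template, then derive the
            # per-bucket-type variants from deep copies so later edits do
            # not leak between bucket types.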
            shared_params = self._create_bucket_params(
                server=self.master,
                size=self.bucket_size,
                replicas=self.num_replicas,
                enable_replica_index=self.enable_replica_index,
                eviction_policy=self.eviction_policy,
                bucket_priority=None,
                lww=self.lww,
                maxttl=self.maxttl,
                compression_mode=self.compression_mode)

            membase_params = copy.deepcopy(shared_params)
            membase_params['bucket_type'] = 'membase'
            self.bucket_base_params['membase'][
                'non_ephemeral'] = membase_params

            membase_ephemeral_params = copy.deepcopy(shared_params)
            membase_ephemeral_params['bucket_type'] = 'ephemeral'
            self.bucket_base_params['membase'][
                'ephemeral'] = membase_ephemeral_params

            memcached_params = copy.deepcopy(shared_params)
            memcached_params['bucket_type'] = 'memcached'
            self.bucket_base_params['memcached'] = memcached_params

            # avoid any cluster operations in setup for new upgrade
            #  & upgradeXDCR tests
            if str(self.__class__).find('newupgradetests') != -1 or \
                            str(self.__class__).find('upgradeXDCR') != -1 or \
                            str(self.__class__).find('Upgrade_EpTests') != -1 or \
                            hasattr(self, 'skip_buckets_handle') and \
                            self.skip_buckets_handle:
                log.info("any cluster operation in setup will be skipped")
                self.primary_index_created = True
                log.info("==============  basetestcase setup was finished for test #{0} {1} ==============" \
                              .format(self.case_number, self._testMethodName))
                return
            # clean up only on the first test of a run or when the previous
            # test's tearDown failed
            if self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    log.warn(
                        "tearDown for previous test failed. Will retry..")
                    self.case_number -= 1000
                self.cleanup = True
                if not self.skip_init_check_cbserver:
                    self.tearDownEverything()
                self.cluster = Cluster()
            if not self.skip_init_check_cbserver:
                log.info("initializing cluster")
                self.reset_cluster()
                master_services = self.get_services(self.servers[:1], \
                                                    self.services_init, \
                                                    start_node=0)
                if master_services is not None:
                    master_services = master_services[0].split(",")

                self.quota = self._initialize_nodes(self.cluster, self.servers, \
                                                    self.disabled_consistent_view, \
                                                    self.rebalanceIndexWaitingDisabled, \
                                                    self.rebalanceIndexPausingDisabled, \
                                                    self.maxParallelIndexers, \
                                                    self.maxParallelReplicaIndexers, \
                                                    self.port, \
                                                    self.quota_percent, \
                                                    services=master_services)

                self.change_env_variables()
                self.change_checkpoint_params()

                # Add built-in user
                if not self.skip_init_check_cbserver:
                    self.add_built_in_server_user(node=self.master)
                log.info("done initializing cluster")
            else:
                self.quota = ""
            if self.input.param("log_info", None):
                self.change_log_info()
            if self.input.param("log_location", None):
                self.change_log_location()
            if self.input.param("stat_info", None):
                self.change_stat_info()
            if self.input.param("port_info", None):
                self.change_port_info()
            if self.input.param("port", None):
                self.port = str(self.input.param("port", None))
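            # Some suites expect a fully formed cluster, so every node is
            # rebalanced in before the test; most others only rebalance the
            # first nodes_init nodes.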
            try:
                if (str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1) or \
                        (str(self.__class__).find('memorysanitytests.MemorySanity') != -1) or \
                                str(self.__class__).find('negativetests.NegativeTests') != -1 or \
                                str(self.__class__).find('warmuptest.WarmUpTests') != -1 or \
                                str(self.__class__).find('failover.failovertests.FailoverTests') != -1 or \
                                str(self.__class__).find('observe.observeseqnotests.ObserveSeqNoTests') != -1 or \
                                str(self.__class__).find('epengine.lwwepengine.LWW_EP_Engine') != -1:

                    self.services = self.get_services(self.servers,
                                                      self.services_init)
                    # rebalance all nodes into the cluster before each test
                    self.cluster.rebalance(self.servers[:self.num_servers],
                                           self.servers[1:self.num_servers],
                                           [],
                                           services=self.services)
                elif self.nodes_init > 1 and not self.skip_init_check_cbserver:
                    self.services = self.get_services(
                        self.servers[:self.nodes_init], self.services_init)
                    self.cluster.rebalance(self.servers[:1], \
                                           self.servers[1:self.nodes_init], \
                                           [], services=self.services)
                elif str(self.__class__).find('ViewQueryTests') != -1 and \
                        not self.input.param("skip_rebalance", False):
                    self.services = self.get_services(self.servers,
                                                      self.services_init)
                    self.cluster.rebalance(self.servers,
                                           self.servers[1:], [],
                                           services=self.services)
                self.setDebugLevel(service_type="index")
            except BaseException as e:
                # increase case_number to retry tearDown in setup for the next test
                self.case_number += 1000
                self.fail(e)

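            # Size the buckets: a DGM run pins the quota low so the data set
            # outgrows memory; otherwise the per-bucket size is derived from
            # the RAM quota the node actually reports.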
            if self.dgm_run:
                self.quota = 256
            if self.total_buckets > 10:
                log.info("================== changing max buckets from 10 to {0} =================" \
                              .format(self.total_buckets))
                self.change_max_buckets(self, self.total_buckets)
            if self.total_buckets > 0 and not self.skip_init_check_cbserver:
                """ from sherlock, we have index service that could take some
                    RAM quota from total RAM quota for couchbase server.  We need
                    to get the correct RAM quota available to create bucket(s)
                    after all services were set """
                node_info = RestConnection(self.master).get_nodes_self()
                if node_info.memoryQuota and int(node_info.memoryQuota) > 0:
                    ram_available = node_info.memoryQuota
                else:
                    ram_available = self.quota
                if self.bucket_size is None:
                    if self.dgm_run:
                        """ if dgm is set,
                            we need to set bucket size to dgm setting """
                        self.bucket_size = self.quota
                    else:
                        self.bucket_size = self._get_bucket_size(ram_available, \
                                                                 self.total_buckets)

            self.bucket_base_params['membase']['non_ephemeral'][
                'size'] = self.bucket_size
            self.bucket_base_params['membase']['ephemeral'][
                'size'] = self.bucket_size
            self.bucket_base_params['memcached']['size'] = self.bucket_size

            if str(self.__class__).find('upgrade_tests') == -1 and \
                            str(self.__class__).find('newupgradetests') == -1:
                self._bucket_creation()
            log.info("==============  basetestcase setup was finished for test #{0} {1} ==============" \
                          .format(self.case_number, self._testMethodName))

            if not self.skip_init_check_cbserver:
                self._log_start(self)
                self.sleep(10)
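
Both setUp variants read every tunable through input.param(name, default). A minimal, self-contained sketch of that lookup pattern is below; the TestInput class and its parsing rules are illustrative assumptions, not the framework's actual implementation:

class TestInput(object):
    """Hypothetical stand-in for the framework's test-input object."""

    def __init__(self, raw_params):
        # raw_params is a comma-separated string, e.g. "nodes_init=3,items=5000"
        self._params = dict(p.split("=", 1) for p in raw_params.split(",") if p)

    def param(self, name, default=None):
        # A missing key yields the default; otherwise the raw string is
        # coerced to the default's type, which is what the setUp code above
        # relies on when it passes int and bool defaults.
        if name not in self._params:
            return default
        value = self._params[name]
        if isinstance(default, bool):  # check bool before int: bool subclasses int
            return value.lower() in ("true", "1", "yes")
        if isinstance(default, int):
            return int(value)
        return value

inp = TestInput("nodes_init=3,dgm_run=True,items=5000")
print(inp.param("nodes_init", 1))     # -> 3
print(inp.param("dgm_run", False))    # -> True
print(inp.param("wait_timeout", 60))  # -> 60 (default, key absent)
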
Example #2
    def setUp(self):
        self.failover_util = failover_utils()
        self.node_util = node_utils()
        self.views_util = views_utils()
        
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.primary_index_created = False
        self.use_sdk_client = self.input.param("use_sdk_client", False)
        if self.input.param("log_level", None):
            log.setLevel(level=0)
            for hd in log.handlers:
                if str(hd.__class__).find('FileHandler') != -1:
                    hd.setLevel(level=logging.DEBUG)
                else:
                    hd.setLevel(level=getattr(logging, self.input.param("log_level", None)))
        self.servers = self.input.servers
        if str(self.__class__).find('moxitests') != -1:
            self.moxi_server = self.input.moxis[0]
            self.servers = [server for server in self.servers
                            if server.ip != self.moxi_server.ip]
        self.buckets = []
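        # Unlike Example #1, this variant wraps the servers in a CBCluster
        # object and drives asynchronous operations through ServerTasks.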
        self.cluster = CBCluster(servers=self.input.servers)
        self.bucket_util = bucket_utils(self.cluster)
        self.cluster_util = cluster_utils(self.cluster)
        self.task = ServerTasks()
        self.pre_warmup_stats = {}
        self.cleanup = False
        self.nonroot = False
        shell = RemoteMachineShellConnection(self.cluster.master)
        self.os_info = shell.extract_remote_info().type.lower()
        if self.os_info != 'windows':
            if self.cluster.master.ssh_username != "root":
                self.nonroot = True
        shell.disconnect()
        """ some tests need to bypass checking cb server at set up
            to run installation """
        self.skip_init_check_cbserver = \
            self.input.param("skip_init_check_cbserver", False)
        self.data_collector = DataCollector()
        self.data_analyzer = DataAnalyzer()
        self.result_analyzer = DataAnalysisResultAnalyzer()

        try:
            self.skip_setup_cleanup = self.input.param("skip_setup_cleanup", False)
            self.index_quota_percent = self.input.param("index_quota_percent", None)
            self.targetIndexManager = self.input.param("targetIndexManager", False)
            self.targetMaster = self.input.param("targetMaster", False)
            self.reset_services = self.input.param("reset_services", False)
            self.auth_mech = self.input.param("auth_mech", "PLAIN")
            self.wait_timeout = self.input.param("wait_timeout", 60)
            # number of the test case run by testrunner (incremented each time)
            self.case_number = self.input.param("case_number", 0)
            self.default_bucket = self.input.param("default_bucket", True)
            self.parallelism = self.input.param("parallelism", False)
            self.verify_unacked_bytes = self.input.param("verify_unacked_bytes", False)
            self.enable_flow_control = self.input.param("enable_flow_control", False)
            self.num_servers = self.input.param("servers", len(self.cluster.servers))
            # initial number of nodes in the cluster
            self.nodes_init = self.input.param("nodes_init", 1)
            self.nodes_in = self.input.param("nodes_in", 1)
            self.nodes_out = self.input.param("nodes_out", 1)
            self.services_init = self.input.param("services_init", None)
            self.services_in = self.input.param("services_in", None)
            self.forceEject = self.input.param("forceEject", False)
            self.force_kill_memcached = TestInputSingleton.input.param('force_kill_memcached', False)
            self.num_items = self.input.param("items", 1000)
            self.value_size = self.input.param("value_size", 512)
            self.dgm_run = self.input.param("dgm_run", False)
            self.active_resident_threshold = int(self.input.param("active_resident_threshold", 0))
            # max number of items to verify in ValidateDataTask; None verifies all
            self.max_verify = self.input.param("max_verify", None)
            # we don't change consistent_view on server by default
            self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
            self.rebalanceIndexWaitingDisabled = self.input.param("rebalanceIndexWaitingDisabled", None)
            self.rebalanceIndexPausingDisabled = self.input.param("rebalanceIndexPausingDisabled", None)
            self.maxParallelIndexers = self.input.param("maxParallelIndexers", None)
            self.maxParallelReplicaIndexers = self.input.param("maxParallelReplicaIndexers", None)
            self.quota_percent = self.input.param("quota_percent", None)
            self.port = None
            self.log_message = self.input.param("log_message", None)
            self.log_info = self.input.param("log_info", None)
            self.log_location = self.input.param("log_location", None)
            self.stat_info = self.input.param("stat_info", None)
            self.port_info = self.input.param("port_info", None)
            if not hasattr(self, 'skip_buckets_handle'):
                self.skip_buckets_handle = self.input.param("skip_buckets_handle", False)
            self.nodes_out_dist = self.input.param("nodes_out_dist", None)
            self.absolute_path = self.input.param("absolute_path", True)
            self.test_timeout = self.input.param("test_timeout", 3600)  # kill a hung test and jump to the next one
            self.enable_bloom_filter = self.input.param("enable_bloom_filter", False)
#             self.enable_time_sync = self.input.param("enable_time_sync", False)
            self.gsi_type = self.input.param("gsi_type", 'plasma')

            # JRE path for cbas
            self.jre_path = self.input.param("jre_path", None)
            # end of bucket parameters (this list is still growing)

            if self.skip_setup_cleanup:
                self.buckets = BucketHelper(self.cluster.master).get_buckets()
                return
            if not self.skip_init_check_cbserver:
                self.cb_version = None
                if RestHelper(RestConnection(self.cluster.master)).is_ns_server_running():
                    """ since every new couchbase version, there will be new features
                        that test code will not work on previous release.  So we need
                        to get couchbase version to filter out those tests. """
                    self.cb_version = RestConnection(self.cluster.master).get_nodes_version()
                else:
                    log.info("couchbase server does not run yet")
                self.protocol = self.cluster_util.get_protocol_type()
            self.services_map = None

            log.info("==============  basetestcase setup was started for test #{0} {1}==============" \
                          .format(self.case_number, self._testMethodName))
            if not self.skip_buckets_handle and not self.skip_init_check_cbserver:
                self.cluster_util._cluster_cleanup(self.bucket_util)

            # avoid any cluster operations in setup for new upgrade
            #  & upgradeXDCR tests
            if str(self.__class__).find('newupgradetests') != -1 or \
                            str(self.__class__).find('upgradeXDCR') != -1 or \
                            str(self.__class__).find('Upgrade_EpTests') != -1 or \
                            hasattr(self, 'skip_buckets_handle') and \
                            self.skip_buckets_handle:
                log.info("any cluster operation in setup will be skipped")
                self.primary_index_created = True
                log.info("==============  basetestcase setup was finished for test #{0} {1} ==============" \
                              .format(self.case_number, self._testMethodName))
                return
            # clean up only on the first test of a run or when the previous
            # test's tearDown failed
            if self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    log.warn("tearDown for previous test failed. Will retry..")
                    self.case_number -= 1000
                self.cleanup = True
                if not self.skip_init_check_cbserver:
                    self.tearDownEverything()
                self.task = ServerTasks()
            if not self.skip_init_check_cbserver:
                log.info("initializing cluster")
                self.cluster_util.reset_cluster(self.targetMaster, self.reset_services)
                master_services = self.cluster_util.get_services(self.servers[:1], \
                                                    self.services_init, \
                                                    start_node=0)
                if master_services is not None:
                    master_services = master_services[0].split(",")

                self.quota = self._initialize_nodes(self.task, self.cluster.servers, \
                                                    self.disabled_consistent_view, \
                                                    self.rebalanceIndexWaitingDisabled, \
                                                    self.rebalanceIndexPausingDisabled, \
                                                    self.maxParallelIndexers, \
                                                    self.maxParallelReplicaIndexers, \
                                                    self.port, \
                                                    self.quota_percent, \
                                                    services=master_services)

                self.cluster_util.change_env_variables()
                self.cluster_util.change_checkpoint_params()

                # Add built-in user
                if not self.skip_init_check_cbserver:
                    self.add_built_in_server_user(node=self.cluster.master)
                log.info("done initializing cluster")
            else:
                self.quota = ""
            if self.input.param("log_info", None):
                self.cluster_util.change_log_info()
            if self.input.param("log_location", None):
                self.cluster_util.change_log_location()
            if self.input.param("stat_info", None):
                self.cluster_util.change_stat_info()
            if self.input.param("port_info", None):
                self.cluster_util.change_port_info()
            if self.input.param("port", None):
                self.port = str(self.input.param("port", None))
            try:
                if (str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1) or \
                        (str(self.__class__).find('memorysanitytests.MemorySanity') != -1) or \
                                str(self.__class__).find('negativetests.NegativeTests') != -1 or \
                                str(self.__class__).find('warmuptest.WarmUpTests') != -1 or \
                                str(self.__class__).find('failover.failovertests.FailoverTests') != -1 or \
                                str(self.__class__).find('observe.observeseqnotests.ObserveSeqNoTests') != -1 or \
                                str(self.__class__).find('epengine.lwwepengine.LWW_EP_Engine') != -1:

                    self.services = self.get_services(self.servers, self.services_init)
                    # rebalance all nodes into the cluster before each test
                    self.task.rebalance(self.servers[:self.num_servers], self.servers[1:self.num_servers], [],
                                           services=self.services)
                elif self.nodes_init > 1 and not self.skip_init_check_cbserver:
                    self.services = self.get_services(self.servers[:self.nodes_init], self.services_init)
                    self.task.rebalance(self.servers[:1], \
                                           self.servers[1:self.nodes_init], \
                                           [], services=self.services)
                elif str(self.__class__).find('ViewQueryTests') != -1 and \
                        not self.input.param("skip_rebalance", False):
                    self.services = self.get_services(self.servers, self.services_init)
                    self.task.rebalance(self.servers, self.servers[1:],
                                           [], services=self.services)
                self.cluster_util.setDebugLevel(service_type="index")
            except BaseException as e:
                # increase case_number to retry tearDown in setup for the next test
                self.case_number += 1000
                self.fail(e)

            log.info("==============  basetestcase setup was finished for test #{0} {1} ==============" \
                          .format(self.case_number, self._testMethodName))

            if not self.skip_init_check_cbserver:
                self._log_start(self)
                self.sleep(10)
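
Both examples encode teardown retries in case_number: a failed cluster initialization adds 1000 before failing the test, and the next test's setUp treats any value above 1000 as a cue to retry the cleanup. A stripped-down, runnable sketch of that handshake follows; every name here is a placeholder rather than the framework's API:

class RetryAwareSetup(object):
    """Placeholder class illustrating the case_number +1000 retry marker."""

    RETRY_OFFSET = 1000

    def __init__(self, case_number):
        self.case_number = case_number
        self.cleanup = False

    def set_up(self):
        # The first test of a run, or a case number pushed past the offset
        # by a previous failure, forces a full cleanup before proceeding.
        if self.case_number == 1 or self.case_number > self.RETRY_OFFSET:
            if self.case_number > self.RETRY_OFFSET:
                self.case_number -= self.RETRY_OFFSET  # clear the marker
            self.cleanup = True
            self.tear_down_everything()
        try:
            self.initialize_cluster()
        except Exception:
            # Mark the failure so the *next* test's set_up retries the cleanup.
            self.case_number += self.RETRY_OFFSET
            raise

    def tear_down_everything(self):
        print("cleaning up stale cluster state")

    def initialize_cluster(self):
        print("rebalancing nodes into the cluster")

RetryAwareSetup(case_number=1).set_up()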