Example No. 1
 def store_keys(self,
                servers,
                buckets,
                backup_num,
                backup_validation_path,
                mode="memory",
                backup_type="backup"):
     """
     Collects the latest keys from a cluster after a restore
     :param servers: servers in the cluster
     :param buckets: buckets in the cluster
     :param backup_num: current backup number
     :param backup_validation_path: where to store the data
     :param mode: where to get the items from - can be "disk" or "memory"
     :param backup_type: backup or restore
     """
     data_collector = DataCollector()
     info, complete_map = data_collector.collect_data(
         servers,
         buckets,
         userId=servers[0].rest_username,
         password=servers[0].rest_password,
         perNode=False,
         mode=mode)
     for bucket in list(complete_map.keys()):
         file_name = "{0}-{1}-{2}.json".format(bucket, "keys", backup_num)
         file_path = os.path.join(backup_validation_path, file_name)
         keys = list(complete_map[bucket].keys())
         with open(file_path, 'w') as f:
             json.dump({"keys": keys}, f)
Example No. 2
 def store_latest(self, servers, buckets, backup_num, backup_validation_path, get_replica=False, mode="memory",
                  backup_type="backup"):
     """
     Collects the latest key-value pairs from a cluster after a restore
     :param servers: servers in the cluster
     :param buckets: buckets in the cluster
     :param backup_num: current backup number
     :param backup_validation_path: where to store the data
     :param get_replica: bool to decide if replica data should be copied
     :param mode: where to get the items from - can be "disk" or "memory"
     :param backup_type: backup or restore
     """
     data_collector = DataCollector()
     info, complete_map = data_collector.collect_data(servers, buckets, userId=servers[0].rest_username,
                                                      password=servers[0].rest_password, perNode=False,
                                                      getReplica=get_replica, mode=mode)
     for bucket in complete_map.keys():
         file_name = "{0}-{1}-{2}-{3}.json".format(bucket, "key_value", backup_type, backup_num)
         file_path = os.path.join(backup_validation_path, file_name)
         data = complete_map[bucket]
         for key in data:
             value = data[key]
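             # keep only the fifth comma-separated field of the stat string (assumed to be the stored value)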
             value = ",".join(value.split(',')[4:5])
             data[key] = value
         with open(file_path, 'w') as f:
             json.dump(data, f)
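The ",".join(value.split(',')[4:5]) line keeps only the fifth comma-separated field of each collected entry before it is persisted. A standalone trace (the raw layout here is made up for illustration, not taken from collect_data):

    raw = "vb_100,0,0,123,doc-value"
    trimmed = ",".join(raw.split(',')[4:5])
    print(trimmed)  # -> doc-value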
Example No. 3
 def validate_restore(self, backup_number, backup_vbucket_seqno, restored_vbucket_seqno, compare_uuid=False,
                      compare="==", get_replica=False, mode="memory"):
     """
     Validates ep-engine stats and restored data
     :param backup_number: backup end number
     :param backup_vbucket_seqno: vbucket map of all backups
     :param restored_vbucket_seqno: vbucket map of restored cluster
     :param compare_uuid: bool to decide whether to compare uuid or not
     :param compare: comparator function
     :param get_replica: bool to decide whether to compare replicas or not
     :param mode: where to get the items from - can be "disk" or "memory"
     :return: status and message
     """
     self.log.info("backup start: " + str(self.backupset.start))
     self.log.info("backup end: " + str(self.backupset.end))
     success_msg = ""
     if not self.backupset.force_updates:
         status, msg = self.compare_vbucket_stats(backup_vbucket_seqno[backup_number - 1], restored_vbucket_seqno,
                                                  compare_uuid=compare_uuid, seqno_compare=compare)
         if not status:
             return status, msg
         success_msg = "{0}\n".format(msg)
     for bucket in self.buckets:
         kv_file_name = "{0}-{1}-{2}-{3}.json".format(bucket.name, "key_value", "backup", backup_number)
         kv_file_path = os.path.join(self.backup_validation_path, kv_file_name)
         backedup_kv = {}
         if os.path.exists(kv_file_path):
             with open(kv_file_path, 'r') as f:
                 backedup_kv = json.load(f)
         data_collector = DataCollector()
         info, restored_data = data_collector.collect_data(self.restore_cluster, [bucket],
                                                           userId=self.restore_cluster[0].rest_username,
                                                           password=self.restore_cluster[0].rest_password,
                                                           perNode=False,
                                                           getReplica=get_replica, mode=mode)
         data = restored_data[bucket.name]
         for key in data:
             value = data[key]
             value = ",".join(value.split(',')[4:5])
             data[key] = value
         is_equal, not_equal, extra, not_present = self.compare_dictionary(backedup_kv, data)
         status, msg = self.compare_dictionary_result_analyser(is_equal, not_equal, extra, not_present,
                                                               "{0} Data".format(bucket.name))
         if not status:
             return status, msg
         success_msg += "{0}\n".format(msg)
     success_msg += "Data validation success"
     return True, success_msg
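A hypothetical call site (the seqno maps are assumed to have been captured earlier by the backup steps): the two-value return feeds directly into a test assertion.

    status, msg = self.validate_restore(backup_number=1,
                                        backup_vbucket_seqno=backup_seqnos,
                                        restored_vbucket_seqno=restored_seqnos,
                                        get_replica=False,
                                        mode="memory")
    if not status:
        self.fail(msg)  # unittest-style failure carrying the mismatch report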
Example No. 6
 def store_keys(self, servers, buckets, backup_num, backup_validation_path, mode="memory", backup_type="backup"):
     """
     Collects the latest keys from a cluster after a restore
     :param servers: servers in the cluster
     :param buckets: buckets in the cluster
     :param backup_num: current backup number
     :param backup_validation_path: where to store the data
     :param mode: where to get the items from - can be "disk" or "memory"
     :param backup_type: backup or restore
     """
     data_collector = DataCollector()
     info, complete_map = data_collector.collect_data(servers, buckets, userId=servers[0].rest_username,
                                                      password=servers[0].rest_password, perNode=False,
                                                      mode=mode)
     for bucket in complete_map.keys():
         file_name = "{0}-{1}-{2}.json".format(bucket, "keys", backup_num)
         file_path = os.path.join(backup_validation_path, file_name)
         keys = list(complete_map[bucket].keys())  # list() so the keys are JSON-serializable under Python 3
         with open(file_path, 'w') as f:
             json.dump({"keys": keys}, f)
Example No. 7
    def setUp(self):
        self.input = TestInputSingleton.input
        self.parse_params()
        self.nodes_init = self.input.param("nodes_init", 3)

        self.log = logger.Logger.get_logger()
        if self.log_level:
            self.log.setLevel(level=self.log_level)
    
        self.use_https = True
        CbServer.use_https = True
        if not hasattr(self, 'cluster'):
            self.cluster = Cluster()
        self.buckets = []
        self.collection_name = {}
        CbServer.rest_username = self.input.membase_settings.rest_username
        CbServer.rest_password = self.input.membase_settings.rest_password
        self.data_collector = DataCollector()
        self.data_analyzer = DataAnalyzer()
        self.result_analyzer = DataAnalysisResultAnalyzer()
        self.set_testrunner_client()

        self.pod = pod(self.input.capella["pod"])
        self.tenant = tenant(
            self.input.capella["tenant_id"],
            self.input.capella["capella_user"],
            self.input.capella["capella_pwd"],
        )
        self.project_id = self.input.capella["project_id"]
        self.tenant.project_id = self.project_id
        
        cluster_details = self.create_capella_config()

        cluster_id, _, _ = CapellaAPI.create_cluster(self.pod, self.tenant, cluster_details)

        self.cluster_id = cluster_id
        CbServer.cluster_id = cluster_id

        CapellaAPI.add_allowed_ip(self.pod, self.tenant, self.cluster_id)
        CapellaAPI.create_db_user(self.pod, self.tenant, self.cluster_id,
                                  self.input.membase_settings.rest_username,
                                  self.input.membase_settings.rest_password)

        CbServer.pod = self.pod
        CbServer.tenant = self.tenant
        CbServer.cluster_id = self.cluster_id

        servers = CapellaAPI.get_nodes_formatted(
            self.pod, self.tenant, self.cluster_id,
            self.input.membase_settings.rest_username,
            self.input.membase_settings.rest_password)

        self.cluster_config = ClusterConfig(self.input.membase_settings.rest_username,
                                            self.input.membase_settings.rest_password,
                                            self.create_input_servers())
        self.cluster_config.update_servers(servers)

        if not self.skip_buckets_handle:
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

        self.bucket_base_params = {'membase': {}}
    
        shared_params = self._create_bucket_params(server=self.master, size=self.bucket_size,
                                                   replicas=self.num_replicas,
                                                   enable_replica_index=self.enable_replica_index,
                                                   eviction_policy=self.eviction_policy, bucket_priority=None,
                                                   lww=self.lww, maxttl=self.maxttl,
                                                   compression_mode=self.compression_mode)

        membase_params = copy.deepcopy(shared_params)
        membase_params['bucket_type'] = 'membase'
        self.bucket_base_params['membase']['non_ephemeral'] = membase_params

        membase_ephemeral_params = copy.deepcopy(shared_params)
        membase_ephemeral_params['bucket_type'] = 'ephemeral'
        self.bucket_base_params['membase']['ephemeral'] = membase_ephemeral_params

        self.bucket_base_params['membase']['non_ephemeral']['size'] = self.bucket_size
        self.bucket_base_params['membase']['ephemeral']['size'] = self.bucket_size

        self._bucket_creation()
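A quick structural check of what this setUp builds (a sketch to run after setUp; the inner fields come from _create_bucket_params and are not enumerated here):

    assert set(self.bucket_base_params['membase']) == {'non_ephemeral', 'ephemeral'}
    assert self.bucket_base_params['membase']['non_ephemeral']['bucket_type'] == 'membase'
    assert self.bucket_base_params['membase']['ephemeral']['bucket_type'] == 'ephemeral'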
Example No. 8
 def validate_restore(self,
                      backup_number,
                      backup_vbucket_seqno,
                      restored_vbucket_seqno,
                      compare_uuid=False,
                      compare="==",
                      get_replica=False,
                      mode="memory",
                      backups_on_disk=None):
     """
     Validates ep-engine stats and restored data
     :param backup_number: backup end number
     :param backup_vbucket_seqno: vbucket map of all backups
     :param restored_vbucket_seqno: vbucket map of restored cluster
     :param compare_uuid: bool to decide whether to compare uuid or not
     :param compare: comparator function
     :param get_replica: bool to decide whether to compare replicas or not
     :param mode: where to get the items from - can be "disk" or "memory"
     :param backups_on_disk: number of backups currently on disk
     :return: status and message
     """
     self.log.info("backup start: " + str(self.backupset.start))
     self.log.info("backup end: " + str(self.backupset.end))
     success_msg = ""
     if not self.backupset.force_updates:
         status, msg = self.compare_vbucket_stats(
             backup_vbucket_seqno[backup_number - 1],
             restored_vbucket_seqno,
             compare_uuid=compare_uuid,
             seqno_compare=compare,
             mapBucket=self.backupset.map_buckets)
         if not status:
             return status, msg
         success_msg = "{0}\n".format(msg)
     for bucket in self.buckets:
         kv_file_name = "{0}-{1}-{2}-{3}.json".format(
             bucket.name, "key_value", "backup", backup_number)
         kv_file_path = os.path.join(self.backup_validation_path,
                                     kv_file_name)
         if backup_number == self.backupset.end and backup_number == backups_on_disk:
             backup_files = [
                 f"{self.backup_validation_path}/" + f
                 for f in listdir(self.backup_validation_path)
                 if f"{bucket.name}-key_value-backup" in f
             ]
             backup_files.sort(key=os.path.getmtime)
             try:
                 kv_file_path = backup_files[-1]
             except IndexError as e:
                 if self.backupset.deleted_buckets:
                     return True, "Deleted bucket, will not check"
                 else:
                     raise e
         backedup_kv = {}
         if os.path.exists(kv_file_path):
             try:
                 with open(kv_file_path, 'r') as f:
                     backedup_kv = json.load(f)
             except Exception as e:
                 raise e
         else:
             raise Exception(f"{kv_file_path} is missing!")
         data_collector = DataCollector()
         info, restored_data = data_collector.collect_data(
             self.restore_cluster, [bucket],
             userId=self.restore_cluster[0].rest_username,
             password=self.restore_cluster[0].rest_password,
             perNode=False,
             getReplica=get_replica,
             mode=mode)
         data = restored_data[bucket.name]
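         # normalize each value: keep the payload field(s), collapse doubled quotes, strip any b'...' wrapper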
         for key in data:
             value = data[key]
             if '"b\'' in value:
                 value = ",".join(value.split(',')[4:8])
             else:
                 value = ",".join(value.split(',')[4:5])
             value = value.replace('""', '"')
             if value.startswith('"b\''):
                 value = value[3:-2]
             elif value.startswith("b"):
                 value = value.split(',')[0]
             data[key] = value
         self.log.info("Compare backup data in bucket %s " % bucket.name)
         is_equal, not_equal, extra, not_present = \
                                     self.compare_dictionary(backedup_kv, data)
         status, msg = self.compare_dictionary_result_analyser(
             is_equal, not_equal, extra, not_present,
             "{0} Data".format(bucket.name))
         if not status:
             return status, msg
         success_msg += "{0}\n".format(msg)
     success_msg += "Data validation success"
     return True, success_msg
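A standalone trace of the value normalization above, with a made-up collected entry (the raw CSV layout is assumed for illustration only):

    raw = 'vb_0,0,0,123,"b\'{""name"": ""doc1""}\'"'
    value = ",".join(raw.split(',')[4:8])  # -> '"b\'{""name"": ""doc1""}\'"'
    value = value.replace('""', '"')       # -> '"b\'{"name": "doc1"}\'"'
    if value.startswith('"b\''):
        value = value[3:-2]                # strip the quoted b'...' wrapper
    print(value)                           # -> {"name": "doc1"}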