Example #1
def get_user_etcd(test_class_instance, etcd, user_id):
    """
    Function gets user_id and user_name from etcd store.
    :param user_id:
    :param test_class_instance: instance of the test class, i.e. self
    :param etcd: url of the etcd service
    :return: actual_user_id =>
             actual_user_name =>
             error =>
    """
    actual_user_id, actual_user_name, error = '', '', ''
    test_class_instance.mylog.info(
        'gateway_util.get_user_etcd() function is being called')
    test_class_instance.mylog.info(
        '--------------------------------------------------------')
    test_class_instance.mylog.info(
        'gateway_util.get_user_etcd(): params: user_id \'%s\'' % user_id)
    test_class_instance.mylog.info('')
    cmd = 'ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints %s get --prefix "userv1/%s" --print-value-only' \
          % (etcd, user_id)
    out, error = litmus_utils.execCmd(test_class_instance,
                                      cmd,
                                      status='OUT_STATUS')
    if out != '':
        actual_user_id = ast.literal_eval(out).get('id')
        test_class_instance.mylog.info(
            'gateway_util.get_user_etcd() : actual_user_id = \'%s\'' %
            actual_user_id)
        actual_user_name = ast.literal_eval(out).get('name')
        if actual_user_name != 'DoubleQuotes\"' and actual_user_name != 'DoubleQuotes\"_updated_name':
            actual_user_name = json.loads("\"" + actual_user_name + "\"")
        test_class_instance.mylog.info(
            'gateway_util.get_user_etcd() : actual_user_name = \'%s\'' %
            actual_user_name)
    return actual_user_id, actual_user_name, error
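The function treats the value stored under "userv1/<user_id>" as a Python-literal dict and then JSON-decodes the name to resolve escape sequences. A minimal, self-contained sketch of that parsing step, fed with a made-up etcdctl value:

import ast
import json

out = "{'id': '02f288f282b44000', 'name': 'some_user'}"  # hypothetical etcdctl output
record = ast.literal_eval(out)
actual_user_id = record.get('id')
# real names may contain escape sequences, which the extra json.loads pass resolves
actual_user_name = json.loads('"' + record.get('name') + '"')
print(actual_user_id, actual_user_name)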
Example #2
def remove_buckets(request, etcd):
    """
    :param request:
    :param etcd:
    :return:
    """
    request.cls.mylog.info('remove_buckets() fixture is being called')
    request.cls.mylog.info('----------------------------------------')
    cmd = '%s --endpoints %s del --prefix "bucketv1"' % (etcdctl, etcd)
    exit = litmus_utils.execCmd(request.cls, cmd)
    request.cls.mylog.info('remove_buckets() fixture is done')
    request.cls.mylog.info('--------------------------------')
    request.cls.mylog.info('')
    assert exit == 0, request.cls.mylog.info('remove_buckets() fixture exit status is not 0')
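remove_buckets() is written as a pytest fixture (it receives the pytest request object), so a test class would normally pull it in through a usefixtures marker. A hedged sketch, assuming the fixture is registered under the name 'remove_buckets' in a conftest.py:

import pytest

@pytest.mark.usefixtures('remove_buckets')
class TestBuckets(object):
    def test_bucket_store_is_empty(self):
        # by the time this test runs, the fixture has purged every
        # "bucketv1/..." key from the etcd store
        pass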
Example #3
def remove_orgs(request, etcd):
    """
    :param request:
    :param etcd:
    :return:
    """
    request.cls.mylog.info('remove_orgs() fixture is being called')
    request.cls.mylog.info('-------------------------------------')
    cmd_remove_org_id = '%s --endpoints %s del --prefix "indexv1/org/id"' % (etcdctl, etcd)
    cmd_remove_org_name = '%s --endpoints %s del --prefix "indexv1/org/name"' % (etcdctl, etcd)
    cmd = '%s --endpoints %s del --prefix "Organizationv1"' % (etcdctl, etcd)
    request.cls.mylog.info('remove_orgs() removing index org id')
    exit_org_id = litmus_utils.execCmd(request.cls, cmd_remove_org_id)
    request.cls.mylog.info('remove_orgs() removing index org name')
    exit_org_name = litmus_utils.execCmd(request.cls, cmd_remove_org_name)
    request.cls.mylog.info('remove_orgs() removing Organization')
    exit = litmus_utils.execCmd(request.cls, cmd)
    request.cls.mylog.info('remove_orgs() fixture is done')
    request.cls.mylog.info('-----------------------------')
    request.cls.mylog.info('')
    assert exit_org_id == 0, request.cls.mylog.info('remove_orgs() fixture exit_org_id status is not 0')
    assert exit_org_name == 0, request.cls.mylog.info('remove_orgs() fixture exit_org_name status is not 0')
    assert exit == 0, request.cls.mylog.info('remove_orgs() fixture exit status is not 0')
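The fixture issues three etcdctl deletes: the two organization index prefixes and the organization records themselves. For reference, a self-contained sketch of the same cleanup done directly with subprocess; the etcdctl path and endpoint below are assumptions, not values taken from the fixture:

import os
import subprocess

ETCDCTL = '/usr/local/bin/etcdctl'   # assumed binary location
ENDPOINT = 'http://127.0.0.1:2379'   # assumed etcd endpoint
env = dict(os.environ, ETCDCTL_API='3')

for prefix in ('indexv1/org/id', 'indexv1/org/name', 'Organizationv1'):
    subprocess.check_call(
        [ETCDCTL, '--endpoints', ENDPOINT, 'del', '--prefix', prefix], env=env)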
Example #4
def remove_tasks(request, etcd_tasks):
    """
    Remove all references for the specific tasks from etcd-tasks store using ETCDCTL command
    https://github.com/influxdata/idpe/blob/49ae6f73546b05e8597d2284a3957c20999c5bdd/task/store/etcd/store.go#L1195-L1214
    delTask := clientv3.OpDelete(path.Join(tasksPath, idStr))
    delTaskMeta := clientv3.OpDelete(path.Join(taskMetaPath, idStr)+"/", clientv3.WithPrefix())
    delOrgTask := clientv3.OpDelete(path.Join(orgsPath, org, idStr))

    // Only release the lease task but now the owner. When in a cluster
    // we will most likely not be the owner of this lease.
    delClaimTask := clientv3.OpDelete(path.Join(claimPath, idStr))
    delUserTask := clientv3.OpDelete(path.Join(usersPath, user, idStr))

    where:
    tasksPath     = basePath + "tasks"
    orgsPath      = basePath + "orgs"
    usersPath     = basePath + "users"
    taskMetaPath  = basePath + "task_meta"
    claimPath     = basePath + "claims"
    orgDelPath    = basePath + "org_deletions"
    userDelPath   = basePath + "user_deletions"
    cancelRunPath = basePath + "runs"
    basePath = "/tasks/v1/"

    :param request: pytest request object, gives access to the test class
    :param etcd_tasks: url of the etcd-tasks service
    :return: None
    """
    # Should change to remove the above and leave /tasks/v1/claims/02f8d98a004e0000/owner
    #                                               tasks-7dcc47895c-mvpql
    request.cls.mylog.info('remove_tasks() fixture is being called')
    request.cls.mylog.info('--------------------------------------')
    cmd = '%s --endpoints %s del --prefix "/tasks/v1"' % (etcdctl, etcd_tasks)
    exit = litmus_utils.execCmd(request.cls, cmd)
    request.cls.mylog.info('remove_tasks() fixture is done')
    request.cls.mylog.info('------------------------------')
    request.cls.mylog.info('')
    assert exit == 0, request.cls.mylog.info('remove_tasks() fixture exit status is not 0')
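Before purging, it can be useful to see which keys actually live under the /tasks/v1 prefix described in the docstring. A small sketch using etcdctl's --keys-only flag; the binary path and endpoint are assumptions:

import os
import subprocess

env = dict(os.environ, ETCDCTL_API='3')
cmd = ['/usr/local/bin/etcdctl', '--endpoints', 'http://127.0.0.1:2379',
       'get', '--prefix', '/tasks/v1', '--keys-only']
for key in subprocess.check_output(cmd, env=env).splitlines():
    print(key)  # e.g. b'/tasks/v1/tasks/<id>', b'/tasks/v1/task_meta/<id>/...'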
Example #5
def get_tasks_etcd(test_class_instance, etcd_tasks, task_id):
    """
    Function get_tasks_etcd() gets information about specific task
    :param test_class_instance: instance of the test class, i.e. self
    :param etcd_tasks: url of the etcd-tasks service
    :param task_id: id of the task
    :return: {'flux_script':flux_script, 'error':error, 'task_name':name, 'org_id':org_id, 'user_id':user_id,
            'status':status, 'schedule':schedule}
    """
    out, flux_script, error = '', '', ''
    schedule, name, org_id, user_id, status = '', '', '', '', ''
    test_class_instance.mylog.info(
        'gateway_util.get_tasks_etcd() function is being called')
    test_class_instance.mylog.info(
        '------------------------------------------------------')
    test_class_instance.mylog.info(
        'gateway_util.get_tasks_etcd(): params: task_id \'%s\'' % task_id)
    test_class_instance.mylog.info('')
    cmd_flux_script = 'ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints %s get --prefix "/tasks/v1/tasks/%s" ' \
                      '--print-value-only' % (etcd_tasks, task_id)
    cmd_meta_data = 'ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints %s get --prefix "/tasks/v1/task_meta/%s" ' \
                    '--print-value-only' % (etcd_tasks, task_id)
    out, error = litmus_utils.execCmd(test_class_instance,
                                      cmd_flux_script,
                                      status='OUT_STATUS')
    # ('option task = {name:"task_1", every:2m} from(bucket:"test_bucket") |> range(start:-5h)\n', '')
    if out != '':
        flux_script = out.strip('\n')
        test_class_instance.mylog.info(
            'gateway_util.get_tasks_etcd() : flux_script = \'%s\'' %
            flux_script)
    out, error = litmus_utils.execCmd(test_class_instance,
                                      cmd_meta_data,
                                      status='OUT_STATUS')
    # etcd returns meta data: schedule (active, every); task_name; org_id; user_id
    # ('\x08\x01\x10\xc8\xe8\xbc\xdf\x05\x1a\x06active*\x0b@every 2m0s\ntask_1\n02f288f282b44000\n02f288f30a344000\n'
    # , '')
    if out != '':
        meta_data_list = out.strip('\n').split('\n')
        # ['\x08\x01\x10\x98\xfd\xbc\xdf\x05\x1a\x06active*\x0b@every 2m0s', 'task_1', '02f288f282b44000',
        # '02f288f30a344000']
        # need to parse out the control characters, x08, x01, etc
        if len(meta_data_list) >= 5:
            test_class_instance.mylog.info(
                'gateway_util.get_tasks_etcd(): meta_data_list = ' +
                str(meta_data_list))
            name = meta_data_list[2]
            org_id = meta_data_list[3]
            user_id = meta_data_list[4]
            status = re.search(r'active|inactive', meta_data_list[0]).group(0)
            schedule = re.search(r'((cron|every) .*$)',
                                 meta_data_list[1]).group(0)
    return {
        'flux_script': flux_script,
        'error': error,
        'task_name': name,
        'org_id': org_id,
        'user_id': user_id,
        'status': status,
        'schedule': schedule
    }
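The task status and schedule are pulled out of the binary-prefixed meta-data with two regular expressions. A self-contained sketch running the same expressions against the sample blob quoted in the comments above (illustrative values only):

import re

blob = '\x08\x01\x10\xc8\xe8\xbc\xdf\x05\x1a\x06active*\x0b@every 2m0s'
status = re.search(r'active|inactive', blob).group(0)        # 'active'
schedule = re.search(r'((cron|every) .*$)', blob).group(0)   # 'every 2m0s'
print(status, schedule)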
Example #6
def get_org_etcd(test_class_instance, etcd, org_id, get_index_values=False):
    """
    Function gets org ids and org name (hashed name) from etcd store and reports any errors.
    :param etcd:
    :param org_id:
    :param test_class_instance: instance of the test class, i.e. self
    :param get_index_values (bool), if set to True then get the org id and org name values from etcd index,
                                    default value is False
    :return: actual_org_id =>
             actual_org_name =>
             error =>
             name_by_index_id =>
             error_by_index_id =>
             id_by_index_name =>
             error_by_index_name =>
    """
    actual_org_id, actual_org_name, error, name_by_index_id, error_by_index_id, id_by_index_name, error_by_index_name = \
        '', '', '', '', '', '', ''
    test_class_instance.mylog.info(
        'gateway_util.get_org_etcd() function is being called')
    test_class_instance.mylog.info(
        '----------------------------------------------------')
    test_class_instance.mylog.info('')
    cmd = 'ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints %s get --prefix "Organizationv1/%s" --print-value-only' \
          % (etcd, org_id)
    out, error = litmus_utils.execCmd(test_class_instance,
                                      cmd,
                                      status='OUT_STATUS')
    # if we want to get values from indexv1/org/id and indexv1/org/name from etcd store
    if get_index_values:
        test_class_instance.mylog.info(
            'gateway_util.get_org_etcd() : Getting hashed name by index by id prefix'
        )
        cmd_index_by_id = 'ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints %s get --prefix "indexv1/org/id/%s" ' \
                          '--print-value-only' % (etcd, org_id)
        name_by_index_id, error_by_index_id = \
            litmus_utils.execCmd(test_class_instance, cmd_index_by_id, status='OUT_STATUS')
        # get first 64 characters
        name_by_index_id = name_by_index_id[:64]
        test_class_instance.mylog.info(
            'gateway_util.get_org_etcd() : Getting org id by index by name prefix'
        )
        cmd_index_by_name = 'ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints %s get --prefix "indexv1/org/name/%s" ' \
                            '--print-value-only' % (etcd, name_by_index_id)
        id_by_index_name, error_by_index_name = \
            litmus_utils.execCmd(test_class_instance, cmd_index_by_name, status='OUT_STATUS')
        name_by_index_id = name_by_index_id.strip()
        id_by_index_name = id_by_index_name.strip()
    # If organization id and name exist (assuming no error)
    if out != '':
        actual_org_id = ast.literal_eval(out).get('id')
        test_class_instance.mylog.info(
            'gateway_util.get_org_etcd() : actual_org_id = \'%s\'' %
            actual_org_id)
        actual_org_name = ast.literal_eval(out).get('name')
        # need to handle double quotes for now separately
        if actual_org_name != 'DoubleQuotes\"' and actual_org_name != 'DoubleQuotes\"_updated_name':
            actual_org_name = json.loads("\"" + actual_org_name + "\"")
        test_class_instance.mylog.info(
            'gateway_util.get_org_etcd() : actual_org_name = \'%s\'' %
            actual_org_name)
    return actual_org_id, actual_org_name, error, name_by_index_id, error_by_index_id, id_by_index_name, \
           error_by_index_name
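When get_index_values is True the function does two chained lookups: the id index holds the hashed name (the function keeps its first 64 characters), which is then used as the key of the name index. A hedged sketch of those two lookups done directly with etcdctl via subprocess; the binary path, endpoint and org id below are placeholders:

import os
import subprocess

env = dict(os.environ, ETCDCTL_API='3')
etcd = 'http://127.0.0.1:2379'       # assumed etcd endpoint
org_id = '02f288f282b44000'          # placeholder org id

def etcd_get(prefix):
    return subprocess.check_output(
        ['/usr/local/bin/etcdctl', '--endpoints', etcd, 'get',
         '--prefix', prefix, '--print-value-only'], env=env).decode()

hashed_name = etcd_get('indexv1/org/id/%s' % org_id)[:64]
org_id_from_name_index = etcd_get('indexv1/org/name/%s' % hashed_name).strip()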
Example #7
    def test_tsm_diffs(self):
        '''
        Verify that a TSM diff (entropy) between the data nodes holding the same
        shard is detected by 'show entropy' and fixed by a shard repair.
        '''
        time_minus_3_hours_ms = int(10800000)  # 10,800,000 ms == 3 hours
        test_name = 'test_tsm_diffs '
        database_name = 'test_tsm_db'
        rp = 'tsm_diff'
        user = self.clusteros
        host = (self.meta_leader).split('//')[1][:-5]
        privatekey = self.privatekey
        data_node = choice(self.data_nodes_ips)
        username, password = '', ''
        current_time_sec = int((datetime.datetime.utcnow() -
                                datetime.datetime(1970, 1, 1)).total_seconds())
        point_time_sec = current_time_sec - 10800 - 1
        current_time_ms = current_time_sec * 1000
        point_time = current_time_ms - time_minus_3_hours_ms
        if self.http_auth:  # it is not supported by the writenode tool
            username = self.admin_user
            password = self.admin_pass
        point = [{
            'measurement': 'test_tsm',
            'time': point_time,
            'fields': {
                'value': 1
            },
            'tags': {
                't': 1
            }
        }]
        cmd_chmod = 'ssh -i %s -o StrictHostKeyChecking=no %s@%s \'cd /tmp; sudo chmod +x writenode_lin\'' \
                    % (privatekey, user, host)
        shard_id = None

        self.header(test_name)
        self.mylog.info(test_name + 'STEP 1: Create InfluxDBClient')
        client = InfluxDBClient(data_node,
                                username=username,
                                password=password)
        self.mylog.info(test_name + 'STEP 2: Create database')
        (success, error) = du.create_database(self, client, database_name)
        assert success, self.mylog.info(test_name +
                                        'Failure to create database :' +
                                        str(error))
        self.mylog.info(
            test_name +
            'STEP 3: Create retention policy with Replication Factor 2')
        (success, error) = du.create_retention_policy(self,
                                                      client,
                                                      rp_name=rp,
                                                      duration='36h',
                                                      replication='2',
                                                      database=database_name,
                                                      default=True)
        assert success, self.mylog.info(
            test_name + 'Failure to create retention policy :' + str(error))
        # every 5 seconds, if there are no writes, the data from the WAL will be written to a TSM file
        # (INFLUXDB_DATA_CACHE_SNAPSHOT_WRITE_COLD_DURATION="5s" - setting in the data section, provided through the installer)
        self.mylog.info(test_name + 'STEP 4: Write a point into %s database' %
                        database_name)
        self.mylog.info(test_name + 'POINT_TIME_SEC=' + str(point_time_sec))
        result = du.write_points(self,
                                 client,
                                 points=point,
                                 time_precision='ms',
                                 database=database_name)
        assert result, self.mylog.info(test_name +
                                       'Failure to write a point first time')
        self.mylog.info(
            test_name +
            'STEP 5: Wait for 7sec(CACHE_SNAPSHOT_WRITE_COLD_DURATION=5sec) before writing '
            'a second point to create a second tsm file')
        time.sleep(7)
        # at this time we should have one tsm file (the same shard group) on two of the data nodes
        self.mylog.info(
            test_name +
            'STEP 6: Load extra data into randomly chosen data node to cause entropy'
        )
        status, result, error = self.irl._show_cluster(self.meta_leader)
        assert status, self.mylog.info(test_name +
                                       'Failed to show cluster info :' +
                                       str(error))
        data_node_id = choice(list(iu.show_cluster_data_nodes(self, result).keys()))
        self.mylog.info(test_name +
                        'Write extra data to \'%s\' node' % str(data_node_id))
        status, result, error = self.irl._show_shards(self.meta_leader)
        assert status, self.mylog.info(test_name +
                                       'Failed to show shards info :' +
                                       str(error))
        shards = iu.show_shards(self, result)
        # get the shard id the extra data should be written to
        for key, value in shards.items():
            if value['database'] == database_name:
                shard_id = key
                self.mylog.info(test_name + 'SHARD_ID=' + str(shard_id))
                break
        assert 0 == litmus_utils.execCmd(self, cmd_chmod), \
            self.mylog.info(test_name + 'Failed to execute \'%s\'' % cmd_chmod)
        cmd = 'ssh -i %s -o StrictHostKeyChecking=no %s@%s \'cd /tmp; ./writenode_lin -node %d -points 100 -shards %s' \
              ' -starttime %d\'' % (privatekey, user, host, data_node_id, shard_id, point_time_sec)
        assert 0 == litmus_utils.execCmd(self, cmd), \
            self.mylog.info(test_name + 'Failed to execute \'%s\'' % cmd)
        self.mylog.info(
            test_name +
            'Wait for 10 sec (INFLUXDB_DATA_COMPACT_FULL_WRITE_COLD_DURATION=10s) '
            'for compaction to complete')
        time.sleep(15)
        self.mylog.info(test_name + 'GET SHARD STRUCTURE OF DATANODES')
        for datanode in self.data_nodes_ips:
            litmus_utils.shard_layout(self, privatekey,
                                      '/var/lib/influxdb/data', database_name,
                                      rp, shard_id, user, datanode)
        self.mylog.info(test_name + 'STEP 7: Verify entropy was detected')
        success, result, message = self.irl._show_entropy(
            choice(self.data_nodes))
        assert success, self.mylog.info(test_name +
                                        'Failure to run \'show entropy\' :' +
                                        str(message))
        entropy_shard = iu.show_entropy_shards(self, result)
        self.mylog.info(test_name + 'Assert expected status=diff equals to ' +
                        entropy_shard[shard_id].get('status'))
        assert entropy_shard[shard_id].get(
            'status') == 'diff', self.mylog.info('Assertion Error')
        # TODO: add retention policy and database assertions.
        self.mylog.info(test_name + 'STEP 8: Fix entropy')
        (success, message) = self.irl.shard_repair(choice(self.data_nodes),
                                                   shard_id)
        assert success, self.mylog.info(
            test_name + 'Failed to repair shard %s, message=%s' %
            (shard_id, message))
        self.mylog.info(test_name +
                        'GET SHARD STRUCTURE OF DATANODES AFTER SHARD REPAIR')
        for datanode in self.data_nodes_ips:
            litmus_utils.shard_layout(self, privatekey,
                                      '/var/lib/influxdb/data', database_name,
                                      rp, shard_id, user, datanode)
        time.sleep(10)
        success, result, message = self.irl._show_entropy(
            choice(self.data_nodes))
        assert success, self.mylog.info(test_name +
                                        'Failure to run \'show entropy\' :' +
                                        str(message))
        entropy_shard = iu.show_entropy_shards(self, result)
        assert entropy_shard == {}
        self.footer(test_name)
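The point timestamp at the top of the test is computed in epoch milliseconds and offset 10,800,000 ms (three hours) into the past, well inside the 36h retention policy created in STEP 3. A self-contained sketch of that arithmetic:

import datetime

offset_ms = 10800000                        # 3 h * 3600 s/h * 1000 ms/s
now_sec = int((datetime.datetime.utcnow()
               - datetime.datetime(1970, 1, 1)).total_seconds())
point_time_ms = now_sec * 1000 - offset_ms  # epoch milliseconds, three hours ago
print(point_time_ms)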