Example #1
    def create_es_connection(self):
        ks = KeyStore()
        es_conf = ks.get_elasticsearch_credentials()
        self.es = Elasticsearch(hosts=[es_conf["es_url"]],
                                verify_certs=False,
                                http_auth=(es_conf["es_user"],
                                           es_conf["es_password"]))
Example #2
    def upload_sstables(node,
                        test_data: TestDataInventory,
                        keyspace_name: str = 'keyspace1'):
        key_store = KeyStore()
        creds = key_store.get_scylladb_upload_credentials()
        # Download the sstable files from S3
        remote_get_file(node.remoter,
                        test_data.sstable_url,
                        test_data.sstable_file,
                        hash_expected=test_data.sstable_md5,
                        retries=2,
                        user_agent=creds['user_agent'])
        result = node.remoter.sudo(
            f"ls -t /var/lib/scylla/data/{keyspace_name}/")
        upload_dir = result.stdout.split()[0]
        if node.is_docker():
            node.remoter.run(
                f'tar xvfz {test_data.sstable_file} '
                f'-C /var/lib/scylla/data/{keyspace_name}/{upload_dir}/upload/')
        else:
            node.remoter.sudo(
                f'tar xvfz {test_data.sstable_file} -C /var/lib/scylla/data/{keyspace_name}/{upload_dir}/upload/',
                user='******')

        # Scylla Enterprise 2019.1 doesn't support loading schema.cql and manifest.json, so remove them
        node.remoter.sudo(
            f'rm -f /var/lib/scylla/data/{keyspace_name}/{upload_dir}/upload/schema.cql'
        )
        node.remoter.sudo(
            f'rm -f /var/lib/scylla/data/{keyspace_name}/{upload_dir}/upload/manifest.json'
        )
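At this point the sstables only sit in the table's upload/ directory; actually loading them into the table is typically done with `nodetool refresh` afterwards. A hedged sketch (the table name standard1 is an assumption based on the default cassandra-stress schema, not something this snippet establishes):

        # assumption: standard1 is the cassandra-stress default table in keyspace1
        node.remoter.run(f'nodetool refresh -- {keyspace_name} standard1')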
Example #3
def fix_es_mapping(index_name):
    ks = KeyStore()
    es_conf = ks.get_elasticsearch_credentials()

    mapping_url = "{es_url}/{index_name}/_mapping".format(
        index_name=index_name, **es_conf)
    res = requests.get(mapping_url,
                       auth=(es_conf["es_user"], es_conf["es_password"]))
    output = res.json()[index_name]

    output['mappings']['test_stats']['dynamic'] = False

    output['mappings']['test_stats']['properties']['coredumps'] = dict(
        type='object')
    output['mappings']['test_stats']['properties']['setup_details'][
        'properties']['db_cluster_details'] = dict(type='object')
    output['mappings']['test_stats']['properties']['system_details'] = {
        "dynamic": False,
        "properties": {}
    }

    res = requests.put(mapping_url + "/test_stats",
                       json=output['mappings'],
                       auth=(es_conf["es_user"], es_conf["es_password"]))
    print(res.text)
    res.raise_for_status()

    click.secho("fixed {index_name}".format(index_name=index_name), fg='green')
Example #4
    def create_key_pair(self):
        LOGGER.info("Creating SCT Key Pair...")
        if self.sct_keypair:
            LOGGER.warning(f"SCT Key Pair already exists in {self.region_name}!")
        else:
            ks = KeyStore()
            sct_key_pair = ks.get_ec2_ssh_key_pair()
            self.resource.import_key_pair(KeyName=self.KEY_PAIR_NAME,  # pylint: disable=no-member
                                          PublicKeyMaterial=sct_key_pair.public_key)
            LOGGER.info("SCT Key Pair created.")
Example #5
    def __init__(self, instance_name: str, zone: str):
        credentials = KeyStore().get_gcp_credentials()
        self.credentials = service_account.Credentials.from_service_account_info(
            credentials)
        self.project_id = credentials['project_id']
        self.instance_name = instance_name
        self.zone = zone
Example #6
    def _get_gcloud_container(self) -> Container:
        """Create Google Cloud SDK container.

        Cloud SDK requires some authorization method to be enabled first.  Because of that, we start a container
        which runs forever using the `cat` command (as Jenkins does), put service account credentials into it,
        and activate them.

        All subsequent gcloud commands run using the container.exec_run() method.
        """
        container = ContainerManager.run_container(self, "gcloud")
        credentials = KeyStore().get_gcp_credentials()
        credentials["client_email"] = f"{credentials['client_email']}"
        shell_command = f"umask 077 && echo '{json.dumps(credentials)}' > /tmp/gcloud_svc_account.json"
        shell_command += " && echo 'kubeletConfig:\n  cpuManagerPolicy: static' > /tmp/system_config.yaml"
        # NOTE: use 'bash' for the non-alpine sdk image and 'sh' for the 'alpine' one.
        res = container.exec_run(["bash", "-c", shell_command])
        if res.exit_code:
            raise DockerException(f"{container}: {res.output.decode('utf-8')}")
        res = container.exec_run([
            "gcloud", "auth", "activate-service-account",
            credentials["client_email"], "--key-file",
            "/tmp/gcloud_svc_account.json", "--project",
            credentials["project_id"]
        ])
        if res.exit_code:
            raise DockerException(
                f"{container}[]: {res.output.decode('utf-8')}")
        return container
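Per the docstring, every subsequent gcloud call goes through exec_run() on the returned container. A minimal sketch mirroring the error handling above (the instances-list command is only an illustration):

        container = self._get_gcloud_container()
        res = container.exec_run(["gcloud", "compute", "instances", "list"])
        if res.exit_code:
            raise DockerException(f"{container}: {res.output.decode('utf-8')}")
        print(res.output.decode("utf-8"))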
Example #7
    def _get_gcloud_container(self) -> Container:
        """Create Google Cloud SDK container.

        Cloud SDK requires some authorization method to be enabled first.  Because of that, we start a container
        which runs forever using the `cat` command (as Jenkins does), put service account credentials into it,
        and activate them.

        All subsequent gcloud commands run using the container.exec_run() method.
        """
        container = ContainerManager.run_container(self, "gcloud")
        credentials = KeyStore().get_gcp_credentials()
        credentials[
            "client_email"] = f"{credentials['project_id']}@appspot.gserviceaccount.com"
        shell_command = f"umask 077 && echo '{json.dumps(credentials)}' > /tmp/gcloud_svc_account.json"
        res = container.exec_run(["sh", "-c", shell_command])
        if res.exit_code:
            raise DockerException(f"{container}: {res.output.decode('utf-8')}")
        res = container.exec_run([
            "gcloud", "auth", "activate-service-account",
            credentials["client_email"], "--key-file",
            "/tmp/gcloud_svc_account.json", "--project",
            credentials["project_id"]
        ])
        if res.exit_code:
            raise DockerException(
                f"{container}[]: {res.output.decode('utf-8')}")
        return container
Example #8
def definition(image_id, image_type):
    return InstanceDefinition(name="test-vm-1",
                              image_id=image_id,
                              type=image_type,
                              user_name="tester",
                              ssh_key=KeyStore().get_ec2_ssh_key_pair(),
                              tags={'test-tag': 'test_value'},
                              user_data=[PrintingTestUserDataObject()])
Example #9
    def __init__(self, cloud_instances: CloudInstances, user=None):
        super().__init__(cloud_instances,
                         static_ips=None,
                         html_template="per_qa_user.html")
        self.user = user
        self.report = {
            7: defaultdict(list),
            5: defaultdict(list),
            3: defaultdict(list)
        }
        self.qa_users = KeyStore().get_qa_users()
Example #10
    def __init__(self, cloud_instances):
        super(PerUserSummaryReport,
              self).__init__(cloud_instances,
                             html_template="per_user_summary.html")
        self.report = {
            "results": {
                "qa": {},
                "others": {}
            },
            "cloud_providers": tuple()
        }
        self.qa_users = KeyStore().get_qa_users()
Example #11
    def __init__(self, cloud_instances: CloudInstances, static_ips: StaticIPs):
        super().__init__(cloud_instances,
                         static_ips,
                         html_template="per_user_summary.html")
        self.report = {
            "results": {
                "qa": {},
                "others": {}
            },
            "cloud_providers": CLOUD_PROVIDERS
        }
        self.qa_users = KeyStore().get_qa_users()
Example #12
    def __init__(self, region_name):
        self.region_name = region_name
        info = KeyStore().get_gcp_credentials()
        self.project = info['project_id']

        credentials = service_account.Credentials.from_service_account_info(info)

        self.iam = build('iam', 'v1', credentials=credentials)

        self.network_client = compute_v1.NetworksClient(credentials=credentials)
        self.firewall_client = compute_v1.FirewallsClient(credentials=credentials)
        self.storage_client = storage.Client(credentials=credentials)
Example #13
def test_can_create_basic_scylla_instance_definition_from_sct_config():
    """Test for azure_region_definition_builder"""
    EnvConfig = namedtuple('EnvConfig', [
        "SCT_CLUSTER_BACKEND", "SCT_TEST_ID", "SCT_CONFIG_FILES",
        "SCT_AZURE_REGION_NAME", "SCT_N_DB_NODES", "SCT_USER_PREFIX",
        "SCT_AZURE_IMAGE_DB", "SCT_N_LOADERS", "SCT_N_MONITORS_NODES"
    ])
    env_config = EnvConfig(
        SCT_CLUSTER_BACKEND="azure",
        SCT_TEST_ID="example_test_id",
        SCT_CONFIG_FILES=
        f'["{Path(__file__).parent.absolute()}/azure_default_config.yaml"]',
        SCT_AZURE_REGION_NAME="['eastus', 'easteu']",
        SCT_N_DB_NODES="3 1",
        SCT_USER_PREFIX="unit-test",
        SCT_AZURE_IMAGE_DB="some_image_id",
        SCT_N_LOADERS="2 0",
        SCT_N_MONITORS_NODES="1")

    os.environ.update(env_config._asdict())
    config = SCTConfiguration()
    tags = TestConfig.common_tags()
    ssh_key = KeyStore().get_gce_ssh_key_pair()
    prefix = config.get('user_prefix')
    test_config = TestConfig()
    builder = region_definition_builder.get_builder(params=config,
                                                    test_config=test_config)
    region_definitions = builder.build_all_region_definitions()

    instance_definition = InstanceDefinition(
        name=f"{prefix}-db-node-eastus-1",
        image_id=env_config.SCT_AZURE_IMAGE_DB,
        type="Standard_L8s_v2",
        user_name="scyllaadm",
        root_disk_size=30,
        tags=tags | {
            "NodeType": "scylla-db",
            "keep_action": "",
            'NodeIndex': '1'
        },
        ssh_key=ssh_key)
    assert len(region_definitions) == 2
    actual_region_definition = region_definitions[0]

    assert actual_region_definition.test_id == env_config.SCT_TEST_ID
    assert actual_region_definition.backend == "azure"
    assert actual_region_definition.region == "eastus"
    # ignoring user_data in this validation
    actual_region_definition.definitions[
        0].user_data = instance_definition.user_data
    # ssh_key is not shown in the comparison; if actual looks identical to expected, the ssh_key values likely differ
    assert instance_definition == actual_region_definition.definitions[0]
Example #14
def get_all_gce_regions():
    from sdcm.keystore import KeyStore
    gcp_credentials = KeyStore().get_gcp_credentials()
    gce_driver = get_driver(Provider.GCE)

    compute_engine = gce_driver(gcp_credentials["project_id"] +
                                "@appspot.gserviceaccount.com",
                                gcp_credentials["private_key"],
                                project=gcp_credentials["project_id"])
    all_gce_regions = [
        region_obj.name for region_obj in compute_engine.region_list
    ]
    return all_gce_regions
Example #15
def list_instances_gce(tags_dict, running=False):
    """
    list all instances with specific tags GCE

    :param tags_dict: a dict of the tag to select the instances, e.x. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}

    :return: None
    """

    # avoid cyclic dependency issues, since too many things import utils.py
    from sdcm.keystore import KeyStore

    gcp_credentials = KeyStore().get_gcp_credentials()
    gce_driver = get_driver(Provider.GCE)

    compute_engine = gce_driver(gcp_credentials["project_id"] +
                                "@appspot.gserviceaccount.com",
                                gcp_credentials["private_key"],
                                project=gcp_credentials["project_id"])

    logger.info("Going to get all instances from GCE")
    all_gce_instances = compute_engine.list_nodes()
    # filter instances by tags since libcloud list_nodes() doesn't offer any filtering
    if tags_dict:
        instances = filter_gce_by_tags(tags_dict=tags_dict,
                                       instances=all_gce_instances)
    else:
        instances = all_gce_instances

    if running:
        # https://libcloud.readthedocs.io/en/latest/compute/api.html#libcloud.compute.types.NodeState
        instances = [i for i in instances if i.state == 'running']
    else:
        instances = [i for i in instances if not i.state == 'terminated']
    logger.info("Done. Found total of %s instances.", len(instances))
    return instances
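A usage sketch that filters by the TestId tag from the docstring's own example; printing name and state relies on the standard libcloud Node attributes:

for node in list_instances_gce(
        tags_dict={"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"},
        running=True):
    print(node.name, node.state)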
Example #16
    def key_pair():
        ks = KeyStore()
        return ks.get_ec2_ssh_key_pair()
Example #17
def get_docker_hub_credentials() -> dict:
    LOGGER.info("Get Docker Hub credentials")
    return KeyStore().get_docker_hub_credentials()
Example #18
def get_gce_services(regions: list) -> dict:
    credentials = KeyStore().get_gcp_credentials()
    return {region_az: _get_gce_service(credentials, region_az) for region_az in map(append_zone, regions)}
Example #19
    def from_keystore_creds(cls) -> "HousekeepingDB":
        return cls.from_params(KeyStore().get_housekeeping_db_credentials())
Example #20
    def azure_credentials(self) -> dict[str, str]:  # pylint: disable=no-self-use; pylint doesn't know about cached_property
        return KeyStore().get_azure_credentials()
Example #21
class ES(elasticsearch.Elasticsearch):
    """
    Provides interface for Elasticsearch DB
    """
    def __init__(self):
        self._conf = self.get_conf()
        super().__init__(hosts=[self._conf["es_url"]],
                         verify_certs=False,
                         http_auth=(self._conf["es_user"],
                                    self._conf["es_password"]))

    def get_conf(self):
        self.key_store = KeyStore()
        return self.key_store.get_elasticsearch_credentials()

    def _create_index(self, index):
        self.indices.create(index=index, ignore=400)  # pylint: disable=unexpected-keyword-arg

    def create_doc(self, index, doc_type, doc_id, body):
        """
        Add document in json format
        """
        LOGGER.info('Create doc')
        LOGGER.info('INDEX: %s', index)
        LOGGER.info('DOC_TYPE: %s', doc_type)
        LOGGER.info('DOC_ID: %s', doc_id)
        LOGGER.info('BODY: %s', body)
        self._create_index(index)
        if self.exists(index=index, doc_type=doc_type, id=doc_id):
            self.update(index=index,
                        doc_type=doc_type,
                        id=doc_id,
                        body={'doc': body})
        else:
            self.create(index=index, doc_type=doc_type, id=doc_id, body=body)

    def update_doc(self, index, doc_type, doc_id, body):
        """
        Update document with partial data
        """
        LOGGER.info('Update doc %s with info %s', doc_id, body)
        self.update(index=index,
                    doc_type=doc_type,
                    id=doc_id,
                    body=dict(doc=body))

    def get_all(self, index, limit=1000):
        """
        Search for documents for the certain index
        """
        return self.search(index=index, size=limit)  # pylint: disable=unexpected-keyword-arg

    def get_doc(self, index, doc_id, doc_type='_all'):
        """
        Get document by id
        """
        doc = self.get(
            index=index,
            doc_type=doc_type,
            id=doc_id,
            ignore=[  # pylint: disable=unexpected-keyword-arg
                400, 404
            ])
        if not doc['found']:
            LOGGER.warning('Document not found: %s %s', doc_id, doc_type)
            return None
        return doc

    def delete_doc(self, index, doc_type, doc_id):
        """
        Delete document
        """
        if self.get_doc(index, doc_id, doc_type):
            self.delete(index=index, doc_type=doc_type, id=doc_id)
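A short usage sketch exercising the ES wrapper's create/update/get round trip (index, doc_type and document values are illustrative only):

es = ES()
es.create_doc(index="test_stats", doc_type="test_stats",
              doc_id="some-test-id", body={"status": "passed"})
es.update_doc(index="test_stats", doc_type="test_stats",
              doc_id="some-test-id", body={"status": "failed"})
doc = es.get_doc(index="test_stats", doc_id="some-test-id")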
Example #22
    def _retrieve_credentials(self):
        keystore = KeyStore()
        creds = keystore.get_email_credentials()
        self._user = creds["user"]
        self._password = creds["password"]
Example #23
    def key_pair(self) -> SSHKey:
        return KeyStore().get_ec2_ssh_key_pair()
Example #24
    def key_pair(self) -> SSHKey:
        return KeyStore().get_gce_ssh_key_pair()  # scylla-test
Example #25
    def _get_ssh_key() -> SSHKey:
        return KeyStore().get_gce_ssh_key_pair()
Example #26
    def config(cls):
        if cls._config is None:
            cls._config = ArgusConfig(**KeyStore().get_argusdb_credentials(), keyspace_name="argus")
        return cls._config
Example #27
    def get_conf(self):
        self.key_store = KeyStore()
        return self.key_store.get_elasticsearch_credentials()
Example #28
    def set_backup_azure_blob_credentials(cls) -> None:
        cls.backup_azure_blob_credentials = KeyStore().get_backup_azure_blob_credentials()
Example #29
def migrate(old_index_name, dry_run, new_index, days):  # pylint: disable=too-many-locals

    logging.basicConfig(level=logging.DEBUG)
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'standard': {
                'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
            },
        },
        'handlers': {
            'default': {
                'level': 'INFO',
                'formatter': 'standard',
                'class': 'logging.StreamHandler',
                'stream': 'ext://sys.stdout',  # Default is stderr
            },
        },
        'loggers': {
            '': {  # root logger
                'handlers': ['default'],
                'level': 'WARNING',
                'propagate': False
            },
            '__main__': {  # if __name__ == '__main__'
                'handlers': ['default'],
                'level': 'DEBUG',
                'propagate': False
            },
        }
    })
    ks = KeyStore()
    es_conf = ks.get_elasticsearch_credentials()
    elastic_search = Elasticsearch(hosts=[es_conf["es_url"]],
                                   verify_certs=True,
                                   http_auth=(es_conf["es_user"],
                                              es_conf["es_password"]))

    if not elastic_search.indices.exists(index=new_index):
        elastic_search.indices.create(index=new_index)

    def post_to_new(doc):
        if dry_run:
            return
        elastic_search.index(index=new_index, doc_type='nemesis', body=doc)

    res = scan(elastic_search,
               index=old_index_name,
               query={
                   "query": {
                       "range": {
                           "test_details.start_time": {
                               "gte":
                               (datetime.datetime.utcnow() -
                                datetime.timedelta(days=days)).timestamp(),
                               "lte":
                               datetime.datetime.utcnow().timestamp(),
                               "boost":
                               2.0
                           }
                       }
                   }
               },
               size=300,
               scroll='3h')

    for num, hit in enumerate(res):
        nemesis_list = hit["_source"]["nemesis"]
        test_data = hit["_source"]
        LOGGER.info("%s: %s", num, test_data['test_details']['test_id'])
        if 'scylla-server' not in test_data['versions']:
            LOGGER.debug("%s: No version for %s - %s", num,
                         test_data['test_details']['test_id'],
                         test_data['test_details']['job_name'])
            if not test_data['test_details']['job_name']:
                LOGGER.debug(test_data)
            continue

        for nemesis_class, data in nemesis_list.items():
            for failure in data['failures']:
                new_nemesis_data = dict(
                    test_id=test_data['test_details']['test_id'],
                    job_name=test_data['test_details']['job_name'],
                    test_name=test_data['test_details']['test_name'],
                    scylla_version=test_data['versions']['scylla-server']
                    ['version'],
                    scylla_git_sha=test_data['versions']['scylla-server']
                    ['commit_id'],
                )

                new_nemesis_data.update(
                    dict(nemesis_name=nemesis_class,
                         nemesis_duration=failure['duration'],
                         start_time=datetime.datetime.utcfromtimestamp(
                             failure['start']),
                         end_time=datetime.datetime.utcfromtimestamp(
                             failure['end']),
                         target_node=failure['node'],
                         outcome="failure",
                         failure_message=failure['error']))
                post_to_new(new_nemesis_data)

            for run in data['runs']:
                new_nemesis_data = dict(
                    test_id=test_data['test_details']['test_id'],
                    job_name=test_data['test_details']['job_name'],
                    test_name=test_data['test_details']['test_name'],
                    scylla_version=test_data['versions']['scylla-server']
                    ['version'],
                    scylla_git_sha=test_data['versions']['scylla-server']
                    ['commit_id'],
                )
                new_nemesis_data.update(
                    dict(nemesis_name=nemesis_class,
                         nemesis_duration=run['duration'],
                         start_time=datetime.datetime.utcfromtimestamp(
                             run['start']),
                         end_time=datetime.datetime.utcfromtimestamp(
                             run['end']),
                         target_node=run['node'],
                         outcome="passed"))
                if run.get('type', '') == 'skipped':
                    new_nemesis_data['outcome'] = 'skipped'
                    new_nemesis_data['skip_reason'] = run['skip_reason']
                post_to_new(new_nemesis_data)
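The bare signature (old_index_name, dry_run, new_index, days) suggests migrate is a click command whose decorators were stripped from this listing. A direct-call sketch with illustrative arguments (all values are assumptions):

migrate(old_index_name="nemesis",  # hypothetical index names and window
        dry_run=True,
        new_index="nemesis_v2",
        days=30)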
Example #30
def get_gce_service(region: str) -> GceDriver:
    credentials = KeyStore().get_gcp_credentials()
    return _get_gce_service(credentials, append_zone(region))