Example #1
def _the_backup_index_exists(context):
    storage = Storage(config=context.medusa_config.storage)
    assert True is medusa.index.index_exists(storage)
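The step above assumes that context.medusa_config has already been prepared by the test fixtures. A minimal sketch of how such a config could be assembled for the local storage provider, mirroring the setUp() shown in Example #2 below (the helper name build_local_medusa_config, the import paths, and the field values are assumptions, not taken from the original fixtures):

import configparser

from medusa.config import MedusaConfig, StorageConfig, _namedtuple_from_dict


def build_local_medusa_config():
    # Hypothetical helper: builds the same kind of MedusaConfig as Example #2.
    config = configparser.ConfigParser(interpolation=None)
    config['storage'] = {
        'host_file_separator': ',',
        'bucket_name': 'medusa_it_bucket',
        'key_file': '',
        'storage_provider': 'local',
        'prefix': '',
        'fqdn': 'localhost',
        'api_key_or_username': '',
        'api_secret_or_password': '',
        'base_path': '/tmp'
    }
    return MedusaConfig(
        storage=_namedtuple_from_dict(StorageConfig, config['storage']),
        cassandra=None,
        monitoring={},
        ssh=None,
        restore=None)


# With such a config, the step body reduces to:
#     storage = Storage(config=build_local_medusa_config().storage)
#     assert medusa.index.index_exists(storage) is True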
Example #2
class RestoreNodeTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.local_storage_dir = "/tmp/medusa_local_storage"
        self.medusa_bucket_dir = "/tmp/medusa_test_bucket"

    def setUp(self):
        if os.path.isdir(self.local_storage_dir):
            shutil.rmtree(self.local_storage_dir)
        if os.path.isdir(self.medusa_bucket_dir):
            shutil.rmtree(self.medusa_bucket_dir)

        os.makedirs(self.local_storage_dir)
        config = configparser.ConfigParser(interpolation=None)
        config['storage'] = {
            'host_file_separator': ',',
            'bucket_name': 'medusa_test_bucket',
            'key_file': '',
            'storage_provider': 'local',
            'prefix': '',
            'fqdn': '127.0.0.1',
            'api_key_or_username': '',
            'api_secret_or_password': '',
            'base_path': '/tmp'
        }
        config['cassandra'] = {'is_ccm': 1}

        self.config = MedusaConfig(
            storage=_namedtuple_from_dict(StorageConfig, config['storage']),
            cassandra=_namedtuple_from_dict(CassandraConfig,
                                            config['cassandra']),
            monitoring={},
            ssh=None,
            restore=None)

        self.storage = Storage(config=self.config.storage)

    def test_add_object_from_string(self):
        file_content = "content of the test file"
        self.storage.storage_driver.upload_blob_from_string(
            "test1/file.txt", file_content)
        self.assertEqual(
            self.storage.storage_driver.get_blob_content_as_string(
                "test1/file.txt"), file_content)

    def test_download_blobs(self):
        files_to_download = list()
        file1_content = "content of the test file1"
        file2_content = "content of the test file2"
        self.storage.storage_driver.upload_blob_from_string(
            "test_download_blobs1/file1.txt", file1_content)
        files_to_download.append("test_download_blobs1/file1.txt")
        self.storage.storage_driver.upload_blob_from_string(
            "test_download_blobs2/file2.txt", file2_content)
        files_to_download.append("test_download_blobs2/file2.txt")
        self.assertEqual(len(os.listdir(self.medusa_bucket_dir)), 2)
        self.storage.storage_driver.download_blobs(files_to_download,
                                                   self.local_storage_dir)
        self.assertEqual(len(os.listdir(self.local_storage_dir)), 2)

    def test_list_objects(self):
        file1_content = "content of the test file1"
        file2_content = "content of the test file2"
        self.storage.storage_driver.upload_blob_from_string(
            "test_download_blobs1/file1.txt", file1_content)
        self.storage.storage_driver.upload_blob_from_string(
            "test_download_blobs2/file2.txt", file2_content)
        objects = self.storage.storage_driver.list_objects()
        self.assertEqual(len(objects), 2)
        one_object = self.storage.storage_driver.list_objects(
            "test_download_blobs2")
        self.assertEqual(len(one_object), 1)

    def test_read_blob(self):
        file1_content = "content of the test file1"
        self.storage.storage_driver.upload_blob_from_string(
            "test_download_blobs1/file1.txt", file1_content)
        objects = self.storage.storage_driver.list_objects(
            "test_download_blobs1")
        object_content = self.storage.storage_driver.read_blob_as_string(
            objects[0])
        self.assertEqual(object_content, file1_content)

    def test_get_blob(self):
        file1_content = "content of the test file1"
        self.storage.storage_driver.upload_blob_from_string(
            "test_download_blobs1/file1.txt", file1_content)
        obj = self.storage.storage_driver.get_blob(
            "test_download_blobs1/file1.txt")
        self.assertEqual(obj.name, "test_download_blobs1/file1.txt")

    def test_read_blob_as_bytes(self):
        file1_content = "content of the test file1"
        self.storage.storage_driver.upload_blob_from_string(
            "test_download_blobs1/file1.txt", file1_content)
        object_content = self.storage.storage_driver.get_blob_content_as_bytes(
            "test_download_blobs1/file1.txt")
        self.assertEqual(object_content, b"content of the test file1")

    def test_verify_hash(self):
        file1_content = "content of the test file1"
        manifest = self.storage.storage_driver.upload_blob_from_string(
            "test_download_blobs1/file1.txt", file1_content)
        obj = self.storage.storage_driver.get_blob(
            "test_download_blobs1/file1.txt")
        self.assertEqual(manifest.MD5, obj.hash)

    def test_hashes_match(self):
        # Should match
        hash1 = "S1EAM/BVMqhbJnAUs/nWlQ=="
        hash2 = "4b510033f05532a85b267014b3f9d695"
        self.assertTrue(
            medusa.storage.abstract_storage.AbstractStorage.hashes_match(
                hash1, hash2))

        # Should match
        hash1 = "4b510033f05532a85b267014b3f9d695"
        hash2 = "4b510033f05532a85b267014b3f9d695"
        self.assertTrue(
            medusa.storage.abstract_storage.AbstractStorage.hashes_match(
                hash1, hash2))

        # Should not match
        hash1 = "S1EAM/BVMqhbJnAUs/nWlQsdfsdf=="
        hash2 = "4b510033f05532a85b267014b3f9d695"
        self.assertFalse(
            medusa.storage.abstract_storage.AbstractStorage.hashes_match(
                hash1, hash2))

    def test_generate_md5_hash(self):
        with tempfile.NamedTemporaryFile() as tf:
            # write random bytes
            two_megabytes = 2 * 1024 * 1024
            tf.write(os.urandom(two_megabytes))
            tf.flush()

            # compute checksum of the whole file at once
            tf.seek(0)
            checksum_full = hashlib.md5(tf.read()).digest()
            digest_full = base64.encodebytes(checksum_full).decode(
                'UTF-8').strip()

            # compute checksum using default-size chunks
            tf.seek(0)
            digest_chunk = generate_md5_hash(tf.name)

            # compare the digests
            self.assertEqual(digest_chunk, digest_full)

            # compute checksum using custom size chunks
            tf.seek(0)
            self.assertEqual(digest_full,
                             generate_md5_hash(tf.name, block_size=128))
            tf.seek(0)
            self.assertEqual(digest_full,
                             generate_md5_hash(tf.name, block_size=256))
            tf.seek(0)
            self.assertEqual(digest_full,
                             generate_md5_hash(tf.name, block_size=1024))
            tf.seek(0)
            self.assertEqual(digest_full,
                             generate_md5_hash(tf.name,
                                               block_size=100000000))  # 100M
            tf.seek(0)
            self.assertEqual(digest_full,
                             generate_md5_hash(tf.name, block_size=-1))
            tf.seek(0)
            self.assertNotEqual(digest_full,
                                generate_md5_hash(tf.name, block_size=0))

    def test_get_object_datetime(self):
        file1_content = "content of the test file1"
        self.storage.storage_driver.upload_blob_from_string(
            "test_download_blobs1/file1.txt", file1_content)
        obj = self.storage.storage_driver.get_blob(
            "test_download_blobs1/file1.txt")
        self.assertEqual(
            datetime.datetime.fromtimestamp(int(obj.extra["modify_time"])),
            self.storage.storage_driver.get_object_datetime(obj))

    def test_get_fqdn_from_backup_index_blob(self):
        blob_name = "index/backup_index/2019051307/manifest_node1.whatever.com.json"
        self.assertEquals("node1.whatever.com",
                          self.storage.get_fqdn_from_any_index_blob(blob_name))

        blob_name = "index/backup_index/2019051307/schema_node2.whatever.com.cql"
        self.assertEquals("node2.whatever.com",
                          self.storage.get_fqdn_from_any_index_blob(blob_name))

        blob_name = "index/backup_index/2019051307/schema_node3.whatever.com.txt"
        self.assertEquals("node3.whatever.com",
                          self.storage.get_fqdn_from_any_index_blob(blob_name))

        blob_name = "index/backup_index/2019051307/schema_node_with_underscores.whatever.com.txt"
        self.assertEquals("node_with_underscores.whatever.com",
                          self.storage.get_fqdn_from_any_index_blob(blob_name))

    def test_get_fqdn_from_any_index_blob(self):
        blob_name = "tokenmap_hostname-with-dashes-and-3-numbers.json"
        self.assertEqual("hostname-with-dashes-and-3-numbers",
                         self.storage.get_fqdn_from_any_index_blob(blob_name))
        blob_name = "tokenmap_hostname-with-dashes.and-dots.json"
        self.assertEqual("hostname-with-dashes.and-dots",
                         self.storage.get_fqdn_from_any_index_blob(blob_name))
        blob_name = "tokenmap_hostname_with-underscores.and-dots-and.dashes.json"
        self.assertEqual("hostname_with-underscores.and-dots-and.dashes",
                         self.storage.get_fqdn_from_any_index_blob(blob_name))
        blob_name = "index/bi/third_backup/finished_localhost_1574343029.timestamp"
        self.assertEqual("localhost",
                         self.storage.get_fqdn_from_any_index_blob(blob_name))

    def test_parse_backup_index(self):
        file_content = "content of the test file"
        # SSTables for node1 and backup1
        self.storage.storage_driver.upload_blob_from_string(
            "node1/backup1/data/ks1/sstable1.db", file_content)
        self.storage.storage_driver.upload_blob_from_string(
            "node1/backup1/data/ks1/sstable2.db", file_content)
        # Metadata for node1 and backup1
        self.storage.storage_driver.upload_blob_from_string(
            "node1/backup1/meta/tokenmap.json", file_content)
        self.storage.storage_driver.upload_blob_from_string(
            "node1/backup1/meta/manifest.json", file_content)
        self.storage.storage_driver.upload_blob_from_string(
            "node1/backup1/meta/schema.cql", file_content)
        # SSTables for node2 and backup1
        self.storage.storage_driver.upload_blob_from_string(
            "node2/backup1/data/ks1/sstable1.db", file_content)
        self.storage.storage_driver.upload_blob_from_string(
            "node2/backup1/data/ks1/sstable2.db", file_content)
        # Metadata for node2 and backup1
        self.storage.storage_driver.upload_blob_from_string(
            "node2/backup1/meta/tokenmap.json", file_content)
        self.storage.storage_driver.upload_blob_from_string(
            "node2/backup1/meta/manifest.json", file_content)
        self.storage.storage_driver.upload_blob_from_string(
            "node2/backup1/meta/schema.cql", file_content)
        # SSTables for node1 and backup2
        self.storage.storage_driver.upload_blob_from_string(
            "node1/backup2/data/ks1/sstable1.db", file_content)
        self.storage.storage_driver.upload_blob_from_string(
            "node1/backup2/data/ks1/sstable2.db", file_content)
        # Metadata for node1 and backup2
        self.storage.storage_driver.upload_blob_from_string(
            "node1/backup2/meta/tokenmap.json", file_content)
        self.storage.storage_driver.upload_blob_from_string(
            "node1/backup2/meta/manifest.json", file_content)
        self.storage.storage_driver.upload_blob_from_string(
            "node1/backup2/meta/schema.cql", file_content)
        build_indices(self.config, False)
        path = 'index/backup_index'
        backup_index = self.storage.storage_driver.list_objects(path)
        blobs_by_backup = self.storage.group_backup_index_by_backup_and_node(
            backup_index)
        self.assertTrue("backup1" in blobs_by_backup)
        self.assertTrue("backup2" in blobs_by_backup)
        self.assertTrue("node1" in blobs_by_backup["backup1"])
        self.assertTrue("node2" in blobs_by_backup["backup1"])
        self.assertTrue("node1" in blobs_by_backup["backup2"])
        self.assertFalse("node2" in blobs_by_backup["backup2"])

    def test_remove_extension(self):
        self.assertEqual('localhost',
                         self.storage.remove_extension('localhost.txt'))
        self.assertEqual('localhost',
                         self.storage.remove_extension('localhost.timestamp'))
        self.assertEqual('localhost',
                         self.storage.remove_extension('localhost.cql'))
        self.assertEqual('localhost.foo',
                         self.storage.remove_extension('localhost.foo'))

    def test_get_timestamp_from_blob_name(self):
        self.assertEqual(
            1558021519,
            self.storage.get_timestamp_from_blob_name(
                'finished_localhost_1558021519.timestamp'))
        self.assertEqual(
            1558021519,
            self.storage.get_timestamp_from_blob_name(
                'finished_some.host.net_1558021519.timestamp'))
        self.assertEqual(
            1558021519,
            self.storage.get_timestamp_from_blob_name(
                'finished_some_underscores.host.net_1558021519.timestamp'))

        self.assertEqual(
            1574343029,
            self.storage.get_timestamp_from_blob_name(
                'index/bi/third_backup/finished_localhost_1574343029.timestamp'
            ))
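The hashes_match test above compares two encodings of the same MD5 digest: the base64 form that some storage backends report and the hex form computed locally. A quick standalone check with the standard library shows that the two example strings used in the test really do refer to the same digest:

import base64
import binascii

hex_hash = "4b510033f05532a85b267014b3f9d695"
b64_hash = "S1EAM/BVMqhbJnAUs/nWlQ=="

# Decode the hex digest to raw bytes, re-encode as base64 and compare.
raw = binascii.unhexlify(hex_hash)
assert base64.b64encode(raw).decode('ascii') == b64_hash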
Example #3
def _truncate_the_backup_folder(context):
    storage = Storage(config=context.medusa_config.storage)
    path_root = "/tmp/medusa_it_bucket"
    backup_path = "{}/{}localhost".format(path_root, storage.prefix_path)
    shutil.rmtree(backup_path)
Example #4
def _the_backup_index_does_not_exist(context):
    storage = Storage(config=context.medusa_config.storage)
    assert False is medusa.index.index_exists(storage)
Example #5
def _the_latest_complete_cluster_backup_is(context, expected_backup_name):
    storage = Storage(config=context.medusa_config.storage)
    actual_backup = storage.latest_complete_cluster_backup()
    if actual_backup is not None:
        assert expected_backup_name == actual_backup.name
Example #6
def _truncate_the_index(context):
    storage = Storage(config=context.medusa_config.storage)
    path_root = "/tmp/medusa_it_bucket"
    index_path = "{}/{}index".format(path_root, storage.prefix_path)
    shutil.rmtree(index_path)
Example #7
def _the_latest_cluster_backup_is(context, expected_backup_name):
    storage = Storage(config=context.medusa_config.storage)
    backup = storage.latest_cluster_backup()
    assert expected_backup_name == backup.name
def orchestrate(config, backup_name, stagger, enable_md5_checks, mode,
                temp_dir, parallel_snapshots, parallel_uploads):
    backup = None
    monitoring = Monitoring(config=config.monitoring)
    try:
        backup_start_time = datetime.datetime.now()
        if not config.storage.fqdn:
            err_msg = "The fqdn was not provided nor calculated properly."
            logging.error(err_msg)
            raise Exception(err_msg)

        if not temp_dir.is_dir():
            err_msg = '{} is not a directory'.format(temp_dir)
            logging.error(err_msg)
            raise Exception(err_msg)

        try:
            # Try to get a backup with backup_name. If it exists then we cannot take another backup with that name
            storage = Storage(config=config.storage)
            cluster_backup = storage.get_cluster_backup(backup_name)
            if cluster_backup:
                err_msg = 'Backup named {} already exists.'.format(backup_name)
                logging.error(err_msg)
                raise Exception(err_msg)
        except KeyError:
            info_msg = 'Starting backup {}'.format(backup_name)
            logging.info(info_msg)

        backup = BackupJob(config, backup_name, stagger, enable_md5_checks,
                           mode, temp_dir, parallel_snapshots,
                           parallel_uploads)
        backup.execute()

        backup_end_time = datetime.datetime.now()
        backup_duration = backup_end_time - backup_start_time

        logging.debug('Emitting metrics')

        logging.info('Backup duration: {}'.format(backup_duration.seconds))
        tags = [
            'medusa-cluster-backup', 'cluster-backup-duration', backup_name
        ]
        monitoring.send(tags, backup_duration.seconds)

        tags = ['medusa-cluster-backup', 'cluster-backup-error', backup_name]
        monitoring.send(tags, 0)

        logging.debug('Done emitting metrics.')
        logging.info('Backup of the cluster done.')

    except Exception as e:
        tags = ['medusa-cluster-backup', 'cluster-backup-error', backup_name]
        monitoring.send(tags, 1)

        logging.error(
            'This error happened during the cluster backup: {}'.format(str(e)))
        traceback.print_exc()

        if backup is not None:
            err_msg = 'Something went wrong! Attempting to clean snapshots and exit.'
            logging.error(err_msg)

            delete_snapshot_command = ' '.join(
                backup.cassandra.delete_snapshot_command(backup.snapshot_tag))
            pssh_run_success_cleanup = backup.orchestration_uploads\
                .pssh_run(backup.hosts,
                          delete_snapshot_command,
                          hosts_variables={})
            if pssh_run_success_cleanup:
                info_msg = 'All nodes successfully cleared their snapshot.'
                logging.info(info_msg)
            else:
                err_msg_cleanup = 'Some nodes failed to clear the snapshot. Cleaning snapshots manually is recommended'
                logging.error(err_msg_cleanup)
        sys.exit(1)
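A note on calling the backup orchestrate() shown above: temp_dir must behave like a pathlib.Path (the function calls temp_dir.is_dir()), and config is a fully loaded MedusaConfig. A minimal, hypothetical invocation sketch with placeholder values (the real CLI wires these from command-line options):

import pathlib

# config: a loaded MedusaConfig, assumed to exist in the calling scope.
orchestrate(config=config,
            backup_name='backup1',
            stagger=None,
            enable_md5_checks=False,
            mode='differential',
            temp_dir=pathlib.Path('/tmp/medusa'),
            parallel_snapshots=1,
            parallel_uploads=1)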
Example #9
def _there_is_no_latest_backup_for_node_fqdn(context, fqdn):
    storage = Storage(config=context.medusa_config.storage)
    node_backup = storage.latest_node_backup(fqdn=fqdn)
    assert node_backup is None
Example #10
def _node_fakes_a_complete_backup(context, fqdn, backup_name, backup_datetime):
    storage = Storage(config=context.medusa_config.storage)
    path_root = "/tmp/medusa_it_bucket"

    fake_tokenmap = json.dumps({
        "n1": {
            "tokens": [1],
            "is_up": True
        },
        "n2": {
            "tokens": [2],
            "is_up": True
        },
        "n3": {
            "tokens": [3],
            "is_up": True
        },
    })

    dir_path = os.path.join(path_root, storage.prefix_path + "index",
                            "backup_index", backup_name)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # fake token map, manifest and schema in index
    path_tokenmap = "{}/{}index/backup_index/{}/tokenmap_{}.json".format(
        path_root, storage.prefix_path, backup_name, fqdn)
    write_dummy_file(path_tokenmap, backup_datetime, fake_tokenmap)
    path_manifest = "{}/{}index/backup_index/{}/manifest_{}.json".format(
        path_root, storage.prefix_path, backup_name, fqdn)
    write_dummy_file(path_manifest, backup_datetime, fake_tokenmap)
    path_schema = "{}/{}index/backup_index/{}/schema_{}.cql".format(
        path_root, storage.prefix_path, backup_name, fqdn)
    write_dummy_file(path_schema, backup_datetime, fake_tokenmap)

    dir_path = os.path.join(path_root, storage.prefix_path + "index",
                            "latest_backup", fqdn)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # fake token map in latest_backup
    path_latest_backup_tokenmap = "{}/{}index/latest_backup/{}/tokenmap.json".format(
        path_root, storage.prefix_path, fqdn)
    write_dummy_file(path_latest_backup_tokenmap, backup_datetime,
                     fake_tokenmap)

    # fake token name in latest_backup
    path_latest_backup_name = "{}/{}index/latest_backup/{}/backup_name.txt".format(
        path_root, storage.prefix_path, fqdn)
    write_dummy_file(path_latest_backup_name, backup_datetime)

    # fake actual backup folder
    dir_path = os.path.join(path_root, storage.prefix_path + fqdn, backup_name,
                            "meta")
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    # fake schema in actual backup path
    path_schema = "{}/{}{}/{}/meta/schema.cql".format(path_root,
                                                      storage.prefix_path,
                                                      fqdn, backup_name)
    write_dummy_file(path_schema, backup_datetime)

    # fake manifest in actual backup path
    path_manifest = "{}/{}{}/{}/meta/manifest.json".format(
        path_root, storage.prefix_path, fqdn, backup_name)
    write_dummy_file(path_manifest, backup_datetime)

    # fake token map in actual backup path
    path_tokenmap = "{}/{}{}/{}/meta/tokenmap.json".format(
        path_root, storage.prefix_path, fqdn, backup_name)
    write_dummy_file(path_tokenmap, backup_datetime, fake_tokenmap)
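Example #10 relies on a write_dummy_file helper that is not part of the snippet. Given how it is called above (a path, a backup datetime string, and optional content), a minimal sketch of what such a helper could look like; the timestamp format and the idea of back-dating the file's mtime are assumptions:

import os
import time
from datetime import datetime


def write_dummy_file(path, mtime_str, contents=None):
    # Hypothetical helper: write a small file and back-date its modification
    # time so the fake backup appears to have been taken at the given moment.
    with open(path, 'w') as f:
        f.write(contents if contents is not None else 'dummy content')
    # Assumed timestamp format; adjust to whatever the calling fixtures use.
    mtime = time.mktime(datetime.strptime(mtime_str, '%Y-%m-%d %H:%M:%S').timetuple())
    os.utime(path, (mtime, mtime))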
def main(config, backup_name_arg, stagger_time, mode):
    start = datetime.datetime.now()
    backup_name = backup_name_arg or start.strftime('%Y%m%d%H')
    monitoring = Monitoring(config=config.monitoring)

    try:
        storage = Storage(config=config.storage)
        cassandra = Cassandra(config)

        differential_mode = False
        if mode == "differential":
            differential_mode = True

        node_backup = storage.get_node_backup(
            fqdn=config.storage.fqdn,
            name=backup_name,
            differential_mode=differential_mode)

        if node_backup.exists():
            raise IOError(
                'Error: Backup {} already exists'.format(backup_name))

        # Make sure that priority remains to Cassandra/limiting backups resource usage
        try:
            throttle_backup()
        except Exception:
            logging.warning(
                "Unable to throttle the backup. ionice is probably not available."
            )

        logging.info('Saving tokenmap and schema')
        schema, tokenmap = get_schema_and_tokenmap(cassandra)

        node_backup.schema = schema
        node_backup.tokenmap = json.dumps(tokenmap)
        if differential_mode is True:
            node_backup.differential = mode
        add_backup_start_to_index(storage, node_backup)

        if stagger_time:
            stagger_end = start + stagger_time
            logging.info(
                'Staggering backup run, trying until {}'.format(stagger_end))
            while not stagger(config.storage.fqdn, storage, tokenmap):
                if datetime.datetime.now() < stagger_end:
                    logging.info('Staggering this backup run...')
                    time.sleep(60)
                else:
                    raise IOError('Backups on previous nodes did not complete'
                                  ' within our stagger time.')

        actual_start = datetime.datetime.now()

        num_files, node_backup_cache = do_backup(cassandra, node_backup,
                                                 storage, differential_mode,
                                                 config, backup_name)

        end = datetime.datetime.now()
        actual_backup_duration = end - actual_start

        print_backup_stats(actual_backup_duration, actual_start, end,
                           node_backup, node_backup_cache, num_files, start)

        update_monitoring(actual_backup_duration, backup_name, monitoring,
                          node_backup)
        return (actual_backup_duration, actual_start, end, node_backup,
                node_backup_cache, num_files, start)

    except Exception as e:
        tags = ['medusa-node-backup', 'backup-error', backup_name]
        monitoring.send(tags, 1)
        medusa.utils.handle_exception(
            e, "This error happened during the backup: {}".format(str(e)),
            config)
Example #12
    def __init__(self, config):
        logging.info("Init service")
        self.config = config
        self.storage = Storage(config=self.config.storage)
Example #13
class MedusaService(medusa_pb2_grpc.MedusaServicer):
    def __init__(self, config):
        logging.info("Init service")
        self.config = config
        self.storage = Storage(config=self.config.storage)

    def AsyncBackup(self, request, context):
        # TODO pass the staggered arg
        logging.info("Performing ASYNC backup {} (type={})".format(
            request.name, request.mode))
        response = medusa_pb2.BackupResponse()
        mode = BACKUP_MODE_DIFFERENTIAL
        if medusa_pb2.BackupRequest.Mode.FULL == request.mode:
            mode = BACKUP_MODE_FULL

        try:
            response.backupName = request.name
            response.status = medusa_pb2.StatusType.IN_PROGRESS
            with ThreadPoolExecutor(
                    max_workers=1,
                    thread_name_prefix=request.name) as executor:
                BackupMan.register_backup(request.name, is_async=True)
                backup_future = executor.submit(backup_node.handle_backup,
                                                config=self.config,
                                                backup_name_arg=request.name,
                                                stagger_time=None,
                                                enable_md5_checks_flag=False,
                                                mode=mode)

                backup_future.add_done_callback(record_backup_info)
                BackupMan.set_backup_future(request.name, backup_future)

        except Exception as e:

            response.status = medusa_pb2.StatusType.FAILED
            if request.name:
                BackupMan.update_backup_status(request.name,
                                               BackupMan.STATUS_FAILED)

            context.set_details("Failed to create async backup: {}".format(e))
            context.set_code(grpc.StatusCode.INTERNAL)
            logging.exception("Async backup failed due to error: {}".format(e))

        return response

    def Backup(self, request, context):
        # TODO pass the staggered arg
        logging.info("Performing SYNC backup {} (type={})".format(
            request.name, request.mode))
        response = medusa_pb2.BackupResponse()
        mode = BACKUP_MODE_DIFFERENTIAL
        if medusa_pb2.BackupRequest.Mode.FULL == request.mode:
            mode = BACKUP_MODE_FULL

        try:
            response.backupName = request.name
            BackupMan.register_backup(request.name, is_async=False)
            backup_node.handle_backup(config=self.config,
                                      backup_name_arg=request.name,
                                      stagger_time=None,
                                      enable_md5_checks_flag=False,
                                      mode=mode)
            record_status_in_response(response, request.name)
            return response
        except Exception as e:
            response.status = medusa_pb2.StatusType.FAILED
            if request.name:
                BackupMan.update_backup_status(request.name,
                                               BackupMan.STATUS_FAILED)

            context.set_details("Failed to create sync backups: {}".format(e))
            context.set_code(grpc.StatusCode.INTERNAL)
            logging.exception("Sync backup failed due to error: {}".format(e))

        return response

    def BackupStatus(self, request, context):

        response = medusa_pb2.BackupStatusResponse()
        try:
            backup = self.storage.get_cluster_backup(request.backupName)

            # TODO how is the startTime determined?
            response.startTime = datetime.fromtimestamp(
                backup.started).strftime(TIMESTAMP_FORMAT)
            response.finishedNodes.extend(
                [node.fqdn for node in backup.complete_nodes()])
            response.unfinishedNodes.extend(
                [node.fqdn for node in backup.incomplete_nodes()])
            response.missingNodes.extend(
                [node.fqdn for node in backup.missing_nodes()])

            if backup.finished:
                response.finishTime = datetime.fromtimestamp(
                    backup.finished).strftime(TIMESTAMP_FORMAT)
            else:
                response.finishTime = ""

            record_status_in_response(response, request.backupName)
        except KeyError:
            context.set_details("backup <{}> does not exist".format(
                request.backupName))
            context.set_code(grpc.StatusCode.NOT_FOUND)
            response.status = medusa_pb2.StatusType.UNKNOWN
        return response

    def GetBackups(self, request, context):
        response = medusa_pb2.GetBackupsResponse()
        last_status = medusa_pb2.StatusType.UNKNOWN
        try:
            # cluster backups
            backups = get_backups(self.config, True)
            for backup in backups:
                summary = medusa_pb2.BackupSummary()
                summary.backupName = backup.name
                if backup.started is None:
                    summary.startTime = 0
                else:
                    summary.startTime = backup.started

                if backup.finished is None:
                    summary.finishTime = 0
                    summary.status = medusa_pb2.StatusType.IN_PROGRESS
                    last_status = medusa_pb2.StatusType.IN_PROGRESS
                else:
                    summary.finishTime = backup.finished
                    if last_status != medusa_pb2.StatusType.IN_PROGRESS:
                        summary.status = medusa_pb2.StatusType.SUCCESS

                summary.totalNodes = len(backup.tokenmap)
                summary.finishedNodes = len(backup.complete_nodes())

                for node in backup.tokenmap:
                    summary.nodes.append(create_token_map_node(backup, node))

                response.backups.append(summary)

        except Exception as e:
            context.set_details(
                "Failed to get backups due to error: {}".format(e))
            context.set_code(grpc.StatusCode.INTERNAL)
            response.status = medusa_pb2.StatusType.UNKNOWN
        return response

    def DeleteBackup(self, request, context):
        logging.info("Deleting backup {}".format(request.name))
        response = medusa_pb2.DeleteBackupResponse()

        try:
            delete_backup(self.config, [request.name], True)
            handle_backup_removal(request.name)
        except Exception as e:
            context.set_details("deleting backups failed: {}".format(e))
            context.set_code(grpc.StatusCode.INTERNAL)
            logging.exception("Deleting backup {} failed".format(request.name))
        return response
Example #14
def _i_can_see_no_backups(context):
    storage = Storage(config=context.medusa_config.storage)
    cluster_backups = storage.list_cluster_backups()
    assert 0 == len(list(cluster_backups))
Example #15
def _there_is_no_latest_complete_backup(context):
    storage = Storage(config=context.medusa_config.storage)
    actual_backup = storage.latest_complete_cluster_backup()
    assert actual_backup is None
Example #16
def _the_latest_backup_for_fqdn_is_called_backupname(context, expected_fqdn,
                                                     expected_backup_name):
    storage = Storage(config=context.medusa_config.storage)
    latest_backup = storage.latest_node_backup(fqdn=expected_fqdn)
    assert latest_backup.name == expected_backup_name
def orchestrate(config, backup_name, seed_target, temp_dir, host_list, keep_auth, bypass_checks,
                verify, keyspaces, tables, pssh_pool_size, use_sstableloader=False):
    monitoring = Monitoring(config=config.monitoring)
    try:
        restore_start_time = datetime.datetime.now()
        if seed_target is not None:
            keep_auth = False

        if seed_target is None and host_list is None:
            err_msg = 'You must either provide a seed target or a list of hosts'
            logging.error(err_msg)
            raise Exception(err_msg)

        if seed_target is not None and host_list is not None:
            err_msg = 'You must either provide a seed target or a list of hosts, not both'
            logging.error(err_msg)
            raise Exception(err_msg)

        if not temp_dir.is_dir():
            err_msg = '{} is not a directory'.format(temp_dir)
            logging.error(err_msg)
            raise Exception(err_msg)

        if keep_auth:
            logging.info('system_auth keyspace will be left untouched on the target nodes')
        else:
            logging.info('system_auth keyspace will be overwritten with the backup on target nodes')

        storage = Storage(config=config.storage)

        try:
            cluster_backup = storage.get_cluster_backup(backup_name)
        except KeyError:
            err_msg = 'No such backup --> {}'.format(backup_name)
            logging.error(err_msg)
            raise Exception(err_msg)

        restore = RestoreJob(cluster_backup, config, temp_dir, host_list, seed_target, keep_auth, verify,
                             pssh_pool_size, keyspaces, tables, bypass_checks, use_sstableloader)
        restore.execute()

        restore_end_time = datetime.datetime.now()
        restore_duration = restore_end_time - restore_start_time

        logging.debug('Emitting metrics')

        logging.info('Restore duration: {}'.format(restore_duration.seconds))
        tags = ['medusa-cluster-restore', 'restore-duration', backup_name]
        monitoring.send(tags, restore_duration.seconds)

        tags = ['medusa-cluster-restore', 'restore-error', backup_name]
        monitoring.send(tags, 0)

        logging.debug('Done emitting metrics')
        logging.info('Successfully restored the cluster')

    except Exception as e:
        tags = ['medusa-cluster-restore', 'restore-error', backup_name]
        monitoring.send(tags, 1)

        logging.error('This error happened during the cluster restore: {}'.format(str(e)))
        traceback.print_exc()
        sys.exit(1)
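As with the backup flow, the restore orchestrate() above expects temp_dir to be a pathlib.Path and exactly one of seed_target or host_list to be set. A minimal, hypothetical invocation sketch with placeholder values (argument values are illustrative only, not taken from the original CLI):

import pathlib

# config: a loaded MedusaConfig, assumed to exist in the calling scope.
orchestrate(config=config,
            backup_name='backup1',
            seed_target=None,
            temp_dir=pathlib.Path('/tmp/medusa'),
            host_list='/tmp/medusa/restore_mapping.txt',
            keep_auth=False,
            bypass_checks=False,
            verify=False,
            keyspaces=set(),
            tables=set(),
            pssh_pool_size=10,
            use_sstableloader=False)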