def test_not_safe_to_snap_raises_exception(self):
    """An in-progress snapshot must make do_action raise FailedExecution."""
    client = Mock()
    client.snapshot.get_repository.return_value = testvars.test_repo
    client.snapshot.get.return_value = testvars.inprogress
    snap_list = curator.SnapshotList(client, repository=testvars.repo_name)
    action = curator.DeleteSnapshots(snap_list, retry_interval=0, retry_count=1)
    self.assertRaises(curator.FailedExecution, action.do_action)
def test_do_action(self):
    """A successful delete returns None (curator actions have no result)."""
    client = Mock()
    client.snapshot.get_repository.return_value = testvars.test_repo
    client.snapshot.get.return_value = testvars.snapshots
    client.snapshot.delete.return_value = None
    snap_list = curator.SnapshotList(client, repository=testvars.repo_name)
    action = curator.DeleteSnapshots(snap_list)
    self.assertIsNone(action.do_action())
def test_init(self):
    """The action stores both the snapshot list and its client."""
    client = Mock()
    client.snapshot.get_repository.return_value = testvars.test_repo
    client.snapshot.get.return_value = testvars.snapshots
    snap_list = curator.SnapshotList(client, repository=testvars.repo_name)
    action = curator.DeleteSnapshots(snap_list)
    self.assertEqual(snap_list, action.snapshot_list)
    self.assertEqual(client, action.client)
def test_do_action_raises_exception(self):
    """A failing delete call surfaces as curator.FailedExecution."""
    client = Mock()
    client.snapshot.get_repository.return_value = testvars.test_repo
    client.snapshot.get.return_value = testvars.snapshots
    client.snapshot.delete.return_value = None
    client.snapshot.delete.side_effect = testvars.fake_fail
    snap_list = curator.SnapshotList(client, repository=testvars.repo_name)
    action = curator.DeleteSnapshots(snap_list)
    self.assertRaises(curator.FailedExecution, action.do_action)
def lambda_handler(event, context):
    """Prune snapshots older than two weeks, then kick off a new snapshot."""
    # Client for the AWS-hosted domain. The generous timeout is deliberate:
    # deleting snapshots can take a while, so keep the connection open for
    # long enough to get a response.
    client = Elasticsearch(
        hosts=[{'host': host, 'port': 443}],
        http_auth=awsauth,
        use_ssl=True,
        verify_certs=True,
        connection_class=RequestsHttpConnection,
        timeout=120,
    )

    # Deletion and creation live in separate try blocks: even when cleanup
    # fails we still want to attempt taking a new snapshot.
    try:
        stale = curator.SnapshotList(client, repository=repository_name)
        # Anything whose creation date is more than two weeks back.
        stale.filter_by_age(source='creation_date', direction='older',
                            unit='weeks', unit_count=2)
        curator.DeleteSnapshots(stale, retry_interval=30,
                                retry_count=3).do_action()
    except (curator.exceptions.SnapshotInProgress,
            curator.exceptions.NoSnapshots,
            curator.exceptions.FailedExecution) as e:
        print(e)

    try:
        # All indices; filter this list to snapshot only a subset.
        indices = curator.IndexList(client)
        # Fire-and-forget: snapshots may take a long time to complete.
        curator.Snapshot(indices, repository=repository_name,
                         name=snapshot_name,
                         wait_for_completion=False).do_action()
    except (curator.exceptions.SnapshotInProgress,
            curator.exceptions.FailedExecution) as e:
        print(e)
def lambda_handler(event, context):
    """Re-snapshot yesterday's index: drop any same-named snapshot, retake it."""
    awsauth = AWS4Auth(credentials.access_key, credentials.secret_key,
                       os.environ['region'], service,
                       session_token=credentials.token)
    es = Elasticsearch(
        hosts=[{'host': os.environ['host'], 'port': 443}],
        http_auth=awsauth,
        use_ssl=True,
        verify_certs=True,
        connection_class=RequestsHttpConnection,
        timeout=120,
    )

    # The snapshot is named after yesterday's index (prefix + date string).
    yesterday = datetime.utcnow() - timedelta(days=1)
    index_name = os.environ['index_prefix'] + yesterday.strftime(
        os.environ['date_string'])

    # If a snapshot with the same name already exists, delete it first.
    try:
        snapshots = curator.SnapshotList(
            es, repository=os.environ['es_snapshot_repository'])
        snapshots.filter_by_regex(kind='prefix', value=index_name)
        curator.DeleteSnapshots(snapshots, retry_interval=30,
                                retry_count=3).do_action()
    except curator.exceptions.NoSnapshots as e:
        logging.info("No snapshots to delete")
    except (curator.exceptions.SnapshotInProgress,
            curator.exceptions.NoSnapshots,
            curator.exceptions.FailedExecution) as e:
        logging.error("Failed to delete snapshot", exc_info=True)

    # Take a snapshot of yesterday's index.
    try:
        indices = curator.IndexList(es)
        indices.filter_by_regex(kind='prefix', value=index_name)
        curator.Snapshot(indices,
                         repository=os.environ['es_snapshot_repository'],
                         name=index_name,
                         wait_for_completion=True).do_action()
    except curator.exceptions.NoIndices as e:
        logging.warning("No indices to take snapshot")
    except (curator.exceptions.SnapshotInProgress,
            curator.exceptions.FailedExecution) as e:
        logging.error("Failed to take snapshot", exc_info=True)
def _snapshot_old_indices(es, env_prefix):
    """Snapshot all indices for one environment prefix (qa/stage/prod) whose
    name-embedded date is older than snapshot_index_days days.

    Failures are printed and swallowed so one environment does not block the
    others.
    """
    index_list = curator.IndexList(es)
    index_list.filter_by_regex(kind='prefix', value=env_prefix, exclude=False)
    if not index_list.indices:
        return
    # Filter by age: anything with a timestamp older than the cutoff in
    # the index name.
    index_list.filter_by_age(source='name', direction='older',
                             timestring='%Y.%m.%d', unit='days',
                             unit_count=snapshot_index_days)
    if not index_list.indices:
        return
    print("Found %s indices to snapshot " % len(index_list.indices))
    print(index_list.indices)
    try:
        # Snapshot names embed a strftime pattern that curator expands,
        # e.g. 'qa-%Y%m%d%H%M%S'.
        curator.Snapshot(index_list, repository='my-es-snapshot-repo',
                         name=env_prefix + '-%Y%m%d%H%M%S',
                         ignore_unavailable=True,
                         include_global_state=False, partial=True,
                         wait_for_completion=False, wait_interval=10,
                         max_wait=-1, skip_repo_fs_check=True).do_action()
    except (curator.exceptions.SnapshotInProgress,
            curator.exceptions.FailedExecution) as e:
        print(e)


def lambda_handler(event, context):
    """Delete over-age indices, snapshot aging qa/stage/prod indices, then
    prune over-age snapshots from the repository.

    Reads module-level config: host, awsauth, index_rentention_days,
    snapshot_index_days, snapshot_rentention_days.
    """
    es = Elasticsearch(hosts=[{'host': host, 'port': 443}],
                       http_auth=awsauth, use_ssl=True, verify_certs=True,
                       connection_class=RequestsHttpConnection)
    # print(es.info())

    # Index deletion first: drop anything older than the retention window.
    index_list = curator.IndexList(es)
    index_list.filter_by_age(source='name', direction='older',
                             timestring='%Y.%m.%d', unit='days',
                             unit_count=index_rentention_days)
    if index_list.indices:
        print("Found %s indices to delete " % len(index_list.indices))
        print(index_list.indices)
        try:
            curator.DeleteIndices(index_list).do_action()
        except (curator.exceptions.FailedExecution) as e:
            print(e)

    # Snapshots next. The per-environment logic was identical copy/paste
    # for qa, stage, and prod — factored into one helper.
    for env_prefix in ('qa', 'stage', 'prod'):
        _snapshot_old_indices(es, env_prefix)

    # Finally delete snapshots past the snapshot retention window.
    snapshot_list = curator.SnapshotList(es, repository='my-es-snapshot-repo')
    if snapshot_list.snapshots:
        snapshot_list.filter_by_age(source='creation_date',
                                    direction='older',
                                    timestring='%Y.%m.%d', unit='days',
                                    unit_count=snapshot_rentention_days)
        if snapshot_list.snapshots:
            try:
                curator.DeleteSnapshots(snapshot_list, retry_interval=10,
                                        retry_count=3).do_action()
            except (curator.exceptions.SnapshotInProgress,
                    curator.exceptions.NoSnapshots,
                    curator.exceptions.FailedExecution) as e:
                print(e)
def lambda_handler(event, context):
    """Register the S3 repository, prune old automatic snapshots, start a new one."""
    stamp = datetime.now()
    snapshot_prefix = 'automatic-'
    snapshot_name = snapshot_prefix + stamp.strftime("%Y%m%d%H%M%S")

    es = Elasticsearch(
        hosts=[{'host': host, 'port': 443}],
        http_auth=awsauth,
        use_ssl=True,
        verify_certs=True,
        connection_class=RequestsHttpConnection,
        # Deleting snapshots can take a while, so keep the connection open
        # for long enough to get a response.
        timeout=120)

    # REGISTER — ensure the S3 repository exists.
    payload = {
        "type": "s3",
        "settings": {
            "bucket": bucket,
            "region": region,
            "role_arn": role_arn,
        },
    }
    try:
        es.snapshot.create_repository(repository_name, json.dumps(payload))
    except (ElasticsearchException) as e:
        print(e)
        raise

    # DELETE — drop 'automatic-' snapshots older than the retention window.
    try:
        old = curator.SnapshotList(es, repository=repository_name)
        old.filter_by_regex(kind='prefix', value=snapshot_prefix)
        old.filter_by_age(source='creation_date', direction='older',
                          unit='days', unit_count=int(retention))
        # Delete the old snapshots.
        curator.DeleteSnapshots(old, retry_interval=30,
                                retry_count=3).do_action()
    except (curator.exceptions.NoSnapshots) as e:
        # This is fine
        print(e)
    except (curator.exceptions.SnapshotInProgress,
            curator.exceptions.FailedExecution) as e:
        print(e)
        raise

    # CREATE — snapshot every index; this can take a while, so don't wait
    # for it to complete.
    try:
        indices = curator.IndexList(es)
        curator.Snapshot(indices, repository=repository_name,
                         name=snapshot_name,
                         wait_for_completion=False).do_action()
    except (curator.exceptions.SnapshotInProgress,
            curator.exceptions.FailedExecution) as e:
        print(e)
        raise
# Select snapshots in `repo` older than `days` and either delete them or
# dry-run, depending on `action` ("DELETE" deletes; anything else dry-runs).
try:
    slo = curator.SnapshotList(client, repository=repo)
    slo.filter_by_age(source='creation_date', direction="older",
                      timestring=None, unit="days", unit_count=days,
                      epoch=None, exclude=False)
# Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt;
# narrowed to Exception so interrupts still propagate.
except Exception:
    print("No snapshots found older than:", days, "repo:", repo)
    sys.exit(0)

# Nothing matched the age filter — exit quietly.
if not slo.snapshots:
    print("No snapshots found older than:", days, "repo:", repo)
    sys.exit(0)

print('Running curator snapshot repository:', repo, "days:", days,
      "action:", action)
print('Affected snapshots')
for snap in slo.snapshots:
    print(snap)

delete_snapshots = curator.DeleteSnapshots(slo)
if action == "DELETE":
    delete_snapshots.do_action()
else:
    # Any other action value just reports what would be deleted.
    delete_snapshots.do_dry_run()