Example #1
def get_list_object(self):
    if self.action in snapshot_actions() or self.action == 'show_snapshots':
        self.list_object = SnapshotList(self.client,
                                        repository=self.repository)
    else:
        self.list_object = IndexList(self.client)
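Depending on the action, the method above ends up holding either a SnapshotList or an IndexList; both expose the same filter-style API. A small standalone sketch of the snapshot branch (endpoint, repository name and snapshot prefix are all assumptions, not part of the original):

from elasticsearch import Elasticsearch
from curator import SnapshotList

es = Elasticsearch('https://localhost:9200')           # assumed endpoint
slo = SnapshotList(es, repository='my-backups')        # assumed repository name
slo.filter_by_regex(kind='prefix', value='nightly-')   # assumed snapshot prefix
print(slo.snapshots)                                   # snapshots left after filtering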
Example #2
def __list_indices(self, unit: str, unit_count: int) -> IndexList:
    index_list = IndexList(self.__es)
    index_list.filter_by_regex(kind='prefix', value='index-prefix')
    index_list.filter_by_age(source='name',
                             direction='older',
                             timestring='%Y-%m-%d',
                             unit=unit,
                             unit_count=unit_count)
    return index_list
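The private helper above only assembles a filter chain. A self-contained sketch of the same chain with concrete values, plus one possible consumer (the 30-day cutoff and the DeleteIndices step are assumptions, not part of the original snippet):

from elasticsearch import Elasticsearch
from curator import DeleteIndices, IndexList

es = Elasticsearch('https://localhost:9200')   # assumed endpoint

# Same filters as __list_indices, with concrete values: indices whose names
# start with 'index-prefix' and whose %Y-%m-%d date is more than 30 days old.
ilo = IndexList(es)
ilo.filter_by_regex(kind='prefix', value='index-prefix')
ilo.filter_by_age(source='name', direction='older',
                  timestring='%Y-%m-%d', unit='days', unit_count=30)

DeleteIndices(ilo).do_action()   # one possible consumer of the filtered list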
Example #3
def get_alias_obj(self):
    action_obj = Alias(name=self.alias['name'],
                       extra_settings=self.alias['extra_settings'])
    for k in ['remove', 'add']:
        if k in self.alias:
            self.logger.debug(
                '{0}ing matching indices {1} alias "{2}"'.format(
                    'Add' if k == 'add' else 'Remov',  # 0 = "Add" or "Remov"
                    'to' if k == 'add' else 'from',  # 1 = "to" or "from"
                    self.alias['name']  # 2 = the alias name
                ))
            self.alias[k]['ilo'] = IndexList(self.client)
            self.alias[k]['ilo'].iterate_filters(
                {'filters': self.alias[k]['filters']})
            f = getattr(action_obj, k)
            f(self.alias[k]['ilo'], warn_if_no_indices=self.alias['wini'])
    return action_obj
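get_alias_obj only builds the action object; nothing above executes it. A minimal standalone sketch of the same Alias workflow with the dynamic add/remove loop collapsed to a single add (alias name, index prefix and endpoint are assumptions):

from elasticsearch import Elasticsearch
from curator import Alias, IndexList

es = Elasticsearch('https://localhost:9200')       # assumed endpoint

alias_action = Alias(name='logs-current')          # assumed alias name
ilo = IndexList(es)
ilo.filter_by_regex(kind='prefix', value='logs-')  # assumed index prefix
alias_action.add(ilo, warn_if_no_indices=True)     # same call the loop above makes

alias_action.do_dry_run()   # log the planned changes without applying them
alias_action.do_action()    # point the alias at the matching indices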
Example #4
# load_config, Bucket, snapshot_running and log are project-local helpers not
# shown here; the imports below cover the rest of what this function uses (the
# elasticsearch/curator lines assume the versions the script was written for).
import math
import time
from datetime import datetime
from itertools import cycle

from elasticsearch import Elasticsearch
from curator import IndexList, Snapshot


def do(args):
    """
    Chunk indices into multiple snapshot chunks for the day.
    """

    # Load our config and junk
    config = load_config(args.config)
    snapshot_prefix = datetime.now().strftime(config['name'])

    # Build client
    auth = (config['username'], config['password'])
    es = Elasticsearch(config['host'],
                       use_ssl='https' in config['host'],
                       verify_certs=True,
                       http_auth=auth,
                       request_timeout=900)

    if not es.ping():
        raise SystemExit('Cannot authenticate!')

    if not es.snapshot.verify_repository(config['repo']).get('nodes'):
        raise SystemExit('Could not verify repository!')

    # Fetch indices
    ilo = IndexList(es)
    ilo.filter_closed()
    ilo.filter_kibana()
    ilo.empty_list_check()

    # Order indices according to shard count
    unordered_indices = ilo.index_info
    ordering = sorted(
        unordered_indices,
        key=lambda k: int(unordered_indices[k]['number_of_shards']))
    ordered_indices = []

    for key in ordering:
        index = {
            'index': key,
            'shards': int(unordered_indices[key]['number_of_shards'])
        }
        ordered_indices.append(index)

    # Build buckets
    total_shards = sum([i['shards'] for i in ordered_indices])
    buckets = [
        Bucket(config['chunk_size'], config['threshold'])
        for _ in range((math.ceil(total_shards / config['chunk_size'])))
    ]

    # Populate them by attempting to add shards to each bucket
    def find_next_bucket(index, buckets):
        """
        Find next bucket with available space, returning None when nothing is
        available.
        """
        sorted_buckets = sorted(buckets, key=lambda b: b.free)
        for bucket in sorted_buckets:
            if bucket.free >= index['shards']:
                return bucket
        return None

    while ordered_indices:
        index = ordered_indices.pop()
        bucket = find_next_bucket(index, buckets)
        if bucket:
            bucket.add(index)
        else:
            new_bucket = Bucket(config['chunk_size'], config['threshold'])
            new_bucket.add(index)
            buckets.append(new_bucket)

    # Take small buckets and merge them with existing buckets
    small_buckets = []
    big_buckets = []
    for bucket in buckets:
        if bucket.too_small:
            small_buckets.append(bucket)
        else:
            big_buckets.append(bucket)

    # Spread them over all the remaining buckets
    big_bucket_cycle = cycle(big_buckets)
    for bucket in small_buckets:
        for index in bucket.data:
            next(big_bucket_cycle).add(index)
    buckets = big_buckets

    # Build ilos for each bucket
    ilos = []
    for bucket in buckets:
        bucket_ilo = IndexList(es)
        bucket_ilo.filter_closed()
        bucket_ilo.filter_kibana()
        bucket_ilo.filter_by_regex(kind='regex',
                                   value=bucket.regex,
                                   exclude=False)
        ilos.append(bucket_ilo)

    # Wait until repo is available
    while snapshot_running(es):
        time.sleep(60)

    # Begin chunked snapshots, waiting for each chunk to complete before
    # starting the next; only the final chunk is started without waiting
    for i, bucket_ilo in enumerate(ilos, 1):
        wait = i != len(ilos)
        slo = Snapshot(bucket_ilo,
                       config['repo'],
                       name=f'{snapshot_prefix}-chunk-{i}',
                       ignore_unavailable=True,
                       include_global_state=False,
                       partial=True,
                       wait_for_completion=wait,
                       wait_interval=60)
        log.info(f'Starting snapshot {slo.name}, chunk {i}/{len(ilos)}')
        slo.do_action()

    log.info(f'Completed {len(ilos)} chunks')
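The script depends on a project-local Bucket helper that isn't included in the example. A minimal sketch consistent with how the code uses it (constructor arguments, free, too_small, data and regex are all inferred from the call sites; the real implementation may differ):

import re


class Bucket:
    """Hypothetical shard bucket, reconstructed from how the chunking script above uses it."""

    def __init__(self, chunk_size, threshold=0):
        self.chunk_size = chunk_size   # maximum shards per snapshot chunk
        self.threshold = threshold     # below this shard count the bucket is "too small"
        self.data = []                 # index dicts: {'index': name, 'shards': n}

    @property
    def used(self):
        return sum(i['shards'] for i in self.data)

    @property
    def free(self):
        """Remaining shard capacity in this bucket."""
        return self.chunk_size - self.used

    @property
    def too_small(self):
        """True when the bucket holds fewer shards than the merge threshold."""
        return self.used < self.threshold

    @property
    def regex(self):
        """Regex matching exactly the index names collected in this bucket."""
        return '^(' + '|'.join(re.escape(i['index']) for i in self.data) + ')$'

    def add(self, index):
        self.data.append(index)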
Example #5
def build_delete_action_for_expired_indices(logger, cluster, delete_timeout):
    elastic = connect_to_elasticsearch(cluster.address)
    index_list = IndexList(elastic)
    filter_expired_indices(logger, index_list, cluster)
    return DeleteIndices(index_list, master_timeout=delete_timeout)
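connect_to_elasticsearch and filter_expired_indices are project-local helpers that aren't shown. A rough sketch of what the filtering step could look like, reusing the curator age filter from the earlier __list_indices helper (the timestring format and the cluster.retention_days attribute are assumptions):

def filter_expired_indices(logger, index_list, cluster):
    # Hypothetical implementation: an index is expired when the date embedded
    # in its name is older than the cluster's retention window.
    logger.info('Filtering expired indices on %s', cluster.address)
    index_list.filter_by_age(source='name',
                             direction='older',
                             timestring='%Y-%m-%d',
                             unit='days',
                             unit_count=cluster.retention_days)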