Example no. 1
0
def reweight_by_utilization(options):
    if options.oload <= 100:
        raise Exception("You must give a percentage higher than 100.")

    cephinfo.init_pg()
    pgm = cephinfo.pg_data

    pgs_by_osd = defaultdict(int)

    if options.by_pg:
        weight_sum = 0.0
        num_pg_copies = 0
        num_osds = 0
        for p in pgm['pg_stats']:
            pool = p['pgid'].split('.')[0]
            if options.pools and pool not in options.pools:
                continue
            for q in p['up']:
                if not pgs_by_osd[q]:
                    pgs_by_osd[q] = 0
                    weight_sum += get_weight(q, 'crush_weight')
                    num_osds += 1
                pgs_by_osd[q] += 1
                num_pg_copies += 1

        if not num_osds or (num_pg_copies / num_osds <
                            mon_reweight_min_pgs_per_osd):
            raise Exception(
                'Refusing to reweight: we only have %d PGs across %d osds!' %
                (num_pg_copies, num_osds))

        average_util = num_pg_copies / weight_sum
        if VERBOSE:
            print "weight_sum: %3f, num_pg_copies: %d, num_osds: %d" % (
                weight_sum, num_pg_copies, num_osds)

    else:
        num_osd = len(pgm['osd_stats'])

        if num_osd < MIN_OSDS:
            raise Exception(
                "Refusing to reweight: we have only %d OSDs! (%d needed)" %
                (num_osd, MIN_OSDS))

        # Avoid putting a small number (or 0) in the denominator when calculating average_util
        if pgm['osd_stats_sum'][
                'kb'] * 1024 / num_osd < mon_reweight_min_bytes_per_osd:
            raise Exception(
                "Refusing to reweight: we only have %d GB total space across all osds! (%d GB needed)"
                % (pgm['osd_stats_sum']['kb'] / 1024 / 1024,
                   mon_reweight_min_bytes_per_osd * num_osd / 1024 / 1024 /
                   1024))

        if pgm['osd_stats_sum'][
                'kb_used'] * 1024 / num_osd < mon_reweight_min_bytes_used_per_osd:
            raise Exception(
                "Refusing to reweight: we only have %d GB used across all osds! (%d GB needed)"
                % (pgm['osd_stats_sum']['kb_used'] / 1024 / 1024,
                   mon_reweight_min_bytes_used_per_osd * num_osd / 1024 /
                   1024 / 1024))

        average_util = float(pgm['osd_stats_sum']['kb_used']) / float(
            pgm['osd_stats_sum']['kb'])

    if VERBOSE: print "Found %d OSDs in total" % len(pgm['osd_stats'])

    # filter out the empty osds
    nonempty_osds = [
        osd for osd in pgm['osd_stats'] if float(osd['kb']) > 0
        and get_weight(osd['osd'], type='crush_weight') > 0
    ]

    if VERBOSE: print "Found %d non-empty OSDs" % len(nonempty_osds)

    # optionally filter out osds not in the requested bucket
    # and recalculate average_util
    if options.bucket:
        bucket_osds = []
        for bucket in options.bucket:
            bucket_osds.extend(ceph_osds_in_bucket.list(bucket))
        sum_kb = 0
        sum_weight = 0
        sum_kb_used = 0
        filtered_osds = []
        for osd in nonempty_osds:
            name = 'osd.%d' % osd['osd']
            if name in bucket_osds:
                sum_weight += get_weight(osd['osd'],
                                         'crush_weight') * 1024 * 1024 * 1024
                sum_kb_used += osd['kb_used']
                filtered_osds.insert(0, osd)
        average_util = float(sum_kb_used) / float(sum_weight)
        if VERBOSE:
            print "Found %d OSDs after filtering by bucket" % len(
                filtered_osds)
    else:
        filtered_osds = nonempty_osds

    # sort osds from most to least deviant from the average_util
    if options.by_pg:
        osds = sorted(
            filtered_osds,
            key=lambda osd: -abs(average_util - pgs_by_osd[osd['osd']] /
                                 get_weight(osd['osd'], type='crush_weight')))
    else:
        #osds = sorted(filtered_osds, key=lambda osd: -abs(average_util - float(osd['kb_used']) / float(osd['kb'])))
        osds = sorted(
            filtered_osds,
            key=lambda osd: -abs(average_util - float(osd['kb_used']) /
                                 (get_weight(osd['osd'], type='crush_weight') *
                                  1024 * 1024 * 1024)))

    # adjust down only if we are above the threshold
    overload_util = average_util * options.oload / 100.0

    # but aggressively adjust weights up whenever possible
    underload_util = average_util

    if VERBOSE:
        print "average_util: %04f, overload_util: %04f, underload_util: %04f. " % (
            average_util, overload_util, underload_util)

    n = 0
    for osd in osds:
        if options.by_pg:
            util = pgs_by_osd[osd['osd']] / get_weight(osd['osd'],
                                                       type='crush_weight')
        else:
            util = float(osd['kb_used']) / (get_weight(
                osd['osd'], type='crush_weight') * 1024 * 1024 * 1024)

        # skip very empty OSDs
        if util < 0.001:
            continue

        if util >= overload_util:
            # Assign a lower weight to overloaded OSDs. The current weight
            # is a factor to take into account the original weights,
            # to represent e.g. differing storage capacities
            weight = get_weight(osd['osd'])
            new_weight = (average_util / util) * float(weight)
            new_weight = max(new_weight, weight - options.max_change)
            print "osd.%d (%4f >= %4f) [%04f -> %04f]" % (
                osd['osd'], util, overload_util, weight, new_weight)
            if options.doit:
                change_weight(osd['osd'], new_weight, options.really)
            n += 1
            if n >= options.num_osds: break
        if not options.no_increasing and util <= underload_util:
            # assign a higher weight.. if we can
            weight = get_weight(osd['osd'])
            new_weight = (average_util / util) * float(weight)
            new_weight = max(new_weight, weight + 0.01)
            new_weight = min(new_weight, weight + options.max_change)
            if new_weight > 1.0:
                new_weight = 1.0
            if new_weight > weight:
                print "osd.%d (%4f <= %4f) [%04f -> %04f]" % (
                    osd['osd'], util, underload_util, weight, new_weight)
                if options.doit:
                    change_weight(osd['osd'], new_weight, options.really)
                n += 1
                if n >= options.num_osds: break
def reweight_by_utilization(options):
  if options.oload <= 100:
    raise Exception("You must give a percentage higher than 100.")
#      "The reweighting threshold will be calculated as <average-utilization> "
#      "times <input-percentage>. For example, an argument of 200 would "
#      "reweight OSDs which are twice as utilized as the average OSD.\n";

  cephinfo.init_pg()
  pgm = cephinfo.pg_data

  pgs_by_osd = defaultdict(int)

  if options.by_pg:
    weight_sum = 0.0
    num_pg_copies = 0
    num_osds = 0
    for p in pgm['pg_stats']:
      pool = p['pgid'].split('.')[0]
      if options.pools and pool not in options.pools:
         continue
      for a in p['up']:
        if not pgs_by_osd[a]:
          num_osds += 1
          weight_sum += get_weight(a,'crush_weight')
          pgs_by_osd[a] = 0
        pgs_by_osd[a] += 1
        num_pg_copies += 1

    if not num_osds or (num_pg_copies / num_osds < 10):
      raise Exception('Refusing to reweight: we only have %d PGs across %d osds!' % (num_pg_copies, num_osds))

    average_util = num_pg_copies / weight_sum
    print "weight_sum: %3f, num_pg_copies: %d, num_osds: %d" % (weight_sum, num_pg_copies, num_osds)

  else:

    # Avoid putting a small number (or 0) in the denominator when calculating
    # average_util
    if pgm['osd_stats_sum']['kb'] < 1024:
      raise Exception("Refusing to reweight: we only have %d kB across all osds!" % pgm['osd_stats_sum']['kb'])

    if pgm['osd_stats_sum']['kb_used'] < 5 * 1024:
      raise Exception("Refusing to reweight: we only have %d kB across all osds!" % pgm['osd_stats_sum']['kb_used'])

    average_util = float(pgm['osd_stats_sum']['kb_used']) / float(pgm['osd_stats_sum']['kb'])

  # adjust down only if we are above the threshold
  overload_util = average_util * options.oload / 100.0

  # adjust weights up whenever possible
  underload_util = average_util - (overload_util - average_util)

  print "average_util: %04f, overload_util: %04f, underload_util: %04f. " %(average_util, overload_util, underload_util)

  print "reweighted: "

  n = 0
  if options.by_pg:
    osds = sorted(pgm['osd_stats'], key=lambda osd: -abs(average_util - pgs_by_osd[osd['osd']] / get_weight(osd['osd'],type='crush_weight')))
  else:
    osds = sorted(pgm['osd_stats'], key=lambda osd: -abs(average_util - float(osd['kb_used']) / float(osd['kb'])))
  
  for osd in osds:
    if options.by_pg:
      util = pgs_by_osd[osd['osd']] / get_weight(osd['osd'],type='crush_weight')
    else:
      util = float(osd['kb_used']) / float(osd['kb'])

    # skip the OSDs that do not contain anything (probably a different crush root)
    if util < 0.01:
      continue

    if util >= overload_util:
      # Assign a lower weight to overloaded OSDs. The current weight
      # is a factor to take into account the original weights,
      # to represent e.g. differing storage capacities
      weight = get_weight(osd['osd'])
      new_weight = (average_util / util) * float(weight)
      if weight - new_weight > options.max_change:
        new_weight = weight - options.max_change
      print "%d (%4f >= %4f) [%04f -> %04f]" % (osd['osd'], util, overload_util, weight, new_weight)
      if options.doit: change_weight(osd['osd'], new_weight, options.really)
      n += 1
      if n >= options.num_osds: break
    if options.adjust_up and util <= underload_util:
      # assign a higher weight.. if we can
      weight = get_weight(osd['osd'])
      new_weight = (average_util / util) * float(weight)
      if new_weight - weight > options.max_change:
        new_weight = weight + options.max_change
      if new_weight > 1.0:
        new_weight = 1.0
      if new_weight > weight:
        print "%d (%4f <= %4f) [%04f -> %04f]" % (osd['osd'], util, underload_util, weight, new_weight)
        if options.doit: change_weight(osd['osd'], new_weight, options.really)
        n += 1
        if n >= options.num_osds: break
#!/usr/bin/env python

from cephinfo import cephinfo
import itertools
import random

# read the ceph PG and OSD info from the ceph-mon
cephinfo.init_pg()
cephinfo.init_osd()


osds = [ osd['osd'] for osd in cephinfo.get_osds_data() ]
triple_combinations = [ tuple(pg['acting']) for pg in cephinfo.get_pg_stats() ]

print "We have %d OSDs and %d PGs, hence %d combinations e.g. like this: %s" % (len(osds), cephinfo.get_n_pgs(), len(triple_combinations), triple_combinations[0])

nFailures = 0
nDataLoss = 0
nSimulations = 1000

print "Simulating %d triple failures" % nSimulations

for i in xrange(0,nSimulations):
  triple_failure = random.sample(osds, 3)

  nFailures += 1

  for order in itertools.permutations(triple_failure):
    if order in triple_combinations:
      nDataLoss += 1
      print "3 replica data loss incident with failure %s" % str(order)
def reweight_by_utilization(options):
    """Reweight OSDs whose utilization deviates from the cluster average.

    The reweighting threshold is <average-utilization> times
    ``options.oload`` percent; e.g. an oload of 200 targets OSDs twice as
    utilized as the average.  Utilization is PG copies per crush weight
    with ``options.by_pg``, otherwise kb_used / kb.  At most
    ``options.num_osds`` OSDs are adjusted, most deviant first; weights
    are changed (via ``change_weight``) only when ``options.doit`` is set.
    """
    if options.oload <= 100:
        raise Exception("You must give a percentage higher than 100.")


#      "The reweighting threshold will be calculated as <average-utilization> "
#      "times <input-percentage>. For example, an argument of 200 would "
#      "reweight OSDs which are twice as utilized as the average OSD.\n";

    # Fetch current PG/OSD statistics from the ceph-mon.
    cephinfo.init_pg()
    pgm = cephinfo.pg_data

    pgs_by_osd = defaultdict(int)

    if options.by_pg:
        weight_sum = 0.0
        num_pg_copies = 0
        num_osds = 0
        for p in pgm['pg_stats']:
            pool = p['pgid'].split('.')[0]
            if options.pools and pool not in options.pools:
                continue
            for q in p['up']:
                if not pgs_by_osd[q]:
                    # First sighting of this OSD; count it and its weight.
                    pgs_by_osd[q] = 0
                    weight_sum += get_weight(q, 'crush_weight')
                    num_osds += 1
                pgs_by_osd[q] += 1
                num_pg_copies += 1

        if not num_osds or (num_pg_copies / num_osds <
                            mon_reweight_min_pgs_per_osd):
            raise Exception(
                'Refusing to reweight: we only have %d PGs across %d osds!' %
                (num_pg_copies, num_osds))

        average_util = num_pg_copies / weight_sum
        print "weight_sum: %3f, num_pg_copies: %d, num_osds: %d" % (
            weight_sum, num_pg_copies, num_osds)

    else:
        num_osd = len(pgm['osd_stats'])
        # Avoid putting a small number (or 0) in the denominator when calculating
        # average_util
        # NOTE(review): this check is per-OSD (mon_reweight_min_bytes_per_osd)
        # but the message reports the cluster-wide total -- confirm wording.
        if pgm['osd_stats_sum'][
                'kb'] * 1024 / num_osd < mon_reweight_min_bytes_per_osd:
            raise Exception(
                "Refusing to reweight: we only have %d kB across all osds!" %
                pgm['osd_stats_sum']['kb'])

        # NOTE(review): this checks kb_used against an absolute 5 MB floor,
        # unlike the per-OSD check above, and reuses the total-kb message.
        if pgm['osd_stats_sum']['kb_used'] < 5 * 1024:
            raise Exception(
                "Refusing to reweight: we only have %d kB across all osds!" %
                pgm['osd_stats_sum']['kb_used'])

        average_util = float(pgm['osd_stats_sum']['kb_used']) / float(
            pgm['osd_stats_sum']['kb'])

    # adjust down only if we are above the threshold
    overload_util = average_util * options.oload / 100.0

    # but aggressively adjust weights up whenever possible
    underload_util = average_util  # - (overload_util - average_util)

    print "average_util: %04f, overload_util: %04f, underload_util: %04f. " % (
        average_util, overload_util, underload_util)

    print "reweighted: "

    # sort to get heaviest osds first

    # Skip OSDs reporting no capacity or zero crush weight.
    nonempty_osds = [
        osd for osd in pgm['osd_stats'] if float(osd['kb']) > 0
        and get_weight(osd['osd'], type='crush_weight') > 0
    ]

    # Most deviant OSDs (largest |average - util|) come first.
    if options.by_pg:
        osds = sorted(
            nonempty_osds,
            key=lambda osd: -abs(average_util - pgs_by_osd[osd['osd']] /
                                 get_weight(osd['osd'], type='crush_weight')))
    else:
        osds = sorted(nonempty_osds,
                      key=lambda osd: -abs(average_util - float(osd['kb_used'])
                                           / float(osd['kb'])))

    n = 0
    for osd in osds:
        if options.by_pg:
            util = pgs_by_osd[osd['osd']] / get_weight(osd['osd'],
                                                       type='crush_weight')
        else:
            util = float(osd['kb_used']) / float(osd['kb'])

        # skip the OSDs that do not contain anything (probably a different crush root)
        if util < 0.01:
            continue

        if util >= overload_util:
            # Assign a lower weight to overloaded OSDs. The current weight
            # is a factor to take into account the original weights,
            # to represent e.g. differing storage capacities
            weight = get_weight(osd['osd'])
            new_weight = (average_util / util) * float(weight)
            # Cap the decrease at options.max_change per pass.
            new_weight = max(new_weight, weight - options.max_change)
            print "%d (%4f >= %4f) [%04f -> %04f]" % (
                osd['osd'], util, overload_util, weight, new_weight)
            if options.doit:
                change_weight(osd['osd'], new_weight, options.really)
            n += 1
            if n >= options.num_osds: break
        if options.adjust_up and util <= underload_util:
            # assign a higher weight.. if we can
            weight = get_weight(osd['osd'])
            new_weight = (average_util / util) * float(weight)
            # Cap the increase at options.max_change and clamp to 1.0.
            new_weight = min(new_weight, weight + options.max_change)
            if new_weight > 1.0:
                new_weight = 1.0
            if new_weight > weight:
                print "%d (%4f <= %4f) [%04f -> %04f]" % (
                    osd['osd'], util, underload_util, weight, new_weight)
                if options.doit:
                    change_weight(osd['osd'], new_weight, options.really)
                n += 1
                if n >= options.num_osds: break
def reweight_by_utilization(options):
  """Reweight OSDs whose utilization deviates from the cluster average.

  Utilization is PG copies per crush weight with options.by_pg, otherwise
  kb_used per byte of crush weight.  OSDs at or above options.oload
  percent of the average are weighted down; unless options.no_increasing
  is set, OSDs at or below the average are weighted up.  At most
  options.num_osds OSDs are adjusted, most deviant first; weights are
  changed (via change_weight) only when options.doit is set.
  """
  if options.oload <= 100:
    raise Exception("You must give a percentage higher than 100.")

  # Fetch current PG/OSD statistics from the ceph-mon.
  cephinfo.init_pg()
  pgm = cephinfo.pg_data

  pgs_by_osd = defaultdict(int)

  if options.by_pg:
    weight_sum = 0.0
    num_pg_copies = 0
    num_osds = 0
    for p in pgm['pg_stats']:
      pool = p['pgid'].split('.')[0]
      if options.pools and pool not in options.pools:
         continue
      for q in p['up']:
        if not pgs_by_osd[q]:
          # First sighting of this OSD; count it and its crush weight.
          pgs_by_osd[q] = 0
          weight_sum += get_weight(q,'crush_weight')
          num_osds += 1
        pgs_by_osd[q] += 1
        num_pg_copies += 1

    if not num_osds or (num_pg_copies / num_osds < mon_reweight_min_pgs_per_osd):
      raise Exception('Refusing to reweight: we only have %d PGs across %d osds!' % (num_pg_copies, num_osds))

    average_util = num_pg_copies / weight_sum
    if VERBOSE: print "weight_sum: %3f, num_pg_copies: %d, num_osds: %d" % (weight_sum, num_pg_copies, num_osds)

  else:
    num_osd = len(pgm['osd_stats'])

    if num_osd < MIN_OSDS:
      raise Exception("Refusing to reweight: we have only %d OSDs! (%d needed)" % (num_osd, MIN_OSDS))

    # Avoid putting a small number (or 0) in the denominator when calculating average_util
    if pgm['osd_stats_sum']['kb'] * 1024 / num_osd < mon_reweight_min_bytes_per_osd:
      raise Exception("Refusing to reweight: we only have %d GB total space across all osds! (%d GB needed)" % (pgm['osd_stats_sum']['kb'] / 1024 / 1024, mon_reweight_min_bytes_per_osd * num_osd / 1024 / 1024 / 1024))

    if pgm['osd_stats_sum']['kb_used'] * 1024 / num_osd < mon_reweight_min_bytes_used_per_osd:
      raise Exception("Refusing to reweight: we only have %d GB used across all osds! (%d GB needed)" % (pgm['osd_stats_sum']['kb_used'] / 1024 / 1024, mon_reweight_min_bytes_used_per_osd * num_osd / 1024 / 1024 / 1024))

    average_util = float(pgm['osd_stats_sum']['kb_used']) / float(pgm['osd_stats_sum']['kb'])

  if VERBOSE: print "Found %d OSDs in total" % len(pgm['osd_stats'])

  # filter out the empty osds
  nonempty_osds = [ osd for osd in pgm['osd_stats'] if float(osd['kb']) > 0 and get_weight(osd['osd'],type='crush_weight') > 0 ]

  if VERBOSE: print "Found %d non-empty OSDs" % len(nonempty_osds)

  # optionally filter out osds not in the requested bucket
  # and recalculate average_util
  if options.bucket:
    bucket_osds = []
    for bucket in options.bucket:
      bucket_osds.extend(ceph_osds_in_bucket.list(bucket))
    sum_kb = 0
    sum_weight = 0
    sum_kb_used = 0
    filtered_osds = []
    for osd in nonempty_osds:
      name = 'osd.%d' % osd['osd']
      if name in bucket_osds:
        # Scale crush weight by 1024^3 to compare against kb_used
        # (assumes crush weight is in TiB -- TODO confirm).
        sum_weight += get_weight(osd['osd'], 'crush_weight') * 1024*1024*1024
        sum_kb_used += osd['kb_used']
        filtered_osds.insert(0, osd)
    # NOTE(review): raises ZeroDivisionError if no OSD matched the bucket(s).
    average_util = float(sum_kb_used) / float(sum_weight)
    if VERBOSE: print "Found %d OSDs after filtering by bucket" % len(filtered_osds)
  else:
    filtered_osds = nonempty_osds


  # sort osds from most to least deviant from the average_util
  if options.by_pg:
    osds = sorted(filtered_osds, key=lambda osd: -abs(average_util - pgs_by_osd[osd['osd']] / get_weight(osd['osd'],type='crush_weight')))
  else:
    #osds = sorted(filtered_osds, key=lambda osd: -abs(average_util - float(osd['kb_used']) / float(osd['kb'])))
    osds = sorted(filtered_osds, key=lambda osd: -abs(average_util - float(osd['kb_used']) / (get_weight(osd['osd'],type='crush_weight') * 1024*1024*1024)))

  # adjust down only if we are above the threshold
  overload_util = average_util * options.oload / 100.0

  # but aggressively adjust weights up whenever possible
  underload_util = average_util

  if VERBOSE: print "average_util: %04f, overload_util: %04f, underload_util: %04f. " %(average_util, overload_util, underload_util)

  n = 0
  for osd in osds:
    if options.by_pg:
      util = pgs_by_osd[osd['osd']] / get_weight(osd['osd'],type='crush_weight')
    else:
      util = float(osd['kb_used']) / (get_weight(osd['osd'],type='crush_weight') * 1024*1024*1024)

    # skip very empty OSDs
    if util < 0.001:
      continue

    if util >= overload_util:
      # Assign a lower weight to overloaded OSDs. The current weight
      # is a factor to take into account the original weights,
      # to represent e.g. differing storage capacities
      weight = get_weight(osd['osd'])
      new_weight = (average_util / util) * float(weight)
      # Cap the decrease at options.max_change per pass.
      new_weight = max(new_weight, weight - options.max_change)
      print "osd.%d (%4f >= %4f) [%04f -> %04f]" % (osd['osd'], util, overload_util, weight, new_weight)
      if options.doit: change_weight(osd['osd'], new_weight, options.really)
      n += 1
      if n >= options.num_osds: break
    if not options.no_increasing and util <= underload_util:
      # assign a higher weight.. if we can
      weight = get_weight(osd['osd'])
      new_weight = (average_util / util) * float(weight)
      # Raise by at least 0.01, at most max_change, clamped to 1.0.
      new_weight = max(new_weight, weight + 0.01)
      new_weight = min(new_weight, weight + options.max_change)
      if new_weight > 1.0:
        new_weight = 1.0
      if new_weight > weight:
        print "osd.%d (%4f <= %4f) [%04f -> %04f]" % (osd['osd'], util, underload_util, weight, new_weight)
        if options.doit: change_weight(osd['osd'], new_weight, options.really)
        n += 1
        if n >= options.num_osds: break
Example no. 6
0
def reweight_by_utilization(oload, by_pg, pools, doit, really):
    if oload <= 100:
        raise Exception("You must give a percentage higher than 100.")


#      "The reweighting threshold will be calculated as <average-utilization> "
#      "times <input-percentage>. For example, an argument of 200 would "
#      "reweight OSDs which are twice as utilized as the average OSD.\n";

    cephinfo.init_pg()
    pgm = cephinfo.pg_data

    pgs_by_osd = defaultdict(int)

    if by_pg:
        weight_sum = 0.0
        num_pg_copies = 0
        num_osds = 0
        for p in pgm['pg_stats']:
            pool = p['pgid'].split('.')[0]
            if pools and pool not in pools:
                continue
            for a in p['acting']:
                if not pgs_by_osd[a]:
                    num_osds += 1
                    weight_sum += get_weight(a, 'crush_weight')
                    pgs_by_osd[a] = 0
                pgs_by_osd[a] += 1
                num_pg_copies += 1

        if not num_osds or (num_pg_copies / num_osds < 10):
            raise Exception(
                'Refusing to reweight: we only have %d PGs across %d osds!' %
                (num_pg_copies, num_osds))

        average_util = num_pg_copies / weight_sum
        print "weight_sum: %3f, num_pg_copies: %d, num_osds: %d" % (
            weight_sum, num_pg_copies, num_osds)

    else:

        # Avoid putting a small number (or 0) in the denominator when calculating
        # average_util
        if pgm['osd_stats_sum']['kb'] < 1024:
            raise Exception(
                "Refusing to reweight: we only have %d kB across all osds!" %
                pgm['osd_stats_sum']['kb'])

        if pgm['osd_stats_sum']['kb_used'] < 5 * 1024:
            raise Exception(
                "Refusing to reweight: we only have %d kB across all osds!" %
                pgm['osd_stats_sum']['kb_used'])

        average_util = float(pgm['osd_stats_sum']['kb_used']) / float(
            pgm['osd_stats_sum']['kb'])

    # adjust down only if we are above the threshold
    overload_util = average_util * oload / 100.0

    # adjust weights up whenever possible
    underload_util = average_util  # - (overload_util - average_util)

    print "average_util: %04f, overload_util: %04f, underload_util: %04f. " % (
        average_util, overload_util, underload_util)

    print "reweighted: "

    for osd in pgm['osd_stats']:
        if by_pg:
            util = pgs_by_osd[osd['osd']] / get_weight(osd['osd'],
                                                       type='crush_weight')
        else:
            util = float(osd['kb_used']) / float(osd['kb'])

        # skip the OSDs that do not contain anything (probably a different crush root)
        if util < 0.01:
            continue

        if util >= overload_util:
            # Assign a lower weight to overloaded OSDs. The current weight
            # is a factor to take into account the original weights,
            # to represent e.g. differing storage capacities
            weight = get_weight(osd['osd'])
            new_weight = (average_util / util) * float(weight)
            print "%d (%4f >= %4f) [%04f -> %04f]" % (
                osd['osd'], util, overload_util, weight, new_weight)
            if doit: change_weight(osd['osd'], new_weight, really)
        if util <= underload_util:
            # assign a higher weight.. if we can
            weight = get_weight(osd['osd'])
            new_weight = (average_util / util) * float(weight)
            if new_weight > 1.0:
                new_weight = 1.0
            if new_weight > weight:
                print "%d (%4f <= %4f) [%04f -> %04f]" % (
                    osd['osd'], util, underload_util, weight, new_weight)
                if doit: change_weight(osd['osd'], new_weight, really)
Example no. 7
0
#!/usr/bin/env python
# Monte-Carlo estimate of data loss under simultaneous triple OSD failure:
# draw 3 random OSDs and check whether some ordering of the trio is the
# acting set of an existing PG (i.e. all replicas of that PG would be lost).

from cephinfo import cephinfo
import itertools
import random

# read the ceph PG and OSD info from the ceph-mon
cephinfo.init_pg()
cephinfo.init_osd()

# All OSD ids, and each PG's acting set as an (ordered) tuple.
osds = [osd['osd'] for osd in cephinfo.get_osds_data()]
triple_combinations = [tuple(pg['acting']) for pg in cephinfo.get_pg_stats()]

print "We have %d OSDs and %d PGs, hence %d combinations e.g. like this: %s" % (
    len(osds), cephinfo.get_n_pgs(), len(triple_combinations),
    triple_combinations[0])

nFailures = 0
nDataLoss = 0
nSimulations = 1000

print "Simulating %d triple failures" % nSimulations

for i in xrange(0, nSimulations):
    triple_failure = random.sample(osds, 3)

    nFailures += 1

    # Acting sets are ordered but the failure is not, so try every
    # permutation of the failed trio.
    # NOTE(review): nDataLoss is tallied but never reported below this
    # point -- the chunk may be truncated here; confirm against the
    # original script.
    for order in itertools.permutations(triple_failure):
        if order in triple_combinations:
            nDataLoss += 1