Example #1
    def humanize(self, **kwargs):
        """Gets the human-friendly representation of file size.

        :param kwargs: All keyword arguments will be passed to
                       :func:`humanize.filesize.naturalsize`.
        """
        return naturalsize(self, **kwargs)
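Since every keyword argument is forwarded to naturalsize, the formatting options of that function apply directly. A minimal sketch of the common ones (output strings are approximate and may vary slightly across humanize versions):

from humanize import naturalsize

print(naturalsize(4200000))                 # decimal units, e.g. "4.2 MB"
print(naturalsize(4200000, binary=True))    # IEC units, e.g. "4.0 MiB"
print(naturalsize(4200000, gnu=True))       # GNU ls style, e.g. "4.2M"
print(naturalsize(4200000, format="%.3f"))  # custom precision, e.g. "4.200 MB"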
Example #2
 def async_post(self, *_, **__):
     data = []
     # filter backups by criteria
     filter_func = False
     folder = self.get_folder()
     if 'filter' in self.request_params:
         filter_func = True
     for file_name in os.listdir(folder):
         file_path = os.path.join(folder, file_name)
         if os.path.isfile(file_path):
             info = os.stat(file_path)
             record = {
                 'name': file_name,
                 'date': datetime_to_utc(datetime.fromtimestamp(info.st_ctime)),
                 'size': info.st_size
             }
             if not filter_func or self.filter_func(record):
                 data.append(record)
     totalCount = len(data)
     if 'sort' in self.request_params:
         for sort_criteria in self.request_params['sort']:
             sort_property = sort_criteria['property']
             reverse = sort_criteria['direction'].lower() == 'asc'
             data.sort(key=lambda record: record[sort_property],
                       reverse=reverse)
     bottom = (self.request_params['page'] -
               1) * self.request_params['limit']
     top = bottom + self.request_params['limit']
     data = data[bottom:top]
     for record in data:
         record['size'] = naturalsize(record['size'])
     return {'totalCount': totalCount, 'data': data}
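When request_params['sort'] carries several criteria, the loop above sorts once per criterion; because Python's sort is stable, whichever criterion is sorted last becomes the most significant key. A standalone sketch of that multi-key idiom, applying criteria from least to most significant (the record values, criteria, and the 'desc'-to-reverse mapping are illustrative, not taken from the handler above):

records = [
    {'name': 'b.tar', 'size': 10},
    {'name': 'a.tar', 'size': 10},
    {'name': 'c.tar', 'size': 5},
]
criteria = [
    {'property': 'size', 'direction': 'desc'},  # intended primary key
    {'property': 'name', 'direction': 'asc'},   # intended secondary key
]
# Sort by the least significant key first and the most significant key last,
# so the primary ordering wins while ties keep the secondary ordering.
for crit in reversed(criteria):
    records.sort(key=lambda r: r[crit['property']],
                 reverse=crit['direction'].lower() == 'desc')
print(records)  # size descending; names ascending within equal sizes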
Example #3
 def print(self):
     for p in self.arr:
         if p.number_of_reports_with_swapping() * args.interval > args.hl_analyze * 60:
             print(Fore.RED + _(
                 "{} [{}] has {} reports with {} of swap variation average and {} of swap average. They took {}"
             ).format(p.datetime(), p.name(), int(p.length()),
                      filesize.naturalsize(int(p.average_diff())),
                      filesize.naturalsize(int(p.average_swap())),
                      p.duration()) + Fore.RESET)
         else:
             if not args.hl_only:
                 print(
                     _("{} [{}] has {} reports with {} of swap variation average and {} of swap average. They took {}"
                       ).format(p.datetime(), p.name(), int(p.length()),
                                filesize.naturalsize(int(p.average_diff())),
                                filesize.naturalsize(int(p.average_swap())),
                                p.duration()))
Example #4
 def __repr__(self):
     return '<History [' \
            'id={short_id}, ' \
            'created={created}, ' \
            'size={size}]>'.format(
         short_id=self.short_id,
         created=self.created.isoformat(),
         size=filesize.naturalsize(self.size)
     )
Example #5
def size():
    """Return how much memory we're taking.

    Doesn't work when running under Gunicorn, because Gunicorn forks worker
    processes and only the current process is measured.
    """
    process = psutil.Process(os.getpid())
    n_bytes = process.memory_info().rss  # in bytes
    return jsonify(
        n_bytes=n_bytes,
        n_bytes_human=naturalsize(n_bytes),
    )
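As the docstring warns, psutil.Process(os.getpid()) measures only the current process, so a pre-forking server such as Gunicorn hides the workers' memory. A hedged sketch of one way to widen the measurement by walking the process tree; the helper name is illustrative and it assumes it is pointed at (or run from) the parent process:

import os

import psutil
from humanize import naturalsize


def process_tree_rss(pid=None):
    # Sum resident set size over a process and all of its children.
    parent = psutil.Process(pid or os.getpid())
    procs = [parent] + parent.children(recursive=True)
    n_bytes = sum(p.memory_info().rss for p in procs)
    return n_bytes, naturalsize(n_bytes)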
Example #6
def my_view(request):
    up_since = index["up_since"]
    up_time = datetime.now() - up_since
    cache_stats = cache.stats()
    return {
        'entries': cache.cache_len(),
        'cache_disk_size': filesize.naturalsize(cache.cache_disk_size()),
        'up_date': time.naturaldate(up_since),
        'up_duration': time.naturaldelta(up_time),
        'cache_hits': str(cache_stats['hit']),
        'cache_misses': str(cache_stats['misses'])
    }
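Alongside filesize.naturalsize, this view relies on humanize's time helpers. A small sketch of what those calls produce (outputs are approximate and depend on the current date and the installed humanize version):

from datetime import datetime, timedelta

from humanize import time

up_since = datetime.now() - timedelta(hours=3, minutes=12)
up_time = datetime.now() - up_since
print(time.naturaldate(up_since))  # e.g. "today"
print(time.naturaldelta(up_time))  # e.g. "3 hours"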
Example #7
    def __repr__(self):
        repo = tag = '<none>'
        # just take the first tag if there is one
        if self.tags:
            repo, tag = self.tags[0].split(':')

        return '<Image [' \
               'repo={repo}, ' \
               'tag={tag}, ' \
               'id={short_id}, ' \
               'vsize={vsize}]>'.format(
            repo=repo,
            tag=tag,
            short_id=self.short_id,
            vsize=filesize.naturalsize(self.virtual_size)
        )
Example #8
def plot(results, rate=False):
    xs = np.array(sizes)
    ys_aesaes, std_aesaes, ys_twoaes, std_twoaes = map(np.array, zip(*results))

    plt.xscale('log')
    plt.xlabel('file size')
    plt.xlim(xs[0], xs[-1])
    plt.xticks(xs, [naturalsize(x, binary=True) for x in xs], rotation=90)

    plt.ylabel('re-encryption ' + ('rate ($MB/s$)' if rate else 'time ($s$)'))
    plt.yscale('linear' if rate else 'log')

    plt.ylim(0, max(max(ys_aesaes), max(ys_twoaes)) * 1.2)

    plt.errorbar(xs, ys_aesaes, yerr=std_aesaes, fmt='r^--', label='AES+AES')
    plt.errorbar(xs, ys_twoaes, yerr=std_twoaes, fmt='b-',  label='TWOAES')

    plt.fill_between(xs, ys_aesaes, ys_twoaes, where=ys_aesaes>ys_twoaes,
                     facecolor='g', alpha=.2, interpolate=True)
    plt.fill_between(xs, ys_aesaes, ys_twoaes, where=ys_aesaes<ys_twoaes,
                     facecolor='r', alpha=.2, interpolate=True)

    plt.legend(loc='lower right')
    plt.tight_layout()
Example #9
def test(size, use_aesni, runs, out, tempdir, rate=False):

    a1 = AES.new(k1, mode=AES.MODE_ECB, use_aesni=use_aesni)
    a2 = AES.new(k2, mode=AES.MODE_ECB, use_aesni=use_aesni)

    if use_aesni:
        ta = TWOAES.new(k1+k2, mode=TWOAES.MODE_ECB, use_aesni=True, window=10)
    else:
        ta = TWOAES.new(k1+k2, mode=TWOAES.MODE_ECB, use_aesni=False)

    def single_test(size, recrypt):
        # setup
        plain = bytegenerator(size)
        cipher = a1.encrypt(plain)

        # test
        start = default_timer()
        cipher2 = recrypt(cipher)
        time = default_timer() - start
        assert a2.decrypt(cipher2) == plain
        return (size / (2. ** 20)) / time if rate else time

    def recrypt_aesaes(cipher):
        plain = a1.decrypt(cipher)
        if tempdir:
            with NamedTemporaryFile(dir=tempdir) as temp:
                temp.write(plain)
                temp.flush()
                temp.seek(0)
                plain = temp.read()
        return a2.encrypt(plain)

    results = ( repeat(partial(single_test, size, recrypt_aesaes), runs, out)
              + repeat(partial(single_test, size, ta.encrypt), runs, out))
    print(pattern.format(naturalsize(size,binary=True), results[0], results[2]))
    return results
Example #10
    def bundle(self,
               firmware_hash: str,
               datasets: List[Dataset],
               *,
               file: Union[str, BinaryIO, IO[bytes]],
               shard_spec: ShardSpec = None,
               delta_to: Dict[str, str] = None,
               overwrite: bool = False) -> List[ObjectInfo]:
        """
        Builds a data bundle (*.tar.gz) for a firmware hash, including content from the
        specified datasets (FWAN plugin output locations)

        :param firmware_hash: The firmware hash to bundle
        :param datasets: The datasets to include in the bundle
        :param file: The output path or file-like object to which the *.tar.gz output should be written
        :param shard_spec: If provided, only the specified shard of file hashes will appear in the bundle
        :param delta_to: A dictionary of path->etag values, which if supplied will cause the bundle to be built as a
        delta to that set, meaning only new objects or objects with modified etags will appear in the bundle.
        :param overwrite: Unless this is set, the output path will not be overwritten, to prevent accidental data loss
        :return: A list of the objects included in the bundle
        """
        if not firmware_hash:
            raise ValueError('firmware_hash must be specified')

        if not datasets:
            raise ValueError('datasets must be specified, and non-empty')

        if delta_to is None:
            delta_to = {}

        logger.info(
            f"Building {'delta' if delta_to else ''} bundle for {firmware_hash}"
        )

        contents: List[ObjectInfo] = []

        with mgzip.open(filename=file,
                        mode='w' if overwrite else 'x') as gz, tarfile.open(
                            fileobj=gz,
                            mode='w',
                            bufsize=tarfile.RECORDSIZE * 4) as tar:
            # Fetch and process the file tree, using that as the basis for all other paths that need to be bundled.

            file_tree_path = f'file_tree/{firmware_hash}.jsonl'

            with CodeTimer('Read firmware file tree from object storage'):
                try:
                    file_tree_result = fetch_object(
                        bucket=self.firmware_metadata_bucket,
                        key=file_tree_path)
                except ClientError as e:
                    raise Exception(
                        'Firmware file tree could not be read') from e

            with CodeTimer('Extract file hashes from file tree'):
                try:
                    file_hashes = extract_file_hashes(file_tree_result.payload)
                except json.JSONDecodeError as e:
                    raise Exception(
                        'Firmware file tree could not be parsed') from e

            if not file_hashes:
                raise Exception('Firmware file tree is empty')

            file_tree_in_bundle = False

            if is_dataset_in_shard(
                    dataset=FILE_TREE_DATASET, shard_spec=shard_spec
            ) and file_tree_result.info.etag != delta_to.get(file_tree_path):
                with CodeTimer('Add file tree to bundle'):
                    add_to_tarfile(tar, file_tree_result)
                    contents.append(file_tree_result.info)
                    file_tree_in_bundle = True
            else:
                logger.info(
                    'File tree is unchanged or not part of this shard and will not be included in the bundle'
                )

            file_tree_size = file_tree_result.info.size

            logger.info(
                'File tree num distinct file hashes = {hash_count}; size = {size}'
                .format(hash_count=len(file_hashes),
                        size=naturalsize(file_tree_size)))

            if shard_spec:
                with CodeTimer(
                        f'Limiting file hashes to shard {shard_spec.index}'):

                    def is_in_shard(file_hash: str) -> bool:
                        return int(file_hash,
                                   16) % shard_spec.count == shard_spec.index

                    file_hashes = [
                        file_hash for file_hash in file_hashes
                        if is_in_shard(file_hash)
                    ]
                logger.info(f'Sharded num file hashes = {len(file_hashes)}')

            file_tree_result = None

            # Build paths to be bundled

            with CodeTimer('Build paths for bundle'):
                bundle_datasets = [
                    ds for ds in datasets
                    if is_dataset_in_shard(dataset=ds, shard_spec=shard_spec)
                ]
                paths = self.build_paths(firmware_hash=firmware_hash,
                                         datasets=bundle_datasets,
                                         file_hashes=file_hashes,
                                         delta_to=delta_to) or []
                path_count = len(paths)

            # Validate the paths (check for duplicates)

            with CodeTimer('Validate paths'):
                duplicates = [
                    path for path, count in collections.Counter(paths).items()
                    if count > 1
                ]
                if duplicates:
                    raise Exception(
                        f'Bundle paths contained {len(duplicates)} duplicates: {duplicates}'
                    )

            total_path_count = path_count + (1 if file_tree_in_bundle else 0)

            logger.info(
                f'Bundle will include at most {total_path_count} paths from object storage'
            )

            fetch_count = 0
            miss_count = 0
            skip_count = 0
            fetch_bytes = 0

            with CodeTimer('Bundle objects'):
                with concurrent.futures.ThreadPoolExecutor(
                        max_workers=self.max_workers) as executor:
                    fetch_start = datetime.datetime.now()

                    with CodeTimer(
                            'Submit object storage path retrieval tasks'):
                        # Randomize path ordering to improve the performance of fetches from object storage,
                        # so that a diversity of object key prefixes is being fetched at any one time.

                        random.shuffle(paths)

                        futures = [
                            executor.submit(
                                fetch_object,
                                bucket=self.firmware_metadata_bucket,
                                key=path,
                                compare_etag=delta_to.get(path),
                            ) for path in paths
                        ]

                    for future in concurrent.futures.as_completed(futures):
                        try:
                            result = future.result()

                            if result:
                                add_to_tarfile(tar, result)
                                result.payload = None
                                fetch_count += 1
                                fetch_bytes += result.info.size
                                contents.append(result.info)
                            else:
                                skip_count += 1

                            if fetch_count % 1000 == 0:
                                logger.info(
                                    'Bundled {} objects ({}) in {}'.format(
                                        fetch_count, naturalsize(fetch_bytes),
                                        naturaldelta(datetime.datetime.now() -
                                                     fetch_start)))
                        except ClientError as e:
                            error_code = e.response.get('Error',
                                                        {}).get('Code')
                            if 'NoSuchKey' in error_code:
                                miss_count += 1
                            elif '304' == error_code:
                                # The ETag on this object was not modified, so it was not returned
                                skip_count += 1
                            else:
                                raise e

        if skip_count:
            logger.info(f'Skipped {skip_count} unmodified objects')

        if miss_count:
            logger.info(
                f"Made {miss_count} attempts to access object storage paths that didn't exist"
            )

        logger.info('Bundled {} objects ({})'.format(
            fetch_count + (1 if file_tree_in_bundle else 0),
            naturalsize(fetch_bytes +
                        (file_tree_size if file_tree_in_bundle else 0))))

        # Validate fetched paths (check that each path was uniquely processed)

        with CodeTimer('Validate fetched paths'):
            fetched_path_counter = collections.Counter(
                [obj.path for obj in contents])
            duplicates = [
                path for path, count in fetched_path_counter.items()
                if count > 1
            ]
            if duplicates:
                raise Exception(
                    f'Bundle paths contained {len(duplicates)} duplicates: {duplicates}'
                )

        with CodeTimer('Finalize output'):
            contents = sorted(contents, key=lambda obj: obj.path)

        return contents
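The is_in_shard check above assigns each file hash to a shard by reducing its hex digest modulo the shard count. A standalone sketch of that assignment, using a hypothetical ShardSpec stand-in rather than the real class:

from dataclasses import dataclass
from typing import List


@dataclass
class ShardSpec:  # illustrative stand-in, not the original class
    index: int
    count: int


def shard_file_hashes(file_hashes: List[str], spec: ShardSpec) -> List[str]:
    # Keep only the hashes whose hex value lands in this shard.
    return [h for h in file_hashes if int(h, 16) % spec.count == spec.index]


hashes = ['00ff', '0a12', 'beef', 'c0de']
print(shard_file_hashes(hashes, ShardSpec(index=0, count=2)))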
Example #11
 def humanize(cls, value):
     return naturalsize(value, binary=True)
Example #12
def overview(request, format=None):
	#External Ceph REST API calls
	status_response, cluster_status = get_data.status(body='json')
	health_response, cluster_health = get_data.health(body='json')
	pg_response, pg_stat = get_data.pg_stat(body='json')
	osd_response, osd_dump = get_data.osd_dump(body='json')
	osd_perf_response, osd_perf = get_data.osd_perf(body='json')
	
	#General Health status lookup
	health_lookup = {'HEALTH_OK':"OK", 'HEALTH_WARN':"WARN"}
	# pg states lookups
	pg_warn_status = re.compile("(creating|degraded|replay|splitting|scrubbing|repair|recovering|backfill|wait-backfill|remapped)")
	pg_crit_status = re.compile("(down|inconsistent|incomplete|stale|peering)")
	pg_ok_status = re.compile(r"(active\+clean)")
	pg_health_lookup = lambda x: "ok" if pg_ok_status.search(x) else "warn" if pg_warn_status.search(x) else "crit" if pg_crit_status.search(x) else "na"
	#osd states lookups
	osd_up = re.compile("(?=.*exists)(?=.*up)")
	osd_down = re.compile("(?=.*exists)(?=.*autoout)")
	osd_health_lookup = lambda x: "ok" if osd_up.search(str(x)) else "warn" if osd_down.search(str(x)) else "crit"

	#pg activities lookups
	pg_activities_lookup = {'read_bytes_sec':"Read", 'write_bytes_sec':"write", 'op_per_sec': "Ops", 
		'recovering_objects_per_sec': "Recovering Objects", 'recovering_bytes_per_sec': 'Recovery Speed',
		'recovering_keys_per_sec': 'Recovering Keys'}
	#for testing since I have no activity
	#pg_activities_lookup = {'read_bytes_sec':"Read", 'bytes_total' : "Bytes", 'degraded_objects' : "Degraded"}


	#mon status data
	mons_status = filter(lambda x: x['health'] in health_lookup, 
		cluster_status['output']['health']['health']['health_services'][0]['mons'])
	mon_count = len(cluster_status['output']['monmap']['mons'])

	#pg status data
	pg_status = cluster_status['output']['pgmap']['pgs_by_state']

	#osd status data
	osd_status = osd_dump['output']['osds']

	#pg activities data
	pg_activities = cluster_status['output']['pgmap']


	#usage data
	usage_bytes_used = cluster_status['output']['pgmap']['bytes_used']
	usage_bytes_total = cluster_status['output']['pgmap']['bytes_total']
	usage_data_total = filesize.naturalsize(usage_bytes_total, binary=True).split()[0]
	usage_data_scale = filesize.naturalsize(usage_bytes_total).split()[1]
	response = {'health':{
					'clusterHealth': {
						'status': cluster_health['output']['overall_status'],
						'statusDescription': (lambda x, y: "CRIT" if x not in y else y[x]
							)(cluster_health['output']['overall_status'], health_lookup)
					}
				},
				'status':{
					'mons': dict(({'ok':0,'warn':0,'crit':mon_count-len(mons_status)}).items() + 
								Counter(map(lambda x: health_lookup[x['health']].lower(), mons_status)).items())
					,
					'pgs': dict(({'ok':0,'warn':0,'crit':0}).items() + 
							reduce(lambda y,z: y.update(z),
								map(lambda x: {pg_health_lookup(x['state_name']).lower(): x['count']}, pg_status)).items())
					,
					'osds': dict(({'ok':0,'warn':0,'crit':mon_count-len(mons_status)}).items() +
								Counter(map(lambda x: osd_health_lookup(x['state']).lower(), osd_status)).items())
				},
				'usage':{
					'clusterBytesUsed':usage_bytes_used,
					'clusterBytesAvail':usage_bytes_total-usage_bytes_used,
					'clusterBytesTotal':usage_bytes_total,
					'clusterDataUsed': round(float(usage_bytes_used)/pow(1024, 
						filesize.suffixes['decimal'].index(usage_data_scale)+1), 1),
					'clusterDataAvail':round(float(usage_bytes_total-usage_bytes_used)/pow(1024, 
						filesize.suffixes['decimal'].index(usage_data_scale)+1), 1),
					'clusterDataTotal': usage_data_total,
					'clusterDataScale': usage_data_scale

				},
				'activities':reduce(lambda y, z: dict(y.items()+z.items()), 
					map(lambda x: {x: pg_activities.get(x)} if x in pg_activities else {},pg_activities_lookup)),
				'pg_states': reduce(lambda y, z: dict(y.items()+z.items()),
					map(lambda x: {x['state_name']:x['count']}, pg_status)),
				'osd_states': Counter(map(lambda x: '+'.join(x['state']), osd_status))
			}

	return Response(response)
Example #13
def home(request):
    """
    Main dashboard, Overall cluster health and status
    """
    ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)

    cresp, cluster_health = ceph.health(body="json")
    sresp, cluster_status = ceph.status(body="json")

    # Monitors
    all_mons = cluster_status["output"]["monmap"]["mons"]
    up_mons = cluster_status["output"]["health"]["timechecks"]["mons"]
    total_mon_count = len(all_mons)
    mons_ok = 0
    mons_warn = 0
    mons_crit = 0

    for mon in up_mons:
        if mon["health"] == "HEALTH_OK":
            mons_ok += 1
        else:
            mons_warn += 1

    mons_crit = total_mon_count - (mons_ok + mons_warn)

    # Activity
    pgmap = cluster_status["output"]["pgmap"]
    activities = {}
    if "read_bytes_sec" in pgmap:
        activities["Read"] = filesize.naturalsize(pgmap.get("read_bytes_sec"))
    if "write_bytes_sec" in pgmap:
        activities["Write"] = filesize.naturalsize(pgmap.get("write_bytes_sec"))
    if "op_per_sec" in pgmap:
        activities["Ops"] = pgmap.get("op_per_sec")
    if "recovering_objects_per_sec" in pgmap:
        activities["Recovering Objects"] = pgmap.get("recovering_objects_per_sec")
    if "recovering_bytes_per_sec" in pgmap:
        activities["Recovery Speed"] = filesize.naturalsize(pgmap.get("recovering_bytes_per_sec"))
    if "recovering_keys_per_sec" in pgmap:
        activities["Recovering Keys"] = pgmap.get("recovering_keys_per_sec")

    # Get a rough estimate of cluster free space. Is this accurate?
    presp, pg_stat = ceph.pg_stat(body="json")
    bytes_total = cluster_status["output"]["pgmap"]["bytes_total"]
    bytes_used = cluster_status["output"]["pgmap"]["bytes_used"]

    data_avail, data_scale = filesize.naturalsize(bytes_total).split()
    scale = filesize.suffixes["decimal"].index(data_scale) + 1
    data_used = round(float(bytes_used) / pow(1024, scale), 1)

    # pgs
    pg_statuses = cluster_status["output"]["pgmap"]

    pg_ok = 0
    pg_warn = 0
    pg_crit = 0

    # pg states
    pg_warn_status = re.compile(
        "(creating|degraded|replay|splitting|scrubbing|repair|recovering|backfill|wait-backfill|remapped)"
    )
    pg_crit_status = re.compile("(down|inconsistent|incomplete|stale|peering)")

    for state in pg_statuses["pgs_by_state"]:
        if state["state_name"] == "active+clean":
            pg_ok = pg_ok + state["count"]

        elif pg_warn_status.search(state["state_name"]):
            pg_warn = pg_warn + state["count"]

        elif pg_crit_status.search(state["state_name"]):
            pg_crit = pg_crit + state["count"]

    # pg statuses
    pg_states = dict()

    for state in pg_statuses["pgs_by_state"]:
        pg_states[state["state_name"]] = state["count"]

    # osds
    dresp, osd_dump = ceph.osd_dump(body="json")
    osd_state = osd_dump["output"]["osds"]

    osds_ok = 0
    osds_warn = 0
    osds_crit = 0

    # Possible states are: exists, up, autoout, new, ???
    osd_up = re.compile("(?=.*exists)(?=.*up)")
    osd_down = re.compile("(?=.*exists)(?=.*autoout)")

    for osd_status in osd_state:
        if osd_up.search(str(osd_status["state"])):
            osds_ok += 1
        elif osd_down.search(str(osd_status["state"])):
            osds_warn += 1
        else:
            osds_crit += 1

    return render_to_response("dashboard.html", locals())
Example #14
def overview(request, format=None):
    #External Ceph REST API calls
    status_response, cluster_status = get_data.status(body='json')
    health_response, cluster_health = get_data.health(body='json')
    pg_response, pg_stat = get_data.pg_stat(body='json')
    osd_response, osd_dump = get_data.osd_dump(body='json')
    osd_perf_response, osd_perf = get_data.osd_perf(body='json')

    #General Health status lookup
    health_lookup = {'HEALTH_OK': "OK", 'HEALTH_WARN': "WARN"}
    # pg states lookups
    pg_warn_status = re.compile(
        "(creating|degraded|replay|splitting|scrubbing|repair|recovering|backfill|wait-backfill|remapped)"
    )
    pg_crit_status = re.compile("(down|inconsistent|incomplete|stale|peering)")
    pg_ok_status = re.compile(r"(active\+clean)")
    pg_health_lookup = lambda x: ("ok" if pg_ok_status.search(x) else
                                  "warn" if pg_warn_status.search(x) else
                                  "crit" if pg_crit_status.search(x) else "na")
    #osd states lookups
    osd_up = re.compile("(?=.*exists)(?=.*up)")
    osd_down = re.compile("(?=.*exists)(?=.*autoout)")
    osd_health_lookup = lambda x: ("ok" if osd_up.search(str(x)) else
                                   "warn" if osd_down.search(str(x)) else "crit")

    #pg activities lookups
    pg_activities_lookup = {
        'read_bytes_sec': "Read",
        'write_bytes_sec': "write",
        'op_per_sec': "Ops",
        'recovering_objects_per_sec': "Recovering Objects",
        'recovering_bytes_per_sec': 'Recovery Speed',
        'recovering_keys_per_sec': 'Recovering Keys'
    }
    #for testing since I have no activity
    #pg_activities_lookup = {'read_bytes_sec':"Read", 'bytes_total' : "Bytes", 'degraded_objects' : "Degraded"}

    #mon status data
    mons_status = filter(
        lambda x: x['health'] in health_lookup, cluster_status['output']
        ['health']['health']['health_services'][0]['mons'])
    mon_count = len(cluster_status['output']['monmap']['mons'])

    #pg status data
    pg_status = cluster_status['output']['pgmap']['pgs_by_state']

    #osd status data
    osd_status = osd_dump['output']['osds']

    #pg activities data
    pg_activities = cluster_status['output']['pgmap']

    #usage data
    usage_bytes_used = cluster_status['output']['pgmap']['bytes_used']
    usage_bytes_total = cluster_status['output']['pgmap']['bytes_total']
    usage_data_total = filesize.naturalsize(usage_bytes_total,
                                            binary=True).split()[0]
    usage_data_scale = filesize.naturalsize(usage_bytes_total).split()[1]
    response = {
        'health': {
            'clusterHealth': {
                'status':
                cluster_health['output']['overall_status'],
                'statusDescription':
                (lambda x, y: "CRIT" if x not in y else y[x])(
                    cluster_health['output']['overall_status'], health_lookup)
            }
        },
        'status': {
            'mons':
            dict(({
                'ok': 0,
                'warn': 0,
                'crit': mon_count - len(mons_status)
            }).items() + Counter(
                map(lambda x: health_lookup[x['health']].lower(),
                    mons_status)).items()),
            'pgs':
            dict(({
                'ok': 0,
                'warn': 0,
                'crit': 0
            }).items() + reduce(
                lambda y, z: y.update(z),
                map(
                    lambda x:
                    {pg_health_lookup(x['state_name']).lower(): x['count']},
                    pg_status)).items()),
            'osds':
            dict(({
                'ok': 0,
                'warn': 0,
                'crit': mon_count - len(mons_status)
            }).items() + Counter(
                map(lambda x: osd_health_lookup(x['state']).lower(),
                    osd_status)).items())
        },
        'usage': {
            'clusterBytesUsed':
            usage_bytes_used,
            'clusterBytesAvail':
            usage_bytes_total - usage_bytes_used,
            'clusterBytesTotal':
            usage_bytes_total,
            'clusterDataUsed':
            round(
                float(usage_bytes_used) /
                pow(1024,
                    filesize.suffixes['decimal'].index(usage_data_scale) + 1),
                1),
            'clusterDataAvail':
            round(
                float(usage_bytes_total - usage_bytes_used) /
                pow(1024,
                    filesize.suffixes['decimal'].index(usage_data_scale) + 1),
                1),
            'clusterDataTotal':
            usage_data_total,
            'clusterDataScale':
            usage_data_scale
        },
        'activities':
        reduce(
            lambda y, z: dict(y.items() + z.items()),
            map(
                lambda x: {x: pg_activities.get(x)}
                if x in pg_activities else {}, pg_activities_lookup)),
        'pg_states':
        reduce(lambda y, z: dict(y.items() + z.items()),
               map(lambda x: {x['state_name']: x['count']}, pg_status)),
        'osd_states':
        Counter(map(lambda x: '+'.join(x['state']), osd_status))
    }

    return Response(response)
Example #15
def home(request):
    """
    Main dashboard, Overall cluster health and status
    """
    ceph = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)

    cresp, cluster_health = ceph.health(body='json')
    sresp, cluster_status = ceph.status(body='json')

    # Monitors
    all_mons = cluster_status['output']['monmap']['mons']
    up_mons = cluster_status['output']['health']['timechecks']['mons']
    total_mon_count = len(all_mons)
    mons_ok = 0
    mons_warn = 0
    mons_crit = 0

    for mon in up_mons:
        if mon['health'] == "HEALTH_OK":
            mons_ok += 1
        else:
            mons_warn += 1

    mons_crit = total_mon_count - (mons_ok + mons_warn)

    # Activity
    pgmap = cluster_status['output']['pgmap']
    activities = {}
    if 'read_bytes_sec' in pgmap:
        activities['Read'] = filesize.naturalsize(pgmap.get('read_bytes_sec'))
    if 'write_bytes_sec' in pgmap:
        activities['Write'] = filesize.naturalsize(
            pgmap.get('write_bytes_sec'))
    if 'op_per_sec' in pgmap:
        activities['Ops'] = pgmap.get('op_per_sec')
    if 'recovering_objects_per_sec' in pgmap:
        activities['Recovering Objects'] = pgmap.get(
            'recovering_objects_per_sec')
    if 'recovering_bytes_per_sec' in pgmap:
        activities['Recovery Speed'] = filesize.naturalsize(
            pgmap.get('recovering_bytes_per_sec'))
    if 'recovering_keys_per_sec' in pgmap:
        activities['Recovering Keys'] = pgmap.get('recovering_keys_per_sec')

    # Get a rough estimate of cluster free space. Is this accurate?
    presp, pg_stat = ceph.pg_stat(body='json')
    bytes_total = cluster_status['output']['pgmap']['bytes_total']
    bytes_used = cluster_status['output']['pgmap']['bytes_used']

    data_avail, data_scale = filesize.naturalsize(bytes_total).split()
    scale = filesize.suffixes['decimal'].index(data_scale) + 1
    data_used = round(float(bytes_used) / pow(1024, scale), 1)

    # pgs
    pg_statuses = cluster_status['output']['pgmap']

    pg_ok = 0
    pg_warn = 0
    pg_crit = 0

    # pg states
    pg_warn_status = re.compile(
        "(creating|degraded|replay|splitting|scrubbing|repair|recovering|backfill|wait-backfill|remapped)"
    )
    pg_crit_status = re.compile("(down|inconsistent|incomplete|stale|peering)")

    for state in pg_statuses['pgs_by_state']:
        if state['state_name'] == "active+clean":
            pg_ok = pg_ok + state['count']

        elif pg_warn_status.search(state['state_name']):
            pg_warn = pg_warn + state['count']

        elif pg_crit_status.search(state['state_name']):
            pg_crit = pg_crit + state['count']

    # pg statuses
    pg_states = dict()

    for state in pg_statuses['pgs_by_state']:
        pg_states[state['state_name']] = state['count']

    # osds
    dresp, osd_dump = ceph.osd_dump(body='json')
    osd_state = osd_dump['output']['osds']

    osds_ok = 0
    osds_warn = 0
    osds_crit = 0

    # Possible states are: exists, up, autoout, new, ???
    osd_up = re.compile("(?=.*exists)(?=.*up)")
    osd_down = re.compile("(?=.*exists)(?=.*autoout)")

    for osd_status in osd_state:
        if osd_up.search(str(osd_status['state'])):
            osds_ok += 1
        elif osd_down.search(str(osd_status['state'])):
            osds_warn += 1
        else:
            osds_crit += 1

    return render_to_response('dashboard.html', locals())
Example #16
def home(request):
    """
    Main dashboard, Overall cluster health and status
    """
    get_data = wrapper.CephWrapper(endpoint=settings.CEPH_BASE_URL)

    cresp, cluster_health = get_data.health(body='json')
    sresp, cluster_status = get_data.status(body='json')

    # Monitors
    all_mons = cluster_status['output']['monmap']['mons']
    up_mons = cluster_status['output']['health']['timechecks']['mons']
    total_mon_count = len(all_mons)
    mons_ok = 0
    mons_warn = 0
    mons_crit = 0

    for mon in up_mons:
        if mon['health'] == "HEALTH_OK":
            mons_ok += 1
        else:
            mons_warn += 1

    mons_crit = total_mon_count - (mons_ok + mons_warn)

    # Activity
    pgmap = cluster_status['output']['pgmap']
    activities = {}
    if 'read_bytes_sec' in pgmap:
        activities['Read'] = filesize.naturalsize(pgmap.get('read_bytes_sec'))
    if 'write_bytes_sec' in pgmap:
        activities['Write'] = filesize.naturalsize(pgmap.get('write_bytes_sec'))
    if 'op_per_sec' in pgmap:
        activities['Ops'] = pgmap.get('op_per_sec')
    if 'recovering_objects_per_sec' in pgmap:
        activities['Recovering Objects'] = pgmap.get('recovering_objects_per_sec')
    if 'recovering_bytes_per_sec' in pgmap:
        activities['Recovery Speed'] = filesize.naturalsize(pgmap.get('recovering_bytes_per_sec'))
    if 'recovering_keys_per_sec' in pgmap:
        activities['Recovering Keys'] = pgmap.get('recovering_keys_per_sec')

    # Get a rough estimate of cluster free space. Is this accurate?
    presp, pg_stat = get_data.pg_stat(body='json')
    bytes_total = cluster_status['output']['pgmap']['bytes_total']
    bytes_used = cluster_status['output']['pgmap']['bytes_used']

    data_avail, data_scale = filesize.naturalsize(bytes_total).split()
    scale = filesize.suffixes['decimal'].index(data_scale)+1
    data_used = round(float(bytes_used)/pow(1024, scale), 1)

    # pgs
    pg_statuses = cluster_status['output']['pgmap']

    pg_ok = 0
    pg_warn = 0
    pg_crit = 0

    # pg states
    pg_warn_status = re.compile("(creating|degraded|replay|splitting|scrubbing|repair|recovering|backfill|wait-backfill|remapped)")
    pg_crit_status = re.compile("(down|inconsistent|incomplete|stale|peering)")

    for state in pg_statuses['pgs_by_state']:
        if state['state_name'] == "active+clean":
            pg_ok = pg_ok + state['count']

        elif pg_warn_status.search(state['state_name']):
            pg_warn = pg_warn + state['count']

        elif pg_crit_status.search(state['state_name']):
            pg_crit = pg_crit + state['count']

    # pg statuses
    pg_states = dict()

    for state in pg_statuses['pgs_by_state']:
        pg_states[state['state_name']] = state['count']

    # osds
    dresp, osd_dump = get_data.osd_dump(body='json')
    osd_state = osd_dump['output']['osds']

    osds_ok = 0
    osds_warn = 0
    osds_crit = 0

    # Possible states are: exists, up, autoout, new, ???
    osd_up = re.compile("(?=.*exists)(?=.*up)")
    osd_down = re.compile("(?=.*exists)(?=.*autoout)")

    for osd_status in osd_state:
        if osd_up.search(str(osd_status['state'])):
            osds_ok += 1
        elif osd_down.search(str(osd_status['state'])):
            osds_warn += 1
        else:
            osds_crit += 1

    return render_to_response('dashboard.html', locals())
Example #17
    def run_benchmark(self, app_label):
        logger.info('start running benchmark for {}'.format(app_label))
        # run from 1 to 500 sample block classes
        for num_models in xrange(0, self.num_models + 1, self.step_size):
            # starting at zero makes no sense at all but we want to have
            # the steps counted in an intuitive manner, so start with 1 here
            if num_models == 0:
                num_models = 1

            logger.info('generating models')
            self.runner.generate_models(num_models)
            logger.info('creating tables')
            self.runner.syncdb()

            bm_result = self.runner.create_result(num_models)
            bm_result.start = time()

            start = time()
            logger.info('creating models')

            container = self.runner.query_wrapper.create_query(
                app_label, num_models)

            bm_result.create_time_complete = time() - start

            bm_result.create_time_sql = sum(
                [float(q.get('time', 0)) for q in connection.queries]
            )

            query_times = []
            processing_times = []
            memory_rss = []
            memory_vrt = []
            for it in xrange(0, self.num_queries):
                # clear previously logged queries
                connection.queries = []

                # reset the value for monitored memory usage
                self.runner.monitor.reset_values()

                start = time()
                self.runner.query_wrapper.select_query(container.id)

                processing_times.append((time() - start))

                rss, vrt = self.runner.monitor.get_memory_values()
                memory_rss.append(rss)
                memory_vrt.append(vrt)

                query_time_sql = sum(
                    [float(q.get('time', 0)) for q in connection.queries]
                )
                query_times.append(query_time_sql)

            bm_result.end = time()

            num_queries = len(query_times)
            bm_result.query_time_sql = sum(query_times) / num_queries
            bm_result.query_time_complete = sum(processing_times) / num_queries

            bm_result.db_rss = sum(memory_rss) / len(memory_rss)
            bm_result.db_vrt = sum(memory_vrt) / len(memory_vrt)

            self.storage.save_result(bm_result)

            print ("{num_models}, {test_duration:.4f} s, "
                   "{create_time_sql:.4f} s, {create_time_complete:.4f} s, "
                   "{query_time_sql:.4f} s, {query_time_complete:.4f} s, "
                   "{db_rss}, {db_vrt}").format(
                num_models=num_models,
                test_duration=bm_result.end - bm_result.start,
                create_time_sql=bm_result.create_time_sql,
                create_time_complete=bm_result.create_time_complete,
                query_time_sql=bm_result.query_time_sql,
                query_time_complete=bm_result.query_time_complete,
                db_rss=filesize.naturalsize(bm_result.db_rss),
                db_vrt=filesize.naturalsize(bm_result.db_vrt),
            )
            self.runner.drop_all_tables()
Example #18
#!/usr/bin/env python
import os
from datetime import datetime
from humanize import filesize

FOLDER = 'DATA'

FILE_NAME = 'alice.txt'

file_path = os.path.join(FOLDER, FILE_NAME)

print("file path:", file_path)

file_size = os.path.getsize(file_path)
print("file size:", filesize.naturalsize(file_size))

print(filesize.naturalsize(13029093033))

raw_timestamp = os.path.getmtime(file_path)
print("raw timestamp:", raw_timestamp)

timestamp = datetime.fromtimestamp(raw_timestamp)
print("timestamp:", timestamp)

print(os.path.dirname(file_path))
print(os.path.basename(file_path))
print(os.path.abspath(file_path))

for x in 'alice.txt', 'wombat.txt', 'mary.txt', 'koalas.txt':
    print(x, os.path.exists(os.path.join('DATA', x)))
print()