Example #1
0
    def render(self):
        """Collect basic hardware/system facts for the template.

        Gathers hostname, total physical memory, architecture, root-disk
        size, CPU description, boot time, and the baseboard (mainboard)
        identification via ``dmidecode``.

        NOTE(review): written against the legacy psutil API
        (``TOTAL_PHYMEM``, ``NUM_CPUS``, ``BOOT_TIME`` were removed in
        psutil 2.x) and relies on project helpers ``cmdexec`` /
        ``human_size`` — confirm the deployed psutil version.
        """
        hostname = socket.gethostname()
        mem = human_size(psutil.TOTAL_PHYMEM, False)
        arch = platform.machine()
        disk = human_size(psutil.disk_usage("/").total)
        # CPU model from /proc/cpuinfo ("model name : <descr>"); take the
        # first matching line and pair it with the logical CPU count.
        cpu = cmdexec('cat /proc/cpuinfo | grep "model name"')
        cpu = cpu[0].split("\n")
        cpu_descr = "%s x %s" % (cpu[0].split(":")[1], psutil.NUM_CPUS)

        boot = psutil.BOOT_TIME
        boot = datetime.datetime.fromtimestamp(int(boot)).strftime("%d/%m/%Y %H:%M:%S")

        try:
            manufacturer = cmdexec("dmidecode -t 2 | grep Manufacturer:")
            manufacturer = manufacturer[0].split("\n")
            manufacturer = manufacturer[0].split(":")[1]

            product_name = cmdexec('dmidecode -t 2 | grep "Product Name"')
            product_name = product_name[0].split("\n")
            product_name = product_name[0].split(":")[1]

            baseboard = manufacturer + " " + product_name
        # Bug fix: `except Exception, e` is Python-2-only syntax (a
        # SyntaxError on Python 3); `as` works on 2.6+ and 3.x alike.
        # dmidecode typically needs root, so failure here is expected.
        except Exception as e:
            logging.debug(e)
            baseboard = "Não disponível"
Example #2
0
 def render(self):
     """Render the installed-packages module.

     Queries yum for every installed package and passes a list of
     package dicts — sorted by package group — to the template.
     """
     # Build and sort in one pass: the original appended in a loop and
     # then copied through a redundant `tmplist` variable.
     installeds = sorted(
         (
             {
                 "group": pkg.group,
                 "name": pkg.name,
                 "summary": pkg.summary,
                 "size": human_size(pkg.size),
                 "packager": pkg.packager,
             }
             for pkg in yum.YumBase().doPackageLists('installed')
         ),
         key=lambda k: k['group'],
     )

     return self.render_string("modules/packages.html", installeds=installeds)
Example #3
0
    def render(self, kills=False):
        """Render the process-list module.

        Builds one dict per running process (name, user, CPU%, pid,
        RSS memory) and renders them sorted by CPU usage, highest first.

        :param kills: passed through to the template (enables kill UI).
        """
        ps_list = []
        for ps in psutil.process_iter():
            ps_list.append(
                {
                    "name": ps.name,
                    "username": ps.username,
                    "cpu": "%0.2f" % ps.get_cpu_percent(interval=0),
                    "pid": ps.pid,
                    "memory": human_size(ps.get_memory_info().rss, True),
                }
            )
        # Bug fix: the sort previously ran INSIDE the loop (accidental
        # O(n^2)) and compared whole dicts, which raises TypeError on
        # Python 3. Sort once, by CPU percentage, descending.
        ps_list.sort(key=lambda p: float(p["cpu"]), reverse=True)

        return self.render_string("modules/process.html", ps_list=ps_list, kills=kills)
def main():
    """Clean up model checkpoints of finished training setups.

    Keeps the best-scoring epochs (plus any explicitly pinned via the
    setup's ``keep_epochs`` multisetup info) and deletes the rest.
    Setups whose best score is at or beyond the cutoff (the score of the
    ``keep_n_best``-th best recog) are kept entirely. Nothing is removed
    unless ``--doit`` is passed.
    """
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument(
        "--doit",
        action="store_true",
        help="only with this flag, we will delete something")
    args = arg_parser.parse_args()

    relevant_train_setups = {}
    best_recog_scores = []  # list of (score, setup_name, ep)

    # Collect the best recog score/epochs of every finished setup.
    for setup_name, info in sorted(train_setups.items()):
        if not info["existing_setup"]:
            continue
        # It's not finished if there are still any suggestions to do recog on.
        if not train_setup_finished(info, allow_existing_recog=False):
            continue
        best_recog_score, best_recog_epochs = train_setup_get_best_recog(info)
        relevant_train_setups[setup_name] = info
        for ep in best_recog_epochs:
            best_recog_scores.append((best_recog_score, setup_name, ep))

    print("Out of the finished setups:")
    # Sort so that the BEST score comes first regardless of metric polarity.
    best_recog_scores.sort(reverse=not Settings.recog_score_lower_is_better)
    if best_recog_scores:
        print("Number of different available scores: %i" %
              len(best_recog_scores))
        keep_n_best = 10
        print("First %i scores:" % keep_n_best)
        for score, setup_name, ep in best_recog_scores[:keep_n_best]:
            score_str = "%.1f%% %s" % (score,
                                       Settings.recog_metric_name.upper())
            print("  %s, setup %s, epoch %i" % (score_str, setup_name, ep))
        score_str = "%.1f%% %s" % (best_recog_scores[-1][0],
                                   Settings.recog_metric_name.upper())
        print("Worst score: %s" % score_str)
        # Bug fix: was `<`, which made len == keep_n_best fall into the
        # else branch and index best_recog_scores[keep_n_best] -> IndexError.
        if len(best_recog_scores) <= keep_n_best:
            cutoff_score = best_recog_scores[-1][0]
        else:
            cutoff_score = best_recog_scores[keep_n_best][0]
        score_str = "%.1f%% %s" % (cutoff_score,
                                   Settings.recog_metric_name.upper())
        print(
            "Keep everything for setups with a score better or equal than %s."
            % score_str)
    else:
        print("No recog scores found?")
        cutoff_score = None

    total_fns_to_delete = []
    total_file_size = 0

    for setup_name, info in sorted(relevant_train_setups.items()):
        assert isinstance(setup_name, str) and isinstance(info, dict)
        best_recog_score, best_recog_epochs = train_setup_get_best_recog(info)
        below_cutoff = False
        if best_recog_epochs:
            if Settings.recog_score_lower_is_better:
                if best_recog_score <= cutoff_score:
                    below_cutoff = True
            else:
                if best_recog_score >= cutoff_score:
                    below_cutoff = True
        if below_cutoff:
            # This setup is among the best; keep everything.
            continue
        if best_recog_score is None:
            best_recog_score = float(
                "inf") if Settings.recog_score_lower_is_better else float(
                    "-inf")
        # TODO: by default, keep nothing.
        #   have clever logic when to keep. e.g. when this is used by other import (import_model_train_epoch1)...
        # Keep the best epochs.
        # If there are no recogs at all, it means we stopped it, usually because it was bad -> cleanup all.
        keep_epochs = list(best_recog_epochs)
        # Epochs explicitly pinned in the multisetup info (stored as a
        # Python-literal list, e.g. "[10, 20]").
        for epoch in ast.literal_eval(info["_multisetup_info"].get(
                "keep_epochs", "[]")):
            assert isinstance(epoch, int)
            keep_epochs.append(epoch)
        delete_epochs = find_models(info)
        for epoch in keep_epochs:
            if epoch in delete_epochs:
                del delete_epochs[epoch]
        if not delete_epochs:  # already cleaned up
            continue
        print("finished setup:", setup_name)
        if keep_epochs:
            print("  best recog: %.1f%% %s, epochs to keep %r" %
                  (best_recog_score, Settings.recog_metric_name.upper(),
                   keep_epochs))
        else:
            print("  (no recog, model was stopped, probably bad)")
        fns_to_delete = []
        for _, fns in sorted(delete_epochs.items(), key=models_sort_key):
            fns_to_delete.extend(fns)
        file_size = 0
        for fn in fns_to_delete:
            file_size += os.path.getsize(fn)
        print("  delete models:",
              sorted(delete_epochs.keys(), key=models_sort_key),
              human_size(file_size))
        total_fns_to_delete.extend(fns_to_delete)
        total_file_size += file_size

    print("Total files to delete: %i num, %s" %
          (len(total_fns_to_delete), human_size(total_file_size)))
    if args.doit:
        print("Deleting now!")
        for fn in total_fns_to_delete:
            os.remove(fn)
        print("Done.")
    else:
        print("Not deleting now. Use --doit.")