Example #1
def main():

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', action='store', help='', metavar='FILE')
    parser.add_argument('--scratch',
                        action='store',
                        help='',
                        metavar="DIR",
                        default="_tmp_")
    parser.add_argument('-j',
                        '--procs',
                        action='store',
                        help='',
                        type=int,
                        metavar='int',
                        default=0)
    args = parser.parse_args()

    if args.scratch[-1] != "/":
        args.scratch += "/"

    header, data = read_csv(args.data, read_header=True)
    data = clean_data(data)

    misc.save_obj(args.scratch + "molecule_data", data)
    misc.save_json(args.scratch + "molecule_data", data)

    return
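Most of these examples call misc.save_json / misc.save_obj (or ms.save_json) with a bare basename such as "_tmp_/molecule_data". The helper module itself is never shown, so the sketch below is only an assumption about what such helpers typically look like, including the guess that they append their own file extensions.

import json
import pickle


def save_json(name, data, indent=4):
    # Assumed behavior: callers pass a basename, so append ".json" here.
    with open(name + ".json", "w") as f:
        json.dump(data, f, indent=indent)


def save_obj(name, data):
    # Assumed pickle counterpart with a ".pkl" suffix.
    with open(name + ".pkl", "wb") as f:
        pickle.dump(data, f)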
Example #2
def save_all_proposals(where="/mnt/datasets/public/issam/VOCdevkit/"\
                "proposals/sharpmask/pascal_proposals/",
                path="pascal_val2007"):
    import glob

    loc = "/mnt/datasets/public/issam/VOCdevkit/proposals/"\
                         "sharpmask/{}/jsons".format(path)

    proposals_dict = {}

    jsonList = glob.glob(loc + "/*.json")

    for json_fname in jsonList:
        proposals = ms.load_json(json_fname)
        n = len(proposals)
        for i in range(n):
            print(str(i) + "/" + str(n) + " proposals")

            image_id = proposals[i]["image_id"]

            if image_id in proposals_dict:
                proposals_dict[image_id] += [proposals[i]]
            else:
                proposals_dict[image_id] = [proposals[i]]

    n = len(proposals_dict)
    for j, image_id in enumerate(proposals_dict):
        print(str(j) + "/" + str(n))
        ms.save_json(where + "{}.json".format(str(image_id)),
                     proposals_dict[image_id])
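The grouping loop in Example #2 is the standard group-by-key pattern; an equivalent sketch with collections.defaultdict is shorter:

from collections import defaultdict

proposals_dict = defaultdict(list)
for proposal in proposals:
    proposals_dict[proposal["image_id"]].append(proposal)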
Example #3
def test_run(main_dict, metric_name, save, reset, predict_proposal=None):
    if predict_proposal is None:
        predict_proposal = ""

    history = ms.load_history(main_dict)

    if history is None:
        best_epoch = 0
    else:
        best_epoch = history["best_model"]["epoch"]

    fname = main_dict["path_save"] + "/test_{}{}_{}.json".format(
        predict_proposal, metric_name, best_epoch)
    print("Testing: {} - {} - {} - {} - best epoch: {}".format(
        main_dict["dataset_name"], main_dict["config_name"],
        main_dict["loss_name"], metric_name, best_epoch))

    if not os.path.exists(fname) or reset == "reset":
        with torch.no_grad():
            score = ms.val_test(main_dict,
                                metric_name=metric_name,
                                n_workers=1)
        ms.save_json(fname, score)

    else:
        score = ms.load_json(fname)

    return score[metric_name]
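test_run follows a compute-or-load caching pattern: return the cached JSON unless the file is missing or a reset is requested. A generic sketch of the same pattern, assuming the ms.save_json / ms.load_json helpers used throughout these examples are in scope:

import os


def cached_json(fname, compute, reset=False):
    # Recompute only when the cache is missing or a reset is forced.
    if os.path.exists(fname) and not reset:
        return ms.load_json(fname)
    result = compute()
    ms.save_json(fname, result)
    return result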
Example #4
    def __init__(self, batch):
        # if dataset_name == "pascal":
        self.proposals_path = batch["proposals_path"][0]

        if "SharpProposals_name" in batch:
            batch_name = batch["SharpProposals_name"][0]
        else:
            batch_name = batch["name"][0]
        name_jpg = self.proposals_path + "{}.jpg.json".format(batch_name)
        name_png = self.proposals_path + "{}.json".format(batch_name)

        if os.path.exists(name_jpg):
            name = name_jpg
        else:
            name = name_png

        _, _, self.h, self.w = batch["images"].shape

        if "resized" in batch and batch["resized"].item() == 1:
            name_resized = self.proposals_path + "{}_{}_{}.json".format(
                batch["name"][0], self.h, self.w)

            if not os.path.exists(name_resized):
                proposals = ms.load_json(name)
                json_file = loop_and_resize(self.h, self.w, proposals)
                ms.save_json(name_resized, json_file)
        else:
            name_resized = name
        # name_resized = name
        proposals = ms.load_json(name_resized)
        self.proposals = sorted(proposals,
                                key=lambda x: x["score"],
                                reverse=True)
Example #5
def evaluateAnnList(annList, visualize=False):
    path_base = "/mnt/datasets/public/issam/VOCdevkit/annotations/"
    fname = "{}/instances_val2012.json".format(path_base)

    cocoGt = COCO(fname)
    ms.save_json("tmp.json", annList)
    cocoDt = cocoGt.loadRes("tmp.json")

    cocoEval = COCOeval(cocoGt, cocoDt, "segm")

    # cocoEval = COCOeval(cocoGt, cocoDt, annType)
    cocoEval.params.imgIds = list(
        set([v["image_id"] for v in cocoDt.anns.values()]))

    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    print("# images:", len(cocoEval.params.imgIds))

    if visualize:
        for image_id in cocoDt.getImgIds():
            visGT(cocoGt, cocoDt, image_id)

    return cocoEval, cocoDt
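A side note on the tmp.json round-trip above: pycocotools' COCO.loadRes also accepts the annotation list directly (it checks whether it was handed a filename or a list), so the temporary file can usually be skipped. A minimal sketch:

from pycocotools.cocoeval import COCOeval


def evaluate_annList_inmemory(cocoGt, annList):
    cocoDt = cocoGt.loadRes(annList)  # list of annotation dicts, no temp file
    cocoEval = COCOeval(cocoGt, cocoDt, "segm")
    cocoEval.params.imgIds = sorted({a["image_id"] for a in cocoDt.anns.values()})
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    return cocoEval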
Example #6
def text(bot, update):

    log.debug('Handling `text` command. Update: %s', update)

    from_id = str(update.effective_user.id)
    token = settings.USERS.get(from_id, None)

    if token is None:
        update.message.reply_text('Извините, вы не наш клиент :(')
        return

    if from_id not in lul_clients:
        lul_clients[from_id] = LogulifeClient(token=token)

    client = lul_clients[from_id]

    if update.message is None:  # message is None when the user edited an existing message
        try:
            client.records.update_by_ext_id(settings.SOURCE_NAME,
                                            update.edited_message.message_id,
                                            update.edited_message.text)
            update.edited_message.reply_text(messages.OK_UPDATE)
        except lul_exceptions.NotFoundException as exc:
            log.debug(exc)
            update.edited_message.reply_text(messages.NOT_FOUND)
        except Exception as exc:
            log.error(exc)
            save_msg = '{0}\n{1}'.format(str(exc), update.to_json())
            misc.save_json(update.update_id, save_msg)
            update.edited_message.reply_text(messages.OK_UPDATE)
            # Notify the admin
            bot.send_message(
                settings.ADMIN_ACCOUNT,
                'Запись с id={0} у пользователя {1} не была ' \
                'обновлена. Ошибка: {2}'.format(
                    update.update_id,
                    update.effective_user.username,
                    str(exc)))
    else:
        try:
            client.records.create(update.message.text, settings.SOURCE_NAME,
                                  update.message.message_id,
                                  update.message.date)
        except Exception as exc:
            log.error(exc)
            save_msg = '{0}\n{1}'.format(str(exc), update.to_json())
            misc.save_json(update.update_id, save_msg)
            # Notify the admin
            bot.send_message(
                settings.ADMIN_ACCOUNT,
                'Запись с id={0} у пользователя {1} не была ' \
                'сохранена. Ошибка: {2}'.format(
                    update.update_id,
                    update.effective_user.username,
                    str(exc)))

        update.message.reply_text(messages.OK)
Example #7
def main():

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--scratch',
                        action='store',
                        help='',
                        metavar="DIR",
                        default="_tmp_")
    parser.add_argument('--json', action='store', help='', metavar="FILE")
    parser.add_argument('-j',
                        '--procs',
                        action='store',
                        help='parallelize',
                        metavar="int",
                        default=0,
                        type=int)

    args = parser.parse_args()

    if args.scratch[-1] != "/":
        args.scratch += "/"

    data = misc.load_json(args.json)

    keys = data.keys()
    keys = list(keys)

    canonical_data = {}

    for key in keys:

        molobj, status = cheminfo.smiles_to_molobj(key)

        if molobj is None:
            print("error none mol:", key)
            continue

        smiles = cheminfo.molobj_to_smiles(molobj, remove_hs=True)

        if "." in smiles:
            print("error multi mol:", smiles)
            continue

        atoms = cheminfo.molobj_to_atoms(molobj)

        if not is_mol_allowed(atoms):
            print("error heavy mol:", smiles)
            continue

        canonical_data[smiles] = data[key]

    misc.save_json(args.scratch + "molecule_data", canonical_data)
    misc.save_obj(args.scratch + "molecule_data", canonical_data)

    return
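is_mol_allowed is not shown in Example #7; a plausible sketch is an element filter over the atom list. The allowed set below (H, C, N, O, F, S, Cl) and the assumption that cheminfo.molobj_to_atoms returns atomic numbers are guesses, not the original:

def is_mol_allowed(atoms, allowed=frozenset({1, 6, 7, 8, 9, 16, 17})):
    # atoms: assumed to be an iterable of atomic numbers
    return all(atom in allowed for atom in atoms)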
Example #8
def main():

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--scratch', action='store', help='', metavar="DIR", default="_tmp_")
    parser.add_argument('--sdf', action='store', help='', metavar="FILE", nargs="+")
    parser.add_argument('-j', '--procs', action='store', help='parallelize', metavar="int", default=0, type=int)

    args = parser.parse_args()

    if args.scratch[-1] != "/":
        args.scratch += "/"

    if args.procs == -1:
        args.procs = os.cpu_count()
        print("starting", args.procs, "procs")

    # fsdf = gzip.open(args.scratch + "structures.sdf.gz", 'w')
    # fprop = open(args.scratch + "properties.csv", 'w')
    mol_val_dict = {}

    for sdf in args.sdf:

        print("reading", sdf)

        molobjs, values = parse_ochem(sdf, debug=True, procs=args.procs)

        for molobj, value in zip(molobjs, values):

            smiles = cheminfo.molobj_to_smiles(molobj, remove_hs=True)

            if "smiles" not in mol_val_dict:
                mol_val_dict[smiles] = []
            else:
                print("duplicate", smiles)

            mol_val_dict[smiles].append(value)

            # sdfstr = cheminfo.molobj_to_sdfstr(molobj)
            # sdfstr += "$$$$\n"
            #
            # propstr = "{:} {:}\n".format(value, 0.0)
            # fprop.write(propstr)

    # fsdf.close()
    # fprop.close()

    keys = mol_val_dict.keys()
    print("TOTAL ITEMS", len(keys))

    misc.save_json(args.scratch + "molecule_data", mol_val_dict)
    misc.save_obj(args.scratch + "molecule_data", mol_val_dict)

    return
Example #9
def load_predAnnList(main_dict, predict_method, imageList=None, 
                     proposal_type="sharp", reset=None):
    predictList = ["BestObjectness", "UpperBound", "BestDice"]

    
    if predict_method not in predictList:
        raise ValueError("predict method should be in {}".format(predictList))
    dataset_name = main_dict["dataset_name"]
    base = "/mnt/projects/counting/Saves/main/"

    fname = base + "lcfcn_points/{}_{}_{}_annList.json".format(dataset_name, 
                                predict_method, proposal_type)




    if os.path.exists(fname) and reset != "reset":
        return ms.load_json(fname)

    else:
        if predict_method == "BestDice":
            model = ms.load_best_model(main_dict)

        _, val_set = load_trainval(main_dict)

        loader = data.DataLoader(val_set,
                                 batch_size=1,
                                 num_workers=0,
                                 drop_last=False)

        # pointDict = load_LCFCNPoints(main_dict)

        annList = []
        for i, batch in enumerate(loader):
            print(i, "/", len(loader), " - annList")

            pointList = batch["lcfcn_pointList"]
            if len(pointList) == 0:
                continue

            if predict_method == "BestDice":
                pred_dict = model.predict(batch, predict_method="BestDice",
                                            proposal_type=proposal_type)
            else:
                pred_dict = eval("pointList2{}".format(predict_method))(pointList, batch, proposal_type)
            
            annList += pred_dict["annList"]

        ms.save_json(fname, annList)
        return annList
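Example #9 dispatches on the method name with eval(). A dictionary dispatch is safer and easier to grep; a sketch, where pointList2BestObjectness / pointList2UpperBound are the functions the eval call implies exist in scope:

DISPATCH = {
    "BestObjectness": pointList2BestObjectness,
    "UpperBound": pointList2UpperBound,
}

pred_dict = DISPATCH[predict_method](pointList, batch, proposal_type)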
Example #10
def copy_models(exp_dict, path_dst):
    history = load_history(exp_dict)

    # src_model, src_opt = ms.load_model_src(exp_dict)
    # tgt_model, tgt_opt, disc, disc_opt  = ms.load_model_tgt(exp_dict)

    # create_dirs(path_dst + "/tmp")
    # torch.save(src_model.state_dict(), path_dst+"/model_src.pth")
    # torch.save(src_opt.state_dict(), path_dst+"/opt_src.pth")

    # torch.save(tgt_model.state_dict(), path_dst+"/model_tgt.pth")
    # torch.save(tgt_opt.state_dict(), path_dst+"/opt_tgt.pth")

    # torch.save(disc.state_dict(), path_dst+"/disc.pth")
    # torch.save(disc_opt.state_dict(), path_dst+"/disc_opt.pth")

    ms.save_json(path_dst + "/history.json", history)

    print("copied...")
Example #11
def clean_data(df, scratch):

    smiles = df.iloc[1]  # unused; rows are consumed via iterrows() below

    data = {}

    atom_types = []

    for index, row in df.iterrows():

        smi = row.smiles
        value = row.mpC + 273.15  # melting point: Celsius -> Kelvin

        molobj, status = cheminfo.smiles_to_molobj(smi)

        if molobj is None:
            print("error:", smi)
            continue

        smi = cheminfo.molobj_to_smiles(molobj, remove_hs=True)

        # Atoms
        atoms = cheminfo.molobj_to_atoms(molobj)
        atom_types += list(atoms)

        if smi not in data:
            data[smi] = []

        data[smi].append(value)

    atom_types, counts = np.unique(atom_types, return_counts=True)

    for atom, count in zip(atom_types, counts):
        print(atom, count)

    misc.save_obj(scratch + "molecule_data", data)
    misc.save_json(scratch + "molecule_data", data)

    return
Example #12
    def __init__(self,
                 root="",
                 split=None,
                 transform_function=None,
                 ratio=None,
                 year="2017"):
        super().__init__()
        fname = split

        if fname == "test":
            fname = "val"

        dataset_name = "COCO"

        if year == "2014":
            dataset_name = "COCO2014"

        self.n_classes = 81

        self.path = "/mnt/datasets/public/issam/{}/".format(dataset_name)
        self.proposals_path = "{}/ProposalsSharp/".format(self.path)
        self.split = split
        self.year = year
        self.transform_function = transform_function()
        fname_names = self.path + "/{}.json".format(self.split)
        fname_catids = self.path + "/{}_catids.json".format(self.split)
        fname_categories = self.path + "/categories.json"
        fname_ids = self.path + "/{}_ids.json".format(self.split)

        if os.path.exists(fname_names):

            self.image_names = ms.load_json(fname_names)
            self.catids = ms.load_json(fname_catids)
            self.categories = ms.load_json(fname_categories)
            self.ids = ms.load_json(fname_ids)
        else:
            # Save ids

            annFile = "{}/annotations/instances_{}{}.json".format(
                self.path, fname, year)
            self.coco = COCO(annFile)
            self.ids = list(self.coco.imgs.keys())

            self.image_names = []
            # Save Labels
            for index in range(len(self.ids)):
                print(index, "/", len(self.ids))
                img_id = self.ids[index]
                ann_ids = self.coco.getAnnIds(imgIds=img_id)
                annList = self.coco.loadAnns(ann_ids)
                name = self.coco.loadImgs(img_id)[0]['file_name']

                self.image_names += [name]
                ms.save_pkl(
                    self.path +
                    "/groundtruth/{}_{}.pkl".format(self.split, name), annList)

            ms.save_json(fname_names, self.image_names)

            # Category ids; build the label mapping before it is used below
            self.catids = self.coco.getCatIds()
            self.category2label = {c: i + 1 for i, c in enumerate(self.catids)}
            ms.save_json(fname_catids, self.catids)

            self.categories = []

            categories = self.coco.cats.values()

            for c in categories:
                c["id"] = self.category2label[c["id"]]
                self.categories += [c]
            ms.save_json(fname_categories, self.categories)

            ms.save_json(fname_ids, self.ids)

            if split == "val":
                # gt_annDict = ms.load_json(annFile)

                annDict = {}
                # fname_ann = '/mnt/datasets/public/issam/COCO2014//annotations/val_gt_annList.json'
                annDict["categories"] = self.categories
                annDict["images"] = self.coco.loadImgs(self.ids[:5000])

                annIDList = self.coco.getAnnIds(self.ids[:5000])
                annList = self.coco.loadAnns(annIDList)

                for p in annList:
                    # p["id"] = str(p["id"])
                    p["image_id"] = str(p["image_id"])
                    p["category_id"] = self.category2label[p["category_id"]]

                for p in annDict["images"]:
                    p["id"] = str(p["id"])
                annDict["annotations"] = annList

                ms.save_json(
                    '{}//annotations/val_gt_annList.json'.format(self.path),
                    annDict)

        self.category2label = {}
        self.label2category = {}

        for i, c in enumerate(self.catids):
            self.category2label[c] = i + 1
            self.label2category[i + 1] = c

        if split == "val":
            # gt_annList_path = '/mnt/datasets/public/issam/COCO2014//annotations/val_gt_annList.json'

            annList_path = self.path + "/annotations/{}_gt_annList.json".format(
                split)

            assert os.path.exists(annList_path)
            self.annList_path = annList_path

            # self.image_names.sort()
            self.image_names = self.image_names[:5000]
            self.ids = self.ids[:5000]

        elif split == "test":
            # self.image_names.sort()
            self.image_names = self.ids[-5000:]
Example #13
        figName = "%s/pdf_plots/TGT_%s.pdf" % (path, exp_name)
        ms.create_dirs(figName)
        pp_main.fig.savefig(figName, dpi=600)

        print("saved {}".format(figName))
    ####################################################
    if args.mode == "test_model":
        OA = 0.
        AA = 0.
        Kappa = 0.
        for run in range(N_Runs):
            OA = OA + results[run]['OA']
            AA = AA + results[run]['AA']
            Kappa = Kappa + results[run]['Kappa']
        print("====================="
              "\nOvearll Accuracy {}\n"
              "=====================".format(OA / N_Runs))
        print("====================="
              "\nAverage Accuracy {}\n"
              "=====================".format(AA / N_Runs))
        print("====================="
              "\nKappa {}\n"
              "=====================".format(Kappa / N_Runs))
        result = {}
        result['OA'] = [OA / N_Runs]
        result['AA'] = [AA / N_Runs]
        result['Kappa'] = [Kappa / N_Runs]
        ms.save_json(
            'results/' + exp_name + '_center_{}_disc_{}.json'.format(
                exp_dict["options"]["center"], exp_dict["options"]["disc"]),
            result)
Example #14
def load_gtAnnDict(main_dict, reset=None):
    _, val_set = load_trainval(main_dict)
    annList_path = val_set.annList_path

    if os.path.exists(annList_path) and reset != "reset":
        return ms.load_json(annList_path)

    else:
        ann_json = {}
        ann_json["categories"] = val_set.categories
        ann_json["type"] = "instances"

        # Images
        imageList = []
        annList = []
        id = 1

        for i in range(len(val_set)):
            print("{}/{}".format(i, len(val_set)))
            batch = val_set[i]

            image_id = batch["name"]

            height, width = batch["images"].shape[-2:]
            imageList += [{"file_name":batch["name"],
                          "height":height,
                          "width":width,
                          "id":batch["name"]}]

            maskObjects = batch["maskObjects"]
            maskClasses = batch["maskClasses"]
            n_objects = maskObjects[maskObjects != 255].max().item()

            for obj_id in range(1, n_objects + 1):
                if obj_id == 0:
                    continue

                binmask = (maskObjects == obj_id)
                segmentation = maskUtils.encode(
                    np.asfortranarray(ms.t2n(binmask)))
                segmentation["counts"] = segmentation["counts"].decode("utf-8")
                uniques = (binmask.long() * maskClasses).unique()
                uniques = uniques[uniques != 0]
                assert len(uniques) == 1

                category_id = uniques[0].item()

                annList += [{"segmentation": segmentation,
                             "iscrowd": 0,
                             # "bbox": maskUtils.toBbox(segmentation).tolist(),
                             "area": int(maskUtils.area(segmentation)),
                             "id": id,
                             "image_id": image_id,
                             "category_id": category_id}]
                id += 1

        ann_json["annotations"] = annList
        ann_json["images"] = imageList

        ms.save_json(annList_path, ann_json)

        # Save dummy results
        anns = ms.load_json(annList_path)
        fname_dummy = annList_path.replace(".json","_best.json")
        annList = anns["annotations"]
        for a in annList:
            a["score"] = 1

        ms.save_json(fname_dummy, annList)
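Examples #14 and #18 both encode binary instance masks to COCO RLE and decode the counts to UTF-8 so the result is JSON-serializable. The core round-trip, as a self-contained sketch:

import numpy as np
from pycocotools import mask as maskUtils

binmask = np.zeros((4, 6), dtype=np.uint8)
binmask[1:3, 2:5] = 1  # a 2x3 rectangle of foreground

# encode() needs a Fortran-ordered uint8 array; counts come back as bytes.
rle = maskUtils.encode(np.asfortranarray(binmask))
rle["counts"] = rle["counts"].decode("utf-8")

print(int(maskUtils.area(rle)))        # 6
print(maskUtils.toBbox(rle).tolist())  # [2.0, 1.0, 3.0, 2.0]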
Example #15
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('-e', '--exp')
    parser.add_argument('-b', '--borgy', default=0, type=int)
    parser.add_argument('-br', '--borgy_running', default=0, type=int)
    parser.add_argument('-m', '--mode', default="summary")
    parser.add_argument('-r', '--reset', default="None")
    parser.add_argument('-s', '--status', type=int, default=0)
    parser.add_argument('-k', '--kill', type=int, default=0)
    parser.add_argument('-g', '--gpu', type=int)
    parser.add_argument('-c', '--configList', nargs="+", default=None)
    parser.add_argument('-l', '--lossList', nargs="+", default=None)
    parser.add_argument('-d', '--datasetList', nargs="+", default=None)
    parser.add_argument('-metric', '--metricList', nargs="+", default=None)
    parser.add_argument('-model', '--modelList', nargs="+", default=None)
    parser.add_argument('-p', '--predictList', nargs="+", default=None)

    args = parser.parse_args()

    if args.borgy or args.kill:
        global_prompt = input("Do all? \n(y/n)\n")

    # SEE IF CUDA IS AVAILABLE
    assert torch.cuda.is_available()
    print("CUDA: %s" % torch.version.cuda)
    print("Pytroch: %s" % torch.__version__)

    mode = args.mode
    exp_name = args.exp

    exp_dict = experiments.get_experiment_dict(args, exp_name)

    pp_main = None
    results = {}

    # Get Main Class
    project_name = os.path.realpath(__file__).split("/")[-2]
    MC = ms.MainClass(path_models="models",
                      path_datasets="datasets",
                      path_metrics="metrics/metrics.py",
                      path_losses="losses/losses.py",
                      path_samplers="addons/samplers.py",
                      path_transforms="addons/transforms.py",
                      path_saves="/mnt/projects/counting/Saves/main/",
                      project=project_name)

    key_set = set()
    for model_name, config_name, metric_name, dataset_name, loss_name in product(
            exp_dict["modelList"], exp_dict["configList"],
            exp_dict["metricList"], exp_dict["datasetList"],
            exp_dict["lossList"]):

        # if model_name in ["LC_RESFCN"]:
        #   loss_name = "water_loss"

        config = configs.get_config_dict(config_name)

        key = ("{} - {} - {}".format(model_name, config_name, loss_name),
               "{}_({})".format(dataset_name, metric_name))

        if key in key_set:
            continue

        key_set.add(key)

        main_dict = MC.get_main_dict(mode, dataset_name, model_name,
                                     config_name, config, args.reset,
                                     exp_dict["epochs"], metric_name,
                                     loss_name)
        main_dict["predictList"] = exp_dict["predictList"]

        if mode == "paths":
            print("\n{}_({})".format(dataset_name, model_name))
            print(main_dict["path_best_model"])
            # print( main_dict["exp_name"])

        predictList_str = ' '.join(exp_dict["predictList"])

        if args.status:
            results[key] = borgy.borgy_status(mode, config_name, metric_name,
                                              model_name, dataset_name,
                                              loss_name, args.reset,
                                              predictList_str)

            continue

        if args.kill:
            results[key] = borgy.borgy_kill(mode, config_name, metric_name,
                                            model_name, dataset_name,
                                            loss_name, args.reset,
                                            predictList_str)
            continue

        if args.borgy:
            results[key] = borgy.borgy_submit(project_name, global_prompt,
                                              mode, config_name, metric_name,
                                              model_name, dataset_name,
                                              loss_name, args.reset,
                                              predictList_str)

            continue

        if mode == "debug":
            debug.debug(main_dict)

        if mode == "validate":
            validate.validate(main_dict)
        if mode == "save_gam_points":
            train_set, _ = au.load_trainval(main_dict)
            model = ms.load_best_model(main_dict)
            for i in range(len(train_set)):
                print(i, "/", len(train_set))
                batch = ms.get_batch(train_set, [i])
                fname = train_set.path + "/gam_{}.pkl".format(
                    batch["index"].item())
                points = model.get_points(batch)
                ms.save_pkl(fname, points)
            import ipdb
            ipdb.set_trace()  # breakpoint ee49ab9f //

        if mode == "save_prm_points":
            train_set, _ = au.load_trainval(main_dict)
            model = ms.load_best_model(main_dict)
            for i in range(len(train_set)):
                print(i, "/", len(train_set))
                batch = ms.get_batch(train_set, [i])

                fname = "{}/prm{}.pkl".format(batch["path"][0],
                                              batch["name"][0])
                points = model.get_points(batch)
                ms.save_pkl(fname, points)
            import ipdb
            ipdb.set_trace()  # breakpoint 679ce152 //

            # train_set, _ = au.load_trainval(main_dict)
            # model = ms.load_best_model(main_dict)
            # for i in range(len(train_set)):
            #   print(i, "/", len(train_set))
            #   batch = ms.get_batch(train_set, [i])
            #   fname = train_set.path + "/gam_{}.pkl".format(batch["index"].item())
            #   points = model.get_points(batch)
            #   ms.save_pkl(fname, points)

        # if mode == "pascal_annList":
        #   data_utils.pascal2lcfcn_points(main_dict)
        if mode == "upperboundmasks":
            import ipdb
            ipdb.set_trace()  # breakpoint 02fac8ce //

            results = au.test_upperboundmasks(main_dict, reset=args.reset)
            print(pd.DataFrame(results))

        if mode == "model":

            results = au.test_model(main_dict, reset=args.reset)
            print(pd.DataFrame(results))

        if mode == "upperbound":
            results = au.test_upperbound(main_dict, reset=args.reset)

            print(pd.DataFrame(results))

        if mode == "MUCov":
            gtAnnDict = au.load_gtAnnDict(main_dict, reset=args.reset)

            # model = ms.load_best_model(main_dict)
            fname = main_dict["path_save"] + "/pred_annList.pkl"
            if not os.path.exists(fname):
                _, val_set = au.load_trainval(main_dict)
                model = ms.load_best_model(main_dict)
                pred_annList = au.dataset2annList(model,
                                                  val_set,
                                                  predict_method="BestDice",
                                                  n_val=None)
                ms.save_pkl(fname, pred_annList)

            else:
                pred_annList = ms.load_pkl(fname)
            import ipdb
            ipdb.set_trace()  # breakpoint 527a7f36 //
            pred_annList = au.load_predAnnList(main_dict,
                                               predict_method="BestObjectness")
            # 0.31 best objectness pred_annList =
            # 0.3482122335421256
            # au.get_MUCov(gtAnnDict, pred_annList)
            au.get_SBD(gtAnnDict, pred_annList)

        if mode == "dic_sbd":
            import ipdb
            ipdb.set_trace()  # breakpoint 4af08a17 //

        if mode == "point_mask":
            from datasets import base_dataset

            import ipdb
            ipdb.set_trace()  # breakpoint 7fd55e0c //
            _, val_set = ms.load_trainval(main_dict)
            batch = ms.get_batch(val_set, [1])
            model = ms.load_best_model(main_dict)
            pred_dict = model.LCFCN.predict(batch)
            # ms.pretty_vis(batch["images"], base_dataset.batch2annList(batch))
            ms.images(ms.pretty_vis(
                batch["images"],
                model.LCFCN.predict(batch,
                                    predict_method="original")["annList"]),
                      win="blobs")
            ms.images(ms.pretty_vis(batch["images"],
                                    base_dataset.batch2annList(batch)),
                      win="erww")
            ms.images(batch["images"],
                      batch["points"],
                      denorm=1,
                      enlarge=1,
                      win="e21e")
            import ipdb
            ipdb.set_trace()  # breakpoint ab9240f0 //

        if mode == "lcfcn_output":
            import ipdb
            ipdb.set_trace()  # breakpoint 7fd55e0c //

            gtAnnDict = au.load_gtAnnDict(main_dict, reset=args.reset)

        if mode == "load_gtAnnDict":
            _, val_set = au.load_trainval(main_dict)
            gtAnnDict = au.load_gtAnnDict(val_set)

            # gtAnnClass = COCO(gtAnnDict)
            # au.assert_gtAnnDict(main_dict, reset=None)
            # _,val_set = au.load_trainval(main_dict)
            # annList_path = val_set.annList_path

            # fname_dummy = annList_path.replace(".json","_best.json")
            # predAnnDict = ms.load_json(fname_dummy)
            import ipdb
            ipdb.set_trace()  # breakpoint 100bfe1b //
            pred_annList = ms.load_pkl(main_dict["path_best_annList"])
            # model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            batch = ms.get_batch(val_set, [1])

            import ipdb
            ipdb.set_trace()  # breakpoint 2310bb33 //
            model = ms.load_best_model(main_dict)
            pred_dict = model.predict(batch, "BestDice", "mcg")
            ms.images(batch["images"],
                      au.annList2mask(pred_dict["annList"])["mask"],
                      denorm=1)
            # pointList2UpperBoundMCG
            pred_annList = au.load_predAnnList(main_dict,
                                               predict_method="BestDice",
                                               proposal_type="mcg",
                                               reset="reset")
            # annList = au.pointList2UpperBoundMCG(batch["lcfcn_pointList"], batch)["annList"]
            ms.images(batch["images"],
                      au.annList2mask(annList)["mask"],
                      denorm=1)
            pred_annList = au.load_BestMCG(main_dict, reset="reset")
            # pred_annList = au.dataset2annList(model, val_set,
            #                   predict_method="BestDice",
            #                   n_val=None)
            au.get_perSizeResults(gtAnnDict, pred_annList)

        if mode == "vis":
            _, val_set = au.load_trainval(main_dict)
            batch = ms.get_batch(val_set, [3])

            import ipdb
            ipdb.set_trace()  # breakpoint 05e6ef16 //

            vis.visBaselines(batch)

            model = ms.load_best_model(main_dict)
            vis.visBlobs(model, batch)

        if mode == "qual":
            model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            path = "/mnt/home/issam/Summaries/{}_{}".format(
                dataset_name, model_name)
            try:
                ms.remove_dir(path)
            except:
                pass
            n_images = len(val_set)
            base = "{}_{}".format(dataset_name, model_name)
            for i in range(50):
                print(i, "/10", "- ", base)
                index = np.random.randint(0, n_images)
                batch = ms.get_batch(val_set, [index])
                if len(batch["lcfcn_pointList"]) == 0:
                    continue
                image = vis.visBlobs(model, batch, return_image=True)

                # image_baselines = vis.visBaselines(batch, return_image=True)
                # imgAll = np.concatenate([image, image_baselines], axis=1)

                fname = path + "/{}_{}.png".format(i, base)
                ms.create_dirs(fname)
                ms.imsave(fname, image)

        if mode == "test_baselines":
            import ipdb
            ipdb.set_trace()  # breakpoint b51c5b1f //
            results = au.test_baselines(main_dict, reset=args.reset)
            print(pd.DataFrame(results))

        if mode == "test_best":
            au.test_best(main_dict)

        if mode == "qualitative":
            au.qualitative(main_dict)

        if mode == "figure1":
            from PIL import Image
            from addons import transforms
            model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            # proposals_path = "/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/ProposalsSharp/"
            # vidList = glob("/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/stuttgart_01/*")
            # vidList.sort()

            # pretty_image = ms.visPretty(model, batch = ms.get_batch(val_set, [i]), with_void=1, win="with_void")
            batch = ms.get_batch(val_set, [68])
            bestdice = ms.visPretty(model,
                                    batch=batch,
                                    with_void=0,
                                    win="no_void")
            blobs = ms.visPretty(model,
                                 batch=batch,
                                 predict_method="blobs",
                                 with_void=0,
                                 win="no_void")

            ms.images(bestdice, win="BestDice")
            ms.images(blobs, win="Blobs")
            ms.images(batch["images"], denorm=1, win="Image")
            ms.images(batch["images"],
                      batch["points"],
                      enlarge=1,
                      denorm=1,
                      win="Points")
            import ipdb
            ipdb.set_trace()  # breakpoint cf4bb3d3 //

        if mode == "video2":
            from PIL import Image
            from addons import transforms
            model = ms.load_best_model(main_dict)
            _, val_set = au.load_trainval(main_dict)
            # proposals_path = "/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/ProposalsSharp/"
            # vidList = glob("/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/stuttgart_01/*")
            # vidList.sort()
            index = 0
            for i in range(len(val_set)):

                # pretty_image = ms.visPretty(model, batch = ms.get_batch(val_set, [i]), with_void=1, win="with_void")
                batch = ms.get_batch(val_set, [i])
                pretty_image = ms.visPretty(model,
                                            batch=batch,
                                            with_void=0,
                                            win="no_void")
                # pred_dict = model.predict(batch, predict_method="BestDice")
                path_summary = main_dict["path_summary"]
                ms.create_dirs(path_summary + "/tmp")
                ms.imsave(
                    path_summary + "vid_mask_{}.png".format(index),
                    ms.get_image(batch["images"],
                                 batch["points"],
                                 enlarge=1,
                                 denorm=1))
                index += 1
                ms.imsave(path_summary + "vid_mask_{}.png".format(index),
                          pretty_image)
                index += 1
                # ms.imsave(path_summary+"vid1_full_{}.png".format(i), ms.get_image(img, pred_dict["blobs"], denorm=1))
                print(i, "/", len(val_set))

        if mode == "video":
            from PIL import Image
            from addons import transforms
            model = ms.load_best_model(main_dict)
            # _, val_set = au.load_trainval(main_dict)
            proposals_path = "/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/ProposalsSharp/"
            vidList = glob(
                "/mnt/datasets/public/issam/Cityscapes/demoVideo/leftImg8bit/demoVideo/stuttgart_01/*"
            )
            vidList.sort()
            for i, img_path in enumerate(vidList):
                image = Image.open(img_path).convert('RGB')
                image = image.resize((1200, 600), Image.BILINEAR)
                img, _ = transforms.Tr_WTP_NoFlip()([image, image])

                pred_dict = model.predict(
                    {
                        "images": img[None],
                        "split": ["test"],
                        "resized": torch.FloatTensor([1]),
                        "name": [ms.extract_fname(img_path)],
                        "proposals_path": [proposals_path]
                    },
                    predict_method="BestDice")
                path_summary = main_dict["path_summary"]
                ms.create_dirs(path_summary + "/tmp")
                ms.imsave(path_summary + "vid1_mask_{}.png".format(i),
                          ms.get_image(pred_dict["blobs"]))
                ms.imsave(path_summary + "vid1_full_{}.png".format(i),
                          ms.get_image(img, pred_dict["blobs"], denorm=1))
                print(i, "/", len(vidList))

        if mode == "5_eval_BestDice":
            gtAnnDict = au.load_gtAnnDict(main_dict)
            gtAnnClass = COCO(gtAnnDict)
            results = au.assert_gtAnnDict(main_dict, reset=None)

        if mode == "cp_annList":
            ms.dataset2cocoformat(dataset_name="CityScapes")

        if mode == "pascal2lcfcn_points":
            data_utils.pascal2lcfcn_points(main_dict)

        if mode == "cp2lcfcn_points":
            data_utils.cp2lcfcn_points(main_dict)

        if mode == "train":

            train.main(main_dict)
            import ipdb
            ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "train_only":

            train.main(main_dict, train_only=True)
            import ipdb
            ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "sharpmask2psfcn":
            for split in ["train", "val"]:
                root = "/mnt/datasets/public/issam/COCO2014/ProposalsSharp/"
                path = "{}/sharpmask/{}/jsons/".format(root, split)

                jsons = glob(path + "*.json")
                propDict = {}
                for k, json in enumerate(jsons):
                    print("{}/{}".format(k, len(jsons)))
                    props = ms.load_json(json)
                    for p in props:
                        if p["image_id"] not in propDict:
                            propDict[p["image_id"]] = []
                        propDict[p["image_id"]] += [p]

                for k in propDict.keys():
                    fname = "{}/{}.json".format(root, k)
                    ms.save_json(fname, propDict[k])

        if mode == "cp2coco":
            import ipdb
            ipdb.set_trace()  # breakpoint f2eb9e70 //
            dataset2cocoformat.cityscapes2cocoformat(main_dict)
            # train.main(main_dict)
            import ipdb
            ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "train_lcfcn":
            train_lcfcn.main(main_dict)
            import ipdb
            ipdb.set_trace()  # breakpoint a5d091b9 //

        if mode == "summary":

            try:
                history = ms.load_history(main_dict)

                # if predictList_str == "MAE":
                #   results[key] = "{}/{}: {:.2f}".format(history["best_model"]["epoch"],
                #                                                           history["epoch"],
                #                                                           history["best_model"][metric_name])

                # else:
                val_dict = history["val"][-1]
                val_dict = history["best_model"]
                iou25 = val_dict["0.25"]
                iou5 = val_dict["0.5"]
                iou75 = val_dict["0.75"]
                results[key] = "{}/{}: {:.1f} - {:.1f} - {:.1f}".format(
                    val_dict["epoch"], history["epoch"], iou25 * 100,
                    iou5 * 100, iou75 * 100)
                # if history["val"][-1]["epoch"] != history["epoch"]:
                #   results[key] += " | Val {}".format(history["epoch"])
                try:
                    results[key] += " | {}/{}".format(
                        len(history["trained_batch_names"]),
                        history["train"][-1]["n_samples"])
                except:
                    pass
            except:
                pass
        if mode == "vals":

            history = ms.load_history(main_dict)

            for i in range(1, len(main_dict["predictList"]) + 1):
                if len(history['val']) == 0:
                    res = "NaN"
                    continue
                else:
                    res = history["val"][-i]

                map50 = res["map50"]
                map75 = res["map75"]

                # if map75 < 1e-3:
                #   continue

                string = "{} - {} - map50: {:.2f} - map75: {:.2f}".format(
                    res["epoch"], res["predict_name"], map50, map75)

                key_tmp = list(key).copy()
                key_tmp[1] += " {} - {}".format(metric_name,
                                                res["predict_name"])
                results[tuple(key_tmp)] = string

            # print("map75", pd.DataFrame(history["val"])["map75"].max())
            # df = pd.DataFrame(history["vals"][:20])["water_loss_B"]
            # print(df)
    try:
        print(ms.dict2frame(results))
    except:
        print("Results not printed...")
Example #16
File: misc.py Project: szubing/ED-DMM-UDA
def copy_models(exp_dict, path_dst):
    history = load_history(exp_dict)

    ms.save_json(path_dst + "/history.json", history)

    print("copied...")
Example #17
def test_COCOmap(main_dict):
    # create_voc2007(main_dict)

    model = ms.load_best_model(main_dict)
    _, val_set = ms.load_trainval(main_dict)

    path_base = "/mnt/datasets/public/issam/VOCdevkit/annotations/"
    fname = "{}/instances_val2012.json".format(path_base)

    cocoGt = COCO(fname)
    # fname = "{}/instances_val2012.json".format(path_base)
    # cocoGt = COCO(fname)

    fname = (path_base + "/results/" + main_dict["exp_name"] + "_" +
             str(main_dict["model_options"]["predict_proposal"]) + ".json")
    # test_list(model, cocoGt, val_set, [0,1,2,3], prp.Blobs)
    import ipdb
    ipdb.set_trace()  # breakpoint 06c353ef //
    # test_list(model, cocoGt, val_set, [0], prp.BestObjectness)
    # test_list(model, cocoGt, val_set, [0,1,2,3], prp.Blobs)
    if not os.path.exists(fname) or 1:  # "or 1" forces recomputation on every run
        annList = []
        for i in range(len(val_set)):
            batch = ms.get_batch(val_set, [i])
            try:
                annList += model.predict(batch, "annList")
            except Exception as exc:
                import ipdb
                ipdb.set_trace()  # breakpoint 5f61b0cfx //
            if (i % 100) == 0:

                cocoEval, _ = d_helpers.evaluateAnnList(annList)
                ms.save_json(fname.replace(".json", "inter.json"), annList)

            # ms.save_json("tmp.json", annList)
            # cocoDt = cocoGt.loadRes("tmp.json")

            # cocoEval = COCOeval(cocoGt, cocoDt, "segm")
            # cocoEval.params.imgIds  = list(set([v["image_id"] for v in cocoDt.anns.values()]))
            # cocoEval.evaluate()
            # cocoEval.accumulate()
            # cocoEval.summarize()

            print("{}/{}".format(i, len(val_set)))

        ms.save_json(fname, annList)
    # cocoEval = d_helpers.evaluateAnnList(ms.load_json(fname))

    # cocoEval = COCOeval(cocoGt, cocoDt, annType)
    #cocoEval.params.imgIds  = list(set([v["image_id"] for v in cocoDt.anns.values()]))
    if 1:
        #cocoEval.params.imgIds = [2007000033]
        cocoDt = cocoGt.loadRes(fname)
        cocoEval = COCOeval(cocoGt, cocoDt, "segm")
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        print("Images:", len(cocoEval.params.imgIds))
        print("Model: {}, Loss: {}, Pred: {}".format(
            main_dict["model_name"], main_dict["loss_name"],
            main_dict["model_options"]["predict_proposal"]))
    import ipdb
    ipdb.set_trace()  # breakpoint c6f8f580 //
    # d_helpers.visGT(cocoGt, cocoDt,ms.get_batch(val_set, [169]))
    # d_helpers.valList(cocoGt, cocoDt, val_set, [173,174])
    # model.set_proposal(None); vis.visBlobs(model, ms.get_batch(val_set, [169]), "blobs")
    return "mAP25: {:.2f} - mAP75:{:.2f}".format(cocoEval.stats[1],
                                                 cocoEval.stats[2])
Example #18
def pascal2cocoformat():
    dataset = ms.load_trainval({"dataset_name": "VOC"})
    fname = "/mnt/datasets/public/issam/VOCdevkit/annotations/"
    fname += "instances_val2012.json"

    tmp = ms.load_json("/mnt/datasets/public/issam/"
                       "VOCdevkit/annotations/pascal_val2012.json")

    ann_json = {}
    ann_json["categories"] = tmp["categories"]
    ann_json["type"] = "instances"

    # Images
    imageList = []
    annList = []
    id = 1
    for i in range(len(dataset)):
        print("{}/{}".format(i, len(dataset)))
        batch = dataset[i]
        image_id = int(batch["name"])

        height, width = batch["images"].shape[-2:]
        imageList += [{
            "file_name": batch["name"] + ".jpg",
            "height": height,
            "width": width,
            "id": image_id
        }]

        maskObjects = batch["maskObjects"]
        maskClasses = batch["maskClasses"]
        n_objects = maskObjects[maskObjects != 255].max()
        for obj_id in range(1, n_objects + 1):
            if obj_id == 0:
                continue

            binmask = (maskObjects == obj_id)
            segmentation = maskUtils.encode(np.asfortranarray(ms.t2n(binmask)))
            segmentation["counts"] = segmentation["counts"].decode("utf-8")
            uniques = (binmask.long() * maskClasses).unique()
            uniques = uniques[uniques != 0]
            assert len(uniques) == 1

            category_id = uniques[0].item()

            annList += [{
                "segmentation": segmentation,
                "iscrowd": 0,
                # "bbox":maskUtils.toBbox(segmentation).tolist(),
                "area": int(maskUtils.area(segmentation)),
                "id": id,
                "image_id": image_id,
                "category_id": category_id
            }]
            id += 1

    ann_json["annotations"] = annList
    ann_json["images"] = imageList

    ms.save_json(fname, ann_json)

    anns = ms.load_json(fname)
    fname_dummy = fname.replace(".json", "_best.json")
    annList = anns["annotations"]
    for a in annList:
        a["score"] = 1

    ms.save_json(fname_dummy, annList)
Example #19
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--scratch',
                        action='store',
                        help='',
                        metavar="DIR",
                        default="_tmp_")
    parser.add_argument('--sdf',
                        action='store',
                        help='',
                        metavar="FILE",
                        nargs="+",
                        default=[])
    parser.add_argument('--dict',
                        action='store',
                        help='',
                        metavar="FILE",
                        nargs="+",
                        default=[])
    parser.add_argument('--name',
                        action='store',
                        help='',
                        metavar="STR",
                        nargs="+")
    parser.add_argument('--filename', action='store', help='', metavar="STR")
    parser.add_argument('--filter', action='store_true', help='')
    parser.add_argument('-j',
                        '--procs',
                        action='store',
                        help='parallelize',
                        metavar="int",
                        default=0,
                        type=int)

    args = parser.parse_args()

    if args.scratch[-1] != "/":
        args.scratch += "/"

    print()
    databases_set = []
    databases_dict = []

    for sdf in args.sdf:
        molobjs = cheminfo.read_sdffile(sdf)
        molobjs = list(molobjs)
        smiles = [
            cheminfo.molobj_to_smiles(molobj, remove_hs=True)
            for molobj in molobjs
        ]
        smiles = set(smiles)
        databases_set.append(smiles)
        print(sdf, len(smiles))

    for filename in args.dict:
        data = misc.load_obj(filename)
        smiles = data.keys()
        smiles = set(smiles)
        databases_set.append(smiles)
        databases_dict.append(data)
        print(filename, len(smiles))

    if args.scratch is not None:

        # Merge databases
        everything = {}

        for data in databases_dict:

            keys = data.keys()

            for key in keys:

                if key not in everything:
                    everything[key] = []

                everything[key] += data[key]

        if args.filter:
            everything = filter_dict(everything)

        keys = everything.keys()
        print("n items", len(keys))

        # Save
        misc.save_json(args.scratch + "molecule_data", everything)
        misc.save_obj(args.scratch + "molecule_data", everything)

    if args.name is not None:

        n_db = len(databases_set)

        if n_db == 2:
            venn2(databases_set, set_labels=args.name)
        elif n_db == 3:
            venn3(databases_set, set_labels=args.name)

        plt.savefig(args.scratch + "venndiagram")

    return
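venn2 / venn3 and plt in Example #19 are not imported in the snippet; they presumably come from matplotlib_venn and matplotlib. A self-contained sketch of the diagram branch, with made-up set contents and labels:

import matplotlib.pyplot as plt
from matplotlib_venn import venn2

databases_set = [{"C", "CC", "CCO"}, {"CC", "CCO", "c1ccccc1"}]
venn2(databases_set, set_labels=("db_a", "db_b"))
plt.savefig("venndiagram.png")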
Example #20
def main():

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--scratch',
                        action='store',
                        help='',
                        metavar="DIR",
                        default="_tmp_")
    parser.add_argument('--sdf', action='store', help='',
                        metavar="FILE")  #, nargs="+", default=[])
    parser.add_argument('--properties',
                        action='store',
                        help='',
                        metavar="FILE")  #, nargs="+", default=[])
    parser.add_argument('-j',
                        '--procs',
                        action='store',
                        help='parallelize',
                        metavar="int",
                        default=0,
                        type=int)

    args = parser.parse_args()

    if args.scratch[-1] != "/":
        args.scratch += "/"

    fsdf = gzip.open(args.scratch + "structures.sdf.gz", 'w')
    fprop = open(args.scratch + "properties.csv", 'w')

    molecules = cheminfo.read_sdffile(args.sdf)
    properties = open(args.properties, 'r')

    moledict = {}

    for molobj, line in zip(molecules, properties):

        status = molobjfilter(molobj)

        if not status:
            continue

        status = valuefilter(line)

        if not status:
            continue

        smiles = cheminfo.molobj_to_smiles(molobj, remove_hs=True)

        print(smiles)

        sdfstr = cheminfo.molobj_to_sdfstr(molobj)
        sdfstr += "$$$$\n"
        fsdf.write(sdfstr.encode())
        fprop.write(line)

        values = [float(x) for x in line.split()[1:]]
        moledict[smiles] = values

    fsdf.close()
    fprop.close()

    properties.close()

    misc.save_json(args.scratch + "molecules", moledict)
    misc.save_obj(args.scratch + "molecules", moledict)

    return
Example #21
def dump_kernel_scores(scr, names=[]):

    # Predefined reg
    l2regs = [10**-x for x in range(1, 6, 2)] + [0.0]
    n_l2regs = len(l2regs)

    # Define n_training
    # n_trains = [2**x for x in range(4, 12)]
    n_trains = [2**x for x in range(4, 17)]
    n_trains = np.array(n_trains, dtype=int)
    n_items = misc.load_txt(scr + "n_items")

    n_train_idx, = np.where(n_trains < n_items*4.0/5.0)
    n_trains = n_trains[n_train_idx]
    n_trains = list(n_trains) # + [-1]

    print("Assume total items", n_items,
            "N train", "{:5.1f}".format(np.floor(n_items*4/5)),
            "N test", "{:5.1f}".format(np.ceil(n_items*1/5)))
    print("Training:", list(n_trains))
    misc.save_npy(scr + "n_train", n_trains)

    # Load properties
    try:
        properties = misc.load_npy(scr + "properties")
    except:
        with open(scr + "properties.csv", 'r') as f:
            lines = f.readlines()
            properties = []
            for line in lines:

                values = [float(x) for x in line.split()]
                values = values[1:]
                value = np.median(values)
                properties.append(value)

            properties = np.array(properties)
            misc.save_npy(scr + "properties", properties)


    print(n_items, "==", len(properties))
    assert n_items == len(properties)

    # Load done kernel
    this_names = ["rdkitfp", "morgan"]
    for name in names:

        break  # disabled: the fingerprint-kernel scoring below never runs

        if name not in this_names:
            continue

        print("scoring", name)

        now = time.time()

        print("load kernel", name)
        kernel = misc.load_npy(scr + "kernel." + name)

        n_len = kernel.shape[0]
        diaidx = np.diag_indices(n_len)

        def scan_kernels(debug=True):
            kernel[diaidx] += l2regs[0]
            yield kernel
            # for i in tqdm.tqdm(range(1, n_l2regs), ncols=47, ascii=True, desc=name):
            for i in range(1, n_l2regs):
                kernel[diaidx] += -l2regs[i - 1] + l2regs[i]
                yield kernel

        generator = functools.partial(tqdm, scan_kernels(), ncols=75,
                                      ascii=True, desc=name + " kernels",
                                      total=n_l2regs)

        print("scan kernels", name)
        idx_winners, scores = cross_validation(generator(), properties,
                                               training_points=n_trains)
        misc.save_npy(scr + "score." + name, scores)
        scores = np.around(np.mean(scores, axis=1), decimals=2)

        # Save parameters
        winner_parameters = {}
        for ni, index in enumerate(idx_winners):

            n = n_trains[ni]
            l2reg = l2regs[index]

            parameters = {
                "reg": l2reg,
            }

            winner_parameters[str(n)] = parameters

        nower = time.time()

        print("time: {:10.1f}s".format(nower-now))
        print(name, list(scores))

        misc.save_json(scr + "parameters."+name, winner_parameters)

        print("saved")

        kernel = None
        del kernel

    # Load multi kernels (reg search)
    this_names = ["fchl19", "fchl18"]
    for name in names:
        break  # disabled: the multi-kernel reg search below never runs
        kernels = misc.load_npy(scr + "kernels." + name)

        n_l2regs = len(l2regs)
        n_kernels = kernels.shape[0]
        n_len = kernels[0].shape[0]

        diaidx = np.diag_indices(n_len)

        def scan_kernels():
            for kernel in kernels:
                kernel[diaidx] += l2regs[0]
                yield kernel
                for i in range(1, n_l2regs):
                    kernel[diaidx] += -l2regs[i - 1] + l2regs[i]
                    yield kernel

        idx_winners, scores = cross_validation(scan_kernels(), properties,
                                               training_points=n_trains)
        misc.save_npy(scr + "score." + name, scores)
        scores = np.around(np.mean(scores, axis=1), decimals=2)

        # Clean
        kernels = None
        del kernels

        # Save parameters
        winner_parameters = {}
        for ni, index in enumerate(idx_winners):

            # convert linear index to multi-dimensions
            idx_parameters = np.unravel_index([index], (n_kernels, n_l2regs))
            i, j = idx_parameters
            i = int(i[0])
            j = int(j[0])

            n = n_trains[ni]
            sigma = i
            l2reg = l2regs[j]

            parameters = {
                "sigma": sigma,
                "reg": l2reg,
            }

            winner_parameters[str(n)] = parameters

        misc.save_json(scr + "parameters."+name, winner_parameters)

        print(name, scores)


    # Load distance kernels
    models = []
    parameters = {
        "name": "rdkitfp",
        "sigma": [2**x for x in range(1, 12, 2)],
        # "sigma": [2**x for x in np.arange(20, 40, 0.5)],
        # "lambda": l2regs,
        # "lambda":  [10.0**-x for x in np.arange(1, 10, 1)]
        "lambda":  [10.0**-6],
    }
    models.append(parameters)
    parameters = {
        "name": "slatm",
        "sigma": [2**x for x in range(1, 12, 2)],
        # "sigma": [2**x for x in np.arange(20, 40, 0.5)],
        # "lambda": l2regs,
        # "lambda":  [10.0**-x for x in np.arange(1, 10, 1)]
        "lambda":  [10.0**-6],
    }
    models.append(parameters)
    parameters = {
        "name": "cm",
        "sigma": [2**x for x in range(1, 12, 2)],
        "lambda": l2regs,
    }
    models.append(parameters)
    parameters = {
        "name": "bob",
        "sigma": [2**x for x in range(1, 12, 2)],
        "lambda": l2regs,
    }
    models.append(parameters)
    parameters = {
        "name": "avgslatm",
        "sigma": [2**x for x in range(1, 20, 2)],
        "lambda": l2regs,
    }
    # models.append(parameters)

    for model in models:
        name = model["name"]

        if name not in names:
            continue

        print("scoring", name)

        parameters = model

        n_sigma = len(parameters["sigma"])
        n_lambda = len(parameters["lambda"])

        print("parameter range")
        print("sigma", min(parameters["sigma"]), max(parameters["sigma"]))

        dist = misc.load_npy(scr + "dist." + name)
        kernels = get_kernels_l2distance(dist, parameters)

        # Cross validate
        idx_winners, scores = cross_validation(kernels, properties,
                                               training_points=n_trains)

        # Save scores
        misc.save_npy(scr + "score."+name, scores)
        scores = np.around(np.mean(scores, axis=1), decimals=2)

        # Save parameters
        winner_parameters = {}
        for ni, index in enumerate(idx_winners):

            # convert linear index to multi-dimensions
            idx_parameters = np.unravel_index([index], (n_sigma, n_lambda))
            i, j = idx_parameters
            i = int(i[0])
            j = int(j[0])

            n = n_trains[ni]
            sigma = parameters["sigma"][i]
            l2reg = parameters["lambda"][j]

            this_parameters = {
                "sigma": str(sigma),
                "reg": str(l2reg),
            }

            winner_parameters[str(n)] = this_parameters

        print(name, scores)
        misc.save_json(scr + "parameters." + name, winner_parameters)

    quit()

    return
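The scan_kernels generators in Example #21 rely on a diagonal-update trick: rather than copying the (potentially huge) kernel matrix once per regularizer, they mutate the diagonal in place so that after step i it carries exactly l2regs[i]. An isolated sketch of the trick:

import numpy as np


def scan_regularized(kernel, l2regs):
    # Yields kernel + l2reg * I for each l2reg, reusing one matrix in place.
    diag = np.diag_indices(kernel.shape[0])
    kernel[diag] += l2regs[0]
    yield kernel
    for prev, curr in zip(l2regs, l2regs[1:]):
        kernel[diag] += curr - prev  # swap the previous reg for the next
        yield kernel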