Example #1
def main():
    w = PyStopwatch()

    parser = ConfigArgumentParser(conflict_handler="resolve")
    parser.add_argument(
        "--dataroot",
        type=str,
        default="/data/private/pretrainedmodels",
        help="torchvision data folder",
    )
    parser.add_argument("--until", type=int, default=5)
    parser.add_argument("--num-op", type=int, default=2)
    parser.add_argument("--num-policy", type=int, default=5)
    parser.add_argument("--num-search", type=int, default=200)
    parser.add_argument("--cv-ratio", type=float, default=0.4)
    parser.add_argument("--decay", type=float, default=-1)
    parser.add_argument("--redis", type=str, default="gpu-cloud-vnode30.dakao.io:23655")
    parser.add_argument("--per-class", action="store_true")
    parser.add_argument("--resume", action="store_true")
    parser.add_argument("--smoke-test", action="store_true")
    args = parser.parse_args()

    if args.decay > 0:
        logger.info("decay=%.4f" % args.decay)
        C.get()["optimizer"]["decay"] = args.decay

    add_filehandler(
        logger,
        os.path.join(
            "models",
            "%s_%s_cv%.1f.log"
            % (C.get()["dataset"], C.get()["model"]["type"], args.cv_ratio),
        ),
    )
    logger.info("configuration...")
    logger.info(json.dumps(C.get().conf, sort_keys=True, indent=4))
    logger.info("initialize ray...")
    ray.init(address=args.redis)

    num_result_per_cv = 10
    cv_num = 5
    copied_c = copy.deepcopy(C.get().conf)

    logger.info(
        "search augmentation policies, dataset=%s model=%s"
        % (C.get()["dataset"], C.get()["model"]["type"])
    )
    logger.info(
        "----- Train without Augmentations cv=%d ratio(test)=%.1f -----"
        % (cv_num, args.cv_ratio)
    )
    w.start(tag="train_no_aug")
    paths = [
        _get_path(
            C.get()["dataset"],
            C.get()["model"]["type"],
            "ratio%.1f_fold%d" % (args.cv_ratio, i),
        )
        for i in range(cv_num)
    ]
    print(paths)
    reqs = [
        train_model.remote(
            copy.deepcopy(copied_c),
            args.dataroot,
            C.get()["aug"],
            args.cv_ratio,
            i,
            save_path=paths[i],
            skip_exist=True,
        )
        for i in range(cv_num)
    ]
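
    # One Ray task per fold; skip_exist=True presumably lets a fold whose
    # checkpoint is already complete be skipped instead of retrained.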

    tqdm_epoch = tqdm(range(C.get()["epoch"]))
    is_done = False
    for epoch in tqdm_epoch:
        while True:
            epochs_per_cv = OrderedDict()
            for cv_idx in range(cv_num):
                try:
                    latest_ckpt = torch.load(paths[cv_idx])
                    if "epoch" not in latest_ckpt:
                        epochs_per_cv["cv%d" % (cv_idx + 1)] = C.get()["epoch"]
                        continue
                    epochs_per_cv["cv%d" % (cv_idx + 1)] = latest_ckpt["epoch"]
                except Exception:
                    continue  # checkpoint not readable yet; retry on next poll
            tqdm_epoch.set_postfix(epochs_per_cv)
            if (
                len(epochs_per_cv) == cv_num
                and min(epochs_per_cv.values()) >= C.get()["epoch"]
            ):
                is_done = True
            if len(epochs_per_cv) == cv_num and min(epochs_per_cv.values()) >= epoch:
                break
            time.sleep(10)
        if is_done:
            break

    logger.info("getting results...")
    pretrain_results = ray.get(reqs)
    for r_model, r_cv, r_dict in pretrain_results:
        logger.info(
            "model=%s cv=%d top1_train=%.4f top1_valid=%.4f"
            % (r_model, r_cv + 1, r_dict["top1_train"], r_dict["top1_valid"])
        )
    logger.info("processed in %.4f secs" % w.pause("train_no_aug"))

    if args.until == 1:
        sys.exit(0)

    logger.info("----- Search Test-Time Augmentation Policies -----")
    w.start(tag="search")

    ops = augment_list(False)
    space = {}
    for i in range(args.num_policy):
        for j in range(args.num_op):
            space["policy_%d_%d" % (i, j)] = hp.choice(
                "policy_%d_%d" % (i, j), list(range(0, len(ops)))
            )
            space["prob_%d_%d" % (i, j)] = hp.uniform("prob_%d_ %d" % (i, j), 0.0, 1.0)
            space["level_%d_%d" % (i, j)] = hp.uniform(
                "level_%d_ %d" % (i, j), 0.0, 1.0
            )
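
    # Each (policy i, op j) slot contributes three hyperparameters to the
    # HyperOpt search space: an index into `ops`, an application probability,
    # and a magnitude level, both in [0, 1], all searched jointly.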

    final_policy_set = []
    total_computation = 0
    reward_attr = "top1_valid"  # top1_valid or minus_loss
    for _ in range(1):  # run multiple times.
        for cv_fold in range(cv_num):
            name = "search_%s_%s_fold%d_ratio%.1f" % (
                C.get()["dataset"],
                C.get()["model"]["type"],
                cv_fold,
                args.cv_ratio,
            )
            print(name)

            def train(config, reporter):
                return eval_tta(
                    copy.deepcopy(copied_c), config, reporter, num_class, get_model, get_dataloaders
                )

            register_trainable(name, train)
            algo = HyperOptSearch(
                space, max_concurrent=4 * 20, metric=reward_attr, mode="max"
            )

            results = run(
                train,
                name=name,
                config={
                    "dataroot": args.dataroot,
                    "save_path": paths[cv_fold],
                    "cv_ratio_test": args.cv_ratio,
                    "cv_fold": cv_fold,
                    "num_op": args.num_op,
                    "num_policy": args.num_policy,
                },
                num_samples=4 if args.smoke_test else args.num_search,
                resources_per_trial={"gpu": 1},
                stop={"training_iteration": args.num_policy},
                search_alg=algo,
                scheduler=None,
                verbose=0,
                queue_trials=True,
                resume=args.resume,
                raise_on_failed_trial=False,
            )
            print()
            df = results.results_df

            import pickle

            # dump raw results for offline inspection (overwritten each fold)
            with open("results.pickle", "wb") as fp:
                pickle.dump(results, fp)
            df.to_csv("df.csv")

            results = df.sort_values(by=reward_attr, ascending=False)

            # calculate computation usage
            for _, result in results.iterrows():
                total_computation += result["elapsed_time"]

            for _, result in results.iloc[:num_result_per_cv].iterrows():
                final_policy = policy_decoder(
                    result, args.num_policy, args.num_op, prefix="config."
                )
                logger.info(
                    "loss=%.12f top1_valid=%.4f %s"
                    % (result["minus_loss"], result["top1_valid"], final_policy)
                )

                final_policy = remove_deplicates(final_policy)
                final_policy_set.extend(final_policy)

    logger.info(json.dumps(final_policy_set))
    logger.info("final_policy=%d" % len(final_policy_set))
    logger.info(
        "processed in %.4f secs, gpu hours=%.4f"
        % (w.pause("search"), total_computation / 3600.0)
    )
    logger.info(
        "----- Train with Augmentations model=%s dataset=%s aug=%s ratio(test)=%.1f -----"
        % (C.get()["model"]["type"], C.get()["dataset"], C.get()["aug"], args.cv_ratio)
    )
    w.start(tag="train_aug")

    num_experiments = 5
    default_path = [
        _get_path(
            C.get()["dataset"],
            C.get()["model"]["type"],
            "ratio%.1f_default%d" % (args.cv_ratio, _),
        )
        for _ in range(num_experiments)
    ]
    augment_path = [
        _get_path(
            C.get()["dataset"],
            C.get()["model"]["type"],
            "ratio%.1f_augment%d" % (args.cv_ratio, _),
        )
        for _ in range(num_experiments)
    ]
    reqs = [
        train_model.remote(
            copy.deepcopy(copied_c),
            args.dataroot,
            C.get()["aug"],
            0.0,
            0,
            save_path=default_path[_],
            skip_exist=True,
        )
        for _ in range(num_experiments)
    ] + [
        train_model.remote(
            copy.deepcopy(copied_c),
            args.dataroot,
            final_policy_set,
            0.0,
            0,
            save_path=augment_path[_],
        )
        for _ in range(num_experiments)
    ]
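
    # The first num_experiments tasks train with the baseline augmentation from
    # the config, the rest with the searched policies; cv_ratio=0.0 presumably
    # means the full training set is used (no validation split).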

    tqdm_epoch = tqdm(range(C.get()["epoch"]))
    is_done = False
    for epoch in tqdm_epoch:
        while True:
            epochs = OrderedDict()
            for exp_idx in range(num_experiments):
                try:
                    if os.path.exists(default_path[exp_idx]):
                        latest_ckpt = torch.load(default_path[exp_idx])
                        epochs["default_exp%d" % (exp_idx + 1)] = latest_ckpt["epoch"]
                except Exception:
                    pass  # checkpoint being written; skip this poll
                try:
                    if os.path.exists(augment_path[exp_idx]):
                        latest_ckpt = torch.load(augment_path[exp_idx])
                        epochs["augment_exp%d" % (exp_idx + 1)] = latest_ckpt["epoch"]
                except Exception:
                    pass

            tqdm_epoch.set_postfix(epochs)
            if (
                len(epochs) == num_experiments * 2
                and min(epochs.values()) >= C.get()["epoch"]
            ):
                is_done = True
            if len(epochs) == num_experiments * 2 and min(epochs.values()) >= epoch:
                break
            time.sleep(10)
        if is_done:
            break

    logger.info("getting results...")
    final_results = ray.get(reqs)

    for train_mode in ["default", "augment"]:
        avg = 0.0
        for _ in range(num_experiments):
            r_model, r_cv, r_dict = final_results.pop(0)
            logger.info(
                "[%s] top1_train=%.4f top1_test=%.4f"
                % (train_mode, r_dict["top1_train"], r_dict["top1_test"])
            )
            avg += r_dict["top1_test"]
        avg /= num_experiments
        logger.info(
            "[%s] top1_test average=%.4f (#experiments=%d)"
            % (train_mode, avg, num_experiments)
        )
    logger.info("processed in %.4f secs" % w.pause("train_aug"))

    logger.info(w)
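
Both monitoring loops above follow the same pattern: poll each worker's latest checkpoint on disk, report progress, and return once every run reaches the target epoch. A minimal standalone sketch of that pattern; the helper name, checkpoint layout, and polling interval are illustrative assumptions, not this script's actual values:

import time
from collections import OrderedDict

import torch

def wait_for_checkpoints(paths, target_epoch, poll_secs=10):
    """Block until every checkpoint in `paths` reports at least `target_epoch`."""
    while True:
        progress = OrderedDict()
        for idx, path in enumerate(paths):
            try:
                ckpt = torch.load(path)  # workers overwrite this file as they train
                progress["run%d" % idx] = ckpt.get("epoch", 0)
            except Exception:
                continue  # file absent, or caught mid-write
        print(dict(progress))
        if len(progress) == len(paths) and min(progress.values()) >= target_epoch:
            return
        time.sleep(poll_secs)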
Example #2
    logger.info(json.dumps(C.get().conf, sort_keys=True, indent=4))
    logger.info('initialize ray...')
    C.get()['args'] = args
    # ray.init(redis_address=args.redis)
    ray.init(num_gpus=2)

    num_result_per_cv = args.topk
    cv_num = args.num_cv
    copied_c = copy.deepcopy(C.get().conf)

    logger.info('search augmentation policies, dataset=%s model=%s' %
                (C.get()['dataset'], C.get()['model']['type']))
    logger.info(
        '----- Train without Augmentations cv=%d ratio(test)=%.1f -----' %
        (cv_num, args.cv_ratio))
    w.start(tag='train_no_aug')
    if args.ideal_dc:
        paths = [
            _get_path(
                C.get()['dataset'],
                C.get()['model']['type'],
                'ratio{:.1f}_fold{}_{}2{}_op{}_ncv{}_npy{}_ideal_dc'.format(
                    args.cv_ratio, i,
                    C.get()['source'],
                    C.get()['target'], args.num_op, args.num_cv,
                    args.num_policy)) for i in range(cv_num)
        ]
    else:
        paths = [
            _get_path(
                C.get()['dataset'],
Example #3
    logger.info('initialize ray...')
    if args.remote:
        ray.init(redis_address=args.redis)  # start distributed Ray

    num_result_per_cv = 10  # ? probably related to cross-validation
    #cv_num = 5 # ? probably related to cross-validation
    cv_num = 1  # ! temp change
    copied_c = copy.deepcopy(C.get().conf)  # copy the config

    logger.info('search augmentation policies, dataset=%s model=%s' %
                (C.get()['dataset'], C.get()['model']['type']))
    logger.info(
        '----- Train without Augmentations cv=%d ratio(test)=%.1f -----' %
        (cv_num, args.cv_ratio))
    w.start(tag='train_no_aug')  # start the stopwatch for no-aug training
    paths = [
        _get_path(C.get()['dataset'],
                  C.get()['model']['type'],
                  'ratio%.1f_fold%d' % (args.cv_ratio, i))
        for i in range(cv_num)
    ]  # save paths for the 5 models
    print(paths)
    # train the no_aug models
    # the tasks launched below are collected at ray.get(reqs);
    # if save_path is valid, the worker loads from it, trains, and saves the
    # final model back to save_path

    if args.remote:
        reqs = [
            train_model.remote(copy.deepcopy(copied_c),
Example #4
    time_ = datetime.datetime.now()
    best_val_top1 = 0

    dataiter = iter(tr_loader)
    num_steps = 100000 // C.get()['batch']

    from pystopwatch2 import PyStopwatch

    for epoch in range(C.get()['epochs']):
        w = PyStopwatch()
        metrics = Accumulator()
        scheduler.step()
        model.train()
        cnt = 0
        for iter_ in range(num_steps):
            w.start(tag='step1')
            try:
                _, x, label = next(dataiter)
            except StopIteration:
                dataiter = iter(tr_loader)  # re-create the iterator once the loader is exhausted
                _, x, label = next(dataiter)
            if cuda:
                x, label = x.cuda(), label.cuda()

            w.pause(tag='step1')
            cutmix = C.get().conf.get('cutmix', defaultdict(lambda: 0.))
            cutmix_alpha = cutmix['alpha']
            cutmix_prob = cutmix['prob']

            if cutmix_alpha <= 0.0 or np.random.rand(1) > cutmix_prob:
                pred = model(x)
                loss = loss_fn(pred, label)
            else:
                # CutMix : generate mixed sample
                lam = np.random.beta(cutmix_alpha, cutmix_alpha)
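
The snippet cuts off at the sample-mixing step. A sketch of how CutMix mixing typically continues, following the widely used reference implementation; rand_bbox and the variable wiring are an assumption about the truncated branch, not code recovered from this snippet:

import numpy as np
import torch

def rand_bbox(size, lam):
    # sample a random box whose area is roughly (1 - lam) of the image
    W, H = size[2], size[3]
    cut_rat = np.sqrt(1.0 - lam)
    cut_w, cut_h = int(W * cut_rat), int(H * cut_rat)
    cx, cy = np.random.randint(W), np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2

# continuing the else-branch above, after lam = np.random.beta(...):
rand_index = torch.randperm(x.size(0), device=x.device)
target_a, target_b = label, label[rand_index]
bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)
x[:, :, bbx1:bbx2, bby1:bby2] = x[rand_index, :, bbx1:bbx2, bby1:bby2]
# adjust lam to the exact pasted area
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size(-1) * x.size(-2)))
pred = model(x)
loss = lam * loss_fn(pred, target_a) + (1.0 - lam) * loss_fn(pred, target_b)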
Example #5
    num_result_per_cv = args.rpc
    gr_num = args.gr_num
    cv_num = args.cv_num
    C.get()["cv_num"] = cv_num
    ori_aug = C.get()["aug"]
    if 'test_dataset' not in C.get().conf:
        C.get()['test_dataset'] = C.get()['dataset']
    copied_c = copy.deepcopy(C.get().conf)

    logger.info('search augmentation policies, dataset=%s model=%s' %
                (C.get()['dataset'], C.get()['model']['type']))
    logger.info(
        '----- Train without Augmentations cv=%d ratio(test)=%.1f -----' %
        (cv_num, args.cv_ratio))
    w.start(tag='train_no_aug')
    paths = [
        _get_path(C.get()['dataset'],
                  C.get()['model']['type'],
                  '%s_ratio%.1f_fold%d' % (args.childaug, args.cv_ratio, i))
        for i in range(cv_num)
    ]
    print(paths)
    reqs = [
        train_model.remote(copy.deepcopy(copied_c),
                           None,
                           args.dataroot,
                           args.childaug,
                           args.cv_ratio,
                           i,
                           save_path=paths[i],