def main():
    dataset = data.fetch_dataset(args, verbose=False)

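    # Build the encoder, spread it across all visible GPUs, and let cuDNN autotune convolution kernels.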
    net = model.Encoder(args)
    net = torch.nn.DataParallel(net).cuda()
    torch.backends.cudnn.benchmark = True

    # Adastand optimizer, which this project favors over Adam and SGD
    optimizer = Adastand(net.parameters(), lr=args.learning_rate,
                         weight_decay=args.weight_decay,)

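    # Training loop: record each epoch's loss, checkpoint every 100 epochs, and plot the curves after epoch 5.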
    TrainLoss = []
    for epoch in range(args.epoch_num):
        epoch_loss = fit(args, net, dataset, optimizer, is_train=True)
        TrainLoss.append(epoch_loss)
        if (epoch + 1) % 100 == 0:
            util.save_model(args, args.curr_epoch, net.state_dict(), prefix=args.model_prefix,
                            keep_latest=25)
        if epoch >= 5:
            # Reshape per-epoch losses into plot-ready lines: (loss type, subset, epoch)
            plot_loss = torch.stack(TrainLoss, dim=0).view(-1, 3, 3).permute(2, 1, 0).numpy()
            #vb.plot_curves(plot_loss, ["L1_Loss", "L2_Loss", "KL_Div_Loss"],
                           #args.loss_log, dt + "_loss", window=5, title=args.model_prefix)
            vb.plot_multi_loss_distribution(plot_loss, [["Definite_S", "Challenge_S", "Definite_D"] for _ in range(3)],
                                            save_path=args.loss_log, name=dt + "_loss",
                                            window=5, fig_size=(15, 15), grid=True,
                                            titles=["L1_Loss", "L2_Loss", "KL_Div_Loss"],)
                                            #bound=[{"low": 0, "high": 1} for _ in range(3)])
        args.curr_epoch += 1
Example #2
def main():
    aug = aug_aocr(args)
    datasets = data.fetch_data(args, args.datasets, batch_size=args.batch_size_per_gpu,
                               batch_size_val=args.batch_size_per_gpu_val, k_fold=1, split_val=0.1,
                               pre_process=None, aug=aug)

    for idx, (train_set, val_set) in enumerate(datasets):
        losses = []
        lev_dises, str_accus = [], []
        print("\n =============== Cross Validation: %s/%s ================ " %
                  (idx + 1, len(datasets)))
        # Prepare Network
        encoder = att_model.Attn_CNN(backbone_require_grad=True)
        decoder = att_model.AttnDecoder(args)
        encoder.apply(init.init_rnn).apply(init.init_others)
        decoder.apply(init.init_rnn).apply(init.init_others)
        criterion = nn.NLLLoss()
        encoder = torch.nn.DataParallel(encoder).cuda()
        decoder = torch.nn.DataParallel(decoder).cuda()
        torch.backends.cudnn.benchmark = True
        if args.finetune:
            encoder, decoder = util.load_latest_model(args, [encoder, decoder],
                                                      prefix=["encoder", "decoder"], strict=False)
        
        # Prepare loss function and optimizer
        encoder_optimizer = AdaBound(encoder.parameters(), lr=args.learning_rate,
                                     final_lr=args.learning_rate * 10, weight_decay=args.weight_decay)
        decoder_optimizer = AdaBound(decoder.parameters(), lr=args.learning_rate,
                                     final_lr=args.learning_rate * 10, weight_decay=args.weight_decay)

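        # Epoch loop: train, validate when a split exists, checkpoint every 5 epochs, and update the plots.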
        for epoch in range(args.epoch_num):
            loss = fit(args, encoder, decoder, train_set, encoder_optimizer,
                       decoder_optimizer, criterion, is_train=True)
            losses.append(loss)
            train_losses = [np.asarray(losses)]
            if val_set is not None:
                lev_dis, str_accu = fit(args, encoder, decoder, val_set, encoder_optimizer,
                                        decoder_optimizer, criterion, is_train=False)
                lev_dises.append(lev_dis)
                str_accus.append(str_accu)
                val_scores = [np.asarray(lev_dises), np.asarray(str_accus)]
            if epoch % 5 == 0:
                util.save_model(args, args.curr_epoch, encoder.state_dict(), prefix="encoder",
                                keep_latest=20)
                util.save_model(args, args.curr_epoch, decoder.state_dict(), prefix="decoder",
                                keep_latest=20)
            if epoch > 4 and val_set is not None:
                vb.plot_multi_loss_distribution(
                    multi_line_data=[train_losses, val_scores],
                    multi_line_labels=[["NLL Loss"], ["Levenshtein", "String-Level"]],
                    save_path=args.loss_log, window=5, name=dt,
                    bound=[None, {"low": 0.0, "high": 100.0}],
                    titles=["Train Loss", "Validation Score"]
                )
Example #3
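# Weight-init hook used via net.apply(init_weight): Xavier-initializes Conv2d weights.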
def init_weight(m):
    if type(m) == nn.Conv2d:
        torch.nn.init.xavier_normal_(m.weight)
        #torch.nn.init.kaiming_uniform_(m.bias)

if __name__ == "__main__":
    args = util.get_args(presets.PRESET)
    with torch.cuda.device(2):
        net = model.CifarNet_Vanilla()
        if args.finetune:
            net = util.load_latest_model(args, net)
        else:
            #net.apply(init_weight)
            keras_model = get_keras_model()
            model_path = os.path.join(os.getcwd(), 'test', 'models', "cifar10_cnn.h5")
            net = weight_transfer.initialize_with_keras_hdf5(keras_model, map_dict, net, model_path)
            omth_util.save_model(args, args.curr_epoch, net.state_dict())
        #net.to(args.device)
        net.cuda()
        #summary(net, input_size=(3, 32, 32), device=device)

        #train_set = fetch_data(args, [("data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4", "data_batch_5")])
        #test_set = fetch_data(args, ["test_batch"])

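        # Standard CIFAR-10 preprocessing: convert to tensor, then normalize each channel from [0, 1] to [-1, 1].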
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

        trainset = torchvision.datasets.CIFAR10(root=os.path.expanduser("~/Pictures/dataset/cifar-10/"),
                                                train=True, download=True, transform=transform)
        train_set = torch.utils.data.DataLoader(trainset, batch_size=256)  # call truncated in the source; remaining kwargs omitted
Example #4
def main():
    aug = aug_temp(args)
    dt = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M")
    datasets = data.fetch_detection_data(args,
                                         sources=args.train_sources,
                                         k_fold=1,
                                         batch_size=args.batch_size_per_gpu,
                                         batch_size_val=1,
                                         auxiliary_info=args.train_aux,
                                         split_val=0.1,
                                         aug=aug)
    for idx, (train_set, val_set) in enumerate(datasets):
        loc_loss, conf_loss = [], []
        accuracy, precision, recall, f1_score = [], [], [], []
        print("\n =============== Cross Validation: %s/%s ================ " %
              (idx + 1, len(datasets)))
        net = model.SSD(cfg,
                        connect_loc_to_conf=args.loc_to_conf,
                        fix_size=args.fix_size,
                        conf_incep=args.conf_incep,
                        loc_incep=args.loc_incep,
                        nms_thres=args.nms_threshold,
                        loc_preconv=args.loc_preconv,
                        conf_preconv=args.conf_preconv,
                        FPN=args.feature_pyramid_net,
                        SA=args.self_attention,
                        in_wid=args.inner_filters,
                        m_factor=args.inner_m_factor)
        net = torch.nn.DataParallel(net,
                                    device_ids=args.gpu_id,
                                    output_device=args.output_gpu_id).cuda()
        detector = model.Detect(num_classes=2,
                                bkg_label=0,
                                top_k=800,
                                conf_thresh=0.05,
                                nms_thresh=0.3)
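        # Post-processing head: keep the top 800 boxes above confidence 0.05, then apply NMS at IoU 0.3.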
        #detector = None
        # Input dimension of bbox is different in each step
        torch.backends.cudnn.benchmark = True
        if args.fix_size:
            net.module.prior = net.module.prior.cuda()
        if args.finetune:
            net = util.load_latest_model(args,
                                         net,
                                         prefix=args.model_prefix_finetune,
                                         strict=True)
        # AdaBound optimizer, which this project favors over Adam and SGD
        optimizer = AdaBound(
            net.parameters(),
            lr=args.learning_rate,
            final_lr=20 * args.learning_rate,
            weight_decay=args.weight_decay,
        )

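        # Alternate training and validation passes, checkpointing every 10 epochs and replotting after epoch 5.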
        for epoch in range(args.epoch_num):
            loc_avg, conf_avg = fit(args,
                                    cfg,
                                    net,
                                    detector,
                                    train_set,
                                    optimizer,
                                    is_train=True)
            loc_loss.append(loc_avg)
            conf_loss.append(conf_avg)
            train_losses = [np.asarray(loc_loss), np.asarray(conf_loss)]
            if val_set is not None:
                accu, pre, rec, f1 = fit(args,
                                         cfg,
                                         net,
                                         detector,
                                         val_set,
                                         optimizer,
                                         is_train=False)
                accuracy.append(accu)
                precision.append(pre)
                recall.append(rec)
                f1_score.append(f1)
                val_losses = [
                    np.asarray(accuracy),
                    np.asarray(precision),
                    np.asarray(recall),
                    np.asarray(f1_score)
                ]
            if epoch != 0 and epoch % 10 == 0:
                util.save_model(args,
                                args.curr_epoch,
                                net.state_dict(),
                                prefix=args.model_prefix,
                                keep_latest=3)
            if epoch > 5 and val_set is not None:
                vb.plot_multi_loss_distribution(
                    multi_line_data=[train_losses, val_losses],
                    multi_line_labels=[["location", "confidence"],
                                       [
                                           "Accuracy", "Precision", "Recall",
                                           "F1-Score"
                                       ]],
                    save_path=args.loss_log,
                    window=5,
                    name=dt,
                    bound=[{
                        "low": 0.0,
                        "high": 3.0
                    }, {
                        "low": 0.0,
                        "high": 1.0
                    }],
                    titles=["Train Loss", "Validation Score"])
        # Clean the data for next cross validation
        del net, optimizer
        args.curr_epoch = 0
Example #5
def main():
    dt = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M")
    datasets = data.fetch_probaV_data(args,
                                      sources=args.train_sources,
                                      k_fold=args.cross_val,
                                      split_val=0.1,
                                      batch_size=args.batch_size_per_gpu,
                                      auxiliary_info=[2, 2])
    for idx, (train_set, val_set) in enumerate(datasets):
        Loss, Measure = [], []
        val_Loss, val_Measure = [], []
        print("\n =============== Cross Validation: %s/%s ================ " %
              (idx + 1, len(datasets)))
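        # Pick the super-resolution backbone requested via args.which_model (-wm).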
        if args.which_model.lower() == "carn":
            net = model.CARN(args.n_selected_img,
                             args.filters,
                             3,
                             s_MSE=args.s_MSE,
                             trellis=args.trellis)
        elif args.which_model.lower() == "rdn":
            net = model.RDN(args.n_selected_img,
                            3,
                            3,
                            filters=args.filters,
                            s_MSE=args.s_MSE,
                            group=args.n_selected_img,
                            trellis=args.trellis)
        elif args.which_model.lower() == "meta_rdn":
            net = RDN_Meta(args.n_selected_img,
                           filters=args.filters,
                           scale=3,
                           s_MSE=args.s_MSE,
                           group=args.n_selected_img,
                           trellis=args.trellis)
        elif args.which_model.lower() == "basic":
            net = model.ProbaV_basic(inchannel=args.n_selected_img)
        else:
            print(
                "args.which_model or -wm should be one of [carn, rdn, meta_rdn, basic]; "
                "got '%s', falling back to 'basic' automatically"
                % (args.which_model.lower()))
            net = model.ProbaV_basic(inchannel=args.n_selected_img)
        net.apply(init_cnn)
        if args.half_precision:
            net.half()
        net = torch.nn.DataParallel(net,
                                    device_ids=args.gpu_id,
                                    output_device=args.output_gpu_id).cuda()
        torch.backends.cudnn.benchmark = True
        if args.finetune:
            net = util.load_latest_model(args,
                                         net,
                                         prefix=args.model_prefix_finetune,
                                         strict=True)
        optimizer = AdaBound(net.parameters(),
                             lr=args.learning_rate,
                             final_lr=10 * args.learning_rate,
                             weight_decay=args.weight_decay)
        #criterion = ListedLoss(type="l1", reduction="mean")
        #criterion = torch.nn.DataParallel(criterion, device_ids=args.gpu_id, output_device=args.output_gpu_id).cuda()
        measure = MultiMeasure(type="l2",
                               reduction="mean",
                               half_precision=args.half_precision)
        #measure = torch.nn.DataParallel(measure, device_ids=args.gpu_id, output_device=args.output_gpu_id).cuda()
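        # Main loop: fit on the training split, score the validation split, and checkpoint every 10 epochs.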
        for epoch in range(args.epoch_num):
            _l, _m = fit(args,
                         net,
                         train_set,
                         optimizer,
                         measure,
                         is_train=True)
            Loss.append(_l)
            Measure.append(_m)
            if val_set is not None:
                _vl, _vm = val(args, net, val_set, optimizer, measure)
                val_Loss.append(_vl)
                val_Measure.append(_vm)

            if (epoch + 1) % 10 == 0:
                util.save_model(args,
                                args.curr_epoch,
                                net.state_dict(),
                                prefix=args.model_prefix,
                                keep_latest=10)
            if (epoch + 1) > 5 and val_set is not None:
                vb.plot_multi_loss_distribution(
                    multi_line_data=[
                        to_array(Loss) + to_array(val_Loss),
                        to_array(Measure) + to_array(val_Measure)
                    ],
                    multi_line_labels=[[
                        "train_mae", "train_smse", "val_mae", "val_smse"
                    ], [
                        "train_PSNR",
                        "train_L1",
                        "val_PSNR",
                        "val_L1",
                    ]],
                    save_path=args.loss_log,
                    window=3,
                    name=dt + "cv_%d" % (idx + 1),
                    bound=[{
                        "low": 0.0,
                        "high": 15
                    }, {
                        "low": 10,
                        "high": 50
                    }],
                    titles=["Loss", "Measure"])
        # Clean the data for next cross validation
        del net, optimizer, measure
        args.curr_epoch = 0
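
Note: to_array is not defined in this snippet. A minimal sketch consistent with its usage above, assuming each epoch appends a tuple of metrics such as (mae, smse), so that + concatenates the train and validation lines for plotting; the actual helper in the source may differ:

import numpy as np

def to_array(history):
    # Hypothetical helper: transpose a per-epoch list of metric tuples,
    # e.g. [(mae, smse), ...], into one NumPy array per metric line.
    return [np.asarray(line) for line in zip(*history)]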