Example #1
0
def train(steps=400, evaluate_size=None):
    # Run on the last visible GPU and build the training options.
    func.use_last_gpu()
    opt = make_options()
    model, optimizer, feeder, ckpt = models.load_or_create_models(opt, True)
    autodecay = optimization.AutoDecay(optimizer, patience=30, max_lr=opt.learning_rate)
    log = Logger(opt)
    # When resuming from a checkpoint, measure its accuracy first so that
    # only genuine improvements get saved later.
    if ckpt is not None:
        _, last_accuracy = evaluate.evaluate_accuracy(model, feeder.dataset, batch_size=opt.batch_size, char_limit=opt.char_limit, size=evaluate_size)
    else:
        last_accuracy = 0
    while True:
        # Train for a fixed number of steps, then validate.
        run_epoch(opt, model, feeder, optimizer, steps)
        em, accuracy = evaluate.evaluate_accuracy(model, feeder.dataset, batch_size=opt.validate_batch_size, char_limit=opt.char_limit, size=evaluate_size)
        if accuracy > last_accuracy:
            # Improvement: checkpoint the model and reset the decay patience.
            models.save_models(opt, model, optimizer, feeder)
            last_accuracy = accuracy
            autodecay.better()
            log('MODEL SAVED WITH ACCURACY EM:{:.2f}, F1:{:.2f}.'.format(em, accuracy))
        else:
            # No improvement: decay the learning rate, and roll back to the best
            # checkpoint once the decay schedule is exhausted.
            autodecay.worse()
            if autodecay.should_stop():
                models.restore(opt, model, optimizer, feeder)
                autodecay = optimization.AutoDecay(optimizer, max_lr=opt.learning_rate)
                log(f'MODEL RESTORED {accuracy:.2f}/{last_accuracy:.2f}, decay = {autodecay.decay_counter}, lr = {autodecay.learning_rate:.6f}.')
            else:
                log(f'CONTINUE TRAINING {accuracy:.2f}/{last_accuracy:.2f}, decay = {autodecay.decay_counter}, lr = {autodecay.learning_rate:.6f}.')
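The loop above interacts with optimization.AutoDecay only through better(), worse(), should_stop(), decay_counter, and learning_rate. A minimal sketch of such a patience-based decay helper, assuming a PyTorch-style optimizer exposing param_groups; the decay factor and the stop criterion are assumptions, not the project's actual values:

class AutoDecaySketch:
    """Illustrative patience-based LR decay; not the project's AutoDecay."""

    def __init__(self, optimizer, patience=30, max_lr=1e-3, factor=0.5, max_decays=5):
        self.optimizer = optimizer
        self.patience = patience        # failed validations tolerated before decaying
        self.max_lr = max_lr            # never raise the LR above this ceiling
        self.factor = factor            # assumed multiplicative decay factor
        self.max_decays = max_decays    # assumed number of decays before should_stop()
        self.bad_rounds = 0
        self.decay_counter = 0

    @property
    def learning_rate(self):
        # Assumes a PyTorch optimizer with param_groups.
        return self.optimizer.param_groups[0]['lr']

    def better(self):
        # Reset patience after an improved validation score.
        self.bad_rounds = 0

    def worse(self):
        # Count a failed validation; decay the LR once patience runs out.
        self.bad_rounds += 1
        if self.bad_rounds >= self.patience:
            self.bad_rounds = 0
            self.decay_counter += 1
            for group in self.optimizer.param_groups:
                group['lr'] = min(self.max_lr, group['lr'] * self.factor)

    def should_stop(self):
        # Signal the caller to restore the best checkpoint and start over.
        return self.decay_counter >= self.max_decays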
Example #2
0
def main():
    # loading data
    train_input, train_target, train_class, test_input, test_target, test_class = prologue.generate_pair_sets(
        1000)

    # To train the optimized CNN model instead, uncomment this block
    # and comment out the currently active block below.
    '''
    temp_train_input,temp_train_target,temp_train_class=init.data_init(train_input,train_target, train_class, num=1000)
    model_cnn=model.ConvNet_2(bn=True)
    model_cnn.apply(init.weights_init)
    model_cnn,train_loss_cnn,train_accuracy_cnn, test_loss_cnn,test_accuracy_cnn=\
           train.train_model(model_cnn,\
                       temp_train_input,temp_train_target,\
                       test_input, test_target,\
                       if_print=True, epochs=45,
                       optim='Adam', learning_rate=0.01)
    evaluate.evaluate_result(train_loss_cnn,train_accuracy_cnn, test_loss_cnn,test_accuracy_cnn, \
               'Learning Curve of Optimized CNN')
    '''

    # To train the model with weight sharing and an auxiliary loss instead,
    # uncomment this block and comment out the currently active block below.
    '''
    temp_train_input,temp_train_target,temp_train_class=init.data_init(train_input,train_target, train_class, num=1000)
    model_ws_al=model.ConvNet_WS()
    model_ws_al.apply(init.weights_init)
    model_ws_al,train_loss_ws_al,train_accuracy_ws_al, test_loss_ws_al,test_accuracy_ws_al=\
                                  train.train_model_WS(model_ws_al, \
                                                       temp_train_input,temp_train_target,temp_train_class,\
                                                       test_input, test_target, test_class,\
                                                       optim='Adam', decay=True,
                                                       if_auxiliary_loss=True,epochs=32,if_print=True,
                                                       auxiliary_loss_ratio=5)
    evaluate.evaluate_result(train_loss_ws_al,train_accuracy_ws_al, test_loss_ws_al,test_accuracy_ws_al,\
                'Learning Curve of Second ConvNet with Weight Sharing and Auxiliary Loss')
    '''
    # training data random initialization
    temp_train_input, temp_train_target, temp_train_class = init.data_init(
        train_input, train_target, train_class, num=1000)
    # import the model
    model_digit = model.CNN_digit()
    # model weight initialization
    model_digit.apply(init.weights_init)
    # get the training history of the model with input hyperparameters
    _,_,_,train_accuracy_from_digit,_,_,test_accuracy_from_digit=train.train_by_digit(model_digit,\
                                                                                      temp_train_input,temp_train_target,temp_train_class, \
                                                                                      test_input, test_target, test_class, \
                                                                                      if_print=True, \
                                                                                      epochs=25,optim='Adam',learning_rate=0.01)
    # plot the learning curves and print the accuracy on testing set
    evaluate.evaluate_accuracy(train_accuracy_from_digit,test_accuracy_from_digit,\
                               'Accuracy of Boolean Classification from CNN Trained Directly on Digit Class')
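Since the three variants above differ only in the model constructor and training routine, the comment toggling could be replaced by a small dispatcher. This is a sketch, not part of the original code; run_variant is a hypothetical name, and every call is copied verbatim from the blocks above (each variant returns whatever tuple its training function returns):

def run_variant(name, train_input, train_target, train_class,
                test_input, test_target, test_class):
    # Shared random re-initialization of the training data, as in every block above.
    temp_in, temp_tgt, temp_cls = init.data_init(train_input, train_target, train_class, num=1000)
    if name == 'cnn':
        net = model.ConvNet_2(bn=True)
        net.apply(init.weights_init)
        return train.train_model(net, temp_in, temp_tgt, test_input, test_target,
                                 if_print=True, epochs=45, optim='Adam', learning_rate=0.01)
    if name == 'ws_al':
        net = model.ConvNet_WS()
        net.apply(init.weights_init)
        return train.train_model_WS(net, temp_in, temp_tgt, temp_cls,
                                    test_input, test_target, test_class,
                                    optim='Adam', decay=True, if_auxiliary_loss=True,
                                    epochs=32, if_print=True, auxiliary_loss_ratio=5)
    if name == 'digit':
        net = model.CNN_digit()
        net.apply(init.weights_init)
        return train.train_by_digit(net, temp_in, temp_tgt, temp_cls,
                                    test_input, test_target, test_class,
                                    if_print=True, epochs=25, optim='Adam', learning_rate=0.01)
    raise ValueError('unknown variant: %s' % name)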
Example #3
0
def main():

    current_th = min_th
    rate_list_auto = []
    interval_list = []
    rate_list_lin = []

    while current_th < max_th:
        # Report the threshold being evaluated before processing it.
        print("Evaluating for TH = ", current_th)

        loader = data_loader.DataLoader(validation_folder)
        # Per-threshold accumulators for IoU scores and estimated intervals.
        all_ious = []
        all_intervals = []
        while True:
            imgs, gts = loader.get_next()

            # Stop once the loader has no more clips for this threshold.
            if imgs is None:
                break
            # Do the auto tracking
            pred_auto = methods.auto_select(imgs, gts, stride=current_th)
            iou, est_interval = evaluate.evaluate_estimation_iou(
                pred_auto, gts)
            # evaluate the system
            all_ious += iou
            all_intervals.append(est_interval)

            rate_list_auto.append(evaluate.evaluate_accuracy(iou, accuracy_th))

            interval_list.append(1. / est_interval)

            pred_lin = methods.linear_annotation(imgs, gts, stride=current_th)
            iou, est_interval = evaluate.evaluate_estimation_iou(pred_lin, gts)

            rate_list_lin.append(evaluate.evaluate_accuracy(iou, accuracy_th))

            print("Processed data point - ", len(rate_list_auto))

            visualize.visualize_video(imgs, pred_lin, pred_auto, gts)
        current_th += inter_th

    pickle.dump([rate_list_lin, rate_list_auto, interval_list],
                open("save2f.p", "wb"))
Example #4
0
def train(auto_stop, steps=200, evaluate_size=500):
    opt = make_options()
    generator, discriminator, g_optimizer, d_optimizer, feeder, ckpt = models.load_or_create_models(
        opt, True)
    if ckpt is not None:
        last_accuracy = evaluate.evaluate_accuracy(generator,
                                                   feeder.dataset,
                                                   size=evaluate_size)
    else:
        last_accuracy = 0
    while True:
        if opt.using_gan == 1:
            # Adversarial training: spend 1/10 of the steps on the discriminator,
            # then replay the same feeder state and spend the remaining 9/10 on
            # the generator.
            mini_steps = steps // 10
            state = feeder.state()
            run_gan_epoch(opt, generator, discriminator, feeder, d_optimizer,
                          mini_steps, 'discriminator')
            feeder.load_state(state)
            run_gan_epoch(opt, generator, discriminator, feeder, g_optimizer,
                          mini_steps * 9, 'generator')
        else:
            run_epoch(opt, generator, feeder, g_optimizer, steps)
        accuracy = evaluate.evaluate_accuracy(generator,
                                              feeder.dataset,
                                              size=evaluate_size)
        if accuracy > last_accuracy:
            utils.mkdir(config.checkpoint_folder)
            models.save_models(opt, generator, discriminator, g_optimizer,
                               d_optimizer, feeder)
            last_accuracy = accuracy
            print('MODEL SAVED WITH ACCURACY {:.2f}.'.format(accuracy))
        else:
            # No improvement: with probability 1/5 roll back to the best saved
            # checkpoint, otherwise keep training from the current weights.
            if random.randint(0, 4) == 0:
                models.restore(generator, discriminator, g_optimizer,
                               d_optimizer)
                print('MODEL RESTORED {:.2f}/{:.2f}.'.format(
                    accuracy, last_accuracy))
            else:
                print('CONTINUE TRAINING {:.2f}/{:.2f}.'.format(
                    accuracy, last_accuracy))
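A minimal entry-point sketch for this trainer, assuming the module is run directly; the argument values simply repeat the defaults above. With steps=200 and opt.using_gan == 1, each outer iteration spends 20 steps on the discriminator and 180 on the generator:

if __name__ == '__main__':
    # auto_stop is accepted by train(), but the loop above runs until interrupted.
    train(auto_stop=False, steps=200, evaluate_size=500)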
Example #5
0
def eval_epoch(args, logger, writer, model, data_type, data_loader, device,
               epoch):
    model.eval()
    epoch_step = len(data_loader)
    total_step = args.epochs * epoch_step
    total_cnt = 0
    total_ce = 0.0
    total_mlce = 0.0
    total_loss = 0.0

    # Collect per-example records and errors plus epoch-level metrics.
    results = {
        "data": {
            "id": list(),
            "relation": list(),
            "prefered_relation": list()
        },
        "prediction": {
            "prob": list(),
            "pred": list()
        },
        "error": {
            "ce": list(),
            "mlce": list(),
            "mean_ce": INF,
            "mean_mlce": INF
        },
        "evaluation": {
            "accuracy": dict(),
            "precision_recall_f1": dict()
        }
    }

    with torch.no_grad():
        for batch_id, batch in enumerate(data_loader):
            step = epoch * epoch_step + batch_id
            _id, arg1, arg1_mask, arg2, arg2_mask, relation, prefered_relation = batch
            # Recompute the preferred relation from the soft labels: class 1 is
            # preferred whenever its probability mass reaches 0.5.
            prefered_relation = (relation[:, 1] >= 0.5).long()
            bsz = len(_id)
            total_cnt += bsz

            results["data"]["id"].extend(_id)
            results["data"]["relation"].extend(relation)
            results["data"]["prefered_relation"].extend(prefered_relation)

            arg1 = arg1.to(device)
            arg2 = arg2.to(device)
            if arg1_mask is not None:
                arg1_mask = arg1_mask.to(device)
            if arg2_mask is not None:
                arg2_mask = arg2_mask.to(device)
            relation = relation.to(device)
            prefered_relation = prefered_relation.to(device)

            output = model(arg1, arg2, arg1_mask, arg2_mask)
            logp = F.log_softmax(output, dim=-1)
            prob = logp.exp()

            results["prediction"]["prob"].extend(prob.cpu().detach())
            results["prediction"]["pred"].extend(
                prob.cpu().argmax(dim=1).detach())

            ce = F.nll_loss(logp, prefered_relation, reduction="none")
            mlce = F.multilabel_soft_margin_loss(output,
                                                 relation,
                                                 reduction="none")

            results["error"]["ce"].extend(ce.cpu().detach())
            results["error"]["mlce"].extend(mlce.cpu().detach())

            if args.loss == "ce":
                loss = ce
            elif args.loss == "mlce":
                loss = mlce
            else:
                raise NotImplementedError(
                    "Error: loss=%s is not supported now." % (args.loss))

            avg_ce = ce.mean()
            avg_mlce = mlce.mean()
            avg_loss = loss.mean()

            total_ce += avg_ce.item() * bsz
            total_mlce += avg_mlce.item() * bsz
            total_loss += avg_loss.item() * bsz

            if writer:
                writer.add_scalar("%s/pdtb-loss" % (data_type),
                                  avg_loss.item(), step)
                writer.add_scalar("%s/pdtb-ce" % (data_type), avg_ce.item(),
                                  step)
                writer.add_scalar("%s/pdtb-mlce" % (data_type),
                                  avg_mlce.item(), step)
            if logger and batch_id == epoch_step - 1:
                logger.info(
                    "epoch: {:0>3d}/{:0>3d}\tdata_type: {:<5s}\tbatch: {:0>5d}/{:0>5d}"
                    .format(epoch, args.epochs, data_type, batch_id,
                            epoch_step) + "\n" +
                    "\tpdtb-loss: {:10.4f}\tpdtb-ce: {:10.4f}\tpdtb-mlce: {:10.4f}"
                    .format(avg_loss.item(), avg_ce.item(), avg_mlce.item()) +
                    "\n" +
                    "\tpdtb-gold: {}".format(results["data"]["relation"][-1]) +
                    "\n" +
                    "\tpdtb-pred: {}".format(results["prediction"]["prob"][-1])
                )

        mean_ce = total_ce / (total_cnt + 1e-6)
        mean_mlce = total_mlce / (total_cnt + 1e-6)
        mean_loss = total_loss / (total_cnt + 1e-6)

        pred = np.array(results["prediction"]["pred"])
        target = torch.cat(results["data"]["relation"],
                           dim=0).view(total_cnt, -1).int().numpy()
        prefered_target = np.array(results["data"]["prefered_relation"])

        results["error"]["mean_ce"] = mean_ce
        results["error"]["mean_mlce"] = mean_mlce
        results["evaluation"]["accuracy"] = evaluate_accuracy(
            pred, target, prefered_target)
        results["evaluation"][
            "precision_recall_f1"] = evaluate_precision_recall_f1(
                pred, target, prefered_target, "binary")

        if writer:
            writer.add_scalar("%s/pdtb-loss-epoch" % (data_type), mean_loss,
                              epoch)
            writer.add_scalar("%s/pdtb-ce-epoch" % (data_type), mean_ce, epoch)
            writer.add_scalar("%s/pdtb-mlce-epoch" % (data_type), mean_mlce,
                              epoch)

        if logger:
            logger.info(
                "epoch: {:0>3d}/{:0>3d}\tdata_type: {:<5s}".format(
                    epoch, args.epochs, data_type) + "\n" +
                "\tpdtb-loss-epoch: {:10.4f}\tpdtb-ce-epoch: {:10.4f}\tpdtb-mlce-epoch: {:10.4f}"
                .format(mean_loss, mean_ce, mean_mlce) + "\n" +
                "\tpdtb-accuray: {}".format(
                    pprint.pformat(results["evaluation"]["accuracy"]).replace(
                        "\n", "\n\t\t")) + "\n" +
                "\tpdtb-precision_recall_f1: {}".format(
                    pprint.pformat(results["evaluation"]["precision_recall_f1"]
                                   ).replace("\n", "\n\t\t")))
    gc.collect()
    return mean_loss, results
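A hedged sketch of how eval_epoch might be driven from a training script; train_epoch, train_loader, and valid_loader are hypothetical names, while the call signature and the (mean_loss, results) return value come from the function above:

best_loss = float("inf")
for epoch in range(args.epochs):
    # Hypothetical training pass; only eval_epoch is defined above.
    train_epoch(args, logger, writer, model, optimizer, train_loader, device, epoch)
    valid_loss, valid_results = eval_epoch(
        args, logger, writer, model, "valid", valid_loader, device, epoch)
    # Keep the checkpoint with the lowest validation loss.
    if valid_loss < best_loss:
        best_loss = valid_loss
        torch.save(model.state_dict(), "best_model.pt")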