Code example #1
def build_model(args, i_cv):
    args.net = ARCHI(n_in=29, n_out=2, n_unit=args.n_unit)
    args.adv_net = ARCHI(n_in=2, n_out=2, n_unit=args.n_unit)
    args.net_optimizer = get_optimizer(args)
    args.adv_optimizer = get_optimizer(args, args.adv_net)
    args.net_criterion = WeightedCrossEntropyLoss()
    args.adv_criterion = WeightedGaussEntropyLoss()
    model = get_model(args, PivotClassifier)
    model.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
    return model
Code example #2
def build_model(args, i_cv):
    args.net = ARCHI(n_in=29, n_out=2, n_unit=args.n_unit)
    args.optimizer = get_optimizer(args)
    model = get_model(args, Regressor)
    model.base_name = CALIB
    model.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
    return model
Code example #3
def build_model(args, i_cv):
    args.net = ARCHI(n_in=3, n_out=args.n_bins)
    args.optimizer = get_optimizer(args)
    args.criterion = S3DLoss()
    model = get_model(args, Inferno)
    model.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
    return model
Code example #4
def main():
    # BASIC SETUP
    logger = set_logger()
    args = REG_parse_args(
        main_description="Training launcher for Regressor on S3D2 benchmark")
    logger.info(args)
    flush(logger)
    # INFO
    args.net = AR5R5E(n_in=3, n_out=2, n_extra=2)
    args.optimizer = get_optimizer(args)
    model = get_model(args, Regressor)
    model.set_info(DATA_NAME, BENCHMARK_NAME, -1)
    pb_config = S3D2Config()

    # RUN
    results = [run(args, i_cv) for i_cv in range(N_ITER)]
    results = pd.concat(results, ignore_index=True)
    results.to_csv(os.path.join(model.results_directory, 'results.csv'))
    # EVALUATION
    eval_table = evaluate_estimator(pb_config.INTEREST_PARAM_NAME, results)
    print_line()
    print_line()
    print(eval_table)
    print_line()
    print_line()
    eval_table.to_csv(os.path.join(model.results_directory, 'evaluation.csv'))
    gather_images(model.results_directory)
Code example #5
def build_model(args, i_cv):
    args.net = ARCHI(n_in=3, n_out=2, n_unit=args.n_unit)
    args.optimizer = get_optimizer(args)
    model = get_model(args, NeuralNetClassifier)
    model.base_name = "DataAugmentation"
    model.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
    return model
Code example #6
File: BiDAF.py  Project: junzhou94/CS224n-Assignment4
    def add_training_op(self, loss):
        variables = tf.trainable_variables()
        gradients = tf.gradients(loss, variables)
        gradients, _ = tf.clip_by_global_norm(gradients,
                                              self.config.max_grad_norm)

        if self.config.learning_rate_annealing:
            global_step = tf.Variable(0, trainable=False)
            learning_rate = tf.train.exponential_decay(
                self.config.learning_rate,
                global_step,
                1250,
                0.96,
                staircase=False)
        else:
            global_step = None
            learning_rate = self.config.learning_rate

        optimizer = get_optimizer(self.config.optimizer, learning_rate)
        # Pass global_step to apply_gradients so it is incremented each step;
        # the original `global_step = tf.add(1, global_step)` built an unused
        # tensor, so the decay schedule never advanced.
        train_op = optimizer.apply_gradients(zip(gradients, variables),
                                             global_step=global_step)

        # Apply an exponential moving average (EMA) to the trained parameters

        if self.config.ema_for_weights:
            ema = tf.train.ExponentialMovingAverage(0.999)
            ema_op = ema.apply(variables)

            with tf.control_dependencies([train_op]):
                train_op = tf.group(ema_op)

        return train_op
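
The EMA block above only maintains shadow copies during training; at inference time one usually restores the averaged weights instead of the raw ones. Below is a minimal TF1-style sketch of that companion step, which is not part of the original project; the checkpoint path is hypothetical.

import tensorflow as tf

# Map shadow (EMA) variable names onto the model variables so that a
# Saver restores the averaged weights rather than the last raw values.
ema = tf.train.ExponentialMovingAverage(0.999)
saver = tf.train.Saver(ema.variables_to_restore())
# saver.restore(sess, "/path/to/checkpoint")  # hypothetical checkpoint path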
Code example #7
def build_model(args, i_cv):
    args.net = ARCHI(n_in=29, n_out=N_BINS, n_unit=args.n_unit)
    args.optimizer = get_optimizer(args)
    args.criterion = HiggsLoss()
    model = get_model(args, Inferno)
    model.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
    return model
Code example #8
def load_calib_rescale():
    args = lambda: None  # lambda abused as a mutable attribute container
    args.n_unit = 80
    args.optimizer_name = "adam"
    args.beta1 = 0.5
    args.beta2 = 0.9
    args.learning_rate = 1e-4
    args.n_samples = 1000
    args.n_steps = 1000
    args.batch_size = 20

    args.net = CALIB_ARCHI(n_in=1, n_out=2, n_unit=args.n_unit)
    args.optimizer = get_optimizer(args)
    model = get_model(args, Regressor)
    model.base_name = CALIB_RESCALE
    model.set_info(DATA_NAME, BENCHMARK_NAME, 0)
    model.load(model.model_path)
    return model
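
The `args = lambda: None` line in examples #8 and #9 abuses a lambda as an object that accepts attribute assignment, so that `get_optimizer(args)` can read hyper-parameters off it. A minimal sketch of the same idea using the standard library's `types.SimpleNamespace`, assuming `get_optimizer` only reads attributes such as `optimizer_name`, `beta1`, `beta2` and `learning_rate` as in the examples above:

from types import SimpleNamespace

# SimpleNamespace is the idiomatic equivalent of `args = lambda: None`:
# a plain object whose only job is to carry attributes.
args = SimpleNamespace(
    n_unit=80,
    optimizer_name="adam",
    beta1=0.5,
    beta2=0.9,
    learning_rate=1e-4,
)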
Code example #9
def load_calib_lam(DATA_NAME, BENCHMARK_NAME):
    from model.regressor import Regressor
    from archi.reducer import A1AR8MR8L1 as CALIB_ARCHI
    args = lambda: None  # lambda abused as a mutable attribute container
    args.n_unit = 200
    args.optimizer_name = "adam"
    args.beta1 = 0.5
    args.beta2 = 0.9
    args.learning_rate = 1e-4
    args.n_samples = 1000
    args.n_steps = 2000
    args.batch_size = 20

    args.net = CALIB_ARCHI(n_in=3, n_out=2, n_unit=args.n_unit)
    args.optimizer = get_optimizer(args)
    model = get_model(args, Regressor)
    model.base_name = "Calib_lam"
    model.set_info(DATA_NAME, BENCHMARK_NAME, 0)
    model.load(model.model_path)
    return model
Code example #10
def model_fn(features, labels, mode, params):
    if mode == tf.estimator.ModeKeys.TRAIN:
        params.update({"training": True})
    else:
        params.update({"training": False})

    features = get_processed_feature(features)

    output = model_fn_core(features, params)

    if (mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL):
        loss = get_loss(output, labels)
        eval_metric_ops = get_metric(output, labels)
    else:
        loss = None
        eval_metric_ops = None

    if mode == tf.estimator.ModeKeys.TRAIN:
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):  # required so batch_normalization statistics update during training
            train_op = get_optimizer(opt_algo=params["opt_algo"],
                                     learning_rate=params["learning_rate"],
                                     loss=loss,
                                     global_step=tf.train.get_global_step())
    else:
        train_op = None

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = get_predict(output)
        export_outputs = get_export_outputs(output)
    else:
        predictions = None
        export_outputs = None

    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        export_outputs=export_outputs,
        eval_metric_ops=eval_metric_ops)
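
The `model_fn` above follows the standard `tf.estimator` contract: return an `EstimatorSpec` whose fields depend on the mode. A minimal usage sketch of how such a function is typically wired up; the `params` values and the input function are hypothetical:

import tensorflow as tf

estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    params={"opt_algo": "adam", "learning_rate": 1e-3},
)
# estimator.train(input_fn=train_input_fn, steps=1000)  # train_input_fn is hypothetical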
Code example #11
File: REG.py  Project: victor-estrade/SystGradDescent
def run(args, i_cv):
    logger = logging.getLogger()
    print_line()
    logger.info('Running iter n°{}'.format(i_cv))
    print_line()

    result_row = {'i_cv': i_cv}
    result_table = []

    # LOAD/GENERATE DATA
    logger.info('Set up data generator')
    pb_config = AP1Config()
    seed = config.SEED + i_cv * 5
    train_generator = Generator(param_generator, AP1(seed))
    valid_generator = AP1(seed + 1)
    test_generator = AP1(seed + 2)

    # SET MODEL
    logger.info('Set up regressor')
    args.net = F3R3(n_in=1, n_out=2)
    args.optimizer = get_optimizer(args)
    model = get_model(args, Regressor)
    model.set_info(BENCHMARK_NAME, i_cv)
    flush(logger)

    # TRAINING / LOADING
    if not args.retrain:
        try:
            logger.info('loading from {}'.format(model.path))
            model.load(model.path)
        except Exception as e:
            logger.warning(e)
            args.retrain = True
    if args.retrain:
        logger.info('Training {}'.format(model.get_name()))
        model.fit(train_generator)
        logger.info('Training DONE')

        # SAVE MODEL
        save_model(model)

    # CHECK TRAINING
    logger.info('Plot losses')
    plot_REG_losses(model)
    plot_REG_log_mse(model)
    result_row['loss'] = model.losses[-1]
    result_row['mse_loss'] = model.mse_losses[-1]

    # MEASUREMENT
    for mu in pb_config.TRUE_APPLE_RATIO_RANGE:
        pb_config.TRUE_APPLE_RATIO = mu
        logger.info('Generate testing data')
        X_test, y_test, w_test = test_generator.generate(
            apple_ratio=pb_config.TRUE_APPLE_RATIO,
            n_samples=pb_config.N_TESTING_SAMPLES)

        pred, sigma = model.predict(X_test, w_test)
        name = pb_config.INTEREST_PARAM_NAME
        result_row[name] = pred
        result_row[name + _ERROR] = sigma
        result_row[name + _TRUTH] = pb_config.TRUE_APPLE_RATIO

        logger.info('{} =vs= {} +/- {}'.format(pb_config.TRUE_APPLE_RATIO,
                                               pred, sigma))
        result_table.append(result_row.copy())
    result_table = pd.DataFrame(result_table)

    logger.info('Plot params')
    param_names = pb_config.PARAM_NAMES
    for name in param_names:
        plot_params(name, result_table, model)

    logger.info('DONE')
    return result_table
Code example #12
def run(args, i_cv):
    logger = logging.getLogger()
    print_line()
    logger.info('Running iter n°{}'.format(i_cv))
    print_line()

    result_row = {'i_cv': i_cv}
    result_table = []

    # LOAD/GENERATE DATA
    logger.info('Set up data generator')
    pb_config = S3D2Config()
    seed = config.SEED + i_cv * 5
    train_generator = S3D2(seed)
    valid_generator = S3D2(seed + 1)
    test_generator = S3D2(seed + 2)

    # SET MODEL
    logger.info('Set up regressor')
    args.net = AR5R5E(n_in=3, n_out=2, n_extra=2)
    args.optimizer = get_optimizer(args)
    model = get_model(args, Regressor)
    model.set_info(BENCHMARK_NAME, i_cv)
    model.param_generator = param_generator
    flush(logger)

    # TRAINING / LOADING
    if not args.retrain:
        try:
            logger.info('loading from {}'.format(model.model_path))
            model.load(model.model_path)
        except Exception as e:
            logger.warning(e)
            args.retrain = True
    if args.retrain:
        logger.info('Training {}'.format(model.get_name()))
        model.fit(train_generator)
        logger.info('Training DONE')

        # SAVE MODEL
        save_model(model)

    # CHECK TRAINING
    logger.info('Plot losses')
    plot_REG_losses(model)
    plot_REG_log_mse(model)
    result_row['loss'] = model.losses[-1]
    result_row['mse_loss'] = model.mse_losses[-1]

    # MEASUREMENT
    for mu in pb_config.TRUE_MU_RANGE:
        pb_config.TRUE_MU = mu
        logger.info('Generate testing data')
        test_generator.reset()
        X_test, y_test, w_test = test_generator.generate(
            # pb_config.TRUE_R,
            # pb_config.TRUE_LAMBDA,
            pb_config.CALIBRATED_R,
            pb_config.CALIBRATED_LAMBDA,
            pb_config.TRUE_MU,
            n_samples=pb_config.N_TESTING_SAMPLES)

        p_test = np.array(
            (pb_config.CALIBRATED_R, pb_config.CALIBRATED_LAMBDA))

        pred, sigma = model.predict(X_test, w_test, p_test)
        name = pb_config.INTEREST_PARAM_NAME
        result_row[name] = pred
        result_row[name + _ERROR] = sigma
        result_row[name + _TRUTH] = pb_config.TRUE_MU
        logger.info('{} =vs= {} +/- {}'.format(pb_config.TRUE_MU, pred, sigma))
        result_table.append(result_row.copy())
    result_table = pd.DataFrame(result_table)

    logger.info('Plot params')
    name = pb_config.INTEREST_PARAM_NAME
    plot_params(name,
                result_table,
                title=model.full_name,
                directory=model.results_path)

    logger.info('DONE')
    return result_table
Code example #13
def build_model(args, i_cv):
    args.net = ARCHI(n_in=1, n_out=2, n_unit=args.n_unit)
    args.optimizer = get_optimizer(args)
    model = get_model(args, TangentPropClassifier)
    model.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
    return model
Code example #14
def build_model(args, i_cv):
    args.net = ARCHI(n_in=1, n_out=2, n_unit=args.n_unit)
    args.optimizer = get_optimizer(args)
    model = get_model(args, NeuralNetClassifier)
    model.set_info(DATA_NAME, f"{BENCHMARK_NAME}/learning_curve", i_cv)
    return model
Code example #15
def build_model(args, i_cv):
    args.net = ARCHI(n_in=29, n_out=2, n_extra=3, n_unit=args.n_unit)
    args.optimizer = get_optimizer(args)
    model = get_model(args, FilterRegressor)
    model.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
    return model
Code example #16
def build_model(args, i_cv):
    args.net = ARCHI(n_in=31, n_out=2, n_unit=args.n_unit)
    args.optimizer = get_optimizer(args)
    model = get_model(args, Model)
    model.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
    return model
Code example #17
    def train(self, train_loader, dev_loader, eval_loader):
        self._init_embeddings(self.token2id)
        # Sort the dataset by length

        epoch = 1  # epoch counter (1-based)
        patience_count = 0
        start = time.time()
        while True:
            self.step = 0
            losses = 0.
            if epoch == self.embedding_le:
                print("Start Learning Embedding!")
                self.optimizer = change_embedding_lr(self.optimizer,
                                                     self.embedding_lr)

            for articles, tags, lengths in train_loader:
                losses += self.train_step(articles, tags, lengths)
                if self.step % TrainingConfig.print_step == 0:
                    total_step = len(train_loader)
                    print(
                        "Epoch {}, step/total_step: {}/{} {:.2f}% Loss:{:.4f}".
                        format(epoch, self.step, total_step,
                               100. * self.step / total_step,
                               losses / self.print_step))
                    losses = 0.

            # At the end of each epoch, evaluate on the dev set and keep the best model
            val_loss = self.validate(dev_loader)
            print("Epoch {}, Val Loss:{:.4f}".format(epoch, val_loss))

            # Compute the F1 score on the eval set and keep the best one
            metrics = self.cal_scores(eval_loader)
            metrics.report()
            if metrics.final_f1 > self.best_f1_score:
                print("更新f1并保存模型...")
                self.best_metrics = metrics
                self.best_f1_model = copy.deepcopy(self.model)
                assert id(self.best_f1_model) != id(self.model)  # confirm a real copy was made
                self.best_f1_score = metrics.final_f1
                patience_count = 0  # improvement: reset patience_count
            else:
                patience_count += 1

                # If F1 has not improved for lr_patience consecutive epochs,
                # restore best_f1_model and rebuild the optimizer with the decayed learning rates
                if patience_count >= self.lr_patience:
                    print("Reduce Learning Rate....")
                    self.embedding_lr *= self.lr_decay
                    self.lr *= self.lr_decay
                    self.optimizer = get_optimizer(self.model,
                                                   self.embedding_lr, self.lr)
                    self.model = copy.deepcopy(self.best_f1_model)

                # If there is no improvement for patience consecutive epochs, stop training
                if patience_count >= self.patience:
                    end = time.time()
                    print("在Epoch {} 训练终止,用时: {}".format(
                        epoch, time_format(end - start)))
                    break  # 终止训练

            epoch += 1
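
The manual patience/decay loop in example #17 implements reduce-on-plateau by hand. For comparison, here is a minimal sketch of the built-in PyTorch equivalent, `torch.optim.lr_scheduler.ReduceLROnPlateau`; the model is a placeholder and the hyper-parameter values are assumptions:

import torch

model = torch.nn.Linear(10, 2)  # placeholder model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# mode="max" because a higher F1 is better; factor plays the role of lr_decay
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode="max", factor=0.5, patience=3)
# after each epoch: scheduler.step(val_f1)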
Code example #18
    def __init__(self, vocab_size, out_size, token2id, tag2id, method="lstm"):
        """功能:对LSTM的模型进行训练与测试
           参数:
            vocab_size:词典大小
            out_size:标注种类
            method: 三种方法,["lstm", "lstm_crf", "lstm_lstm"]
            crf选择是否添加CRF层"""
        self.method = method
        self.out_size = out_size
        self.token2id = token2id
        self.tag2id = tag2id
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        assert method in ["lstm", "lstm_crf", "lstm_lstm"]

        # Load model hyper-parameters
        self.emb_size = LSTMConfig.emb_size
        self.max_len = TrainingConfig.max_len
        self.clip = TrainingConfig.clip

        # Initialize the model and matching loss function depending on whether a CRF layer is used
        if method == "lstm":
            self.hidden_size = LSTMConfig.hidden_size
            self.model = BiLSTM(vocab_size, self.emb_size, self.hidden_size,
                                out_size).to(self.device)
            self.alpha = TrainingConfig.alpha
            self.cal_loss_func = cal_weighted_loss
        elif method == "lstm_crf":
            self.hidden_size = LSTMConfig.hidden_size
            self.model = BiLSTM_CRF(vocab_size, self.emb_size,
                                    self.hidden_size, out_size).to(self.device)
            self.cal_loss_func = cal_lstm_crf_loss
        elif method == "lstm_lstm":
            self.enc_hsize = LSTMConfig.enc_hidden_size
            self.dec_hsize = LSTMConfig.dec_hidden_size
            self.dropout_p = LSTMConfig.dropout_p
            self.model = DoubleLSTM(vocab_size, self.emb_size, self.enc_hsize,
                                    self.dec_hsize, out_size, self.dropout_p)
            self.alpha = TrainingConfig.alpha
            self.cal_loss_func = cal_weighted_loss

        self.model = self.model.to(self.device)

        # Load training parameters:
        self.patience = TrainingConfig.training_patience
        self.print_step = TrainingConfig.print_step
        self.lr = TrainingConfig.lr
        self.lr_patience = TrainingConfig.lr_patience
        self.lr_decay = TrainingConfig.lr_decay
        self.batch_size = TrainingConfig.batch_size
        self.decoding_batch_size = TrainingConfig.decoding_batch_size

        self.embedding_lr = TrainingConfig.embedding_lr
        self.embedding_le = TrainingConfig.embedding_learning_epoch
        self.token_method = TrainingConfig.token_method

        # Initialize the optimizer; the embedding-layer learning rate starts at 0
        self.optimizer = get_optimizer(self.model, 0.0, self.lr)
        # Initialize other tracking state
        self.step = 0
        self.best_val_loss = 1e18
        self.best_model = None

        # Best score so far (F1)
        self.best_metrics = None
        self.best_f1_model = None
        self.best_f1_score = 0.
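
Examples #17 and #18 call `get_optimizer(model, embedding_lr, lr)`, which evidently builds an optimizer with a separate learning rate for the embedding layer (0.0 freezes it for the first epochs). A hypothetical PyTorch sketch of a function with that signature; the `model.embedding` attribute name and the choice of Adam are assumptions:

import torch

def get_optimizer(model, embedding_lr, lr):
    # Hypothetical sketch: give the embedding layer its own learning rate
    # and train every other parameter at the base rate.
    embedding_params = list(model.embedding.parameters())
    embedding_ids = {id(p) for p in embedding_params}
    other_params = [p for p in model.parameters() if id(p) not in embedding_ids]
    return torch.optim.Adam([
        {"params": embedding_params, "lr": embedding_lr},
        {"params": other_params, "lr": lr},
    ])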