Example #1
 def __init__(self, cfg_file, checkpoint=False):
     super(Model, self).__init__()
     self.checkpoint = checkpoint
     self.feature_layers, self.classifier = create_model(cfg_file)
     self.dummy_tensor = torch.ones(1,
                                    dtype=torch.float32,
                                    requires_grad=True)
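The constructor above keeps a dummy_tensor with requires_grad=True, which is the usual workaround for activation checkpointing when the real input does not require gradients. A minimal, self-contained sketch of that pattern, assuming torch.utils.checkpoint and a stand-in nn.Sequential in place of whatever create_model returns:

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

# Hypothetical stand-in for the layers create_model(cfg_file) would build.
feature_layers = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)

def run_features(x, dummy_arg=None):
    # The dummy argument gives checkpoint() an input that requires grad, so the
    # segment is recomputed in backward and gradients still reach the weights
    # even though the image batch x itself does not require grad.
    return feature_layers(x)

x = torch.randn(2, 3, 32, 32)
out = checkpoint(run_features, x, dummy_tensor)
out.sum().backward()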
Example #2
def evaluate_line():
    # Load the intent classification models

    id_to_cat = get_id_to_cat('{}/categories.txt'.format(data_path))

    print(
        "==========================Loading the Intention Classification model....=========================="
    )
    model_1 = ImportGraph('{}/model_cnn'.format(model_path))
    model_2 = ImportGraph('{}/model_rnn'.format(model_path))
    print("Model loaded..")
    flag = 0

    # Load the named entity recognition (NER) model
    print(
        "==========================Loading the NER model....=========================="
    )
    config = load_config(FLAGS.config_file)
    logger = get_logger(FLAGS.log_file)
    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with open(FLAGS.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)

        # Recognition loop

        while True:
            # try:
            #     line = input("请输入测试句子:")
            #     result = model.evaluate_line(sess, input_from_line(line, char_to_id), id_to_tag)
            #     print(result)
            # except Exception as e:
            #     logger.info(e)

            # Get the test sentence
            text = input("请输入要进行识别的句子:")

            # Intent classification
            id_text = process_text(text, '{}/vocab.txt'.format(data_path))
            pred_1 = model_1.run(id_text, 1.0)
            pred_2 = model_2.run(id_text, 1.0)
            pred = pred_1 + pred_2
            res = id_to_cat[int(np.argmax(pred))]
            print(res)

            # Named entity recognition
            result = model.evaluate_line(sess,
                                         input_from_line(text, char_to_id),
                                         id_to_tag)
            print(result)
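ImportGraph itself is not shown in this example. A common TensorFlow 1.x pattern it likely follows is loading each saved classifier into its own graph and session, so the CNN and RNN intent models can run side by side with the NER session. A hypothetical sketch; the tensor names input_x, keep_prob and logits are assumptions about how the saved graphs were built:

import tensorflow as tf

class ImportGraph:
    def __init__(self, model_dir):
        # Each model lives in its own graph and session.
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            ckpt = tf.train.latest_checkpoint(model_dir)
            saver = tf.train.import_meta_graph(ckpt + '.meta')
            saver.restore(self.sess, ckpt)
            # Assumed tensor names; adjust to the actual saved model.
            self.input_x = self.graph.get_tensor_by_name('input_x:0')
            self.keep_prob = self.graph.get_tensor_by_name('keep_prob:0')
            self.logits = self.graph.get_tensor_by_name('logits:0')

    def run(self, ids, keep_prob):
        return self.sess.run(self.logits,
                             feed_dict={self.input_x: [ids],
                                        self.keep_prob: keep_prob})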
def evaluate_line():
    config = load_config(args.config_file)
    logger = get_logger(args.log_file)
    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with open(args.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag, intent_to_id, id_to_intent = pickle.load(
            f)
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, args.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        while True:
            try:
                line = input("请输入测试句子:")
                result = model.evaluate_line(sess,
                                             input_from_line(line, char_to_id),
                                             id_to_tag, id_to_intent)
                print(result)
            except Exception as e:
                logger.info(e)
def evaluate_test():
    config = load_config(args.config_file)
    logger = get_logger(args.log_file)

    with open(args.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag, intent_to_id, id_to_intent = pickle.load(
            f)

    test_sentences = load_sentences(args.test_file, args.lower, args.zeros)
    update_tag_scheme(test_sentences, args.tag_schema)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                intent_to_id, args.lower)
    test_manager = BatchManager(test_data, 100)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, args.ckpt_path, load_word2vec,
                             config, id_to_char, logger)

        evaluate(sess, model, "test", test_manager, id_to_tag, logger)
Example #5
def evaluate_line_ner():
    config = load_config(FLAGS.config_file)
    logger = get_logger(FLAGS.log_file)
    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with open(FLAGS.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        while True:
            # try:
            #     line = input("请输入测试句子:")
            #     result = model.evaluate_line(sess, input_from_line(line, char_to_id), id_to_tag)
            #     print(result)
            # except Exception as e:
            #     logger.info(e)

            line = input("请输入测试句子:")
            result = model.evaluate_line(sess,
                                         input_from_line(line, char_to_id),
                                         id_to_tag)
            print(result)
Example #6
def train_ner():
    clean(FLAGS)
    # load data sets
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower,
                                     FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    # Use selected tagging scheme (IOB / IOBES)
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)
    update_tag_scheme(dev_sentences, FLAGS.tag_schema)

    # create maps if not exist
    if not os.path.isfile(FLAGS.map_file):
        # create dictionary for word
        if FLAGS.pre_emb:
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(), FLAGS.emb_file,
                list(
                    itertools.chain.from_iterable([[w[0] for w in s]
                                                   for s in test_sentences])))
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences,
                                                      FLAGS.lower)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id,
                                 FLAGS.lower)
    dev_data = prepare_dataset(dev_sentences, char_to_id, tag_to_id,
                               FLAGS.lower)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                FLAGS.lower)
    print("%i / %i / %i sentences in train / dev / test." %
          (len(train_data), 0, len(test_data)))

    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)
    # make path for store log and model if not exist
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        logger.info("start training")
        loss = []
        for i in range(25):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                                    iteration, step % steps_per_epoch,
                                    steps_per_epoch, np.mean(loss)))
                    loss = []

            best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger)
            evaluate(sess, model, "test", test_manager, id_to_tag, logger)
def train():
    # load data sets
    train_sentences = load_sentences(args.train_file, args.lower, args.zeros)
    dev_sentences = load_sentences(args.dev_file, args.lower, args.zeros)
    test_sentences = load_sentences(args.test_file, args.lower, args.zeros)

    # Use selected tagging scheme (IOB / IOBES)
    # Check and normalize the tag scheme of the data sets
    update_tag_scheme(train_sentences, args.tag_schema)
    update_tag_scheme(test_sentences, args.tag_schema)
    update_tag_scheme(dev_sentences, args.tag_schema)

    # create maps if not exist
    # Build the char_to_id, id_to_char, tag_to_id, id_to_tag maps from the data sets and save them as a pkl file
    if not os.path.isfile(args.map_file):
        # create dictionary for word
        if args.pre_emb:
            dico_chars_train = char_mapping(train_sentences, args.lower)[0]
            # Augment the character dictionary with the pretrained embeddings, then return the char/index mappings
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(), args.emb_file,
                list(
                    itertools.chain.from_iterable([[w[0] for w in s]
                                                   for s in test_sentences])))
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences,
                                                      args.lower)

        # Create a dictionary and a mapping for tags
        # Build the tag/index and intent/index mappings
        tag_to_id, id_to_tag, intent_to_id, id_to_intent = tag_mapping(
            train_sentences)

        with open(args.map_file, "wb") as f:
            pickle.dump([
                char_to_id, id_to_char, tag_to_id, id_to_tag, intent_to_id,
                id_to_intent
            ], f)
    else:
        with open(args.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag, intent_to_id, id_to_intent = pickle.load(
                f)

    # Extract sentence features
    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id,
                                 intent_to_id, args.lower)
    dev_data = prepare_dataset(dev_sentences, char_to_id, tag_to_id,
                               intent_to_id, args.lower)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                intent_to_id, args.lower)

    # code.interact(local=locals())

    print("%i / %i / %i sentences in train / dev / test." %
          (len(train_data), len(dev_data), len(test_data)))

    # Batch the data for model training
    train_manager = BatchManager(train_data, args.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)

    # make path for store log and model if not exist
    make_path(args)
    if os.path.isfile(args.config_file):
        config = load_config(args.config_file)
    else:
        config = config_model(char_to_id, tag_to_id, intent_to_id)
        save_config(config, args.config_file)
    make_path(args)

    logger = get_logger(args.log_file)
    print_config(config, logger)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    # Number of steps in one full pass over the training set
    steps_per_epoch = train_manager.len_data

    with tf.Session(config=tf_config) as sess:
        # Model creation; this is the core of the project
        model = create_model(sess, Model, args.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        logger.info("start training")
        loss_slot = []
        loss_intent = []

        # with tf.device("/gpu:0"):
        for i in range(100):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss_slot, batch_loss_intent = model.run_step(
                    sess, True, batch)
                loss_slot.append(batch_loss_slot)
                loss_intent.append(batch_loss_intent)

                if step % args.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "INTENT loss:{:>9.6f}, "
                                "NER loss:{:>9.6f}".format(
                                    iteration, step % steps_per_epoch,
                                    steps_per_epoch, np.mean(loss_intent),
                                    np.mean(loss_slot)))
                    loss_slot = []
                    loss_intent = []

            best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                # if i%7 == 0:
                save_model(sess, model, args.ckpt_path, logger)
        evaluate(sess, model, "test", test_manager, id_to_tag, logger)
Example #8
def main():

    parser = argparse.ArgumentParser(description='predict alaska2')
    parser.add_argument('--result_path', type=str)
    parser.add_argument('--submission_file', type=str)
    parser.add_argument('--batchsize', type=int, default=4)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--TTA', action='store_true')
    args = parser.parse_args()

    # get config
    config = Config()

    # create model
    model = utils.create_model(config)

    #load model
    state = torch.load(os.path.join(args.result_path, 'best_snapshot'))
    #print(type(state['models']['main']))
    model.load_state_dict(state['models']['main'])

    device = 'cuda'
    model.cuda()
    model.eval()

    # predict transform
    pred_trans = transforms.eval_transform(resize=(config.input_size_h,
                                                   config.input_size_w),
                                           normalize=config.normalize)

    tta = [False]
    if args.TTA:
        tta.extend(['hflip', 'vflip'])

    for t in tta:
        # create dataset
        pred_dataset = ALASKA2TestDataset(config.data, pred_trans, TTA=t)

        # create data loader
        pred_loader = torch.utils.data.DataLoader(pred_dataset,
                                                  batch_size=args.batchsize,
                                                  num_workers=args.num_workers,
                                                  shuffle=False,
                                                  pin_memory=True)

        result = {'Id': [], 'Label': []}
        for img_names, imgs in pred_loader:
            pred = model(imgs.cuda())
            pred = 1 - torch.nn.functional.softmax(
                pred, dim=1).data.cpu().numpy()[:, 0]

            result['Id'].extend(img_names)
            result['Label'].extend(pred)

        if t == 'hflip' or t == 'vflip':
            submission = pd.merge(submission,
                                  pd.DataFrame(result),
                                  how='inner',
                                  on='Id')
        else:
            submission = pd.DataFrame(result)

    if args.TTA:
        print(submission.columns)
        submission['Label'] = (submission['Label_x'] + submission['Label_y'] +
                               submission['Label']) / 3
        submission = submission[['Id', 'Label']]

    submission.to_csv(os.path.join(args.result_path, args.submission_file),
                      index=False)
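The TTA averaging above relies on pandas' merge-suffix behaviour: after two inner merges on Id, the duplicated Label columns become Label_x and Label_y, plus the plain Label from the last merge, which is why exactly those three columns are averaged. A toy illustration with made-up values:

import pandas as pd

a = pd.DataFrame({'Id': ['img1', 'img2'], 'Label': [0.2, 0.8]})  # no-TTA pass
b = pd.DataFrame({'Id': ['img1', 'img2'], 'Label': [0.3, 0.7]})  # hflip pass
c = pd.DataFrame({'Id': ['img1', 'img2'], 'Label': [0.4, 0.6]})  # vflip pass

sub = pd.merge(a, b, how='inner', on='Id')    # columns: Id, Label_x, Label_y
sub = pd.merge(sub, c, how='inner', on='Id')  # adds a plain Label column
sub['Label'] = (sub['Label_x'] + sub['Label_y'] + sub['Label']) / 3
print(sub[['Id', 'Label']])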
Example #9
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--snapshot', type=str)
    parser.add_argument('--snapmodel', type=str)
    args = parser.parse_args()

    # get config
    config = Config()

    # set seed
    random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    torch.backends.cudnn.deterministic = True

    # create model
    model = utils.create_model(config)
    device = 'cuda'
    model.cuda()

    # define transforms
    train_trans = transforms.train_transform(resize=(config.input_size_h,
                                                     config.input_size_w),
                                             normalize=config.normalize)
    val_trans = transforms.eval_transform(resize=(config.input_size_h,
                                                  config.input_size_w),
                                          normalize=config.normalize)

    # copy config and src
    if not os.path.exists(os.path.join(config.result, 'src')):
        os.makedirs(os.path.join(config.result, 'src'), exist_ok=True)
    for src_file in glob.glob('/work/*.py') + glob.glob('/work/*/*.py'):
        shutil.copy(
            src_file,
            os.path.join(config.result, 'src', os.path.basename(src_file)))

    # create dataset
    train_dataset = dataset.Alaska2Dataset(
        root=config.data,
        transforms=train_trans,
        train=True,
        batchsize=config.batchsize,
        uniform=config.batch_uniform,
    )
    val_dataset = dataset.Alaska2Dataset(root=config.data,
                                         transforms=val_trans,
                                         train=False,
                                         uniform=False)

    # create data loader
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=config.batchsize,
                                               num_workers=config.num_workers,
                                               shuffle=True)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=config.batchsize,
                                             num_workers=config.num_workers,
                                             shuffle=False)

    # set optimizer (commented-out alternative also optimizes metrics_fc parameters)
    # optimizer = torch.optim.AdamW([{'params': model.parameters()},
    #                                {'params': metrics_fc.parameters()}],
    #                               lr=config.lr)
    optimizer = torch.optim.AdamW(model.parameters(), lr=config.lr)
    #optimizer = torch.optim.SGD(model.parameters(),
    #                              lr=config.lr,
    #                            momentum=0.9)

    # Initialize Amp.  Amp accepts either values or strings for the optional override arguments,
    # for convenient interoperation with argparse.
    if config.fp16:
        opt_level = 'O1'
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=opt_level
                                          #keep_batchnorm_fp32=True
                                          )

    # set scheduler
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        factor=0.5,
        patience=2,
        threshold_mode='abs',
        min_lr=1e-8,
        eps=1e-08)

    # set criterion
    #criterion = torch.nn.CrossEntropyLoss()
    criterion = LabelSmoothing().cuda()
    num_epochs = config.num_epochs

    # set manager
    iters_per_epoch = len(train_loader)

    manager = ppe.training.ExtensionsManager(model,
                                             optimizer,
                                             num_epochs,
                                             iters_per_epoch=iters_per_epoch,
                                             out_dir=config.result,
                                             stop_trigger=None)

    log_interval = (100, 'iteration')
    #eval_interval = (500, 'iteration')
    eval_interval = (1, 'epoch')

    manager.extend(extensions.snapshot(filename='best_snapshot'),
                   trigger=MaxValueTrigger('validation/auc',
                                           trigger=eval_interval))
    if config.fp16:
        manager.extend(extensions.snapshot_object(amp, filename='amp.ckpt'),
                       trigger=MaxValueTrigger('validation/auc',
                                               trigger=eval_interval))

    manager.extend(extensions.LogReport(trigger=log_interval))

    manager.extend(extensions.PlotReport(['train/loss', 'validation/loss'],
                                         'epoch',
                                         filename='loss.png'),
                   trigger=(1, 'epoch'))

    manager.extend(extensions.PrintReport([
        'epoch', 'iteration', 'train/loss', 'validation/loss',
        'validation/auc', 'lr', 'elapsed_time'
    ]),
                   trigger=log_interval)

    manager.extend(extensions.ProgressBar(update_interval=100))
    manager.extend(extensions.observe_lr(optimizer=optimizer),
                   trigger=log_interval)
    #manager.extend(extensions.ParameterStatistics(model, prefix='model'))
    #manager.extend(extensions.VariableStatisticsPlot(model))

    manager.extend(ALASKAEvaluator(val_loader,
                                   model,
                                   eval_hook=None,
                                   eval_func=None,
                                   loss_criterion=criterion,
                                   auc_criterion=auc_eval_func,
                                   device=device,
                                   scheduler=scheduler,
                                   metric_learning=config.metric_learning),
                   trigger=eval_interval)

    # Let's load the snapshot
    if args.snapshot is not None:
        state = torch.load(args.snapshot)
        manager.load_state_dict(state)
        #amp = torch.load('amp.ckpt')
    elif args.snapmodel is not None:
        print('load snapshot model {}'.format(args.snapmodel))
        state = torch.load(args.snapmodel)
        manager._models['main'].load_state_dict(state['models']['main'])

    train_func(manager,
               model,
               criterion,
               optimizer,
               train_loader,
               device,
               metric_learning=config.metric_learning,
               fp16=config.fp16)
Example #10
    def train(self, classes_path, anchors_path):
        '''
        Args:
            classes_path: path to the classes file
            anchors_path: path to the anchors file
        '''
        classes_names = get_classes(classes_path)
        num_classes = len(classes_names)
        anchors = get_anchors(anchors_path)

        is_tiny_version = len(anchors) == 6  # default setting
        if is_tiny_version:
            model = create_tiny_model(input_shape,
                                      anchors,
                                      num_classes,
                                      freeze_body=2)
        else:
            model = create_model(input_shape,
                                 anchors,
                                 num_classes,
                                 load_pretrained=False)

        logging = TensorBoard(log_dir=log_dir)
        # checkpoint = ModelCheckpoint(log_dir + 'car_mobilenet_yolov3.ckpt',
        #    monitor='val_loss', save_weights_only=False, period=1)
        checkpoint = ModelCheckpoint(
            log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
            monitor='val_loss',
            save_weights_only=False,
            save_best_only=True,
            period=3)

        # reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)

        reduce_lr = ReduceLROnPlateau(monitor="val_loss",
                                      factor=0.1,
                                      min_lr=1e-9,
                                      patience=5,
                                      verbose=1)
        # early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)

        with open(train_path) as t_f:
            t_lines = t_f.readlines()
        np.random.seed(666)
        np.random.shuffle(t_lines)
        num_val = int(len(t_lines) * val_split)
        num_train = len(t_lines) - num_val
        v_lines = t_lines[num_train:]
        t_lines = t_lines[:num_train]

        # Train with frozen layers first, to get a stable loss.
        # Adjust the number of epochs to your dataset; this step is enough to obtain a reasonable model.
        if True:
            model.compile(
                optimizer=Adam(lr=1e-3),
                loss={
                    # use custom yolo_loss Lambda layer.
                    'yolo_loss': lambda y_true, y_pred: y_pred
                })

        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_num))
        model.fit_generator(data_generator_wrapper(t_lines, batch_num,
                                                   input_shape, anchors,
                                                   num_classes),
                            steps_per_epoch=max(1, num_train // batch_num),
                            validation_data=data_generator_wrapper(
                                v_lines, batch_num, input_shape, anchors,
                                num_classes),
                            validation_steps=max(1, num_val // batch_num),
                            epochs=epochs,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save(log_dir + 'trained_weights_stage_1.h5')

        # Unfreeze and continue training, to fine-tune.
        # Train longer if the result is not good.
        if True:
            print("Unfreeze and continue training, to fine-tune.")
            for i in range(len(model.layers)):
                model.layers[i].trainable = True
            model.compile(optimizer=Adam(lr=1e-4),
                          loss={
                              'yolo_loss': lambda y_true, y_pred: y_pred
                          })  # recompile to apply the change
            batch_size = 16  # note that more GPU memory is required after unfreezing the body
            print(
                'Train on {} samples, val on {} samples, with batch size {}.'.
                format(num_train, num_val, batch_size))
            model.fit_generator(
                data_generator_wrapper(t_lines, batch_size, input_shape,
                                       anchors, num_classes),
                steps_per_epoch=max(1, num_train // batch_size),
                validation_data=data_generator_wrapper(v_lines, batch_size,
                                                       input_shape, anchors,
                                                       num_classes),
                validation_steps=max(1, num_val // batch_size),
                epochs=20,
                initial_epoch=0,
                callbacks=[logging, checkpoint, reduce_lr])
            model.save(log_dir + 'trained_weights_final.h5')
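The loss={'yolo_loss': lambda y_true, y_pred: y_pred} trick works because, in this keras-yolo3 style setup, the model's output is already the loss value: a Lambda layer named yolo_loss computes it from the raw detection heads and the ground-truth inputs, so compile() only needs an identity pass-through. A minimal sketch of that wiring, with a toy squared error standing in for the real YOLO loss:

import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model

raw_head = Input(shape=(4,))   # stand-in for the network's detection outputs
gt_head = Input(shape=(4,))    # ground truth fed in as an extra model input
loss_out = Lambda(lambda t: K.mean(K.square(t[0] - t[1]), axis=-1, keepdims=True),
                  name='yolo_loss')([raw_head, gt_head])
toy = Model([raw_head, gt_head], loss_out)
toy.compile(optimizer='adam',
            loss={'yolo_loss': lambda y_true, y_pred: y_pred})
# The y passed to fit() is ignored by the identity loss.
toy.fit([np.random.rand(8, 4), np.random.rand(8, 4)], np.zeros((8, 1)), epochs=1)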
Example #11
    load_model = base_config.get("load_model", None)
    restore_best_ckpt = base_config.get("restore_best_ckpt", False)
    base_ckpt_dir = check_base_model_logdir(load_model, args,
                                            restore_best_ckpt)

    base_config["load_model"] = base_ckpt_dir

    ckpt = check_logdir(args, base_config, restore_best_ckpt)

    if args.mode == "train":
        if ckpt is None:
            if base_ckpt_dir:
                print("Starting from base model")
        else:
            print("Restored ckpt from {}. Resuming training".format(ckpt))

    elif args.mode == "eval":
        print("Loading model friom {}".format(ckpt))

    with tf.Graph().as_default():
        model = create_model(args, base_config, config_module, base_model,
                             ckpt)

        if args.mode == "train":
            train(model, eval_model=None, args=args)
        elif args.mode == "eval":
            evaluate(model, ckpt)
        elif args.mode == "infer":
            infer(model, ckpt)