Example #1
# The try block was truncated in the original listing; it is reconstructed here
# from the package-style imports that were left commented out beneath it.
try:
    from crnn_model import crnn_model
    from global_configuration import config
    from local_utils import log_utils, data_utils
except ImportError:
    pass

import argparse
import sys
sys.path.append('/data2/hdia_ocr_data/CRNN')
sys.path.append('/data2/hdia_ocr_data/CRNN/crnn_model')
sys.path.append('/data2/hdia_ocr_data/CRNN/local_utils')
sys.path.append('/data2/hdia_ocr_data/CRNN/global_configuration')
import crnn_model
import data_utils, log_utils
import config

logger = log_utils.init_logger()


def init_args():
    """

    :return:
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_path', type=str, help='Where you store the image',
                        default='data/test_images/test_01.jpg')
    parser.add_argument('--weights_path', type=str, help='Where you store the weights',
                        default='model/shadownet/shadownet_2017-09-29-19-16-33.ckpt-39999')

    return parser.parse_args()
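
This listing stops right after argument parsing. A minimal sketch of how the parsed arguments and the module-level logger are typically wired together is shown below; recognize_image is a hypothetical stand-in for the CRNN inference code that is not part of this example.

def recognize_image(image_path, weights_path):
    # Hypothetical placeholder for the truncated recognition code.
    logger.info('would restore weights from %s and decode %s', weights_path, image_path)


if __name__ == '__main__':
    args = init_args()
    logger.info('image: %s, weights: %s', args.image_path, args.weights_path)
    recognize_image(args.image_path, args.weights_path)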
Example #2
File: train.py  Project: Ba1Jun/MPCE
            p.requires_grad = False
        # p.data.uniform_(-opt.param_init, opt.param_init)
        elif 'rezero_alpha' in pr_name:
            logger.info('{} is rezero param'.format(pr_name))
            nn.init.zeros_(p)
        else:
            if p.dim() == 1:
                # p.data.zero_()
                p.data.normal_(0, math.sqrt(6 / (1 + p.size(0))))
            else:
                nn.init.xavier_normal_(p, math.sqrt(3))
        logger.info("{}: requires_grad {}".format(pr_name, p.requires_grad))


if __name__ == '__main__':
    init_logger(level='info', log_file='train.log')
    config = load_config()
    device = torch.device('cpu') if config['gpu'] < 0 else torch.device('cuda:{}'.format(config['gpu']))
    logger.info("training with param:\n{}".format(config))
    logger.info("training with device: {}".format(device))
    if config['albert']:
        word_vocab = AlbertVocab(config['albert_model_name'], cache_dir=config['albert_cache_dir'])
    else:
        word_vocab = load_word_vocab('squad_out/train.txt.vocab.word', config['vocab_size'])
    logger.info(word_vocab)
    bio_vocab = load_bio_vocab('squad_out/train.txt.vocab.bio')
    logger.info(bio_vocab)
    feat_vocab = load_feat_vocab('squad_out/train.txt.vocab.feat')
    logger.info(feat_vocab)
    train_instances = load_instances('squad_out/train.ins')
    dev_instances = load_instances('squad_out/dev.ins')
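
The initialization fragment at the top of this example is cut off before its loop header. A self-contained sketch of the same per-parameter initialization idiom follows; the freeze condition ('bert' in the parameter name) is an assumption, not taken from the project.

import math

import torch.nn as nn


def init_parameters(model, freeze_substring='bert'):
    # Sketch of the initialization loop shown above; freeze_substring is an
    # assumed stand-in for the truncated freezing condition.
    for pr_name, p in model.named_parameters():
        if freeze_substring in pr_name:
            p.requires_grad = False
        elif 'rezero_alpha' in pr_name:
            nn.init.zeros_(p)
        elif p.dim() == 1:
            p.data.normal_(0, math.sqrt(6 / (1 + p.size(0))))
        else:
            nn.init.xavier_normal_(p, math.sqrt(3))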
Example #3
File: evaluate.py  Project: Ba1Jun/MPCE
                zip(atten_engy, instances, copy_hypothesis)):
            obj.append({
                'idx': idx,
                'decode_engy': str(engy),
                'src_tokens': ' '.join(instance.src),
                'output_tokens': ' '.join(hypothesis)
            })
        json.dump(obj, open(predict_atten_engy_path, 'w'), indent=2)
    logger.info("{} of {} is completed hypothesis".format(
        total_completed, len(instances)))
    return copy_hypothesis


if __name__ == '__main__':
    config = load_config()
    init_logger(log_file='evaluate.log')
    device = torch.device('cpu') if config['gpu'] < 0 else torch.device(
        'cuda:{}'.format(config['gpu']))
    if config['model'] == 'nmt':
        model = NMT.load(config['model_save_path'])
        model.to(device)
    else:
        model = QGModel.load(config['model_save_path'], device)

    test_instances = load_instances(config['save_dir'] + '/test.ins')
    bleus = evaluate_bleu(model, test_instances, config, model.word_vocab,
                          config['predict_save_path'])
    logger.info(
        '\nBLEU_1: {}\nBLEU_2: {}\nBLEU_3: {}\nBLEU_4: {}\nBLEU: {}'.format(
            *bleus))
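
evaluate_bleu itself is not shown in this example. Purely as an illustration, a corpus-level BLEU-1 through BLEU-4 computation with NLTK could look like the sketch below, assuming hypotheses and references are lists of token lists.

from nltk.translate.bleu_score import SmoothingFunction, corpus_bleu


def corpus_bleu_1_to_4(references, hypotheses):
    # references and hypotheses are parallel lists of token lists.
    smooth = SmoothingFunction().method1
    list_of_refs = [[ref] for ref in references]  # one reference per example
    weights = [(1, 0, 0, 0), (0.5, 0.5, 0, 0),
               (1 / 3, 1 / 3, 1 / 3, 0), (0.25, 0.25, 0.25, 0.25)]
    return [corpus_bleu(list_of_refs, hypotheses, weights=w, smoothing_function=smooth)
            for w in weights]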
Example #4
import logging
import os
import sys

# env, appName and moduleDir are defined earlier in the original file (truncated here).
RFSCRIPT_VAR = env.get('SCRIPT_VAR')
root = env.get('DEV_ENV').get(RFSCRIPT_VAR)
qtPath = env.get('QTPATH')
sys.path.append('%s/%s' % (root, qtPath))

os.environ['QT_PREFERRED_BINDING'] = os.pathsep.join(['PySide', 'PySide2'])
from Qt import wrapInstance
from Qt import QtCore
from Qt import QtWidgets
from Qt import QtGui

import log_utils
import load

logFile = log_utils.name(appName, user='******')
logger = log_utils.init_logger(logFile)
logger.setLevel(logging.INFO)

logger.info('\n\n==============================================')


class RFEnvSelector(QtWidgets.QMainWindow):
    def __init__(self, parent=None):
        #Setup Window
        super(RFEnvSelector, self).__init__(parent)

        # ui read
        uiFile = '%s/env_selector.ui' % moduleDir

        self.ui = load.setup_ui(uiFile, self)
        self.ui.show()
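
Outside a DCC host the selector can be launched as a standalone tool. The snippet below is an assumption about typical usage and is not part of the original file; inside a host application you would normally pass the host's main window as the parent instead.

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = RFEnvSelector(parent=None)  # __init__ loads and shows the .ui itself
    sys.exit(app.exec_())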
Example #5
import Model
import Data
import Loss
import Frame
import log_utils
from Config import cfg

import torch

import os
import cv2
import numpy as np
from tqdm import tqdm

# log
logger = log_utils.init_logger(cfg['logger_path'])
logger.info(cfg)
# dataset
Dataset = Data.Data(cfg['root_path'] + '/train', cfg['loader'])
train_loader = torch.utils.data.DataLoader(Dataset,
                                           batch_size=cfg['batch_size'],
                                           shuffle=True,
                                           num_workers=4)
Dataset_val = Data.Data(cfg['root_path'] + '/valid', cfg['loader'])
val_loader = torch.utils.data.DataLoader(Dataset_val,
                                         batch_size=cfg['batch_size'],
                                         shuffle=True,
                                         num_workers=4)

frame = Frame.Frame(Model.Model, Loss.dice_bce_loss())
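
The training loop itself is not part of this example and the Frame API is not shown. As a rough sketch only, an epoch over train_loader might look like the following, where the (image, mask) batch structure, the cfg['epochs'] key and frame.train_step are all assumptions.

for epoch in range(cfg['epochs']):  # 'epochs' key assumed to exist in cfg
    for img, mask in tqdm(train_loader, desc='epoch {}'.format(epoch)):
        loss = frame.train_step(img, mask)  # hypothetical Frame method
    logger.info('epoch {} finished, last batch loss {:.4f}'.format(epoch, loss))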
Example #6
        if len(final_tokens['src']) > max_src_len:
            logger.info("trimmed seq length {} to {}".format(len(final_tokens['src']), max_src_len))
        final_tokens['src'] = final_tokens['src'][:max_src_len]
        final_tokens['tgt'] = final_tokens['tgt'][:max_src_len]
        final_tokens['bio'] = final_tokens['bio'][:max_src_len]
        final_tokens['ner'] = final_tokens['ner'][:max_src_len]
        final_tokens['case'] = final_tokens['case'][:max_src_len]
        final_tokens['pos'] = final_tokens['pos'][:max_src_len]
        final_tokens['ans'] = final_tokens['ans'][:max_src_len]
        instance = SquadInstance(**final_tokens)
        instances.append(instance)
    return instances


if __name__ == '__main__':
    init_logger(level='debug')
    config = load_config()
    data_dir = config['data_dir']
    output_dir = config['save_dir']
    pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
    albert_tokenizer = None
    if config['albert']:
        albert_tokenizer = AlbertTokenizer.from_pretrained(config['albert_model_name'], cache_dir=config['albert_cache_dir'])
    source_fmt = data_dir + "/{}.source.txt"
    target_fmt = data_dir + "/{}.target.txt"
    feat_fmt = data_dir + "/{}.{}"
    train_instances = collect_instances(source=source_fmt.format("train.txt"),
                                        target=target_fmt.format("train.txt"),
                                        pos=feat_fmt.format('train.txt', 'pos'),
                                        ner=feat_fmt.format('train.txt', 'ner'),
                                        bio=feat_fmt.format('train.txt', 'bio'),