Code Example #1
File: perceptrons.py  Project: zhoujiang2013/isan
    def __init__(self,model_file,Task=None,Searcher=None,
            Updater=None,
            beam_width=8,logger=None,cmd_args={},**conf):
        """
        Initialization.
        If model_file is given, the existing model is loaded; otherwise a new
        model is set up for training (using the supplied Updater).
        """
        if logger is None:
            logger=logging.getLogger(__name__)
            console=logging.StreamHandler()
            console.setLevel(logging.INFO)
            logger.addHandler(console)
            logger.setLevel(logging.INFO)
        self.result_logger=logger

        self.beam_width=beam_width  #: search beam width
        self.conf=conf

        if model_file is not None:
            file=gzip.open(model_file,"rb")
            self.task=Task(model=pickle.load(file),logger=logger)
            file.close()
        else:  # new model to train
            self.paras=Parameters(Updater)
            #self.paras=Parameters(Ada_Grad)
            self.task=Task(logger=logger,paras=self.paras)
        if hasattr(self.task,'init'):
            self.task.init()
        self.searcher=Searcher(self.task,beam_width)
        self.step=0
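
The constructor supports two paths: with model_file set to None (and an Updater supplied) it creates fresh Parameters and a Task for training; with a path it unpickles the gzipped model and hands it to Task. A minimal usage sketch follows; Model, MyTask, MyDecoder and Ada_Grad are hypothetical stand-ins for the project's actual classes (compare the calls in Code Example #2 below).

# Hypothetical usage sketch; Model, MyTask, MyDecoder and Ada_Grad are placeholders.
# Training path: no model file, so fresh Parameters(Updater) are created inside __init__.
learner = Model(None, Task=MyTask, Searcher=MyDecoder,
                Updater=Ada_Grad, beam_width=8)

# Loading path: the gzipped pickle at model.gz is read and passed to Task(model=...).
predictor = Model('model.gz', Task=MyTask, Searcher=MyDecoder)
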
Code Example #2
def isan(**args):
    orginal_args = args
    ns = argparse.Namespace()
    ns.logfile = '/dev/null'
    for k, v in args.items():
        setattr(ns, k, v)
    args = ns
    info_color = '34'

    instream = sys.stdin if args.input is None else open(args.input, 'r')
    outstream = sys.stdout if args.output is None else open(
        args.output, 'a' if args.append else 'w')

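    # Logging setup: console handler at INFO, file handler at DEBUG (with a ContextFilter),
    # and a StreamHandler writing into the in-memory Recorder whose contents are returned at the end.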
    rec = Recorder()
    logger = logging.getLogger('s' + str(random.random()))
    console = logging.StreamHandler()
    logfile = logging.FileHandler(args.logfile, 'w')
    logfile.setLevel(logging.DEBUG)
    logfile.addFilter(ContextFilter())
    recstream = logging.StreamHandler(rec)

    console.setLevel(logging.INFO)

    logger.addHandler(console)
    logger.addHandler(logfile)
    logger.addHandler(recstream)
    if hasattr(args, 'log_handlers'):
        for handler in args.log_handlers:
            #handler.addFilter(ContextFilter())
            logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

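    # Resolve the Model, Task, Decoder and Updater classes from their dotted "module.Class" names.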
    if args.model_module:
        mod, _, cls = args.model_module.rpartition('.')
        Model = getattr(__import__(mod, globals(), locals(), [cls], 0), cls)
    if args.task:
        mod, _, cls = args.task.rpartition('.')
        Task = getattr(__import__(mod, globals(), locals(), [cls], 0), cls)
    if args.decoder:
        mod, _, cls = args.decoder.rpartition('.')
        Decoder = getattr(__import__(mod, globals(), locals(), [cls], 0), cls)
    if args.updater:
        mod, _, cls = args.updater.rpartition('.')
        Updater = getattr(__import__(mod, globals(), locals(), [cls], 0), cls)

    name_model = Model.name if hasattr(Model, 'name') else 'the given learning algorithm'
    name_decoder = Decoder.name if hasattr(Decoder, 'name') else 'the given decoding algorithm'
    name_task = Task.name if hasattr(Task, 'name') else 'the given task algorithm'
    name_updater = Updater.name if hasattr(Updater, 'name') else 'some parameter-update algorithm'
    logger.info("""模型: %s 解码器: %s 搜索宽度: %s
任务: %s""" % (
        make_color(name_model, info_color),
        make_color(name_decoder, info_color),
        make_color(args.beam_width, info_color),
        make_color(name_task, info_color),
    ))

    if args.train or args.append_model:
        """如果指定了训练集,就训练模型"""

        logger.info(
            """参数更新算法 : %(updater)s batch size : %(bs)s""" % {
                'bs': make_color(args.batch_size, info_color),
                'updater': make_color(name_updater, info_color),
            })

        random.seed(args.seed)
        model = Model(None, (lambda **x: Task(cmd_args=args, **x)),
                      Decoder,
                      beam_width=int(args.beam_width),
                      Updater=Updater,
                      logger=logger,
                      cmd_args=args)

        if args.train:
            logger.info('Random seed: %s' % (make_color(str(args.seed))))

            logger.info(
                "由训练语料库%s迭代%s次,训练%s模型保存在%s。" %
                (make_color(' '.join(args.train)), make_color(
                    args.iteration), name_task, make_color(args.model_file)))
            if args.dev_file:
                logger.info("开发集使用%s" % (make_color(' '.join(args.dev_file))))

            model.train(args.train,
                        int(args.iteration),
                        peek=args.peek,
                        batch_size=args.batch_size,
                        dev_files=args.dev_file)
            model.save(args.model_file)

        if args.append_model:  ### append multiple models
            task = Task(cmd_args=args, paras=Parameters(Updater))
            for m in args.append_model:
                print(m)
                task.add_model(pickle.load(gzip.open(m, 'rb')))
            pickle.dump(task.dump_weights(), gzip.open(args.model_file, 'wb'))

    if args.train and not args.test_file:
        del logger
        del model
        return list(rec)

    if not args.train:
        print("使用模型文件%s进行%s" % (make_color(args.model_file), name_task),
              file=sys.stderr)

    #print(args.model_file)
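    # Build the model from model_file for testing or interactive decoding.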
    model = Model(
        args.model_file,
        (lambda **x: Task(cmd_args=args, **x)),
        Searcher=Decoder,
        beam_width=int(args.beam_width),
        logger=logger,
        cmd_args=args,
    )
    """如果指定了测试集,就测试模型"""
    if args.test_file:
        print("使用已经过%s的文件%s作为测试集" % (name_task, make_color(args.test_file)),
              file=sys.stderr)
        model.test(args.test_file)
        return list(rec)

    if not args.test_file and not args.append_model and not args.train:
        threshold = args.threshold
        print("以 %s 作为输入,以 %s 作为输出" %
              (make_color('标准输入流'), make_color('标准输出流')),
              file=sys.stderr)
        if threshold:
            print("输出分数差距在 %s 之内的候选词" % (make_color(threshold)),
                  file=sys.stderr)
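        # Decode each input line: parse it with the task codec, run the model,
        # and write the encoded result (or scored candidates) to the output stream.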
        for line in instream:
            line = line.strip()
            line = model.task.codec.decode(line)
            raw = line.get('raw', '')
            Y = line.get('Y_a', None)
            if threshold:
                print(model.task.codec.encode_candidates(
                    model(raw, Y, threshold=threshold)),
                      file=outstream)
            else:
                print(model.task.codec.encode(model(raw, Y)), file=outstream)
    return list(rec)
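
For reference, a sketch of how isan() might be driven programmatically. Every keyword below corresponds to an attribute the function reads from args; the dotted class paths and file names are hypothetical placeholders, not values shipped with the project.

# Hypothetical call; module paths and file names are placeholders.
records = isan(
    model_module='isan.common.perceptrons.Model',  # dotted "module.Class" paths,
    task='isan.tasks.some_task.Task',              # resolved via __import__ above
    decoder='isan.decoders.beam.Decoder',
    updater='isan.updaters.ada_grad.Ada_Grad',
    beam_width=8,
    train=['train.txt'], iteration=5, batch_size=1, seed=123,
    peek=None, dev_file=None, model_file='model.gz',
    input=None, output=None, append=False,
    append_model=None, test_file=None, threshold=None,
)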