def dataset_build(self, opt):
    """Run the preprocess pipeline end-to-end for *opt*, then clean up.

    Seeds tiny dummy side-vocab files when the options request them,
    builds the train dataset, the vocab, and the valid dataset, and
    finally deletes every artifact the run produced.
    """
    fields = onmt.inputters.get_fields("text", 0, 0)

    # Materialize a minimal six-token vocab file for each side requested.
    for vocab_attr in ('src_vocab', 'tgt_vocab'):
        if hasattr(opt, vocab_attr) and len(getattr(opt, vocab_attr)) > 0:
            with codecs.open(getattr(opt, vocab_attr), 'w', 'utf-8') as vf:
                vf.write('a\nb\nc\nd\ne\nf\n')

    src_reader = onmt.inputters.str2reader[opt.data_type].from_opt(opt)
    tgt_reader = onmt.inputters.str2reader["text"].from_opt(opt)

    # This API variant takes the readers bundled into a single list.
    train_data_files = preprocess.build_save_dataset(
        'train', fields, [src_reader, tgt_reader], opt)
    preprocess.build_save_vocab(train_data_files, fields, opt)
    preprocess.build_save_dataset(
        'valid', fields, [src_reader, tgt_reader], opt)

    # Remove the generated *.pt shards and any vocab files written above.
    for shard in glob.glob(SAVE_DATA_PREFIX + '*.pt'):
        os.remove(shard)
    for vocab_attr in ('src_vocab', 'tgt_vocab'):
        if hasattr(opt, vocab_attr) and os.path.exists(getattr(opt, vocab_attr)):
            os.remove(getattr(opt, vocab_attr))
# Example #2 (score: 0)
    def dataset_build(self, opt):
        """Exercise the full preprocess flow (train + vocab + valid) and
        remove every file it produces afterwards."""
        fields = onmt.inputters.get_fields("text", 0, 0)

        def _seed_vocab(path):
            # Tiny six-token vocab file for the -src_vocab/-tgt_vocab path.
            with codecs.open(path, 'w', 'utf-8') as handle:
                handle.write('a\nb\nc\nd\ne\nf\n')

        if hasattr(opt, 'src_vocab') and len(opt.src_vocab) > 0:
            _seed_vocab(opt.src_vocab)
        if hasattr(opt, 'tgt_vocab') and len(opt.tgt_vocab) > 0:
            _seed_vocab(opt.tgt_vocab)

        src_reader = onmt.inputters.str2reader[opt.data_type].from_opt(opt)
        tgt_reader = onmt.inputters.str2reader["text"].from_opt(opt)

        # NOTE: this API variant passes the readers positionally, not as a list.
        train_data_files = preprocess.build_save_dataset(
            'train', fields, src_reader, tgt_reader, opt)
        preprocess.build_save_vocab(train_data_files, fields, opt)
        preprocess.build_save_dataset(
            'valid', fields, src_reader, tgt_reader, opt)

        # Clean up: generated *.pt shards plus any vocab files we wrote.
        for leftover in glob.glob(SAVE_DATA_PREFIX + '*.pt'):
            os.remove(leftover)
        if hasattr(opt, 'src_vocab') and os.path.exists(opt.src_vocab):
            os.remove(opt.src_vocab)
        if hasattr(opt, 'tgt_vocab') and os.path.exists(opt.tgt_vocab):
            os.remove(opt.tgt_vocab)
# Example #3 (score: 0)
    def dataset_build(self, opt):
        """Build train/valid datasets plus vocab via the legacy ``onmt.io``
        API, then delete the generated shard files."""
        fields = onmt.io.get_fields("text", 0, 0)

        saved_train = preprocess.build_save_dataset('train', fields, opt)
        preprocess.build_save_vocab(saved_train, fields, opt)
        preprocess.build_save_dataset('valid', fields, opt)

        # Drop every *.pt shard the run produced.
        for artifact in glob.glob(SAVE_DATA_PREFIX + '*.pt'):
            os.remove(artifact)
# Example #4 (score: 0)
    def dataset_build(self, opt):
        """Smoke-test the preprocess pipeline (legacy ``onmt.io`` API)
        and clean its on-disk output."""
        fields = onmt.io.get_fields("text", 0, 0)

        train_parts = preprocess.build_save_dataset('train', fields, opt)
        preprocess.build_save_vocab(train_parts, fields, opt)
        preprocess.build_save_dataset('valid', fields, opt)

        # Remove generated *.pt files so successive runs stay isolated.
        leftovers = glob.glob(SAVE_DATA_PREFIX + '*.pt')
        for path in leftovers:
            os.remove(path)
    def dataset_build(self, opt):
        """Run preprocess end-to-end (train, vocab, valid), seeding dummy
        side-vocab files if requested, and erase everything afterwards."""
        fields = onmt.inputters.get_fields("text", 0, 0)

        # Write a minimal vocab file for each side the options request.
        for attr in ("src_vocab", "tgt_vocab"):
            if hasattr(opt, attr) and len(getattr(opt, attr)) > 0:
                with codecs.open(getattr(opt, attr), "w", "utf-8") as out:
                    out.write("a\nb\nc\nd\ne\nf\n")

        train_data_files = preprocess.build_save_dataset("train", fields, opt)
        preprocess.build_save_vocab(train_data_files, fields, opt)
        preprocess.build_save_dataset("valid", fields, opt)

        # Remove generated *.pt shards and the vocab files written above.
        for shard in glob.glob(SAVE_DATA_PREFIX + "*.pt"):
            os.remove(shard)
        for attr in ("src_vocab", "tgt_vocab"):
            if hasattr(opt, attr) and os.path.exists(getattr(opt, attr)):
                os.remove(getattr(opt, attr))