Esempio n. 1
1
class MosesTranslator(Translator):
    """Handles the 'translate' task for MTMonkeyWorkers using Moses XML-RPC servers
    and built-in segmentation, tokenization, and detokenization.
    """

    def __init__(self, translate_port, recase_port, source_lang, target_lang, threads):
        """Initialize a MosesTranslator object according to the given
        configuration settings.

        @param translate_port: the port at which the Moses translator operates
        @param recase_port: the port at which the recaser operates (None disables recasing)
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        @param threads: number of threads used for parallel translation
        """
        # precompile XML-RPC Moses server addresses
        self.translate_proxy_addr = "http://localhost:" + translate_port + "/RPC2"
        # recasing is optional: the address stays None when no recaser port is given
        self.recase_proxy_addr = None
        if recase_port is not None:
            self.recase_proxy_addr = "http://localhost:" + recase_port + "/RPC2"

        # initialize text processing tools (can be shared among threads)
        self.splitter = SentenceSplitter({'language': source_lang})
        self.tokenizer = Tokenizer({'lowercase': True,
                                    'moses_escape': True})
        self.detokenizer = Detokenizer({'moses_deescape': True,
                                        'capitalize_sents': True,
                                        'language': target_lang})
        self.threads = threads


    def process_task(self, task):
        """Process a translation task: optionally split the request into
        sentences, then translate (and recase) each sentence in parallel.

        @param task: task dict; 'text' is required; 'alignmentInfo',
            'detokenize', 'tokenize', 'segment' and 'nBestSize' are optional
        @return: dict with a fresh 'translationId' and per-sentence results
            under 'translation'
        """
        # check parameters
        # be lenient and allow anything that can map to a boolean for alignmentInfo and detokenize
        doalign = _convert_boolean(task.get('alignmentInfo', ''), False)
        dodetok = _convert_boolean(task.get('detokenize', ''), True)
        dotok = _convert_boolean(task.get('tokenize', ''), True)
        dosegment = _convert_boolean(task.get('segment', ''), True)

        # n-best list size is capped at 10
        nbestsize = min(task.get('nBestSize', 1), 10)

        # run the translation
        src_lines = self.splitter.split_sentences(task['text']) if dosegment else [task['text']]
        # return tokenized source only when alignment was requested or the
        # request was split into several sentences
        ret_src_tok = doalign or len(src_lines) > 1

        def _translator(line):
            return self._translate(line, doalign, dodetok, nbestsize, ret_src_tok, dotok, dosegment)

        translated = parallel_map(_translator, src_lines)

        return {
            'translationId': uuid.uuid4().hex,
            'translation': translated
        }

    def _translate(self, src, doalign, dodetok, nbestsize, ret_src_tok, dotok, dosegment):
        """Translate and recase one sentence. Optionally, word alignment
        between source and target is included on the output.

        @param src: source text (one sentence).
        @param doalign: return word alignment information?
        @param dodetok: detokenize output?
        @param nbestsize: size of n-best lists on the output
        @param ret_src_tok: return tokenized source sentences?
        @param dotok: tokenize the source before translation?
        @param dosegment: unused here (segmentation happens in process_task)
        """

        # create server proxies (needed for each thread)
        translate_proxy = xmlrpclib.ServerProxy(self.translate_proxy_addr)
        recase_proxy = None
        if self.recase_proxy_addr is not None:  # recasing only if there is a recaser set up
            recase_proxy = xmlrpclib.ServerProxy(self.recase_proxy_addr)

        # tokenize
        src_tokenized = self.tokenizer.tokenize(src) if dotok else src

        # translate
        translation = translate_proxy.translate({
            "text": src_tokenized,
            "align": doalign,
            "nbest": nbestsize,
            "nbest-distinct": True,
        })

        # provide n-best lists; enumerate replaces the original manual
        # rank counter (same ranks, clearer intent)
        hypos = []
        for rank, hypo in enumerate(translation['nbest']):
            # recase (if there is a recaser set up)
            if recase_proxy is not None:
                recased = recase_proxy.translate({"text": hypo['hyp']})['text'].strip()
            else:
                recased = hypo['hyp']

            # construct the output
            parsed_hypo = {
                'text': recased,
                'score': hypo['totalScore'],
                'rank': rank,
            }
            if dodetok:  # detokenize if needed
                parsed_hypo['text'] = self.detokenizer.detokenize(recased)

            if doalign:  # provide alignment information if needed
                parsed_hypo['tokenized'] = recased
                parsed_hypo['alignment-raw'] = _add_tgt_end(hypo['align'], recased)

            hypos.append(parsed_hypo)

        result = {
            'src': src,
            'translated': hypos,
        }

        if ret_src_tok:
            result['src-tokenized'] = src_tokenized

        return result
Esempio n. 2
0
    def __init__(self, translate_port, recase_port, source_lang, target_lang,
                 threads):
        """Set up Moses XML-RPC endpoint addresses and the shared
        text-processing tools.

        @param translate_port: the port at which the Moses translator operates
        @param recase_port: the port at which the recaser operates (may be None)
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        """
        # XML-RPC Moses server addresses; the recaser is optional
        self.translate_proxy_addr = "http://localhost:%s/RPC2" % translate_port
        if recase_port is not None:
            self.recase_proxy_addr = "http://localhost:%s/RPC2" % recase_port
        else:
            self.recase_proxy_addr = None

        # text-processing tools, shareable among worker threads
        self.splitter = SentenceSplitter({'language': source_lang})
        self.tokenizer = Tokenizer({'moses_escape': True, 'lowercase': True})
        self.detokenizer = Detokenizer({'language': target_lang,
                                        'capitalize_sents': True,
                                        'moses_deescape': True})
        self.threads = threads
Esempio n. 3
0
    def __init__(self, translate_host, translate_port, source_lang,
                 target_lang, model_name, problem, t2t_usr_dir, data_dir,
                 preprocess_cmd, postprocess_cmd):
        """Set up a TransformerTranslator: TF serving address, shared
        text-processing tools, and the gRPC request function for the given
        Tensor2Tensor problem.

        @param translate_host: host of the TensorFlow serving server
        @param translate_port: the port at which the server operates
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        @param preprocess_cmd: bash command for text preprocessing
        @param postprocess_cmd: bash command for text postprocessing
        """
        # address of the TensorFlow serving endpoint
        self.server = translate_host + ":" + translate_port

        # shared text-processing tool and external commands
        self.tokenizer = Tokenizer({'moses_escape': True, 'lowercase': True})
        self.preprocess = preprocess_cmd
        self.postprocess = postprocess_cmd

        # register user problem code, resolve hyper-parameters, and build
        # the gRPC request function used for translation calls
        usr_dir.import_usr_dir(t2t_usr_dir)
        self.problem = registry.problem(problem)
        self.problem.get_hparams(
            tf.contrib.training.HParams(data_dir=os.path.expanduser(data_dir)))
        self.request_fn = serving_utils.make_grpc_request_fn(
            servable_name=model_name, server=self.server, timeout_secs=30)
Esempio n. 4
0
    def __init__(self, translate_port, source_lang, target_lang):
        """Set up the Moses XML-RPC address (with the language pair in the
        query string) and the shared text-processing tools."""
        # XML-RPC Moses server address; src/tgt languages are selected
        # via the query string
        self.translate_proxy_addr = (
            "http://localhost:%s/RPC2?src=%s;tgt=%s"
            % (translate_port, source_lang, target_lang))

        # text-processing tools, shareable among worker threads
        self.splitter = SentenceSplitter({'language': source_lang})
        self.tokenizer = Tokenizer({'moses_escape': True, 'lowercase': True})
        self.detokenizer = Detokenizer({'language': target_lang,
                                        'capitalize_sents': True,
                                        'moses_deescape': True})
Esempio n. 5
0
    def __init__(self, translate_host, translate_port, source_lang, target_lang, model_name, problem, t2t_usr_dir, data_dir, preprocess_cmd, postprocess_cmd):
        """Configure a TransformerTranslator: build the TF serving address,
        set up shared text-processing tools, and prepare the gRPC request
        function for the given Tensor2Tensor problem.

        @param translate_host: host of the TensorFlow serving server
        @param translate_port: the port at which the server operates
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        @param preprocess_cmd: bash command for text preprocessing
        @param postprocess_cmd: bash command for text postprocessing
        """
        # address of the TensorFlow serving endpoint
        self.server = translate_host + ":" + translate_port

        # external pre-/post-processing commands and the shared tokenizer
        self.preprocess = preprocess_cmd
        self.postprocess = postprocess_cmd
        self.tokenizer = Tokenizer({'moses_escape': True, 'lowercase': True})

        # make the T2T problem (and any user code) importable, then build
        # the gRPC request function used for translation calls
        usr_dir.import_usr_dir(t2t_usr_dir)
        self.problem = registry.problem(problem)
        model_hparams = tf.contrib.training.HParams(
            data_dir=os.path.expanduser(data_dir))
        self.problem.get_hparams(model_hparams)
        self.request_fn = serving_utils.make_grpc_request_fn(
            servable_name=model_name, server=self.server, timeout_secs=30)
Esempio n. 6
0
    def __init__(self, translate_host, translate_port, translate_path,
                 source_lang, target_lang, preprocess_cmd, postprocess_cmd):
        """Configure a Marian websocket translator.

        @param translate_host: host name of the Marian server
        @param translate_port: the port at which the Marian server operates
        @param translate_path: URL path of the websocket endpoint
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        @param preprocess_cmd: bash command for text preprocessing
        @param postprocess_cmd: bash command for text postprocessing
        """
        # websocket address of the Marian server
        self.translate_proxy_addr = "ws://%s:%s/%s" % (
            translate_host, translate_port, translate_path)

        # shared text-processing tool and external commands
        self.tokenizer = Tokenizer({'moses_escape': True, 'lowercase': True})
        self.preprocess = preprocess_cmd
        self.postprocess = postprocess_cmd
Esempio n. 7
0
    def __init__(self, translate_host, translate_port, translate_path, source_lang, target_lang, preprocess_cmd, postprocess_cmd):
        """Set up a Marian websocket translator: endpoint address, shared
        tokenizer, and external pre-/post-processing commands.

        @param translate_host: host name of the Marian server
        @param translate_port: the port at which the Marian server operates
        @param translate_path: URL path of the websocket endpoint
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        @param preprocess_cmd: bash command for text preprocessing
        @param postprocess_cmd: bash command for text postprocessing
        """
        # websocket address of the Marian server
        self.translate_proxy_addr = ("ws://" + translate_host + ":" +
                                     translate_port + "/" + translate_path)

        # external pre-/post-processing commands and the shared tokenizer
        self.preprocess = preprocess_cmd
        self.postprocess = postprocess_cmd
        self.tokenizer = Tokenizer({'moses_escape': True, 'lowercase': True})
Esempio n. 8
0
    def __init__(self, translate_port, recase_port, source_lang, target_lang, threads):
        """Build the Moses XML-RPC endpoint addresses and the shared
        text-processing tools.

        @param translate_port: the port at which the Moses translator operates
        @param recase_port: the port at which the recaser operates (may be None)
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        """
        # XML-RPC Moses server addresses; the recaser address is None
        # when recasing is disabled
        self.translate_proxy_addr = "http://localhost:" + translate_port + "/RPC2"
        self.recase_proxy_addr = ("http://localhost:" + recase_port + "/RPC2"
                                  if recase_port is not None else None)

        # text-processing tools, shareable among worker threads
        self.splitter = SentenceSplitter({'language': source_lang})
        self.tokenizer = Tokenizer({'moses_escape': True, 'lowercase': True})
        self.detokenizer = Detokenizer({'language': target_lang,
                                        'capitalize_sents': True,
                                        'moses_deescape': True})
        self.threads = threads
Esempio n. 9
0
class MarianTranslator(Translator):
    """Handles the 'translate' task for MTMonkeyWorkers using a Marian NMT
    server reached over a websocket, with built-in tokenization and optional
    external pre-/post-processing shell commands.
    """
    def __init__(self, translate_host, translate_port, translate_path,
                 source_lang, target_lang, preprocess_cmd, postprocess_cmd):
        """Initialize a MarianTranslator object according to the given
        configuration settings.

        @param translate_host: host name of the Marian server
        @param translate_port: the port at which the Marian server operates
        @param translate_path: URL path of the websocket endpoint
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        @param preprocess_cmd: bash command for text preprocessing
        @param postprocess_cmd: bash command for text postprocessing
        """
        # precompile Marian server addresses
        self.translate_proxy_addr = "ws://" + translate_host + ":" + translate_port + "/" + translate_path

        # initialize text processing tools (can be shared among threads)
        self.tokenizer = Tokenizer({'lowercase': True, 'moses_escape': True})
        self.preprocess = preprocess_cmd
        self.postprocess = postprocess_cmd

    def process_task(self, task):
        """Process a translation task: translate the whole request text in a
        single call and wrap the result in the MTMonkey response structure
        (one hypothesis with a fixed score of 100)."""
        # check parameters
        # be lenient and allow anything that can map to a boolean for the flags
        dodetok = _convert_boolean(task.get('detokenize', ''), True)
        dotok = _convert_boolean(task.get('tokenize', ''), True)
        dosegment = _convert_boolean(task.get('segment', ''), True)

        # run the translation
        def _translator(text):
            return self._translate(text, dodetok, dotok, dosegment)

        translated = _translator(task['text'])

        # NOTE(review): 'text' receives the whole dict returned by _translate
        # ({'src': ..., 'translated': ...}), not just the translated string --
        # confirm downstream consumers expect this shape.
        result = {
            'translation': [{
                'translated': [{
                    'text': translated,
                    'score': 100,
                    'rank': 0
                }],
                'translationId':
                uuid.uuid4().hex
            }],
        }
        return result

    def _translate(self, src, dodetok, dotok, dosegment):
        """Translate one text via the Marian websocket server.

        @param src: source text.
        @param dodetok: detokenize output? (accepted but not used here)
        @param dotok: tokenize the input before translation?
        @param dosegment: split into sentences? (accepted but not used here)
        """
        def _prepare_cmd(cmd, inputValue="", outputValue=""):
            # Turn the command template into an argv list while keeping the
            # substituted input/output values (which may contain spaces) as
            # single arguments: template spaces are replaced by a sentinel
            # first, then the placeholders are filled in.
            SPACE_SPLIT_ELEMENT = "SPACE_SPLIT_ELEMENT"
            cmd_args = cmd.replace(" ", SPACE_SPLIT_ELEMENT).replace(
                '"$input"', inputValue).replace('"$output"', outputValue)
            return cmd_args.split(SPACE_SPLIT_ELEMENT)

        def _run_cmd(*args):
            # Run the argv list passed as the first positional argument;
            # return (0, stdout) on success, (returncode, output) on failure.
            try:
                out = subprocess.check_output(args[0]).strip()
                return 0, out
            except subprocess.CalledProcessError as grepexc:
                return grepexc.returncode, grepexc.output

        # tokenize
        src_tokenized = self.tokenizer.tokenize(src) if dotok else src

        # optional external preprocessing; on failure keep the tokenized
        # text and log the error to stderr (best effort)
        if (self.preprocess):
            cmd_args = _prepare_cmd(self.preprocess, src)
            (cmd_error, cmd_output) = _run_cmd(cmd_args)
            if (cmd_error == 0):
                src_tokenized = cmd_output
            else:
                sys.stderr.write("{0}\n{1}".format(cmd_error, cmd_output))

        # translate: send the text over the websocket and wait for the reply
        ws = create_connection(self.translate_proxy_addr)
        ws.send(src_tokenized)
        result = ws.recv()

        # optional external postprocessing of the raw translation
        if (self.postprocess):
            cmd_args = _prepare_cmd(self.postprocess, src, result)
            (cmd_error, cmd_output) = _run_cmd(cmd_args)
            if (cmd_error == 0):
                result = cmd_output
            else:
                sys.stderr.write("{0}\n{1}".format(cmd_error, cmd_output))

        result = {'src': src, 'translated': result}

        return result
Esempio n. 10
0
class Translator:
    """Handles the 'translate' task for KhresmoiWorker"""

    def __init__(self, translate_port, source_lang, target_lang):
        """Initialize a Translator object according to the given 
        configuration settings."""
        # precompile XML-RPC Moses server addresses
        self.translate_proxy_addr = "http://*****:*****@@')
        for x in range(0,len(src_translate)):
            src_translate[x] = bpe.segment(src_translate[x]).strip()
        print src_translate[:3]
        #print src_translate
        translated = [self._translate(line, doalign, dodetok, nbestsize) for line in src_translate]
        return _backward_transform({
            'translationId': uuid.uuid4().hex,
            'sentences': translated
        }, doalign, dodetok)

    def _translate(self, src, doalign, dodetok, nbestsize):
        """Translate one sentence via the Moses XML-RPC server. Optionally,
        word alignment between source and target is included in the output.

        @param src: source text (one sentence).
        @param doalign: return word alignment information?
        @param dodetok: detokenize the hypotheses?
        @param nbestsize: size of n-best lists on the output
        """

        # create server proxies (needed for each thread)
        translate_proxy = xmlrpclib.ServerProxy(self.translate_proxy_addr)
       # recase_proxy = xmlrpclib.ServerProxy(self.recase_proxy_addr)



        # translate (the source is passed to the server as-is)
        translation = translate_proxy.translate({
            "text": src,
            "align": doalign,
            "nbest": nbestsize,
            "nbest-distinct": True,
        })

        # provide n-best lists; no recasing -- the recaser proxy is
        # commented out above
        rank = 0
        hypos = []
        for hypo in translation['nbest']:
            recased = hypo['hyp']
            parsed_hypo = {
                'text': recased,
                'score': hypo['totalScore'],
                'rank': rank,
            }
            if dodetok:
                parsed_hypo['text'] = self.detokenizer.detokenize(recased)

            if doalign:
                parsed_hypo['tokenized'] = recased
                parsed_hypo['alignment-raw'] = _add_tgt_end(hypo['align'], recased)

            rank += 1
            hypos.append(parsed_hypo)

        result = {
            'src': src,
            'translated': hypos,
        }

        # NOTE(review): other variants gate 'src-tokenized' on a ret_src_tok
        # flag; here the (untokenized) source is returned whenever
        # detokenization is on -- confirm this is intended.
        if dodetok:
            result['src-tokenized'] = src

        return result
Esempio n. 11
0
class TransformerTranslator(Translator):
    """Handles the 'translate' task for MTMonkeyWorkers using a Tensor2Tensor
    Transformer model served by TensorFlow Serving over gRPC, with built-in
    tokenization and optional external pre-/post-processing shell commands.
    """

    def __init__(self, translate_host, translate_port, source_lang, target_lang, model_name, problem, t2t_usr_dir, data_dir, preprocess_cmd, postprocess_cmd):
        """Initialize a TransformerTranslator object according to the given
        configuration settings.

        @param translate_host: host of the TensorFlow serving server
        @param translate_port: the port at which the server operates
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        @param model_name: servable model name on the TF serving server
        @param problem: registered Tensor2Tensor problem name
        @param t2t_usr_dir: directory with user-defined T2T code
        @param data_dir: T2T data directory (vocabularies etc.)
        @param preprocess_cmd: bash command for text preprocessing
        @param postprocess_cmd: bash command for text postprocessing
        """
        # precompile Tensorflow server addresses
        self.server = translate_host + ":" + translate_port

        # initialize text processing tools (can be shared among threads)
        self.tokenizer = Tokenizer({'lowercase': True,
                                    'moses_escape': True})
        self.preprocess = preprocess_cmd
        self.postprocess = postprocess_cmd
        # register user problem code and resolve model hyper-parameters
        usr_dir.import_usr_dir(t2t_usr_dir)
        self.problem = registry.problem(problem)
        hparams = tf.contrib.training.HParams(
            data_dir=os.path.expanduser(data_dir))
        self.problem.get_hparams(hparams)
        self.request_fn = serving_utils.make_grpc_request_fn(
            servable_name=model_name,
            server=self.server,
            timeout_secs=30)

    def process_task(self, task):
        """Process a translation task: translate the whole request text in a
        single call and wrap the result (with the model score) in the
        MTMonkey response structure."""
        # check parameters
        # be lenient and allow anything that can map to a boolean for the flags
        dodetok = _convert_boolean(task.get('detokenize', ''), True)
        dotok = _convert_boolean(task.get('tokenize', ''), True)
        dosegment = _convert_boolean(task.get('segment', ''), True)

        # run the translation
        def _translator(text):
            return self._translate(text, dodetok, dotok, dosegment)

        translated, score = _translator(task['text'])

        # NOTE(review): 'text' receives the whole dict returned by _translate
        # ({'src': ..., 'translated': ...}) -- confirm consumers expect this.
        result = {'translation': [{'translated': [{'text': translated,
                                                   'score': score,
                                                   'rank': 0}],
                                   'translationId': uuid.uuid4().hex}], }
        return result

    def _translate(self, src, dodetok, dotok, dosegment):
        """Translate one text via the TF serving gRPC endpoint.

        @param src: source text.
        @param dodetok: detokenize output? (accepted but not used here)
        @param dotok: tokenize the input before translation?
        @param dosegment: split into sentences? (accepted but not used here)
        @return: ({'src': ..., 'translated': ...}, score) tuple
        """
        def _prepare_cmd(cmd, inputValue="", outputValue=""):
            # Turn the command template into an argv list while keeping the
            # substituted input/output values (which may contain spaces) as
            # single arguments.
            SPACE_SPLIT_ELEMENT="SPACE_SPLIT_ELEMENT"
            cmd_args = cmd.replace(" ", SPACE_SPLIT_ELEMENT).replace('"$input"', inputValue).replace('"$output"', outputValue)
            return cmd_args.split(SPACE_SPLIT_ELEMENT)

        def _run_cmd(*args):
            # Run the argv list passed as the first positional argument;
            # return (0, stdout) on success, (returncode, output) on failure.
            try:
                out = subprocess.check_output(args[0]).strip()
                return 0, out
            except subprocess.CalledProcessError as grepexc:
                return grepexc.returncode, grepexc.output

        # tokenize
        src_tokenized = self.tokenizer.tokenize(src) if dotok else src

        # optional external preprocessing; on failure keep the tokenized
        # text and log the error to stderr (best effort)
        if (self.preprocess):
            cmd_args = _prepare_cmd(self.preprocess, src)
            (cmd_error, cmd_output) = _run_cmd(cmd_args)
            if (cmd_error == 0):
                src_tokenized = cmd_output
            else:
                sys.stderr.write("{0}\n{1}".format(cmd_error, cmd_output))

        # translate: one-element batch, unpack the single (text, score) result
        outputs = serving_utils.predict([src_tokenized], self.problem, self.request_fn)
        outputs, = outputs
        result, score = outputs

        # optional external postprocessing of the raw translation
        if (self.postprocess):
            cmd_args = _prepare_cmd(self.postprocess, src, result)
            (cmd_error, cmd_output) = _run_cmd(cmd_args)
            if (cmd_error == 0):
                result = cmd_output
            else:
                 sys.stderr.write("{0}\n{1}".format(cmd_error, cmd_output))

        # score.item() converts the numpy scalar to a plain Python number
        result = {
            'src': src,
            'translated': result
        }, score.item()

        return result
Esempio n. 12
0
class MosesTranslator(Translator):
    """Handles the 'translate' task for MTMonkeyWorkers using Moses XML-RPC servers
    and built-in segmentation, tokenization, and detokenization.
    """
    def __init__(self, translate_port, recase_port, source_lang, target_lang,
                 threads):
        """Initialize a MosesTranslator object according to the given
        configuration settings.

        @param translate_port: the port at which the Moses translator operates
        @param recase_port: the port at which the recaser operates (None
            disables recasing)
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        @param threads: number of threads used for parallel translation
        """
        # precompile XML-RPC Moses server addresses
        self.translate_proxy_addr = "http://localhost:" + translate_port + "/RPC2"
        self.recase_proxy_addr = None
        if recase_port is not None:
            self.recase_proxy_addr = "http://localhost:" + recase_port + "/RPC2"

        # initialize text processing tools (can be shared among threads)
        self.splitter = SentenceSplitter({'language': source_lang})
        self.tokenizer = Tokenizer({'lowercase': True, 'moses_escape': True})
        self.detokenizer = Detokenizer({
            'moses_deescape': True,
            'capitalize_sents': True,
            'language': target_lang
        })
        self.threads = threads

    def process_task(self, task):
        """Process a translation task: optionally split the request into
        sentences, then translate (and recase) each sentence in parallel."""
        # check parameters
        # be lenient and allow anything that can map to a boolean for alignmentInfo and detokenize
        doalign = _convert_boolean(task.get('alignmentInfo', ''), False)
        dodetok = _convert_boolean(task.get('detokenize', ''), True)
        dotok = _convert_boolean(task.get('tokenize', ''), True)
        dosegment = _convert_boolean(task.get('segment', ''), True)

        # n-best list size is capped at 10
        nbestsize = min(task.get('nBestSize', 1), 10)

        # run the translation
        src_lines = self.splitter.split_sentences(
            task['text']) if dosegment else [task['text']]
        # return tokenized source only when alignment was requested or the
        # request was split into several sentences
        ret_src_tok = doalign or len(src_lines) > 1

        def _translator(line):
            return self._translate(line, doalign, dodetok, nbestsize,
                                   ret_src_tok, dotok, dosegment)

        translated = parallel_map(_translator, src_lines)

        return {'translationId': uuid.uuid4().hex, 'translation': translated}

    def _translate(self, src, doalign, dodetok, nbestsize, ret_src_tok, dotok,
                   dosegment):
        """Translate and recase one sentence. Optionally, word alignment
        between source and target is included on the output.

        @param src: source text (one sentence).
        @param doalign: return word alignment information?
        @param dodetok: detokenize output?
        @param nbestsize: size of n-best lists on the output
        @param ret_src_tok: return tokenized source sentences?
        @param dotok: tokenize the source before translation?
        @param dosegment: unused here (segmentation happens in process_task)
        """

        # create server proxies (needed for each thread)
        translate_proxy = xmlrpclib.ServerProxy(self.translate_proxy_addr)
        recase_proxy = None
        if self.recase_proxy_addr is not None:  # recasing only if there is a recaser set up
            recase_proxy = xmlrpclib.ServerProxy(self.recase_proxy_addr)

        # tokenize
        src_tokenized = self.tokenizer.tokenize(src) if dotok else src

        # translate; NOTE(review): boolean options are passed as strings
        # here (str(doalign), str(True)), unlike other variants in this
        # file -- presumably required by this Moses server build; confirm.
        translation = translate_proxy.translate({
            "text":
            src_tokenized,
            "align":
            str(doalign),
            "nbest":
            nbestsize,
            "nbest-distinct":
            str(True),
            "no-ReportSegmentation":
            str(True),
        })

        # provide n-best lists
        rank = 0
        hypos = []
        for hypo in translation['nbest']:
            # recase (if there is a recaser set up)
            if recase_proxy is not None:
                recased = recase_proxy.translate({"text": hypo['hyp']
                                                  })['text'].strip()
            else:
                recased = hypo['hyp']

            # construct the output
            parsed_hypo = {
                'text': recased,
                'score': hypo['totalScore'],
                'rank': rank,
            }
            if dodetok:  # detokenize if needed
                parsed_hypo['text'] = self.detokenizer.detokenize(recased)

            if doalign:  # provide alignment information if needed
                parsed_hypo['tokenized'] = recased
                parsed_hypo['alignment-raw'] = _add_tgt_end(
                    hypo['align'], recased)

            rank += 1
            hypos.append(parsed_hypo)

        result = {
            'src': src,
            'translated': hypos,
        }

        if ret_src_tok:
            result['src-tokenized'] = src_tokenized

        return result
Esempio n. 13
0
class TransformerTranslator(Translator):
    """Handles the 'translate' task for MTMonkeyWorkers using a Tensor2Tensor
    Transformer model served by TensorFlow Serving over gRPC, with built-in
    tokenization and optional external pre-/post-processing shell commands.
    """
    def __init__(self, translate_host, translate_port, source_lang,
                 target_lang, model_name, problem, t2t_usr_dir, data_dir,
                 preprocess_cmd, postprocess_cmd):
        """Initialize a TransformerTranslator object according to the given
        configuration settings.

        @param translate_host: host of the TensorFlow serving server
        @param translate_port: the port at which the server operates
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        @param model_name: servable model name on the TF serving server
        @param problem: registered Tensor2Tensor problem name
        @param t2t_usr_dir: directory with user-defined T2T code
        @param data_dir: T2T data directory (vocabularies etc.)
        @param preprocess_cmd: bash command for text preprocessing
        @param postprocess_cmd: bash command for text postprocessing
        """
        # precompile Tensorflow server addresses
        self.server = translate_host + ":" + translate_port

        # initialize text processing tools (can be shared among threads)
        self.tokenizer = Tokenizer({'lowercase': True, 'moses_escape': True})
        self.preprocess = preprocess_cmd
        self.postprocess = postprocess_cmd
        # register user problem code and resolve model hyper-parameters
        usr_dir.import_usr_dir(t2t_usr_dir)
        self.problem = registry.problem(problem)
        hparams = tf.contrib.training.HParams(
            data_dir=os.path.expanduser(data_dir))
        self.problem.get_hparams(hparams)
        self.request_fn = serving_utils.make_grpc_request_fn(
            servable_name=model_name, server=self.server, timeout_secs=30)

    def process_task(self, task):
        """Process a translation task: translate the whole request text in a
        single call and wrap the result (with the model score) in the
        MTMonkey response structure."""
        # check parameters
        # be lenient and allow anything that can map to a boolean for the flags
        dodetok = _convert_boolean(task.get('detokenize', ''), True)
        dotok = _convert_boolean(task.get('tokenize', ''), True)
        dosegment = _convert_boolean(task.get('segment', ''), True)

        # run the translation
        def _translator(text):
            return self._translate(text, dodetok, dotok, dosegment)

        translated, score = _translator(task['text'])

        # NOTE(review): 'text' receives the whole dict returned by _translate
        # ({'src': ..., 'translated': ...}) -- confirm consumers expect this.
        result = {
            'translation': [{
                'translated': [{
                    'text': translated,
                    'score': score,
                    'rank': 0
                }],
                'translationId':
                uuid.uuid4().hex
            }],
        }
        return result

    def _translate(self, src, dodetok, dotok, dosegment):
        """Translate one text via the TF serving gRPC endpoint.

        @param src: source text.
        @param dodetok: detokenize output? (accepted but not used here)
        @param dotok: tokenize the input before translation?
        @param dosegment: split into sentences? (accepted but not used here)
        @return: ({'src': ..., 'translated': ...}, score) tuple
        """
        def _prepare_cmd(cmd, inputValue="", outputValue=""):
            # Turn the command template into an argv list while keeping the
            # substituted input/output values (which may contain spaces) as
            # single arguments.
            SPACE_SPLIT_ELEMENT = "SPACE_SPLIT_ELEMENT"
            cmd_args = cmd.replace(" ", SPACE_SPLIT_ELEMENT).replace(
                '"$input"', inputValue).replace('"$output"', outputValue)
            return cmd_args.split(SPACE_SPLIT_ELEMENT)

        def _run_cmd(*args):
            # Run the argv list passed as the first positional argument;
            # return (0, stdout) on success, (returncode, output) on failure.
            try:
                out = subprocess.check_output(args[0]).strip()
                return 0, out
            except subprocess.CalledProcessError as grepexc:
                return grepexc.returncode, grepexc.output

        # tokenize
        src_tokenized = self.tokenizer.tokenize(src) if dotok else src

        # optional external preprocessing; on failure keep the tokenized
        # text and log the error to stderr (best effort)
        if (self.preprocess):
            cmd_args = _prepare_cmd(self.preprocess, src)
            (cmd_error, cmd_output) = _run_cmd(cmd_args)
            if (cmd_error == 0):
                src_tokenized = cmd_output
            else:
                sys.stderr.write("{0}\n{1}".format(cmd_error, cmd_output))

        # translate: one-element batch, unpack the single (text, score) result
        outputs = serving_utils.predict([src_tokenized], self.problem,
                                        self.request_fn)
        outputs, = outputs
        result, score = outputs

        # optional external postprocessing of the raw translation
        if (self.postprocess):
            cmd_args = _prepare_cmd(self.postprocess, src, result)
            (cmd_error, cmd_output) = _run_cmd(cmd_args)
            if (cmd_error == 0):
                result = cmd_output
            else:
                sys.stderr.write("{0}\n{1}".format(cmd_error, cmd_output))

        # score.item() converts the numpy scalar to a plain Python number
        result = {'src': src, 'translated': result}, score.item()

        return result
Esempio n. 14
0
class MarianTranslator(Translator):
    """Handles the 'translate' task for MTMonkeyWorkers using a Marian
    translation server over a WebSocket connection, with built-in
    tokenization and optional external pre-/post-processing commands.
    """

    def __init__(self, translate_host, translate_port, translate_path,
                 source_lang, target_lang, preprocess_cmd, postprocess_cmd):
        """Initialize a MarianTranslator object according to the given
        configuration settings.

        @param translate_host: host where the Marian server runs
        @param translate_port: port at which the Marian server operates
        @param translate_path: URL path of the Marian WebSocket endpoint
        @param source_lang: source language (ISO-639-1 ID)
        @param target_lang: target language (ISO-639-1 ID)
        @param preprocess_cmd: shell command for text preprocessing
        @param postprocess_cmd: shell command for text postprocessing
        """
        # precompile the Marian server WebSocket address
        self.translate_proxy_addr = ("ws://" + translate_host + ":" +
                                     translate_port + "/" + translate_path)

        # initialize text processing tools (can be shared among threads)
        self.tokenizer = Tokenizer({'lowercase': True,
                                    'moses_escape': True})
        self.preprocess = preprocess_cmd
        self.postprocess = postprocess_cmd

    def process_task(self, task):
        """Process a translation task: the whole input text is translated in
        a single request to the Marian server (no sentence splitting)."""
        # be lenient and allow anything that can map to a boolean
        dodetok = _convert_boolean(task.get('detokenize', ''), True)
        dotok = _convert_boolean(task.get('tokenize', ''), True)
        dosegment = _convert_boolean(task.get('segment', ''), True)

        # run the translation
        translated = self._translate(task['text'], dodetok, dotok, dosegment)

        result = {'translation': [{'translated': [{'text': translated,
                                                   'score': 100,
                                                   'rank': 0}],
                                   'translationId': uuid.uuid4().hex}], }
        return result

    def _translate(self, src, dodetok, dotok, dosegment):
        """Translate one text using the Marian WebSocket server.

        @param src: source text
        @param dodetok: detokenize output? (accepted for interface parity;
            not used in this implementation)
        @param dotok: tokenize the input before translating?
        @param dosegment: segment the input? (accepted for interface parity;
            not used in this implementation)
        @return: dict with 'src' and 'translated' keys
        """
        def _prepare_cmd(cmd, inputValue="", outputValue=""):
            # Split the command string into argv. Spaces are replaced with a
            # sentinel *before* substituting the input/output values, so any
            # spaces inside the substituted values survive as part of a
            # single argument instead of being split on.
            SPACE_SPLIT_ELEMENT = "SPACE_SPLIT_ELEMENT"
            cmd_args = cmd.replace(" ", SPACE_SPLIT_ELEMENT).replace(
                '"$input"', inputValue).replace('"$output"', outputValue)
            return cmd_args.split(SPACE_SPLIT_ELEMENT)

        def _run_cmd(*args):
            # Run an external command; return (returncode, output).
            try:
                out = subprocess.check_output(args[0]).strip()
                return 0, out
            except subprocess.CalledProcessError as exc:
                return exc.returncode, exc.output
            except OSError as exc:
                # Command missing or not executable: report it like any other
                # command failure instead of crashing the worker.
                return 1, str(exc)

        # tokenize
        src_tokenized = self.tokenizer.tokenize(src) if dotok else src

        # optional external preprocessing (best-effort: on failure, log to
        # stderr and keep the tokenized source)
        if self.preprocess:
            cmd_args = _prepare_cmd(self.preprocess, src)
            cmd_error, cmd_output = _run_cmd(cmd_args)
            if cmd_error == 0:
                src_tokenized = cmd_output
            else:
                sys.stderr.write("{0}\n{1}".format(cmd_error, cmd_output))

        # translate via the Marian server; always close the connection, even
        # when send/recv raises (the original leaked one socket per request)
        ws = create_connection(self.translate_proxy_addr)
        try:
            ws.send(src_tokenized)
            result = ws.recv()
        finally:
            ws.close()

        # optional external postprocessing (best-effort, same policy as above)
        if self.postprocess:
            cmd_args = _prepare_cmd(self.postprocess, src, result)
            cmd_error, cmd_output = _run_cmd(cmd_args)
            if cmd_error == 0:
                result = cmd_output
            else:
                sys.stderr.write("{0}\n{1}".format(cmd_error, cmd_output))

        return {
            'src': src,
            'translated': result
        }
Esempio n. 15
0
 def __init__(self, translate_port, recase_port):
     """Create XML-RPC proxies for the Moses translator and recaser and the
     shared text-processing helpers."""
     rpc_url = "http://localhost:%s/RPC2"
     self.translate_proxy = xmlrpclib.ServerProxy(rpc_url % translate_port)
     self.recase_proxy = xmlrpclib.ServerProxy(rpc_url % recase_port)
     self.splitter = SentenceSplitter()
     self.tokenizer = Tokenizer({'lowercase': True, 'moses_escape': True})
     self.detokenizer = Detokenizer()
Esempio n. 16
0
class Translator:
    """Handles the 'translate' task for KhresmoiWorker"""

    def __init__(self, translate_port, recase_port):
        """Set up XML-RPC proxies for the Moses translator and recaser plus
        the shared text-processing tools.

        @param translate_port: local port of the Moses translation server
        @param recase_port: local port of the Moses recasing server
        """
        self.translate_proxy = xmlrpclib.ServerProxy("http://localhost:" + translate_port + "/RPC2")
        self.recase_proxy = xmlrpclib.ServerProxy("http://localhost:" + recase_port + "/RPC2")
        self.tokenizer = Tokenizer({'lowercase': True, 'moses_escape': True})
        self.detokenizer = Detokenizer()
        self.splitter = SentenceSplitter()

    def process_task(self, task):
        """Process translation task. Splits request into sentences, then
        translates and recases each sentence.

        @param task: task dict with 'text' plus optional 'alignmentInfo' and
            'detokenize' flags given as lenient boolean strings
        """
        doalign = task.get('alignmentInfo', '').lower() in ['true', 't', 'yes', 'y', '1']
        dodetok = not task.get('detokenize', '').lower() in ['false', 'f', 'no', 'n', '0']
        src_lines = self.splitter.split_sentences(task['text'])
        translated = [self._translate(line, doalign, dodetok) for line in src_lines]
        return {
            'translation': [
                {
                    "translationId": uuid.uuid4().hex,
                    "translated": translated
                }
            ]
        }

    def _translate(self, src, doalign, dodetok):
        """Translate and recase one sentence. Optionally, word alignment
        between source and target is included in output.

        @param src: one source sentence
        @param doalign: include word-alignment information in the result?
        @param dodetok: detokenize the translation?
        """
        # tokenize
        src_tokenized = self.tokenizer.tokenize(src)

        # translate
        translation = self.translate_proxy.translate({
            "text": src_tokenized,
            "align": doalign
        })

        # recase
        tgt_tokenized = self.recase_proxy.translate({
            "text": translation['text']})['text'].strip()

        # detokenize if requested; otherwise fall back to the tokenized
        # target (the original referenced an unbound 'tgt' when dodetok
        # was False, raising NameError)
        if dodetok:
            tgt = self.detokenizer.detokenize(tgt_tokenized)
        else:
            tgt = tgt_tokenized

        result = {
            'text': tgt,
            'score': 100,  # TODO actual score
            'rank': 0  # TODO
        }

        # optionally add word-alignment information
        if doalign:
            result.update({
                'src-tokenized': src_tokenized,
                'tgt-tokenized': tgt_tokenized,
                'alignment-raw': _add_tgt_end(translation['align'], tgt_tokenized)
            })

        return result