Exemple #1
0
from translator import Translator

if __name__ == '__main__':
    # Hebrew -> Arabic: exercise translation across device moves (cpu/cuda).
    translator = Translator("heb", "arb")
    seq_1 = ["שלום לכולם", "ארבעים ושתיים", "ארבעים וחמש"]
    seq_2 = ["ארבעים ושתיים"]

    print(translator.translate(seq_2))
    translator.to("cpu")
    print(translator.translate(seq_2))
    translator.to("cuda")
    print(translator.translate(seq_1))
    print(translator.translate(seq_2))
    print(translator.translate(seq_2))

    # eng -> arb (NOTE: the original comment said "eng -> heb", but the
    # constructor below clearly targets "arb").
    translator = Translator("eng", "arb")
    translator.to("cpu")
    print(translator.translate(["hello, world"]))
    print(
        translator.translate(
            ["hello, my name is Yossi", "hello, my name is Avi"]))

    # eng -> heb, on GPU.
    translator = Translator("eng", "heb")
    translator.to("cuda")
    print(
        translator.translate(
            ["hello, my name is Yossi", "hello, my name is Avi"]))
    print(translator.translate(["hello, my name is Yossi"]))
Exemple #2
0
def base(data):
    """Translate every item in *data* with one Translator and print each pair."""
    print("\n--- BASE METHOD ---")
    translator = Translator()
    for entry in data:
        translated = translator.translate(entry)
        print("{}\t==>>\t{}".format(entry, translated))
Exemple #3
0
    # Dump Phobos data from an EVE client installation.
    header("Dumping Phobos Data", dump_path)

    # NOTE(review): function-scope imports and Python 2 `print` statements —
    # this fragment targets Python 2.
    import reverence
    from flow import FlowManager
    from miner import *
    from translator import Translator
    from writer import *

    # Open the EVE client data via reverence; the server flag selects the
    # Singularity test server instead of Tranquility.
    rvr = reverence.blue.EVE(eve_path, cachepath=args.cache_path, sharedcachepath=res_path, server="singularity" if args.singularity else "tranquility")
    print "EVE Directory: {}".format(rvr.paths.root)
    print "Cache Directory: {}".format(rvr.paths.cache)
    print "Shared Resource Directory: {}".format(rvr.paths.sharedcache)
    print

    # A single Translator (built over the pickle miner) is shared by the
    # data-producing miners below.
    pickle_miner = ResourcePickleMiner(rvr)
    trans = Translator(pickle_miner)
    bulkdata_miner = BulkdataMiner(rvr, trans)
    staticcache_miner = ResourceStaticCacheMiner(rvr, trans)
    miners = (
        MetadataMiner(eve_path),
        bulkdata_miner,
        staticcache_miner,
        TraitMiner(staticcache_miner, bulkdata_miner, trans),
        SqliteMiner(rvr.paths.root, trans),
        CachedCallsMiner(rvr, trans),
        pickle_miner
    )

    # All mined data is emitted as indented JSON.
    writers = (
        JsonWriter(dump_path, indent=2),
    )
Exemple #4
0
from console import ConsoleReader
from translator import Translator
from printer import Printer

if __name__ == '__main__':
    reader = ConsoleReader()

    args = reader.get_args()

    # read data from the args.data_path file (original comment had a typo: "form")
    with open(args.data_path) as data_file:
        data = data_file.read().strip()

    # Translate the input using the alphabet file, then write the result out.
    translator = Translator(data, args.alphabet_path)
    translator.translate()

    printer = Printer(args.output_path)
    printer.print(translator.get_translation())
 def __init__(self):
     """Initialize the base class and attach a fresh Translator instance."""
     super().__init__()
     self._translator = Translator()
Exemple #6
0
def main():
    """Read source.py, translate its contents, and print the result."""
    with open("source.py") as source_file:
        source_text = source_file.read()
    lscvm = Translator().translate(source_text)
    print(lscvm)
Exemple #7
0
import os
from _thread import start_new_thread

from flask import Flask, request, make_response
from flask_jsonpify import jsonify

from detector import Detector
from translator import Translator, TranslationError
from request_limitation import RequestLimiter, RequestLimitExceededException

app = Flask(__name__)

translator = Translator()
detector = Detector()
# Load the translation models in a background thread so the app can start
# serving /health immediately; translator.models_loaded is reported there.
start_new_thread(translator.initialize_models, ())

# Cap concurrent /translation requests (default 3, overridable via env var).
max_number_of_concurrent_requests = int(
    os.getenv('MAX_NUMBER_OF_CONCURRENT_REQUESTS', 3))
request_limiter = RequestLimiter(max_number_of_concurrent_requests)


@app.route('/health', methods=['GET', 'POST'])
def health():
    """Liveness probe: always healthy; serviceAvailable once models are loaded."""
    return jsonify(healthy=True, serviceAvailable=translator.models_loaded)


@app.route('/translation', methods=['POST'])
def translate():
    with request_limiter.limited_requests():
        return jsonify(texts=[
            translator.translate(text, request.json['targetLanguage'])
from translator import Translator


# L-system definition (named 'levy' below — presumably the Lévy C curve;
# TODO confirm).
axiom = 'F'
rules = {'F': '+F--F+'}
# Map each L-system symbol to a turtle action consumed by Translator.
mapping = {
'F': 'move',
'+': 'left',
'-': 'right',
}

size = 3.6
angle = 45
n = 12

Levy = Translator(axiom, rules, mapping, size, angle)
# Start position offsets scale with n so the curve stays in view
# (assumption — TODO confirm against Translator's coordinate system).
Levy(n, position=(-size * 2**(n/2-1), -size*n), angle=0, name='levy')
from translator import Translator

# L-system definition for a Sierpiński-style curve (named below).
axiom = 'F+XF+F+XF'
rules = {
    'X': 'XF-F+F-XF+F+XF-F+F-X',
}
# Symbol -> turtle action mapping consumed by Translator; 'X' has no action
# and only drives rule expansion.
mapping = {
    '+': 'left',
    '-': 'right',
    'F': 'move',
}

n = 4
size = 8
angle = 90

Sierpinski = Translator(axiom, rules, mapping, size=size, angle=angle)
# Offset the start position so the rendered curve is on screen
# (assumption — TODO confirm against Translator's coordinate system).
Sierpinski(n, position=(size * 2**(n + 1), 0))
Exemple #10
0
 def setUp(self) -> None:
     """Build a fresh Translator over `common` before each test."""
     self.translator = Translator(common)
Exemple #11
0
import pickle as pkl
import torch
from language import Language
from translator import Translator

if __name__ == '__main__':
    # Load the pickled Language vocabularies.  Context managers close the
    # files promptly — the original passed open(...) directly to pkl.load,
    # leaking both file handles.
    # NOTE(review): pickle is only safe on trusted local files.
    with open('./data/languages/english_language', 'rb') as lang_file:
        english_language = pkl.load(lang_file)
    with open('./data/languages/norwegian_language', 'rb') as lang_file:
        norwegian_language = pkl.load(lang_file)

    # NOTE(review): the checkpoint path ends in ".py", which looks odd for a
    # saved model — confirm against Translator.load_model.
    file_load_name = './data/model/translator_model_transformer.py'
    translator = Translator(english_language, norwegian_language, cuda='cpu')
    translator.load_model(file_load_name)

    print(translator.translate('do you have a friend'))
Exemple #12
0
 def setUp(self) -> None:
     """Create orcish/common/debug translators and a mixed-script sample."""
     self.orcish_translator = Translator(orcish)
     self.common_translator = Translator(common)
     self.debug_translator = Translator(debug)
     # Sample mixes Latin, Cyrillic and Ukrainian letters, digits and symbols.
     self.message = "ABCDefЙцук1234іІєї!#\""
Exemple #13
0
from flask import Flask, jsonify, request, redirect
from flasgger import Swagger

from translator import Translator

app = Flask(__name__)
swagger = Swagger(app)

# Construct the translator once at import time; startup is logged loudly
# because constructing it can take a while (presumably — TODO confirm).
print('\n\n')
print('=' * 100)
print('Starting Translator Service.....')
translator_obj = Translator()
print('Translator Service Started.')
print('=' * 100)
print('\n\n')


@app.route('/')
def home():
    """Redirect the root URL to the Swagger UI at /apidocs.

    BUG FIX: the original called redirect() without returning its result, so
    the view returned None and the redirect never reached the client.
    """
    return redirect('/apidocs', code=302)


@app.route('/translate', methods=['POST'])
def run_translation():
    """Endpoint for Translation of input text to other language.
    ---
    parameters:
      - input_text: translation input
        in: formData
        type: string
      - source_lang: source language
Exemple #14
0
from utils import load_data

if __name__ == '__main__':

    # Set up the argument parser.
    # NOTE(review): argparse and the RNN classes are not imported in this
    # view — presumably imported above; verify.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-net",
        "--network_architecture",
        help=
        "String - Specify what network architecture to use - Options: (BasicRNN, EmbeddingRNN, EncDecRNN, EmbedBiRncDecRNN)"
    )
    args = parser.parse_args()

    # Select the Translator wired to the requested architecture.
    # NOTE(review): the help string above says "EmbedBiRncDecRNN" while the
    # branch below checks 'EmbedBiEncDecRNN' — likely a typo in the help
    # text; it is a runtime string so it is left untouched here.
    if args.network_architecture == 'BasicRNN':
        translator = Translator(BasicRNN, 'BasicRNN', embed=False)
    elif args.network_architecture == 'EmbeddingRNN':
        translator = Translator(EmbeddingRNN, 'EmbeddingRNN')
    elif args.network_architecture == 'EncDecRNN':
        translator = Translator(EncDecRNN, 'EncDecRNN', embed=False)
    # NOTE(review): this branch is identical to the else fallback below.
    elif args.network_architecture == 'EmbedBiEncDecRNN':
        translator = Translator(EmbedBiEncDecRNN, 'EmbedBiEncDecRNN')
    else:
        translator = Translator(EmbedBiEncDecRNN, 'EmbedBiEncDecRNN')

    # Load English data
    eng_sentences = load_data('data/small_vocab_en')

    # Load French data
    fra_sentences = load_data('data/small_vocab_fr')
def test_translator_hello_world():
    """en->pt via the google backend translates the canonical greeting."""
    translator = Translator(
        source_api="google", source_lang="en", target_lang="pt")
    result = translator.translate("Hello World")
    assert result == "Olá Mundo"
from translator import Translator

# L-system for the Hilbert curve: X and Y only drive rule expansion
# (they have no mapping entry); F/+/- drive the turtle.
axiom = 'X'
rules = {
    'X': '-YF+XFX+FY-',
    'Y': '+XF-YFY-FX+',
}
mapping = {
    'F': 'move',
    '+': 'left',
    '-': 'right',
}

size = 3
n = 8

Hilbert = Translator(axiom, rules, mapping, size)
# Offset the start so the order-n curve is centred
# (assumption — TODO confirm against Translator's coordinate system).
Hilbert(n, position=(-size * 2**(n - 1), -size * 2**(n - 1)))
def languageCode():
    """Prompt until the user enters a language code present in LANGUAGES.

    Returns the validated code.  The original re-called itself recursively on
    every invalid entry, which could exhaust the stack on repeated mistakes;
    this version loops instead (identical prompts and output).
    """
    while True:
        langCode = input("Enter the default source language code: ")
        if LANGUAGES.get(langCode):
            return langCode
        print("Not Supported Language Code")

def fileName():
    """Prompt for a language file name until ./src/data/<name>.json exists.

    Returns the validated file name (without extension).  The original
    recursed on every miss, which could exhaust the stack; this version
    loops, re-printing the example line each attempt exactly as the
    recursive form did.
    """
    while True:
        print("Example file names: Default-Language, Language-TR, Words-En, etc... ")
        file = input("Enter the default language file name : ")
        if os.path.exists("./src/data/"+file+".json"):
            return file
        print("Not find file")

supportedLanguagesCode()
defaultLanguageCode = languageCode()
defaultLanguageFileName = fileName()
# NOTE(review): name kept as-is (typo for "destination") to preserve the
# module-level binding.
desctinationLanguages = []
print("input is 0(Zero) to complete")
while True:
    # BUG FIX: the original assigned the prompt result to `languageCode`,
    # clobbering the function of the same name defined above; use a
    # distinct loop variable instead.
    destinationCode = input("Enter destination language code : ")
    if destinationCode == "0":
        break
    if destinationCode not in LANGUAGES:
        print("Not Supported Language Code")
        continue
    desctinationLanguages.append(destinationCode)
# desctinationLanguages is already a list, so printing it directly produces
# the same output as the original print(list(...)).
print(desctinationLanguages)
translator = Translator(defaultLanguageFileName,defaultLanguageCode,desctinationLanguages)
translator.translate()
Exemple #18
0
# Example of a simple console translator between English and Russian
# (both directions) with automatic detection of the source text's
# language.  (Translated from the original Russian comment.)

import requests
from translator import Translator

# Each detected language maps to the language we translate into.
LANGS = {'ru': 'en', 'en': 'ru'}

# The Yandex.Translate API key is read from a local file.
with open('key_api.txt', encoding='utf-8') as f:
    APIkey = f.read()
t = Translator(APIkey)

text = None
while True:
    # Re-prompt until the user types something non-empty.
    text = None
    while not text:
        text = input('> ').strip()
    # Detect the source language, then translate into its LANGS counterpart.
    code, to_lang = t.detect_lang(text, hint=list(LANGS))
    code, text = t.translate(text, LANGS[to_lang])
    print(text)
    # Attribution line required by the service (runtime string left as-is).
    print(
        'Переведено сервисом "Яндекс.Перевдчик" http://translate.yandex.ru \n')
Exemple #19
0
from logging import getLogger
from mongoengine import connect

from bot import bot
from translator import Translator
from settings import MONGO_CONNECTION_STRING
from logger import setup_logger
import social_credit_routes # TODO refine routing module loading
import plugins


setup_logger()
logger = getLogger('social_credit')
db_client = connect('social_credit', host=MONGO_CONNECTION_STRING)
# Check the mongo connection right after creating the client.
# No exception catching on purpose: if mongo is unavailable we want to crash.
# Per the pymongo docs, the ismaster command is cheap and needs no auth.
db_client.admin.command('ismaster')
# Attach a Translator to the bot, then start polling for updates.
bot.translator = Translator()
logger.info('bot started')
bot.polling()
Exemple #20
0
def predict(dn, rn):
    """Batch-translate ../data/{dn}-{rn}-raw/src-test.txt and write predictions.

    Skips (with a message) when the input file is missing or the output file
    already exists.  Relies on module-level globals: source_dictionary,
    target_dictionary, config, args, checkpoint_path, outputDir, data_name,
    is_proof_process.
    """
    dir_name_format = "../data/{dn}-{rn}-raw"
    dir_name = dir_name_format.format(dn=dn, rn=rn)
    input_path = os.path.join(dir_name, "src-test.txt")
    if not os.path.isfile(input_path):
        print(f"File: {input_path} not exist.")
        return

    output_filename = f"prediction-{dn}-{rn}.txt"
    output_path = os.path.join(outputDir, output_filename)
    if os.path.isfile(output_path):
        print(f"File {output_path} already exists.")
        return

    # Index-encode the src sequence (translated from the Chinese original).
    preprocess = IndexedInputTargetTranslationDataset.preprocess(source_dictionary)
    # Decode predicted indexes back into a sentence, dropping START/END/PAD
    # tokens (translated from the Chinese original).
    postprocess = lambda x: ''.join(
        [token for token in target_dictionary.tokenize_indexes(x) if token != END_TOKEN and token != START_TOKEN and token != PAD_TOKEN])
    device = torch.device(f'cuda:{args.device}' if torch.cuda.is_available() and not args.no_cuda else 'cpu')

    print('Building model...')
    model = TransformerModel(source_dictionary.vocabulary_size, target_dictionary.vocabulary_size,
                             config['d_model'],
                             config['nhead'],
                             config['nhid'],
                             config['nlayers'])
    model.eval()
    checkpoint_filepath = checkpoint_path
    # Load weights on CPU; the Translator wrapper is moved to `device` below.
    checkpoint = torch.load(checkpoint_filepath, map_location='cpu')
    model.load_state_dict(checkpoint)
    translator = Translator(
        model=model,
        beam_size=args.beam_size,
        max_seq_len=args.max_seq_len,
        trg_bos_idx=target_dictionary.token_to_index(START_TOKEN),
        trg_eos_idx=target_dictionary.token_to_index(END_TOKEN)
    ).to(device)

    from utils.pipe import PAD_INDEX
    def pad_src(batch):
        # Right-pad every sequence in the batch to the batch's maximum length.
        sources_lengths = [len(sources) for sources in batch]
        sources_max_length = max(sources_lengths)
        sources_padded = [sources + [PAD_INDEX] * (sources_max_length - len(sources)) for sources in batch]
        sources_tensor = torch.tensor(sources_padded)
        return sources_tensor
    def process(seq):
        seq = seq.strip()
        def is_proof(name):
            # A dataset counts as "proof" when its name mentions balance/one.
            return name.count("balance") > 0 or name.count("one") > 0
        # When the trained dataset is a proof dataset but the current one is
        # not, append the ",$,1" suffix (assumption about the suffix's
        # meaning — TODO confirm against the dataset format).
        if is_proof(data_name) and not is_proof(dn):
            seq += ",$,1"
            global is_proof_process
            if is_proof_process:
                print("processing")
                is_proof_process = False
        return seq

    batch_size = args.bs
    print(f"Output to {output_path}:")
    with open(output_path, 'w', encoding='utf-8') as outFile:
        with open(input_path, 'r', encoding='utf-8') as inFile:
            seqs = []
            for seq in tqdm(inFile):
                seq = process(seq)
                src_seq = preprocess(seq)
                seqs.append(src_seq)
                # Translate and flush whenever a full batch has accumulated.
                if len(seqs) >= batch_size:
                    pred_seq = translator.translate_sentence(pad_src(seqs).to(device))
                    pred_line = [postprocess(pred) for pred in pred_seq]
                    # print(pred_line)
                    outFile.writelines([p.strip() + '\n' for p in pred_line])
                    seqs.clear()
                # endif
            # endfor
            if seqs:    # last batch
                pred_seq = translator.translate_sentence(pad_src(seqs).to(device))
                pred_line = [postprocess(pred).replace(START_TOKEN, '').replace(END_TOKEN, '') for pred in pred_seq]
                # print(pred_line)
                outFile.writelines([p.strip() + '\n' for p in pred_line])
                seqs.clear()
        # endwith
    # endwith
    print(f'[Info] {input_path} Finished.')
import sys
from syntax_exception import InvalidCommendType
from syntax_exception import InvalidClause
from translator import Translator
from end_program import EndProgram
from user_message import UserMessage

if __name__ == '__main__':
    if len(sys.argv) != 2:
        EndProgram.fail(UserMessage.INVALID_ARGUMENT_NUMBER)

    translated = ""
    path_asm_file = sys.argv[1]
    try:
        with open(path_asm_file) as asm_file:
            translated = Translator(asm_file).get_machine_code()

    except FileNotFoundError as e:
        EndProgram.fail(UserMessage.ASM_FILE_NOT_FOUND, str(e))

    except InvalidClause as e:
        EndProgram.fail(UserMessage.INVALID_CLAUSE, str(e))

    except InvalidCommendType as e:
        EndProgram.fail(UserMessage.INVALID_COMMEND_TYPE, str(e))

    if translated == "":
        EndProgram.success(UserMessage.EMPTY_HACK_FILE)
    try:
        file_name = os.path.basename(path_asm_file).split(".")[0]
        hack_file_path = os.path.join(os.path.dirname(path_asm_file),
Exemple #22
0
def context(data):
    """Translate each item using Translator as a context manager; print pairs."""
    print("\n --- USING CONTEXT ---")
    for entry in data:
        with Translator() as translator:
            translated = translator.translate(entry)
            print("{} ==> {}".format(entry, translated))
Exemple #23
0
import parser, re, json
from translator import Translator, is_function


def rename_type(name):
    """Return *name* with a leading "SDL_" prefix removed, if present."""
    prefix = "SDL_"
    if name.startswith(prefix):
        return name[len(prefix):]
    return name


# NOTE(review): Python 2 module (env.constants.iteritems below).
env = parser.default_env()
parser.parse(env, ['libSDL2.h'])
translate = Translator(env, rename_type)
# Types excluded from translation.
translate.blacklist.update([
    '_IO_marker',
    '_IO_FILE',
])
# Explicit rename overrides (presumably take precedence over rename_type —
# TODO confirm in Translator).
translate.bluelist.update({
    'SDL_AudioCVT': 'AudioCVT',
    'SDL_assert_data': 'assert_data',
    'SDL_PixelFormat': 'PixelFormat',
    'SDL_RWops': 'RWops',
})

constants = {}
variables = {}

# Collect SDL_* int/str constants under their prefix-stripped names.
for name, value in env.constants.iteritems():
    if re.match(r'^SDL_\w', name):
        if isinstance(value, (int, str)):
            name = re.sub(r"^SDL_", r"", name)
            constants[name] = value
Exemple #24
0
import parser, re, json
from translator import Translator, is_function

def rename_type(name):
    """Strip a leading "IMG_" prefix; names must not carry an SDL_ prefix."""
    assert not name.startswith("SDL_"), name
    prefix = "IMG_"
    if name.startswith(prefix):
        return name[len(prefix):]
    return name

def dependency(name):
    """Map an SDL_-prefixed name to its libSDL2 counterpart; None otherwise."""
    prefix = "SDL_"
    if not name.startswith(prefix):
        return None
    return "libSDL2." + name[len(prefix):]

# NOTE(review): Python 2 module (env.constants.iteritems below).
env = parser.default_env()
parser.parse(env, ['/usr/include/SDL2/SDL_image.h'])
# No blacklist/bluelist entries needed for SDL_image yet.
translate = Translator(env, rename_type, dependency)
translate.blacklist.update([
])
translate.bluelist.update({
})

constants = {}
variables = {}

# Collect IMG_* int/str constants under their prefix-stripped names.
for name, value in env.constants.iteritems():
    if re.match(r'^IMG_\w', name):
        if isinstance(value, (int, str)):
            name = re.sub(r"^IMG_", r"", name)
            constants[name] = value

for cname in env.names:
    if re.match(r'^IMG_\w', cname):
        typespec = translate.declarator(env.names[cname])