def translate_neurotolge(text, fromlang="auto", to_lang=None):
    """Translate *text* with the neurotolge.ee machine-translation service.

    Parameters
    ----------
    text : str
        The text to translate.
    fromlang : str
        Source language code, or "auto" to let the service detect it.
    to_lang : str or None
        Desired target language; any string containing "et"/"en"/"ru"/"lv"
        selects that code.  When None or unrecognised, the target is chosen
        from the detected source language (et/ru/lv -> en, en -> et).

    Returns
    -------
    str or None
        The translation on HTTP 200, otherwise None.
    """
    # BUG FIX: the original ran `"et" in to_lang` unconditionally, which
    # raised TypeError whenever to_lang was left at its default of None.
    target = None
    if to_lang:
        for code in ("et", "en", "ru", "lv"):
            if code in to_lang:
                target = code
                break
    if target is None:
        # No usable target supplied: pick one from the detected source
        # language (Estonian/Russian/Latvian -> English, English -> Estonian).
        langapi = detectlanguage.detect(text)[0]
        detected = langapi["language"]
        if "en" in detected:
            target = "et"
        else:
            # Covers et/ru/lv and, as a robustness fallback, any other
            # detected language (the original left to_lang as None here).
            target = "en"
    # FIX: pass the query through `params` so requests URL-encodes the text
    # instead of interpolating it raw into the query string.
    r = requests.get(
        "https://neurotolge.ee/translate",
        params={"from": fromlang, "to": target, "q": text},
        verify=False,  # NOTE(security): TLS verification disabled — confirm this is intentional
    )
    if r.status_code == 200:
        a = r.json()
        return a["translations"][0]["translation"]
    return None
def is_word(self):
    """Return True if any mutation of the password is detected as a real word.

    Each candidate from ``self.password_mutations()`` is run through the
    language detector; a confidence of exactly 10 counts as a dictionary word.

    Returns
    -------
    bool
    """
    # BUG FIX: the original returned False from inside the first loop
    # iteration (`else: return False`), so only one mutation was ever
    # checked.  Examine every mutation before giving up.
    for candidate in self.password_mutations():
        if detect(candidate)[0]['confidence'] == 10:
            return True
    return False
def nuvem(request, id):
    """Django view: render a word-cloud page for the Documento with pk *id*.

    Converts the stored PDF to text if needed, lazily detects the document
    language via the detectlanguage API (persisting it when confidence > 7),
    then builds the cloud image and renders 'nuvem.html'.
    """
    documento = Documento.objects.get(pk=id)
    nome_arquivo = documento.arquivo.path
    prefix, file_extension = os.path.splitext(nome_arquivo)
    if not os.path.exists(prefix + '.txt'):
        pdf2txt(documento.arquivo.path)
    nome_arquivo = prefix + '.txt'
    if not documento.language:
        # FIX: use context managers so the text file handle is always closed
        # (the original leaked an open file on both decode paths).
        try:
            with open(nome_arquivo) as arquivo:
                linhas = arquivo.read().lower().split('\n')[0:20]
        except UnicodeDecodeError:
            # Latin-1 fallback; note the original intentionally splits on
            # '.' here rather than newlines.
            with open(nome_arquivo, encoding='ISO-8859-1') as arquivo:
                linhas = arquivo.read().lower().split('.')[0:20]
        # Very short lines carry little signal; blank them out of the sample.
        trecho = ' '.join([('' if len(linha) < 20 else linha)
                           for linha in linhas])
        lang_detect = detectlanguage.detect(trecho)
        if len(lang_detect) > 0:
            precisao = lang_detect[0]['confidence']
            if precisao > 7:
                documento.language = lang_detect[0]['language']
                documento.save()
    imagem = generate(nome_arquivo, documento.language)
    contexto = {'doc': documento, 'nuvem': imagem}
    return render(request, 'nuvem.html', contexto)
def checkLang(myFile, verbose): # checks file for language and returns language code, or if is doubtful returns "xx" tryNumber = 0 # starting up counter finished = False status = detectlanguage.user_status() # get status for the account at detectlanguage.com if status['status'] == "SUSPENDED": print "*** Account at detectlanguage.com is suspended" print " Run %s -d to see status" % sys.argv[0] print " Quitting...\n" exit(7) with open(myFile) as theFile: fileLines = sum(1 for line in theFile) # number of lines in file theFile.close() # close file while True: if tryNumber * detectRows >= fileLines: print "*** File only has %d lines. No more lines to send. Accepting answer" % fileLines break with open(myFile) as theFile: # open file head = list(islice(theFile, tryNumber * detectRows, (tryNumber + 1) * detectRows)) # select rows from file theFile.close() # close file text = convertText(head, verbose) # convert all strange characters, remove special characters and so on print "--- Sending rows %d-%d to detectlanguage.com" % (tryNumber * detectRows, (tryNumber + 1) * detectRows) result = detectlanguage.detect(text) # detect language if result[0]['isReliable']: # result is reliable langCode = str(result[0]['language']) # langCode set to answer from detectlanguage.com print "--- Got %s - %s" % (langCode, langName(langCode)) for lang in prefLangs: # run through the prefered languages if lang == langCode: # recieved language is one of the prefered languages finished = True # search for language code is finished break # break out of this for loop if finished: break # break out of the while loop else: print "*** Not one of your prefered languages" else: langCode = "xx" print "*** Got unreliable answer. Confidence is %s" % str(result[0]['confidence']) tryNumber += 1 # counting number of trys if tryNumber > maxTrys: # reached maximum number of trys print "*** Max number of trys reached. 
Accepting answer" finished = True # break if finished: break if langCode == "xx": print "detectlanguage.com can't determine language code" else: print "detectlanguage.com says languagecode is %s" % langCode confidence = result[0]['confidence'] print "detectlanguage.com says confidence is %s" % confidence return langCode
def validate_language(user_input, api_key):
    """Return the reliability flag of the first English detection.

    Configures the detectlanguage client with *api_key*, detects the
    language(s) of *user_input*, and returns the ``isReliable`` value of
    the first candidate whose language is ``"en"``; False when English is
    not among the candidates.
    """
    detectlanguage.configuration.api_key = api_key
    detections = detectlanguage.detect(user_input)
    return next(
        (entry["isReliable"] for entry in detections
         if entry["language"] == "en"),
        False,
    )
def predict(self, text_or_list):
    """
    Predict the language.

    Parameters
    ----------
    text_or_list : str or list of str

    Returns
    -------
    languages : list
    """
    # BUG FIX: for a single string the API returns a flat list of detection
    # dicts, so `el[0]` indexed a dict with the key 0 and raised.  Wrapping
    # the string makes both input kinds yield one result-list per item.
    # (Also fixed the "Paramters" docstring typo.)
    if isinstance(text_or_list, str):
        text_or_list = [text_or_list]
    res = detectlanguage.detect(text_or_list)
    return [el[0]['language'] for el in res]
def predict(self, text_or_list: str) -> List[str]:
    """
    Predict the language.

    Parameters
    ----------
    text_or_list : str

    Returns
    -------
    languages : List[str]
    """
    # BUG FIX: for a plain string the API returns a flat list of detection
    # dicts, so `el[0]` indexed a dict with the key 0 and raised.  Wrap
    # single strings so the per-item indexing below is uniform.
    # (Also fixed the "Paramters" docstring typo.)
    if isinstance(text_or_list, str):
        text_or_list = [text_or_list]
    res = detectlanguage.detect(text_or_list)
    return [el[0]["language"] for el in res]
def languageDetection(text):
    """Return a human-readable summary of the detected languages of *text*.

    Each candidate is formatted as "<code>:<percent>%", where the percent is
    the candidate's confidence normalised over the sum of all confidences.
    Returns the string 'unknown' when detection fails for any reason.
    """
    detectlanguage.configuration.api_key = config.detectlanguageApi_key
    try:
        # FIX: the original stored the API result in a variable named `re`,
        # shadowing the stdlib regex module name; renamed for clarity.
        detections = detectlanguage.detect(text)
        all_confidence = sum([item.get('confidence') for item in detections])
        summary = ', '.join([
            item.get('language') + ":" +
            str(round(item.get('confidence') / all_confidence * 100, 2)) + "%"
            for item in detections
        ])
    except Exception as e:
        # Best-effort: report and fall back rather than propagate.
        print(e)
        summary = 'unknown'
    return summary
def process_language_for_content(content_collection, search):
    """
    :type content_collection: pymongo.collection.Collection

    Fill in the ``ld_lang`` field for every post of *search* that does not
    have one yet, using the detectlanguage API.  Posts whose detection is
    not flagged reliable are left untouched.
    """
    detectlanguage.configuration.api_key = os.environ.get('DETECTLANG')
    pending = {'ld_lang': None, 'search_id': search['_id']}
    for post in content_collection.find(pending):
        detection = detectlanguage.detect(post['text'])
        if not detection[0]['isReliable']:
            continue
        print(post['_id'])
        content_collection.update_one(
            {'_id': post['_id']},
            {"$set": {'ld_lang': detection[0]['language']}})
def ocr_core(filename):
    """
    This function will handle the core OCR processing of images.

    Runs Tesseract (Tamil model) over the image at *filename*, then
    translates the recognised text with googletrans.

    Returns
    -------
    tuple
        (recognised_text, list_of_translated_strings)
    """
    # Pillow opens the image; pytesseract extracts the text (lang="tam").
    text = pytesseract.image_to_string(Image.open(filename), lang="tam")
    # NOTE(review): `result` is never used — the line consuming it was
    # commented out upstream; confirm whether this API call is still needed.
    result = detectlanguage.detect(text)
    translator = Translator()
    trans_text = translator.translate([text])
    # FIX: removed the dead commented-out file-reading code and replaced the
    # manual append loop with a comprehension.
    translated_text = [ele.text for ele in trans_text]
    print(translated_text)
    return text, translated_text
def edit_action(request):
    """Django view: detect the language of the posted title twice.

    Queries MeaningCloud and the detectlanguage API with the same title and
    renders both answers into 'lang.html'.
    """
    title = request.POST.get('title')

    # --- MeaningCloud language identification -------------------------------
    # NOTE(review): API key is hard-coded in source.
    url = "http://api.meaningcloud.com/lang-2.0"
    payload = "key=1b2ea248af8252d4d3ef84d0ef6798ce&txt=" + title + "&url=&doc="
    headers = {'content-type': 'application/x-www-form-urlencoded'}
    response = requests.request("POST", url, data=payload, headers=headers)
    parsed = json.loads(response.text)
    language = parsed['language_list'][0]['name']

    # --- detectlanguage API -------------------------------------------------
    detectlanguage.configuration.api_key = "7750ba3cec7607c4e9a95584b94bb464"
    detections = detectlanguage.detect(title)
    lang = detections[0]['language']

    context = {'language': language, 'lang': lang}
    return render(request, 'lang.html', context)
def detect_language(search_q):
    """
    Uses detectlanguage API to determine language of video based on video title
    and cleaned description.

    API returns json result with string: predicted language, boolean: isReliable
    and double: confidence level.

    Updates video table in database with detectlanguage data.

    :returns: void
    """
    # FIX: pre-bind so the finally clause cannot raise NameError when
    # connect() itself fails.
    cursor = None
    connection = None
    try:
        cursor, connection = connect()
        cursor.execute(
            """SELECT title, clean_descr, v_id FROM video WHERE "searchQ" = %s;""",
            (search_q, ))
        results = cursor.fetchall()
        # configure language detection API
        # NOTE(review): API key is hard-coded in source.
        detectlanguage.configuration.api_key = "eea8968a48d7b6af0a3de993f7f401e0"
        # for each result from the database, detect language from title and
        # clean description
        for result in results:
            text = result[0] + " " + result[1]
            v_id = result[2]
            # FIX: renamed from `json`, which shadowed the stdlib json module.
            detection = detectlanguage.detect(text)[0]
            # update database with language data (language, isReliable,
            # confidence level)
            cursor.execute(
                """UPDATE video SET language = %s, is_reliable = %s, confidence = %s WHERE v_id = %s""",
                (detection['language'], detection['isReliable'],
                 detection['confidence'], v_id))
            connection.commit()
    except Exception as e:
        print("Exception in detect_language:", e)
    finally:
        if connection is not None:
            closeConnection(connection, cursor)
def only_in_language(self, language_code):
    """Filter for those Tweets that seem to be in a given language.

    Tweet language is decided by the `detectlanguage` API:
    https://detectlanguage.com/

    If detectlanguage.configuration.api_key has not already been set,
    tries to get it from settings.DETECTLANGUAGE_API_KEY.
    """
    import detectlanguage

    if detectlanguage.configuration.api_key is None:
        detectlanguage.configuration.api_key = settings.DETECTLANGUAGE_API_KEY
    if not isinstance(language_code, str):
        raise ValueError("language_code has to be string")

    texts = [tweet.filtered_text for tweet in self.data]
    detections = detectlanguage.detect(texts)

    matching = []
    for tweet, detection in zip(self.data, detections):
        # An empty detection list means the API had no answer for this tweet.
        if detection and detection[0]["language"] == language_code:
            matching.append(tweet)
    return self.__class__(matching, unique=self.unique)
def test_invalid_key(self):
    # Detection with a bogus API key; presumably this test is decorated
    # with an expected-exception check outside this view — confirm.
    detectlanguage.configuration.api_key = 'invalid'
    detectlanguage.detect("Hello world")
def test_secure(self):
    # Detection must also work with SSL ("secure") mode enabled.
    detectlanguage.configuration.secure = True
    # FIX: restore the module-level flag in a finally block so a failing
    # request or assertion cannot leak secure mode into subsequent tests.
    try:
        result = detectlanguage.detect("Hello world")
        eq_('en', result[0]['language'])
    finally:
        detectlanguage.configuration.secure = False
def test_detect_array(self):
    # Batch detection: one result list per input string, in order.
    batch = ["Hello world", "Ėjo ežiukas"]
    detections = detectlanguage.detect(batch)
    eq_('en', detections[0][0]['language'])
    eq_('lt', detections[1][0]['language'])
def test_detect_unicode(self):
    # Non-ASCII input (Lithuanian sample) must be detected correctly.
    detection = detectlanguage.detect("Ėjo ežiukas")
    eq_('lt', detection[0]['language'])
def test_detect(self):
    # A plain English sentence comes back as 'en'.
    detection = detectlanguage.detect("Hello world")
    eq_('en', detection[0]['language'])
import detectlanguage

# Console demo: read a phrase from the user and print the raw detection
# result (a list of candidate dicts with language/isReliable/confidence).
# NOTE: 'sua api key' is a placeholder — supply your own detectlanguage key.
detectlanguage.configuration.api_key = 'sua api key'
texto = input(str("Digite a frase que deseja: "))
print(detectlanguage.detect(texto))
def detect_english(text):
    """Return True when English appears among the detected languages."""
    detections = dl.detect(text)
    return any(entry['language'] == "en" for entry in detections)
try: q_ru = str(blob.translate(to="ru")) except: q_ru = q try: q_de = str(blob.translate(to="de")) except: q_de = q print q_en print q_de print q_ru print "----" lang = detectlanguage.detect(q) lang = lang[0]["language"] if lang == "en": text_en_boost = native_boost text_de_boost = foreign_boost text_ru_boost = foreign_boost hashtags_boost = default_HT_boost elif lang == "ru": text_en_boost = foreign_boost text_de_boost = foreign_boost text_ru_boost = native_boost hashtags_boost = default_HT_boost elif lang == "de": text_en_boost = foreign_boost text_de_boost = native_boost
def nuvem(request, id):
    """Django view: render (and optionally edit) the word-cloud page of a Documento.

    Handles a LayoutForm POST (description, image mask, stopwords, colours),
    converts the stored PDF to text if needed, lazily detects the document
    language via the detectlanguage API (persisting it when confidence > 5),
    then builds the cloud image and renders 'nuvem.html'.
    """
    documento = Documento.objects.get(pk=id)
    form = LayoutForm(request.POST or None,
                      request.FILES or None,
                      initial={
                          'descricao': documento.descritivo or None,
                          'cores': documento.cores,
                          'select': documento.select
                      })
    # Edit controls only shown when the share key in the URL matches.
    flag = documento.chave and request.GET.get(
        'chave') and documento.chave == request.GET.get('chave')
    if request.POST:
        if form.is_valid():
            documento.descritivo = form.cleaned_data.get('descricao')
            if form.cleaned_data.get('imagem'):
                documento.imagem = form.cleaned_data.get('imagem')
            else:
                documento.imagem = None
            documento.stopwords = form.cleaned_data.get('stopwords')
            documento.cores = form.cleaned_data.get('cores')
            documento.select = form.cleaned_data.get('select')
            documento.save()
            messages.success(request, 'Alteração salva com sucesso.')
    nome_arquivo = documento.arquivo.path
    prefix, file_extension = os.path.splitext(nome_arquivo)
    if not os.path.exists(prefix + '.txt'):
        pdf2txt(documento.arquivo.path)
    nome_arquivo = prefix + '.txt'
    if os.path.exists(prefix + '.dedup'):
        os.rename(prefix + '.dedup', nome_arquivo)
    numero_linhas = 50
    if not documento.language:
        # FIX: use context managers so the text file handle is always closed
        # (the original leaked an open file on both decode paths).
        try:
            with open(nome_arquivo) as arquivo:
                linhas = arquivo.read().lower().split(
                    '\n')[0:numero_linhas]
        except UnicodeDecodeError:
            # Latin-1 fallback; note this path intentionally splits on '.'.
            with open(nome_arquivo, encoding='ISO-8859-1') as arquivo:
                linhas = arquivo.read().lower().split(
                    '.')[0:numero_linhas]
        # Very short lines carry little signal; blank them out of the sample.
        trecho = ' '.join([('' if len(linha) < numero_linhas else linha)
                           for linha in linhas])
        lang_detect = detectlanguage.detect(trecho)
        if len(lang_detect) > 0:
            precisao = lang_detect[0]['confidence']
            if precisao > 5:
                documento.language = lang_detect[0]['language']
                documento.select = lang_detect[0]['language']
                documento.save()
    mask = None
    channel = 0
    if documento.imagem:
        try:
            image = Image.open(documento.imagem)
            channel = len(image.split())
            mask = np.array(image)
        except Exception:
            messages.error(
                request,
                'Não foi possivel usar a imagem como mascára, por favor selecione outra.'
            )
    # Workaround for a NotImplemented error on images with < 3 channels.
    if channel >= 3 or not documento.cores:
        color = documento.cores
    else:
        messages.error(
            request, 'Não foi possivel pegar as cores dessa imagem, '
            'pois ela possui somente %s %s.' %
            (channel, 'canais' if channel > 1 else 'canal'))
        color = False
    if documento.tipo == 'keywords':
        imagem = generate_words(nome_arquivo, documento.language, mask, color)
    else:
        imagem = generate(nome_arquivo, documento.stopwords,
                          documento.language, mask, color)
    contexto = {'show': flag, 'form': form, 'doc': documento, 'nuvem': imagem}
    return render(request, 'nuvem.html', contexto)
while True: try: if detectlanguage.user_status()['requests'] >= detectlanguage.user_status()['daily_requests_limit']: logging.debug("Number of requests over daily limit.") time.sleep(60) statuses = db[twitterStatusCol].find({ "language_detections.language": { "$exists": False } }) if statuses: count = 0 batch_request = [] batch_status = [] for twitterStatus in statuses: if count >= 500: logging.debug("Processing batch ...") detections = detectlanguage.detect(batch_request) if len(detections) != 500: logging.error("ABNORMAL NUMBER OF LANGUAGE DETECTIONS: " + str(len(detections))) break count = 0 for detection in detections: if len(detection) == 0: detection = {} detection['source'] = 'detectlanguage' detection['language'] = '' batch_status[count]['language_detections'] = [] batch_status[count]['language_detections'].append(detection) else: detection[0]['source'] = 'detectlanguage'
import detectlanguage

# NOTE(review): hard-coded API key committed to source — rotate and move to
# configuration.
detectlanguage.configuration.api_key = "609c401c57d4180ddbf29d57ad9b273c"
# detect() returns the full candidate list (language/isReliable/confidence).
print(detectlanguage.detect("Buenos dias señor"))
# simple_detect() returns only the language code (Hindi sample here).
print(detectlanguage.simple_detect("तक को बनाया स्टार"))
# Account status / remaining daily quota.
print(detectlanguage.user_status())
def tr_detector(arg):
    """Thin wrapper: run detectlanguage detection on *arg* and return it."""
    detection_result = detectlanguage.detect(arg)
    return detection_result
# Annotate each tab-separated row with its detected language (Python 2:
# the column bytes are decoded to unicode before detection).
# `fline`, `writer`, and `f` are defined earlier in the file (not visible here).
c = 0  # row counter, used only for the progress print below
for line in csv.reader(fline, delimiter='\t', skipinitialspace='True', quotechar=None):
    #lg = detect(line[1].decode('utf-8').strip())
    # NOTE(review): lang/conf/reliable appear unused in this chunk — confirm
    # whether they are consumed later or are leftovers.
    lang = ''
    conf = '-1'
    reliable = 'False'
    # b = TextBlob(line[1].decode('utf-8').strip())
    # lg = b.detect_language()
    # Detect the language of column 1.
    lg = detectlanguage.detect(line[1].decode('utf-8').strip())
    c = c + 1
    print(f + ' ' + str(c))
    if len(lg) > 0:
        # Re-encode the original columns and append the detection metadata
        # (language, isReliable, confidence) from the top candidate.
        writer.writerow([
            line[0].strip().encode('utf-8'),
            line[1].strip().encode('utf-8'),
            line[2].strip().encode('utf-8'),
            line[3].strip().encode('utf-8'),
            line[4].strip().encode('utf-8'), line[5], lg[0]['language'],
            lg[0]['isReliable'], lg[0]['confidence']
        ])
max_requests = 1000 # int(sys.argv[2]) if len(sys.argv) > 2 else 1000 offset = 0 # int(sys.argv[3]) if len(sys.argv) > 3 else 0 dl_user_status = detectlanguage.user_status() available_requests = dl_user_status['daily_requests_limit'] - dl_user_status[ 'requests'] if available_requests == 0: print("Quota of requests at DetectLanguage exhausted for today.") exit() df = pd.read_csv(source_file_path, header=0) nb_lines = min(available_requests, len(df) - offset, max_requests) df = df[offset:offset + nb_lines].copy().reset_index(drop=True) response = detectlanguage.detect(df["text"].values.tolist()) first_languages = list( map( lambda x: x[0] if x else { 'isReliable': False, 'confidence': 0, 'language': '' }, response)) new_df = pd.concat([df, pd.DataFrame(first_languages)], axis=1) orc_file = ORC_FILE.format(datetime.now().strftime("%y%m%d")) with open(orc_file, "wb") as data: with pyorc.Writer( data, "struct<text:string,isSpam:boolean,language:string,isReliable:boolean,confidence:float>",
async def searchcard(ctx, *args):
    """Discord command: search shadowverse-portal.com for a card by name.

    Optional 'lang=<code>' argument forces the result language; otherwise the
    card name's language is guessed via the detectlanguage API.  Presents a
    paged embed of matches, waits for the user's pick, then renders the chosen
    card's details into a second embed.
    """
    #author = ctx.message.author
    languages = ['en', 'ja', 'ko', 'zh-tw', 'fr', 'it', 'de', 'es']
    #Search Function
    chosen_lang = False
    error_found = False
    add_chosen_lang = 'lang=en'
    # Pull an explicit 'lang=<code>' argument out of *args* if present.
    for i in range(0, len(args)):
        if 'lang=' in args[i] and args[i].split(
                '=', 1)[1] in languages and chosen_lang == False:
            add_chosen_lang = args[i]
            args = args[:i] + args[i + 1:]
            chosen_lang = True
        elif ('lang=' in args[i]
              and args[i].split('=', 1)[1] not in languages):
            await ctx.send('Invalid Language')
            error_found = True
    card_name = " ".join(args[:])
    # Guess the search language from the card name (needs >= 3 characters to
    # be meaningful); fall back to the chosen/default language on API errors.
    if len(card_name) >= 3 and error_found == False:
        try:
            result = dlang.detect(card_name)[0]['language']
            if result.lower() in languages:
                d_lang = result
            elif result.lower() == 'zh-Hant':
                # NOTE(review): this branch is unreachable — a lowercased
                # string can never equal 'zh-Hant' (capital H); confirm and
                # compare against 'zh-hant' instead.
                d_lang = 'zh-tw'
            elif result.lower() == 'fy':
                # West Frisian is mapped to German.
                d_lang = 'de'
            else:
                d_lang = 'en'
            add_lang = f'lang={d_lang}'
        except:
            add_lang = add_chosen_lang
            await ctx.send('API limit reached. Please use "lang=<lang>"')
            error_found = True
    else:
        add_lang = add_chosen_lang
    if error_found == False:
        # Query-string keys understood by shadowverse-portal.
        dict_filters = {
            'card_name': 'card_name',
            'card_clan': 'clan%5B%5D',
            'sv_format': 'format',
            'card_set': 'card_set%5B%5D',
            'card_cost': 'cost%5B%5D',
            'card_type': 'char_type%5B%5D',
            'card_rarity': 'rarity%5B%5D',
            'language': 'lang'
        }
        # Card-set id (positions 1-3 of the card number) -> set acronym.
        dict_set_acro = {
            '25': 'RGW',
            '24': 'EOP',
            '23': 'OOS',
            '22': 'DOC',
            '21': 'RSC',
            '20': 'DOV',
            '19': 'ETA',
            '18': 'SOR',
            '17': 'FOH',
            '16': 'WUP',
            '15': 'UCL',
            '14': 'VEC',
            '13': 'ROG',
            '12': 'STR',
            '11': 'ALT',
            '10': 'OOT',
            '09': 'BOS',
            '08': 'DBN',
            '07': 'CGS',
            '06': 'SFL',
            '05': 'WLD',
            '04': 'TOG',
            '03': 'ROB',
            '02': 'DRK',
            '01': 'CLC',
            '00': 'Basic'
        }
        initial_link = 'https://shadowverse-portal.com/cards?'
        added_filters = []
        # Add the card-name filter (spaces become '+').
        add_card_name = dict_filters['card_name'] + '=' + card_name
        add_card_name = add_card_name.replace(' ', '+')
        added_filters.append(add_card_name)
        # Add the language filter.
        #add_lang = dict_filters['language'] + '=' + language
        added_filters.append(add_lang)
        added_link = '&'.join(added_filters)
        # Paged search loop: N = next page, B = previous page, Q = quit.
        count = 0
        increment = 12  # results per page on the portal
        move_button = ''
        while move_button in ['N', 'B'] or count == 0:
            # Rewind the running result counter when paging backwards.
            if move_button == 'B' and count % increment == 0:
                count = count - increment * 2
            elif move_button == 'B' and count % increment != 0:
                count = count - count % increment - increment
            card_offset = '&card_offset=' + str(count)
            full_link = initial_link + added_link + card_offset
            source = requests.get(full_link).text
            soup = bs(source, 'lxml')
            card_list = soup.find_all('a', class_="el-card-visual-content")
            if len(card_list) == 0:
                msg_no_result = 'No result found'
                chosen_card_number = False
                await ctx.send(msg_no_result)
                break
            # Exactly one hit on the first page: pick it without asking.
            if len(card_list) == 1 and count == 0:
                chosen_card_number = card_list[0]['href'].split('/')[2]
                break
            right_ans = ['N', 'B', 'Q']
            embed_desc = ''
            # Build the numbered result list for the embed.
            for i in range(1, len(card_list) + 1):
                count += 1
                card_title = card_list[i - 1].find('img')['alt']
                card_number = card_list[i - 1]['href'].split('/')[2]
                # Card numbers starting with '7' are alternate-art prints.
                if card_number[0] == '7':
                    expac_acro = '[alt]'
                else:
                    expac_acro = f'[{dict_set_acro[card_number[1:3]]}]'
                embed_desc = embed_desc + str(
                    count) + '.) ' + card_title + ' ' + expac_acro + '\n'
                right_ans.append(str(count))
            embed_footer = '## - Choose card, N - next page, B - prev page, Q - quit'
            # Show the page and wait (10 s) for a valid reply from the caller.
            time_out = False
            while time_out == False:
                author = ctx.author.name
                txt_response = f"**{author}**, did you mean..."
                await ctx.send(txt_response)
                embed_response = discord.Embed(title="Search Results",
                                               description=embed_desc,
                                               color=0xffb7c5)
                embed_response.set_footer(text=embed_footer)
                await ctx.send(embed=embed_response)

                def check(msg):
                    # Only accept replies from the invoking user in the
                    # same channel.
                    return msg.author == ctx.author and msg.channel == ctx.channel

                valid_ans = False
                while valid_ans == False:
                    try:
                        msg = await bot.wait_for("message",
                                                 check=check,
                                                 timeout=10)
                        if msg.content in right_ans:
                            move_button = msg.content
                            valid_ans = True
                            time_out = True
                            chosen_card_number = False
                    except asyncio.TimeoutError:
                        await ctx.send(f"**{author}**, Time's up!")
                        valid_ans = True
                        time_out = True
                        chosen_card_number = False
                    if valid_ans == False:
                        await ctx.send(f"**{author}**, Invalid Response")
            # A numeric reply on the current page selects that card.
            if move_button.isnumeric() == True and int(move_button) in range(
                    count - len(card_list) + 1, count + 1):
                chosen_card_number = card_list[(int(move_button) % increment)
                                               - 1]['href'].split('/')[2]
            else:
                chosen_card_number = False
        # Render the chosen card's detail page into an embed.
        if chosen_card_number is not False:
            add_lang = add_chosen_lang if chosen_lang == True else add_lang
            chosen_card_link = f'https://shadowverse-portal.com/card/{chosen_card_number}?{add_lang}'
            source = requests.get(chosen_card_link).text
            soup = bs(source, 'lxml')
            card_info = soup.find('ul', class_="card-info-content")
            card_text = card_info.find_all('span')
            p_text = card_info.find_all('p')
            # Scrape the info table (values sit on the second '\r\n' line).
            trait = card_text[1].text.split('\r\n')[1]
            class_ = card_text[3].text.split('\r\n')[1]
            rarity = card_text[5].text.split('\r\n')[1]
            create = card_text[7].text.split('\r\n')[1]
            # Basic ('00') and alternate-art ('7…') cards have no
            # liquefy/pack rows.
            if chosen_card_number[0] != '7' and chosen_card_number[1:3] != '00':
                liquefy = f'{p_text[0].text} / ' + p_text[1].text.split(
                    "\n")[2]
                card_pack = card_text[11].text.split('\r\n')[1]
            # Japanese pages keep the title in the breadcrumb instead.
            if add_lang == 'lang=ja':
                title = soup.find_all(
                    'li', class_="bl-breadcrumb-content-list")[2].text
            else:
                title = soup.find(
                    'h1', class_="card-main-title").text.split('\r\n')[1]
            embed1 = discord.Embed(title=title,
                                   url=chosen_card_link,
                                   color=discord.Color.orange())
            flavor = soup.find('p', class_="card-content-description").text
            # Digit 4-from-the-end distinguishes followers from other types.
            if int(chosen_card_number[-4]) == 1:
                #follower: show unevolved and evolved stats + skill text
                skill_txt = soup.find_all('p', class_="card-content-skill")
                if skill_txt[0].text == '\n':
                    skill_u = 'None'
                else:
                    skill_u = str(skill_txt[0]).split('>', 1)[1].split(
                        '</p>', 1)[0].split('\r\n')[-2]
                if skill_txt[1].text == '\n':
                    skill_e = 'None'
                else:
                    skill_e = str(skill_txt[1]).split('>', 1)[1].split(
                        '</p>', 1)[0].split('\r\n')[-2]
                skill_u = clean_text_1(skill_u)
                skill_e = clean_text_1(skill_e)
                lang_unevo = soup.find_all(
                    'p', class_="el-label-card-state l-inline-block")[0].text.split(
                        '\r\n')[1]
                lang_evo = soup.find_all(
                    'p', class_="el-label-card-state l-inline-block")[1].text.split(
                        '\r\n')[1]
                atk_unevo = soup.find_all(
                    'p', class_="el-card-status is-atk")[0].text.split('\r\n')[1]
                atk_evo = soup.find_all(
                    'p', class_="el-card-status is-atk")[1].text.split('\r\n')[1]
                life_unevo = soup.find_all(
                    'p', class_="el-card-status is-life")[0].text.split('\r\n')[1]
                life_evo = soup.find_all(
                    'p', class_="el-card-status is-life")[1].text.split('\r\n')[1]
                embed1.add_field(
                    name=f'{lang_unevo}: {atk_unevo}/{life_unevo}',
                    value=f'{skill_u}',
                    inline=False)
                embed1.add_field(name=f'{lang_evo}: {atk_evo}/{life_evo}',
                                 value=f'{skill_e}',
                                 inline=False)
            else:
                #non-follower: single effect text
                skill = str(
                    soup.find_all('p', class_="card-content-skill")[0]).split(
                        '>', 1)[1].split('</p>', 1)[0].split('\r\n')[1]
                skill = clean_text_1(skill)
                embed1.add_field(name='Effect:',
                                 value=f'{skill}',
                                 inline=False)
            # Card artwork (English asset regardless of chosen language).
            embed1.set_image(
                url=
                f"https://svgdb.me/assets/cards/en/C_{chosen_card_number}.png"
            )
            await ctx.send(embed=embed1)
import detectlanguage

# NOTE(review): hard-coded API key committed to source — rotate and move to
# configuration.
detectlanguage.configuration.api_key = "7e8ac16dc6ab196f2449de5fd7d7f70b"

# Enable secure mode (SSL) if you are passing sensitive data
# detectlanguage.configuration.secure = True

# simple_detect() returns only the language code; detect() the full details.
print(detectlanguage.simple_detect("Buenos dias señor"))
print(detectlanguage.detect("سلام گلم"))
print(detectlanguage.detect("Oui"))
# Return value discarded — would report account status / remaining quota.
detectlanguage.user_status()
def detect_english(text):
    """Return True when 'en' is among the languages detected for *text*."""
    detected_codes = {entry['language'] for entry in dl.detect(text)}
    return "en" in detected_codes
import detectlanguage
import json
'''This program detect the language of console input from users using detectlanguae'''

# Input from console (Python 2: raw_input returns the line as a string).
input_str = raw_input("Enter your input: ")

# NOTE(review): hard-coded API key committed to source.
detectlanguage.configuration.api_key = "7750ba3cec7607c4e9a95584b94bb464"
result = detectlanguage.detect(input_str)
# Print only the top candidate's language code.
print result[0]['language']