def enumerate(self, response, output, target):
    subdomains = set()
    self.subdomainscount = 0
    start_time = time.time()
    # Test that the response body is valid JSON before iterating over it
    try:
        subdomains = response.json()
    except ValueError:  # includes simplejson.decoder.JSONDecodeError
        print("Decoding JSON has failed\n")
    try:
        while self.subdomainscount < 10000:
            subdomains = response.json()[self.subdomainscount]["name_value"]
            if not subdomains:
                print(f"[x] Oops! No data found for {self.target} using SSL Certificates.")
                break  # the counter only advances below, so bail out to avoid an endless loop
            self.subdomainscount = self.subdomainscount + 1
            if "@" in subdomains:  # filter out e-mail addresses
                continue
            print(f"[*] {subdomains}")
            if self.output is not None:
                write_file(subdomains, self.engine + '_' + self.output, target)
        if self.output:
            print(f"\nSaving result... {self.engine + '_' + self.output}")
    except IndexError:
        pass
    print(G + f"\n[**] SSL Certificates: {self.subdomainscount} subdomains "
              f"have been found in {time.time() - start_time} seconds" + W)
def enumerate(self, response, output, target):
    subdomains = set()
    subdomainscount = 0
    start_time = time.time()
    try:
        while subdomainscount < 40:
            subdomains = response.json()["data"][subdomainscount]["id"]
            subdomainscount = subdomainscount + 1
            print(f"[*] {subdomains}")
            if self.output is not None:
                write_file(subdomains, self.engine + '_' + self.output, target)
        if self.output:
            print(f"\nSaving result... {self.engine + '_' + self.output}")
    except IndexError:
        pass
    print(G + f"\n[**] TugaRecon is complete. VirusTotal: {subdomainscount} subdomains "
              f"have been found in {time.time() - start_time} seconds" + W)
    if not subdomains:
        print(f"[x] No data found for {self.target} using VirusTotal.")
def enumerate(self, url, output):
    subdomains = set()
    subdomainscount = 0
    start_time = time.time()
    try:
        response = requests.get(url, headers={'User-Agent': useragent()})
        while subdomainscount < 10000:
            subdomains = response.json()[subdomainscount]["name_value"]
            subdomainscount = subdomainscount + 1
            if "@" not in subdomains:  # filter out e-mail addresses
                print(f"[*] {subdomains}")
                if self.output is not None:
                    write_file(subdomains, self.engine + self.output)
        if self.output:
            print(f"\nSaving result... {self.engine + self.output}")
    except IndexError:
        pass
    print(G + f"\n[**] TugaRecon is complete. SSL Certificates: {subdomainscount} subdomains "
              f"have been found in {time.time() - start_time} seconds" + W)
    if not subdomains:
        print(f"[x] Oops! No data found for {self.target} using SSL Certificates.")
def get_url(self, g_clean, page_number):
    if page_number > 30:
        for x in range(len(g_clean)):
            print(g_clean[x])
            time.sleep(0.5)
            if self.output is not None:
                write_file(g_clean[x], self.engine + '_' + self.output, self.target)
def enumerate(self, url, output, target):
    subdomains = set()
    subdomainscount = 0
    start_time = time.time()
    try:
        response = requests.get(url, headers=tuga_useragents.useragent())
        while subdomainscount < 100:
            subdomains = response.json()[subdomainscount]["dns_names"][0]
            subdomainscount = subdomainscount + 1
            print(f"[*] {subdomains}")
            if self.output is not None:
                write_file(subdomains, self.engine + '_' + self.output, target)
        if self.output:
            print(f"\nSaving result... {self.engine + '_' + self.output}")
    except IndexError:
        pass
    print(G + f"\n[**] TugaRecon is complete. CertSpotter: {subdomainscount} subdomains "
              f"have been found in {time.time() - start_time} seconds" + W)
    if not subdomains:
        print(f"[x] No data found for {self.target} using CertSpotter.")
def commit(message, author=None):
    """Commit the current state of the index to master with the given message.

    Returns the hash of the commit object.
    """
    tree = write_tree()
    parent = get_local_master_hash()
    if author is None:
        author = '{} <{}>'.format(
            os.environ['GIT_AUTHOR_NAME'], os.environ['GIT_AUTHOR_EMAIL'])
    timestamp = int(time.mktime(time.localtime()))
    utc_offset = -time.timezone
    author_time = '{} {}{:02}{:02}'.format(
        timestamp,
        '+' if utc_offset > 0 else '-',
        abs(utc_offset) // 3600,
        (abs(utc_offset) // 60) % 60)
    lines = ['tree ' + tree]
    if parent:
        lines.append('parent ' + parent)
    lines.append('author {} {}'.format(author, author_time))
    lines.append('committer {} {}'.format(author, author_time))
    lines.append('')
    lines.append(message)
    lines.append('')
    data = '\n'.join(lines).encode()
    sha1 = hash_object(data, 'commit')
    master_path = os.path.join('.git', 'refs', 'heads', 'master')
    write_file(master_path, (sha1 + '\n').encode())
    print('committed to master: {:7}'.format(sha1))
    return sha1
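# A hedged usage sketch, not part of the original source: how commit() might be
# driven end to end. It assumes the GIT_AUTHOR_* environment variables are set
# and that add() is the companion staging helper from the same toy-git module
# (hypothetical here).
def example_commit_usage():
    os.environ.setdefault('GIT_AUTHOR_NAME', 'Ada Lovelace')
    os.environ.setdefault('GIT_AUTHOR_EMAIL', 'ada@example.com')
    add(['README.md'])  # stage a file in the index (assumed helper)
    sha1 = commit('first commit')
    print(sha1)  # full 40-char hash of the new commit object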
def enumerate(self, url, output):
    subdomains = set()
    subdomainscount = 0
    start_time = time.time()
    try:
        response = requests.get(url)
        # The API answers one "subdomain,ip" pair per line; dropping the commas
        # and splitting yields alternating subdomain / IP tokens, so only the
        # even positions are subdomains.
        remove_ip = response.text.replace(",", " ")
        subdomains = remove_ip.split()
        while subdomainscount < 10000:
            print(f"[*] {subdomains[subdomainscount]}")
            # Write to a file
            if self.output is not None:
                write_file(subdomains[subdomainscount], self.engine + '_' + self.output)
            subdomainscount = subdomainscount + 2  # step over the IP token
        if self.output:
            print(f"\nSaving result... {self.engine + '_' + self.output}")
    except IndexError:
        pass
    print(G + f"\n[**] TugaRecon is complete. HackerTarget: {subdomainscount // 2} subdomains "
              f"have been found in {time.time() - start_time} seconds" + W)
    if not subdomains:
        print(f"[x] No data found for {self.target} using HackerTarget.")
def enumerate(self, url, output):
    subdomains = set()
    subdomainscount = 0
    start_time = time.time()
    try:
        response = requests.get(url, headers={'User-Agent': useragent()})
        while subdomainscount < 500:
            subdomains = response.json()["subdomains"][subdomainscount]
            subdomainscount = subdomainscount + 1
            print(f"[*] {subdomains}")
            if self.output is not None:
                write_file(subdomains, self.engine + self.output)
        if self.output:
            print(f"\nSaving result... {self.engine + self.output}")
    except IndexError:
        pass
    print(G + f"\n[**] TugaRecon is complete. Threat Crowd: {subdomainscount} subdomains "
              f"have been found in {time.time() - start_time} seconds" + W)
    if not subdomains:
        print(f"[x] No data found for {self.target} using Threat Crowd.")
def save(self, savePath, patientID):
    if self.onlyCancer:
        print("Saving only images with cancer.")
    savePath = Path(savePath)
    saveImagePath = savePath / "image" / patientID / "dummy.mha"
    saveTextPath = savePath / "path" / (patientID + ".txt")
    if not saveImagePath.parent.exists():
        createParentPath(str(saveImagePath))
    if not saveTextPath.parent.exists():
        createParentPath(str(saveTextPath))
    for i in range(2):
        length = len(self.cuttedLabelList[i])
        for x in tqdm(range(length), desc="Saving images...", ncols=60):
            if self.onlyCancer and not (self.cuttedStackedLabelArrayList[i][x] == 2).any():
                continue
            saveImagePath = savePath / "image" / patientID / "image_{}_{:02d}.mha".format(i, x)
            saveLabelPath = savePath / "image" / patientID / "label_{}_{:02d}.mha".format(i, x)
            sitk.WriteImage(self.cuttedLabelList[i][x], str(saveLabelPath), True)
            sitk.WriteImage(self.cuttedImageList[i][x], str(saveImagePath), True)
            write_file(str(saveTextPath), str(saveImagePath) + "\t" + str(saveLabelPath))
def enumerate(self, response, output, target):
    subdomains = set()
    subdomainscount = 0
    start_time = time.time()
    # Probe the JSON payload before iterating over it
    try:
        subdomains = response.json()["subdomains"][subdomainscount]
    except KeyError:
        print(G + f"[x] Decoding JSON has failed... No data found for {self.target} "
                  f"using Threat Crowd." + W)
        exit(1)
    try:
        while subdomainscount < 500:
            subdomains = response.json()["subdomains"][subdomainscount]
            subdomainscount = subdomainscount + 1
            print(f"[*] {subdomains}")
            if self.output is not None:
                write_file(subdomains, self.engine + '_' + self.output, target)
        if self.output:
            print(f"\nSaving result... {self.engine + '_' + self.output}")
    except IndexError:
        pass
    print(G + f"\n[**] Threat Crowd: {subdomainscount} subdomains have been found "
              f"in {time.time() - start_time} seconds" + W)
    if not subdomains:
        print(f"[x] No data found for {self.target} using Threat Crowd.")
def enumerate(self, response, output, target):
    subdomains = []
    subdomainscount = 0
    start_time = time.time()
    try:
        while subdomainscount < 100:
            subdomains = response.json()[subdomainscount]["dns_names"][0]
            subdomainscount = subdomainscount + 1
            print(f"[*] {subdomains}")
            if self.output is not None:
                write_file(subdomains, self.engine + '_' + self.output, target)
        if self.output:
            print(f"\nSaving result... {self.engine + '_' + self.output}")
    except IndexError:  # running off the end of the result list is the normal exit
        pass
    if not subdomains:
        print(f"[x] No data found for {self.target} using CertSpotter.")
    else:
        print(G + f"\n[**] TugaRecon is complete. CertSpotter: {subdomainscount} subdomains "
                  f"have been found in {time.time() - start_time} seconds" + W)
def signUp():
    name = request.json['name']
    email = request.json['email']
    password = request.json['password']
    users = read_file()
    userFound = False
    idx = 1
    if users:
        for user in users:
            if user['email'] == email:
                userFound = True
                break
        idx = users[-1]['id'] + 1  # next id, one past the last stored user
    if userFound:
        return json.dumps({'message': 'User already exists!!!', 'error': True})
    else:
        salt = generate_salt()
        password_hash = generate_hash(password, salt)
        users.append({
            'id': idx,
            'name': name,
            'email': email,
            'salt': salt,
            'password_hash': password_hash
        })
        write_file(users)
        return json.dumps({
            'message': 'User created Successfully!!!',
            'error': False
        })
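# A hedged client-side sketch, not part of the original app: exercising the
# signUp route with the requests library. The route path and port are
# assumptions, not taken from the source.
import requests

def example_sign_up():
    payload = {'name': 'Ada', 'email': 'ada@example.com', 'password': 'hunter2'}
    resp = requests.post('http://localhost:5000/signup', json=payload)
    # Expected body: {"message": "User created Successfully!!!", "error": false}
    print(resp.json())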
def modify_story(story_id):
    '''Saves the changes to the file, based on the user inputs.'''
    if functions.valid_value(request.form['business_value']) and \
            functions.valid_time(request.form['estimation_time']):
        story_list = functions.read_file('result.txt')
        for story in story_list:
            if int(story[0]) == int(story_id):
                story[1] = str(functions.convert_string(request.form['story_title']))
                story[2] = str(functions.convert_string(request.form['user_story']))
                story[3] = str(functions.convert_string(request.form['acceptance_criteria']))
                story[4] = str(int(request.form['business_value']))
                story[5] = str(functions.correct_time(request.form['estimation_time']))
                story[6] = str(request.form['status'])
                break
        functions.write_file('result.txt', story_list)
        return redirect('/list')
    else:
        return render_template('error.html')
def init(repo):
    """Create a .git repository directory tree."""
    os.mkdir(repo)
    os.mkdir(os.path.join(repo, '.git'))
    for name in ['objects', 'refs', 'refs/heads']:
        os.mkdir(os.path.join(repo, '.git', name))
    write_file(os.path.join(repo, '.git', 'HEAD'), b'ref: refs/heads/master')
    print('initialized empty repository: {}'.format(repo))
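# A hedged usage sketch (not from the original source): calling init() once
# leaves the minimal layout below on disk; "myrepo" is an arbitrary name.
#
#   myrepo/.git/HEAD          containing "ref: refs/heads/master"
#   myrepo/.git/objects/
#   myrepo/.git/refs/heads/
def example_init_usage():
    init('myrepo')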
def _create_working_files(file_path, content):
    """Create tree of working files."""
    # Create the directory tree that the file lives in:
    if '/' in file_path:
        parent_paths = file_path.split('/')[:-1]
        # range over the full list so the deepest directory is created too
        for i in range(len(parent_paths)):
            make_directory('/'.join(parent_paths[:i + 1]))
    # Create new file:
    write_file(file_path, content)
def main():
    results = f.parse_json_file('results.json')
    d_output = {}
    for result in results:
        d_result = dict(result)
        for task_id in d_result:
            task = d_result[task_id]
            task_info = task['info']
            idx = task_info['file']
            d_output.setdefault(idx, {})
            if task_info['pdf_page_end'] != '':
                d_output[idx]['pages'] = u'Páginas: ' + task_info['pdf_page_start'] + ' a ' + task_info['pdf_page_end']
            else:
                d_output[idx]['pages'] = u'Página: ' + task_info['pdf_page_start']
            breadcrumbs = []
            for i in range(1, 11):
                breadcrumb = task_info['pdf_title_' + str(i)]
                if breadcrumb != '':
                    breadcrumbs.append(breadcrumb)
            d_output[idx]['breadcrumbs'] = ' >> '.join(breadcrumbs)
            d_output[idx]['answers'] = task['answers']
    output = []
    crlf = '\r\n'
    for item in sorted(d_output.iteritems(), key=operator.itemgetter(0)):
        output.append(f.encode_utf_8(item[0]))
        d_item = item[1]
        output.append(f.encode_utf_8(d_item['breadcrumbs']))
        output.append(f.encode_utf_8(d_item['pages']))
        output.append(f.encode_utf_8(d_item['answers']))
    f.write_file('propuestas.txt', str(crlf * 2).join(output))
def delete_story(story_id):
    '''Deletes the chosen story from the file.'''
    story_list = functions.read_file('result.txt')
    for story in story_list:
        if int(story[0]) == int(story_id):
            story_list.remove(story)
            break  # stop: removing while iterating would skip the next item
    functions.write_file('result.txt', story_list)
    return redirect('/list')
def test_write_file(self):
    from os import remove
    empty_string = ""
    text = "123\n456\n"
    file_path = AuxiliaryTestMethods.create_temp_text_file(empty_string)
    self.assertEqual(functions.read_file(file_path), empty_string)
    self.assertNotEqual(functions.read_file(file_path), text)
    functions.write_file(file_path, text)
    self.assertEqual(functions.read_file(file_path), text)
    remove(file_path)
def syntax_check(lang, name, russian, path=''):
    if lang == 'rus':
        bigrams = rus_a_s(lang, name, russian, path)
    elif lang == 'eng':
        bigrams = eng_a_s(lang, name, path)
    elif lang == 'ger':
        bigrams = ger_a_s(lang, name, path)
    else:
        bigrams = []
    write_file(lang, name, bigrams, '3_ready', path)
def printDomains(self, domains, target, output):
    for domain in sorted(domains):
        print(domain)
        if self.output is not None:
            write_file(domain, self.engine + '_' + self.output, target)
    if self.output:
        print(f"\nSaving result... {self.engine + '_' + self.output}")

#########################################################################################################
def _setup_for_new_branch(commit):
    """Create working files and rewrite index for the current branch."""
    new_content_index = ''
    content_snap = read_file(lgit_path + '/.lgit/snapshots/%s' % commit).split('\n')
    for line_snap in content_snap:
        content = read_file(lgit_path + '/.lgit/objects/%s/%s'
                            % (line_snap[:2], line_snap[2:40]))
        file_name = line_snap[41:]  # the path follows the 40-char hash and a space
        _create_working_files(file_name, content)
        timestamp = format_mtime(file_name)
        new_content_index += (timestamp + (' ' + line_snap[:40]) * 3 +
                              ' ' + file_name + '\n')
    write_file(lgit_path + '/.lgit/index', new_content_index)
def main():
    arguments = sys.argv
    if arguments[1] == "--read":
        functions.read_file(arguments[2])
    elif arguments[1] == "--write":
        functions.write_file(arguments[2])
    elif arguments[1] == "--append":
        functions.append_file(arguments[2])
    elif arguments[1] == "--read-remote":
        functions.read_remote_file()
    elif arguments[1] == "--help":
        print("To read a file use --read followed by a file name, to read a file "
              "on a remote server use --read-remote, to write a new file use "
              "--write followed by a file name, and to edit an existing file "
              "use --append followed by the file name.")
def create_temp_dir_populated_aux(test_class, current_path, depth=3):
    from os import path
    for i in range(AuxiliaryTestMethods.get_random() % 11):
        filename = AuxiliaryTestMethods.get_random_str()
        text = AuxiliaryTestMethods.get_random_str()
        file_path = path.join(current_path, filename)
        test_class.assertFalse(path.exists(file_path))
        functions.write_file(file_path, text)
        test_class.assertTrue(path.exists(file_path))
    new_path = path.join(current_path, AuxiliaryTestMethods.get_random_str())
    test_class.assertFalse(path.exists(new_path))
    functions.makedirs(new_path)
    test_class.assertTrue(path.exists(new_path))
    if depth > 1:
        create_temp_dir_populated_aux(test_class, new_path, depth - 1)
def main():
    d_output = ape.format_results('results.json')
    crlf = '\r\n'
    output = []
    s = '======================================='
    for item in sorted(d_output.iteritems(), key=operator.itemgetter(0)):
        d_item = item[1]
        f.append(output, s + crlf + 'Propuestas tarea - ' + item[0] +
                 (' (' + d_item['task_id'] + ')') + crlf + s)
        f.append(output, d_item['breadcrumbs'])
        f.append(output, d_item['pages'] + crlf + '------------------')
        answers = d_item['answers']
        for answer in answers:
            answ = answer
            if 'desconocido' in answer:
                answer = answer.split('_')
                answer = answer[0] + ' (' + answer[1] + ')'
            else:
                answer = '(' + str(answer) + ')'
            f.append(output, 'Propuestas analista ' + answer + crlf +
                     '---------------------------------------')
            f.append(output, 'Hora de inicio: ' + f.formatTime(answers[answ]['answer_end_date']) +
                     crlf + 'Hora de fin: ' + f.formatTime(answers[answ]['answer_start_date']))
            for item in answers[answ]['answer'].split('\n'):
                if item.replace(' ', '') != '':
                    f.append(output, item + crlf + '----------')
    f.write_file('propuestas.txt', str(crlf * 2).join(output))
def printDomains(self, domains, target, output):
    for domain in sorted(domains):
        print(domain)
        self.count = self.count + 1
        if self.output is not None:
            write_file(domain, self.engine + '_' + self.output, target)
    print(G + f"\n[**] TugaRecon is complete. Entrust Datacard: {self.count} "
              f"subdomains have been found" + W)
    if self.output:
        print(f"\nSaving result... {self.engine + '_' + self.output}")

#########################################################################################################
def _remove_file_index(a_file):
    """Remove the information of the tracked a_file.

    Args:
        a_file: The tracked file.

    Returns:
        True/False: whether a_file exists in the index file.
    """
    content_index = read_file(lgit_path + '/.lgit/index').split('\n')
    had_file = False
    for line in content_index:
        if line.endswith(a_file):
            # Remove the index entry of the file:
            content_index.remove(line)
            had_file = True
            break  # stop: mutating the list while iterating would skip a line
    write_file(lgit_path + '/.lgit/index', '\n'.join(content_index))
    return had_file
def main():
    args = sys.argv[1:]  # 0 - script name
    n = '\n'
    if len(args) == 2:  # needs both the .xml file and the item tag
        d_all_xml_element_attributes, l_noticias = omc.parse_xml(args[0], args[1])
        # Hoja1, item_noticia_enero2014, item_noticia_febrero2014, item_noticia_marzo2014
        output = []
        t = '\t'
        n = '\r\n'
        output.append(f.encode_utf_8(t.join(omc.tags) + n))
        for d_noticia in l_noticias:
            print d_noticia['ID']
            print d_noticia['URL']
            print
            for attribute in d_all_xml_element_attributes:
                if attribute not in d_noticia:
                    d_noticia[attribute] = 'NUL'
            l_tags = []
            for tag in omc.tags:
                l_tags.append(d_noticia[tag])
            output.append(f.encode_utf_8(t.join(l_tags) + n))
        f.write_file(args[0].split('.')[0] + '.tcsv', ''.join(output))
        print d_all_xml_element_attributes
    else:
        print n, 'Provide one .xml file and the item tag', n
def main(args):
    leftLabelPath = args.filePath + "/label_left" + args.suffix + ".nii.gz"
    leftImagePath = args.filePath + "/image_left" + args.suffix + ".nii.gz"
    rightLabelPath = args.filePath + "/label_right" + args.suffix + ".nii.gz"
    rightImagePath = args.filePath + "/image_right" + args.suffix + ".nii.gz"
    leftLabel = sitk.ReadImage(leftLabelPath)
    leftLabelArray = sitk.GetArrayFromImage(leftLabel)
    leftImage = sitk.ReadImage(leftImagePath)
    leftImageArray = sitk.GetArrayFromImage(leftImage)
    rightLabel = sitk.ReadImage(rightLabelPath)
    rightLabelArray = sitk.GetArrayFromImage(rightLabel)
    rightImage = sitk.ReadImage(rightImagePath)
    rightImageArray = sitk.GetArrayFromImage(rightImage)
    saveLeftLabelPath = args.savePath + "/left/label_"
    saveLeftImagePath = args.savePath + "/left/image_"
    saveRightLabelPath = args.savePath + "/right/label_"
    saveRightImagePath = args.savePath + "/right/image_"
    saveTextPath = Path(args.savePath).parent / "path" / (Path(args.savePath).name + ".txt")
    leftLabelPathList = saveSliceImage256(leftLabelArray, leftLabel, saveLeftLabelPath, "nearest")
    leftImagePathList = saveSliceImage256(leftImageArray, leftImage, saveLeftImagePath, "linear")
    rightLabelPathList = saveSliceImage256(rightLabelArray, rightLabel, saveRightLabelPath, "nearest")
    rightImagePathList = saveSliceImage256(rightImageArray, rightImage, saveRightImagePath, "linear")
    print(len(leftLabelPathList), len(leftImagePathList),
          len(rightLabelPathList), len(rightImagePathList))
    for ll, li in zip(leftLabelPathList, leftImagePathList):
        write_file(str(saveTextPath), ll + "\t" + li)
    for rl, ri in zip(rightLabelPathList, rightImagePathList):
        write_file(str(saveTextPath), rl + "\t" + ri)
def main(args):
    path = Path(args.filePath)
    savePath = Path(args.savePath)
    saveSize = (256, 256)
    imagePathList = []
    labelPathList = []
    for d in ["left", "right"]:
        imagePath = path / ("image_" + d + ".nii.gz")
        labelPath = path / ("label_" + d + ".nii.gz")
        image = sitk.ReadImage(str(imagePath))
        label = sitk.ReadImage(str(labelPath))
        dummyPath = savePath / d / "dummy.mha"
        createParentPath(dummyPath)
        length = image.GetSize()[0]
        for x in range(length):
            imageSavePath = savePath / d / ("image_" + str(x).zfill(3) + ".mha")
            labelSavePath = savePath / d / ("label_" + str(x).zfill(3) + ".mha")
            imageSlice = ResamplingInAxis(image, x, saveSize)
            labelSlice = ResamplingInAxis(label, x, saveSize, is_label=True)
            sitk.WriteImage(imageSlice, str(imageSavePath), True)
            sitk.WriteImage(labelSlice, str(labelSavePath), True)
            imagePathList.append(str(imageSavePath))
            labelPathList.append(str(labelSavePath))
    textSavePath = savePath.parent / "path" / (path.name + ".txt")
    createParentPath(textSavePath)
    imagePathList = sorted(imagePathList)
    labelPathList = sorted(labelPathList)
    for x, y in zip(imagePathList, labelPathList):
        write_file(textSavePath, str(y) + "\t" + str(x))
def enumerate(self, response, output, target):
    subdomains = set()
    self.subdomainscount = 0
    start_time = time.time()
    try:
        while self.subdomainscount < 10000:
            subdomains = response.json()[self.subdomainscount]["name_value"]
            if not subdomains:
                print(f"[x] Oops! No data found for {self.target} using SSL Certificates.")
                break  # the counter only advances below, so bail out to avoid an endless loop
            self.subdomainscount = self.subdomainscount + 1
            if "@" in subdomains:  # filter out e-mail addresses
                continue
            print(f"[*] {subdomains}")
            if self.output is not None:
                write_file(subdomains, self.engine + '_' + self.output, target)
        if self.output:
            print(f"\nSaving result... {self.engine + '_' + self.output}")
    except IndexError:
        pass
    print(G + f"\n[**] TugaRecon is complete. SSL Certificates: {self.subdomainscount} "
              f"subdomains have been found in {time.time() - start_time} seconds" + W)
def main():
    results = f.parse_json_file('results.json')
    output = []
    crlf = '\r\n'
    s = ['========================', '----------']
    questions = {
        'p0': u'1) ¿Qué personas, grupos y/o asociaciones identificas en la noticia?',
        'p1': u'2) ¿Quién o qué organismo ha facilitado la información al periodista para redactar la noticia?',
        'p2': u'3) ¿Aparece una voz del sur (testimonio directo) que cuente su opinión en la noticia?',
        'p3': u'4) ¿La noticia identifica y/o explica las causas de la problemática planteada?'
    }
    for result in results:
        d_result = dict(result)
        for task_id in d_result:
            task = d_result[task_id]
            if 'answers' in task:  # Allow partial results
                f.append(output, s[0] + crlf + 'Respuestas tarea (' + str(task_id) + ')' +
                         crlf + s[0] + crlf)
                id_noticia = task['info']['details']['id_noticia']
                noticia_json = dict(f.parse_json_file('json/' + id_noticia + '.json'))
                for tag in omc.tags:
                    if tag in noticia_json:
                        tag_value = noticia_json[tag]
                        if tag_value != 'NUL':
                            f.append(output, tag + ': ' + tag_value + crlf)
                answers = task['answers']
                for answer in answers:
                    answ = answer
                    if 'desconocido' in answer:
                        answer = answer.split('_')
                        answer = answer[0] + ' (' + answer[1] + ')'
                    else:
                        answer = '(' + str(answer) + ')'
                    f.append(output, 'Propuestas analista ' + answer + crlf +
                             '---------------------------------------')
                    f.append(output, 'Hora de inicio: ' + f.formatTime(answers[answ]['answer_end_date']) +
                             crlf + 'Hora de fin: ' + f.formatTime(answers[answ]['answer_start_date']) + crlf)
                    f.append(output, u'Categorías' + crlf + s[1] + crlf * 2 +
                             answers[answ]['answer']['tags'] + crlf)
                    for q in ['p0', 'p1', 'p2', 'p3']:
                        f.append(output, questions[q] + crlf)
                        for item in answers[answ]['answer'][q].split('\n'):
                            if item.replace(' ', '') != '':
                                f.append(output, item + crlf + s[1] + crlf)
                    comments = answers[answ]['answer']['comentarios']
                    if comments.replace(' ', '') != '':
                        f.append(output, 'Comentarios' + crlf + s[1] + crlf * 2 + comments + crlf)
    f.write_file('noticias.txt', crlf.join(output))
def main():
    results = f.parse_json_file('results.json')
    d_output = {}
    crlf = '\r\n'
    for result in results:
        d_result = dict(result)
        for task_id in d_result:
            task = d_result[task_id]
            if 'answers' in task:  # Allow partial results
                task_info = task['info']
                idx = task_info['file']
                d_output.setdefault(idx, {})
                d_output[idx]['task_id'] = str(task_id)
                if task_info['pdf_page_end'] != '':
                    d_output[idx]['pages'] = u'Páginas: ' + task_info['pdf_page_start'] + ' a ' + task_info['pdf_page_end']
                else:
                    d_output[idx]['pages'] = u'Página: ' + task_info['pdf_page_start']
                breadcrumbs = []
                for i in range(1, 11):
                    breadcrumb = task_info['pdf_title_' + str(i)]
                    if breadcrumb != '':
                        breadcrumbs.append(breadcrumb)
                d_output[idx]['breadcrumbs'] = crlf.join(breadcrumbs)
                d_output[idx]['answers'] = task['answers']
    output = []
    for item in sorted(d_output.iteritems(), key=operator.itemgetter(0)):
        info = f.encode_utf_8(item[0])
        d_item = item[1]
        s = "======================================="
        output.append(s + crlf + 'Propuestas tarea - ' + info +
                      f.encode_utf_8(' (' + d_item['task_id'] + ')') + crlf + s)
        output.append(f.encode_utf_8(d_item['breadcrumbs']))
        output.append(f.encode_utf_8(d_item['pages']))
        output.append('------------------')
        output.append(f.encode_utf_8(d_item['answers']))
    f.write_file('propuestas.txt', str(crlf * 2).join(output))
def main():
    results = f.parse_json_file('results.json')
    output = []
    crlf = '\r\n'
    sep = ['=====================================================================',
           '=================',
           '------------------------------',
           '----------']
    a_def = ['El tweet',
             u'se refiere directamente a la ciudadanía, los ciudadanos y/o ciudadanas.',
             u'El diputado o diputada se refiere a la ciudadanía en ',
             ' persona.']
    f.append(output, sep[0])
    f.append(output, u'¿Están alejados los diputados y diputadas españoles de la ciudadanía?')
    f.append(output, sep[0] + crlf)
    for result in results:
        d_result = dict(result)
        for task_id in d_result:
            task = d_result[task_id]
            if 'answers' in task:  # Allow partial results
                task_details = task['info']['details']
                f.append(output, 'Tarea - ' + str(task_id) + crlf + sep[1] + crlf)
                tweet = task_details['tweet']
                for match in re.finditer('http://t.co/', tweet):
                    url_value = tweet[match.start():match.end() + 10]
                    tweet = tweet.replace("<a title='" + url_value + "' href ='" + url_value +
                                          "' target='_blank'>" + url_value + "</a>", url_value)
                f.append(output, tweet + crlf)
                f.append(output, task_details['date'] + crlf)
                f.append(output, task_details['user'] + ' - ' + task_details['url'] + crlf)
                task_answers = task['answers']
                for answer in task_answers:
                    analista = 'Analista '
                    if 'desconocido' in answer:
                        answ = answer.split('_')
                        analista += answ[0] + ' (' + answ[1] + ')'
                    else:
                        analista += '(' + str(answer) + ')'
                    f.append(output, analista + crlf + sep[2] + crlf)
                    a_answer = task_answers[answer]
                    f.append(output, 'Hora de inicio: ' + f.formatTime(a_answer['answer_end_date']) +
                             crlf + 'Hora de fin: ' + f.formatTime(a_answer['answer_start_date']) + crlf)
                    a_value = a_answer['answer']
                    if a_value == 'NotKnown':
                        f.append(output, a_def[0] + u' no sé si ' + a_def[1] + crlf + sep[3] + crlf)
                    elif a_value == 'No':
                        f.append(output, a_def[0] + ' no ' + a_def[1] + crlf + sep[3] + crlf)
                    else:
                        f.append(output, a_def[0] + ' ' + a_def[1] + crlf + sep[3] + crlf)
                        f.append(output, a_def[2] + a_value.lower() + a_def[3] + crlf + sep[3] + crlf)
    f.write_file('tweets.txt', crlf.join(output))
if __name__ == '__main__':
    # Set the language, the word and its Cyrillic equivalent (for Russian),
    # and (optionally) the path where the bigram files are located
    if len(sys.argv) > 1:
        path_name = sys.argv[1] + '\\'
    else:
        path_name = os.getcwd()
        path_name = path_name[0:path_name.rfind('\\')] + '\\'
    word_lines = opening(path_name)
    clusters_result = find_common(word_lines)
    clusters_lines = []
    i = 1
    for cluster in clusters_result:
        cl = []
        for noun in clusters_result[cluster]:
            adj = ''
            for a in noun[1]:
                adj = adj + ';' + a
            cl.append(noun[0] + adj)
        cl = make_categories(cl, os.listdir(path_name + 'result\\3_ready\\'), 0)
        for line in cl:
            clusters_lines.append(str(i) + ';' + cluster + ';' + line)
        i += 1
        # print(line, len(clusters_result[line]))
    print(len(clusters_result))
    write_file('all', 'languages', clusters_lines, '7_classified', path_name, 'csv')
def _update_head_file(branch_name):
    """Update the HEAD file with the branch_name."""
    # "refs/heads", matching the refs layout used elsewhere in the repository
    content = 'ref: refs/heads/%s' % branch_name
    write_file(lgit_path + '/.lgit/HEAD', content)
def write_file(file_path, text):
    return functions.write_file(file_path, text)
    elif args.reset:
        functions.reset_data()
    elif args.reset_all:
        functions.reset_all_data()
    elif args.copy_password:
        functions.copy_to_clipboard()
    elif args.generate_key:
        backup.write_key()
    elif args.encrypt and args.decrypt:
        raise TypeError(
            "Please specify either encryption or decryption, not both.")
    elif args.encrypt:
        functions.write_file()
        backup.encrypt(file, backup.load_key())
    elif args.decrypt:
        backup.decrypt(file, backup.load_key())
    else:
        raise TypeError(
            "Please specify whether you want to encrypt the file or decrypt it.")
else:
    print('no luck')
    exit()
def main(): """ http://terramapas.icv.gva.es/odcv05_etrs89h30_2010/? &SERVICE=WMS &VERSION=1.3.0 &REQUEST=GetMap &LAYERS=ortofoto_2010_valencia &FORMAT=image/png &CRS=EPSG:25830 &BBOX=727955.71890415,4372723.5411562,728125.05214607,4372892.8743981 # minx miny maxx maxy &WIDTH=256 &HEIGHT=256 """ ###################### # DO NOT CHANGE THIS # ###################### # DEFAULT TILE SIZE def_tile_width = 256 def_tile_height = 256 # DEFAULT LIMITS def_min_x_y = [727955.71890415, 4372723.5411562] # [min_x, min_y] def_max_x_y = [728125.05214607, 4372892.8743981] # [max_x, max_y] # DEFAULT DIFF def_diff_width = def_max_x_y[0] - def_min_x_y[0] # [max_x, min_x] def_diff_height = def_max_x_y[1] - def_min_x_y[1] # [max_y, min_y] #################################################################### ############### # CHANGE THIS # ############### # SET NEW TILE SIZE new_tile_width = 940 new_tile_height = 356 # SET NEW LIMITS new_min_x_y = [727814.88208, 4373064.54349] # [min_x, min_y] new_max_x_y = [729355.75638, 4374066.81196] # [max_x, max_y] # NEW DIFF """ def_tile_width - def_diff_width new_tile_width - x """ def_diff_width = (new_tile_width * def_diff_width) / def_tile_width """ def_tile_height - def_diff_height new_tile_height - x """ def_diff_height = (new_tile_height * def_diff_height) / def_tile_height #################################################################### # FIRST START POINT first_start_point = [new_min_x_y[0], new_max_x_y[1]] # [min_x, max_y] top_left # FIRST END POINT first_end_point = [new_max_x_y[0], new_max_x_y[1]] # [max_x, max_y] top_right # LAST END POINT last_end_point = [new_max_x_y[0], new_min_x_y[1]] # [max_x, min_y] bottom_right """ [fsp]-------[fep] # first_start_point | # first_end_point | | [nsp] [nep] # n_start_point | # n_end_point | | [lsp]-------[lep] # last_start_point | # last_end_point """ # GET THE TILES current_row = 0 tiles = [] current_point = [first_start_point[0], first_start_point[1], 0, first_start_point[1]] # min_x min_y max_x max_y current_end_point = [0, 0, first_end_point[0], first_end_point[1]] # min_x min_y max_x max_y while True: current_min_y = current_point[1] - def_diff_height # max_y - def_diff_height current_max_x = current_point[0] + def_diff_width # min_x + def_diff_width current_tile = [current_point[0], current_min_y, current_max_x, current_point[3]] # min_x min_y max_x max_y tiles.append(current_tile) print current_tile if current_tile[2] > current_end_point[2]: # max_x > max_x (n_end_point) print current_row += 1 new_y = first_start_point[1] - (def_diff_height * current_row) current_point[0] = first_start_point[0] # min_x current_point[1] = new_y # min_y current_point[3] = new_y # max_y else: current_point[0] = current_max_x # min_x if current_tile[2] > current_end_point[2] and current_tile[1] < last_end_point[1]: # min_y > min_y (bottom_right) break output = [] t = '\t' crlf = '\r\n' # WRITE RESULT for tile in tiles: min_x = "'" + str(tile[0]) + t min_y = "'" + str(tile[1]) + t max_x = "'" + str(tile[2]) + t max_y = "'" + str(tile[3]) + crlf output.append(min_x + min_y + max_x + max_y) f.write_file('tiles.txt', ''.join(output))