def persist_json() -> str:
    try:
        write_to_file("urls.json", json_urls)
        return "True"
    except Exception as err:
        print("Error occurred when trying to persist JSON:", err)
        return "False"
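# A minimal sketch of the write_to_file helper that persist_json above assumes.
# The real helper is not shown in this snippet; the signature and the JSON
# serialization below are assumptions for this example only.
import json

def write_to_file(path, data):
    # Write `data` to `path`, serializing it as JSON if it is not already a string.
    with open(path, "w") as out_file:
        if isinstance(data, str):
            out_file.write(data)
        else:
            json.dump(data, out_file)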
def get_job_description(job_postings, start, end, dest_dir, verbose=False):
    '''
    Iterates through the [start, end) portion of the job postings, retrieves their
    full description, and writes to file.

    Input:
        job_postings: list of JobPosting
        start: start index
        end: end index
        dest_dir: write path for file storage
    Returns:
        No return, writes to file
    '''
    for i in range(start, end):
        job_posting = job_postings[i]
        job_page_dic = util.get_request_to_dic(job_posting.url, verbose)
        about_us = util.extract_adjacent(job_page_dic, 'richTextArea.siteInfo.aboutUs', 'text')
        rich_description = util.extract_adjacent(job_page_dic, 'richTextArea.jobPosting.jobDescription', 'text')
        job_type = util.extract_adjacent(job_page_dic, 'labeledImage.JOB_TYPE', 'imageLabel')
        posted_date = util.extract_adjacent(job_page_dic, 'labeledImage.POSTED_DATE', 'imageLabel')
        location = util.extract_adjacent(job_page_dic, 'labeledImage.LOCATION', 'imageLabel')
        req_id = util.extract_adjacent(job_page_dic, 'labeledImage.JOB_REQ', 'imageLabel')

        job_info = job_posting.info
        job_info['id'] = job_posting.ID
        job_info['req_id'] = req_id
        job_info['title'] = job_page_dic['openGraphAttributes']['title']
        job_info['job_type'] = job_type
        job_info['location'] = location
        job_info['posted_date'] = posted_date
        job_info['rich_description'] = rich_description
        job_info['about_us'] = about_us

        util.write_to_file(job_posting.ID, job_info, dest_dir)
def count_colors(capture, videoPath):
    counter = {}
    frame_number = 0
    ret = True
    while ret:
        start = time.time()
        ret, frame = capture.read()
        if not ret:
            break
        currentFramePixelsPerColorCount = get_pixel_count_per_color_in_frame(frame)
        filePath = OUTPUT_FOLDER + os.path.basename(videoPath) + '-' + str(frame_number)
        util.write_to_file(format_json(currentFramePixelsPerColorCount), filePath + '.json')
        # show the frames
        # cv2.imshow('original', frame)
        cv2.imwrite(filePath + '.png', frame)
        k = cv2.waitKey(30) & 0xFF
        if k == 27:
            break
        frame_number += 1
        end = time.time()
        print(frame_number, ', time: ', end - start)
    return counter
def create_fstab(self, mnt_pt):
    text = """/dev/hda1\t/\text3\tdefaults,errors=remount-ro\t0\t1
/dev/hda2\tnone\tswap\tsw\t0\t0
proc\t/proc\tproc\tdefaults\t0\t0
"""
    util.write_to_file("%s/etc/fstab" % mnt_pt, "w", text)
def get_job_description(job_postings, start, end, dest_dir, verbose=False):
    for i in range(start, end):
        job_posting = job_postings[i]
        job_page_dic = util.get_request_to_dic(job_posting.url, verbose)
        description = util.extract_key(job_page_dic, 'description')
        job_info = job_posting.info
        job_info['description'] = description
        util.write_to_file(job_posting.ID, job_info, dest_dir)
async def download_policy_html(visit_info, page):
    target_url = visit_info.policy_snapshot_url
    year = visit_info.year
    season = visit_info.season
    url_id = visit_info.url_id
    visit_info.stage = "policy_download"
    log_("info", logger, visit_info, "Will download policy html")
    t0 = time()
    safe_filename = safe_filename_from_url(target_url)
    await load_page(page, target_url, url_id, timeout=PAGE_LOAD_TIMEOUT)
    load_time = time() - t0
    log_("info", logger, visit_info, "Loaded policy page in %0.1f" % load_time)

    invalid_redirection, reason = is_invalid_redirection(page, target_url, visit_info)
    if invalid_redirection:
        log_("error", logger, visit_info,
             "ERR-401: Invalid redirection while loading policy: %s "
             "Current url: %s" % (reason, page.url))
        return

    content = await page.content()
    page_text = await get_printable_text(page)
    if not len(page_text):
        log_("error", logger, visit_info, "ERR-409: Blank policy page")
        return

    text_lang, detection_err = await get_text_language(page_text)
    if detection_err is not None:
        log_("error", logger, visit_info,
             "ERR-408: Error detecting policy language: %s %s %s" %
             (len(page_text), page.url, detection_err))
        return
    if text_lang != "en":
        log_("error", logger, visit_info,
             "ERR-402: Non-english policy page %s" % text_lang)
        return

    readable_html = await get_readable_html(page)
    # await page.screenshot({'path': '%s.png' % domain})
    if readable_html and READABILTY_FAILURE_STR not in readable_html:
        safe_filename = get_wb_file_name(target_url, year, season, "readable.html", url_id)
        write_to_file(join(READABLE_POLICY_HTML_DIR, safe_filename), readable_html)
    else:
        log_("info", logger, visit_info,
             "ERR-403: Readability script failed for policy page")

    safe_filename = get_wb_file_name(target_url, year, season, "privacy.html", url_id)
    write_to_file(join(POLICY_HTML_DIR, safe_filename), content)
    log_("info", logger, visit_info,
         "OK. Successfully saved the policy page %s" % safe_filename)
def _disable_ttys(self, mnt_pt):
    # disable gettys
    util.run_command("sed -i -e 's/^\([1-6].*:respawn*\)/#\1/' -e 's/^T/#\t/' %s/etc/inittab" % mnt_pt)
    text = "S0:12345:respawn:/sbin/getty -L console 9600 vt100\n"
    util.write_to_file("%s/etc/inittab" % mnt_pt, "a", text)
def output_issue_plan(mapped_issues, sprint, output_file):
    output = StringIO()
    contents = build_buffered_output(output, sprint, mapped_issues)
    full_output_file_path = handle_file_creation(output_file)
    write_to_file(full_output_file_path, contents)
    output.close()
    sys.stdout.write(
        "Success! Sprint plan has been successfully written to file: /outputs/{} :)\n"
        .format(output_file))
def main():
    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREAD) as executor:
        results = executor.map(request_to_check_for_mobile_friendly_ssl,
                               get_all_from_email(FILE_LOCATION_TO_READ_FOR))
        for result in results:
            email, has_m, has_ssl = result
            # serialize writes of the worker results under the shared lock
            with lock:
                write_to_file(FILE_LOCATION_FOR_SAVING, email, has_m, has_ssl[1])
def process_file(self):
    self.cut_words()
    self.generate_ann_list()
    result_file = os.path.join(self.result_dir, 'format_' + self.result_fname + '.txt')
    util.write_to_file(result_file, self.word_list, self.pos_list,
                       self.location_list, self.ann_list, is_keep_space=True)
def convert_anvil_file_to_annotation_csv_frames_given_frame_rate_and_save_to_file(
        anvil_xml_file_path, frame_rate, file_path):
    ## input validation
    if not os.path.exists(anvil_xml_file_path):
        print('anvil xml file', anvil_xml_file_path, 'does not exist')
        sys.exit()
    if os.access(file_path, os.W_OK):
        # note: exits when the output file already exists and is writable
        print('cannot write to', file_path)
        sys.exit()

    csv_text = get_annotation_frames(anvil_xml_file_path, frame_rate)
    util.write_to_file(csv_text, file_path)
def unignore(self, redditor):
    """ Removes the specified redditor from the blacklist if they are on it. """
    name = redditor.name.lower()
    if name not in self.blacklist:
        print("'" + name + "' is not blacklisted.")
        self.reddit.send_message(name, MentionedBot.NAME, self.NOT_IGNORED_NOTIFICATION)
        return
    self.blacklist.remove(name)
    util.write_to_file(util.BLACKLIST_FILE, ','.join(self.blacklist))
    self.reddit.send_message(name, MentionedBot.NAME, self.UNIGNORED_NOTIFICATION)
    print("'" + name + "' unignored.")
def create_xen_config(self):
    text = """kernel = "%s"
ramdisk = "/boot/initrd-%s.img"
memory = %s
name = "%s"
disk = ['file:%s,hda1,w','file:%s,hda2,w']
vif = ['bridge=xenbr0']
root = "/dev/hda1 ro"
""" % (self._kernel, self._name, self._mem, self._name, self._path, self._swap)
    util.write_to_file("/etc/xen/%s" % self._name, "w", text)
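# For illustration only: with hypothetical attribute values
# (_kernel="/boot/vmlinuz-xen", _name="vm01", _mem=256,
#  _path="/vms/vm01.img", _swap="/vms/vm01-swap.img"),
# the template above would render /etc/xen/vm01 as:
#
#   kernel = "/boot/vmlinuz-xen"
#   ramdisk = "/boot/initrd-vm01.img"
#   memory = 256
#   name = "vm01"
#   disk = ['file:/vms/vm01.img,hda1,w','file:/vms/vm01-swap.img,hda2,w']
#   vif = ['bridge=xenbr0']
#   root = "/dev/hda1 ro"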
def download_policy_pdf(url, visit_info):
    log_("info", logger, visit_info, "PDF link, will download")
    try:
        content, url, status_code = fetch_url(url)
    except Exception as exc:
        raise PdfDownloadError("Exception while downloading the PDF %s" % exc)

    if content and status_code == requests.codes.ok:  # noqa
        safe_filename = get_wb_file_name(url, visit_info.year, visit_info.season,
                                         "privacy.pdf", visit_info.url_id)
        write_to_file(join(POLICY_PDF_DIR, safe_filename), content, mode='wb')
        log_("info", logger, visit_info,
             "OK. Successfully saved the policy PDF %s" % safe_filename)
    else:
        # we get a non-OK response while downloading the policy PDF
        raise HttpStatusError("PDF download error: %s" % status_code)
def reducer(filename):
    '''
    Given the output of the mapper for this assignment, the reducer should
    print one row per weather type, along with the average value of
    ENTRIESn_hourly for that weather type, separated by a tab.

    You can assume that the input to the reducer will be sorted by weather
    type, such that all entries corresponding to a given weather type will be
    grouped together.

    In order to compute the average value of ENTRIESn_hourly, you'll need to
    keep track of both the total riders per weather type and the number of
    hours with that weather type. That's why we've initialized the variables
    riders and num_hours below. Feel free to use a different data structure in
    your solution, though.

    An example output row might look like this:
    'fog-norain\t1105.32467557'

    Since you are printing the output of your program, printing a debug
    statement will interfere with the operation of the grader. Instead, use
    the logging module, which we've configured to log to a file printed when
    you click "Test Run". For example: logging.info("My debugging message")

    Note that, unlike print, logging.info will take only a single argument. So
    logging.info("my message") will work, but logging.info("my","message")
    will not.
    '''
    with open(filename) as subway_weather_data:
        tallied_sub_data = partition_weather(subway_weather_data)
        print(tallied_sub_data)
        output_filename = "subway_weather_ridership_count.txt"
        for k, v in tallied_sub_data.items():
            write_to_file(output_filename, "{}\t{}\n".format(k, v[0] / v[1]))
def test_with_uniform_initialization(self, attack_rel_init, votes_init, filename1, filename2):
    # the set of initial values we want to iterate over
    initial_values_set = util.get_initial_set(0.2, len(votes_init))
    # the original permutation
    original_permutation = np.array(range(len(votes_init)))
    # finds all the possible permutations. This is used as different labellings
    labellings = list(itertools.permutations(original_permutation))
    attack_rel = []
    votes = []
    for label in labellings:
        # stores the results for INR
        # Of form [unique fixed points, initializations, number of times it converges to that solution]
        inr_results = []
        # stores the results for ISS
        iss_results = []
        # copy the inputs so that the in-place relabelling below does not
        # modify attack_rel_init / votes_init across iterations
        attack_rel = attack_rel_init.copy()
        label = np.array(label)
        # modify the attack_rel and the votes according to the new labelling
        attack_rel[original_permutation, :] = attack_rel[label, :]
        attack_rel[:, original_permutation] = attack_rel[:, label]
        votes = votes_init.copy()
        votes[original_permutation] = votes[label]
        for i in range(len(initial_values_set)):
            # initial_values = np.array([0.4366013, 0.57195905, 0.16146849, 0.8727271])
            # finds the converged values
            conv_INR = self.run_INR(initial_values_set[i], attack_rel, votes, label)
            conv_ISS = self.run_ISS(initial_values_set[i], attack_rel, votes, label)
            if i == 0:
                # the first value is directly appended
                inr_results.append([conv_INR, initial_values_set[i], 1])
                iss_results.append([conv_ISS, initial_values_set[i], 1])
            else:
                # for the others, add depending on whether the fixed point has already been found
                inr_results = self.add_to_existing(inr_results, conv_INR, initial_values_set[i])
                iss_results = self.add_to_existing(iss_results, conv_ISS, initial_values_set[i])
        # Write the results for the labelling to the file
        util.write_to_file(filename1, label, inr_results)
        util.write_to_file(filename2, label, iss_results)
def handle_output(since_sha, sorted_formatted_commits, output_file):
    if not output_file:
        output_file = PRS_FILENAME
    output = StringIO()
    output.write('---------------------------------------\n')
    output.write("Showing commits merged into develop after: {}\n".format(since_sha))
    output.write('---------------------------------------\n')
    output.write("Total Merged PRs: {}\n".format(len(sorted_formatted_commits)))
    output.write('---------------------------------------\n')
    buffer_list_of_dicts(output, sorted_formatted_commits)
    contents = output.getvalue()
    full_output_file_path = handle_file_creation(output_file)
    write_to_file(full_output_file_path, contents)
    sys.stdout.write(
        "Success! Release review has been successfully written to file: /outputs/{} :)\n".format(output_file)
    )
    output.close()
def get_job_description(job_postings, start, end, dest_dir, verbose=False):
    '''
    Iterates through the [start, end) portion of the job postings, retrieves their
    full description, and writes to file.

    Input:
        job_postings: list of JobPosting
        start: start index
        end: end index
        dest_dir: write path for file storage
    Returns:
        No return, writes to file
    '''
    for i in range(start, end):
        job_posting = job_postings[i]
        job_page_dic = util.get_request_to_dic(job_posting.url, verbose)
        description = util.extract_key(job_page_dic, 'description')
        job_info = job_posting.info
        job_info['link'] = job_posting.url
        job_info['description'] = description
        util.write_to_file(job_posting.ID, job_info, dest_dir)
def run(videoPath, useRgb):
    start = datetime.now()
    outputPath = OUTPUT_FOLDER + os.path.basename(videoPath) + '-diffs-h.csv'

    if not os.path.exists(videoPath):
        print('file', videoPath, 'does not exist')
        sys.exit()
    if os.access(outputPath, os.W_OK):
        # note: exits when the output file already exists and is writable
        print('cannot write to', outputPath)
        sys.exit()

    capture = cv2.VideoCapture(videoPath)
    output = util.calc_diffs(calcDistances(capture, useRgb))
    capture.release()
    cv2.destroyAllWindows()
    util.write_to_file(util.format_csv(output), outputPath)

    end = datetime.now()
    print('time taken:', str(end - start))
def compare_hlds_variants():
    """
    TODO: kill bugs

    BUG1: sentence001-original-test contains 2(!) <item> sentences.
    """
    hlds_reader = hlds.HLDSReader(hlds.testbed_file)
    for i, sentence in enumerate(hlds_reader.sentences):
        xml_sentence_test = hlds.create_hlds_file(sentence, mode="test", output="xml")
        util.write_to_file(
            xml_sentence_test,
            "xmltest/sentence{0}-converted-test.xml".format(str(i).zfill(3)))

        xml_sentence_realize = hlds.create_hlds_file(sentence, mode="test", output="xml")
        util.write_to_file(
            xml_sentence_realize,
            "xmltest/sentence{0}-converted-realize.xml".format(str(i).zfill(3)))

    for i, item_etree in enumerate(hlds_reader.xml_sentences):
        root = etree.Element("regression")
        doc = etree.ElementTree(root)
        root.insert(0, item_etree)
        xml_sentence_original = hlds.etreeprint(doc)
        util.write_to_file(
            xml_sentence_original,
            "xmltest/sentence{0}-original-test.xml".format(str(i).zfill(3)))
def run(videoPath, outputPath, pHistory=0, pVarTreshold=8):
    start = datetime.now()

    if not os.path.exists(videoPath):
        print('file', videoPath, 'does not exist')
        sys.exit()
    if os.access(outputPath, os.W_OK):
        # note: exits when the output file already exists and is writable
        print('cannot write to', outputPath)
        sys.exit()

    capture = cv2.VideoCapture(videoPath)
    output = countColors(capture)
    rgb_diffs = util.prefix_dict_keys(util.calc_diffs(output), 'diff')
    output.update(rgb_diffs)
    capture.release()
    cv2.destroyAllWindows()
    util.write_to_file(util.format_csv(output), outputPath)

    end = datetime.now()
    print('time taken:', str(end - start))
def run(videoPath, outputPath, pHistory=0, pVarTreshold=8):
    start = datetime.now()

    ## input validation
    if not os.path.exists(videoPath):
        print('file', videoPath, 'does not exist')
        sys.exit()
    if os.access(outputPath, os.W_OK):
        # note: exits when the output file already exists and is writable
        print('cannot write to', outputPath)
        sys.exit()

    # setup
    capture = cv2.VideoCapture(videoPath)
    fgbg = cv2.createBackgroundSubtractorMOG2(history=pHistory,
                                              varThreshold=pVarTreshold,
                                              detectShadows=False)

    # processing
    output = countFgBgPixels(capture, fgbg)
    util.write_to_file(util.format_csv(output), outputPath)

    # teardown
    capture.release()
    cv2.destroyAllWindows()

    end = datetime.now()
    print('time taken:', str(end - start))
def network_setup(self, mnt_pt):
    util.write_to_file("%s/etc/hostname" % mnt_pt, "w", self._name + "\n")
    util.write_to_file("%s/etc/hosts" % mnt_pt, "w",
                       "127.0.0.1 localhost %s\n" % self._name)
    text = """auto lo
iface lo inet loopback
auto eth0
iface eth0 inet dhcp
"""
    util.write_to_file("%s/etc/network/interfaces" % mnt_pt, "w", text)
def reducer(ridership_file):
    '''
    Write a reducer that will compute the busiest date and time (that is, the
    date and time with the most entries) for each turnstile unit. Ties should
    be broken in favor of datetimes that are later on in the month of May. You
    may assume that the contents of the reducer will be sorted so that all
    entries corresponding to a given UNIT will be grouped together.

    The reducer should print its output with the UNIT name, the datetime
    (which is the DATEn followed by the TIMEn column, separated by a single
    space), and the number of entries at this datetime, separated by tabs.

    For example, the output of the reducer should look like this:

    R001    2011-05-11 17:00:00    31213.0
    R002    2011-05-12 21:00:00    4295.0
    R003    2011-05-05 12:00:00    995.0
    R004    2011-05-12 12:00:00    2318.0
    R005    2011-05-10 12:00:00    2705.0
    R006    2011-05-25 12:00:00    2784.0
    R007    2011-05-10 12:00:00    1763.0
    R008    2011-05-12 12:00:00    1724.0
    R009    2011-05-05 12:00:00    1230.0
    R010    2011-05-09 18:00:00    30916.0
    ...
    ...

    Since you are printing the output of your program, printing a debug
    statement will interfere with the operation of the grader. Instead, use
    the logging module, which we've configured to log to a file printed when
    you click "Test Run". For example: logging.info("My debugging message")

    Note that, unlike print, logging.info will take only a single argument. So
    logging.info("my message") will work, but logging.info("my","message")
    will not.
    '''
    with open(ridership_file) as ridership_data:
        output_filename = "./subway_busiest_hour.txt"
        ridership_dict = reduce(calculate_busiest_time,
                                [data for data in ridership_data], {})
        for k, v in ridership_dict.items():
            write_to_file(output_filename, generate_output(k, v))
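# The reducer above relies on a generate_output helper that is not included in
# this snippet. A hypothetical sketch, assuming calculate_busiest_time leaves
# each value as a (datetime_string, entries) pair keyed by UNIT:
def generate_output(unit, busiest):
    datetime_str, entries = busiest
    # UNIT, "DATEn TIMEn", and the entry count, tab-separated, one row per line
    return "{}\t{}\t{}\n".format(unit, datetime_str, entries)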
def reducer(ridership_file):
    '''
    Given the output of the mapper for this exercise, the reducer should PRINT
    (not return) one line per UNIT along with the total number of
    ENTRIESn_hourly over the course of May (which is the duration of our data),
    separated by a tab.

    An example output row from the reducer might look like this:
    'R001\t500625.0'

    You can assume that the input to the reducer is sorted such that all rows
    corresponding to a particular UNIT are grouped together.

    Since you are printing the output of your program, printing a debug
    statement will interfere with the operation of the grader. Instead, use
    the logging module, which we've configured to log to a file printed when
    you click "Test Run". For example: logging.info("My debugging message")

    Note that, unlike print, logging.info will take only a single argument. So
    logging.info("my message") will work, but logging.info("my","message")
    will not.
    '''
    with open(ridership_file) as ridership_data:
        output_filename = "./subway_ridership_count.txt"
        ridership_dict = reduce(count_entries,
                                [data for data in ridership_data], {})
        for k, v in ridership_dict.items():
            write_to_file(output_filename, "{}\t{}\n".format(k, v))
        <Payload>
            <ser:Services>
                <ser:Request>
                    <ser:Operation_Name>abillityReferenceApi</ser:Operation_Name>
                    <ser:ChangeServicesRequest>
                        <ser:request>
                            <EVENT xmlns="">
                                %s
                            </EVENT>
                        </ser:request>
                    </ser:ChangeServicesRequest>
                </ser:Request>
            </ser:Services>
        </Payload>
    </EaiEnvelope>
</com:clientRequest>
</soapenv:Body>
</soapenv:Envelope>
""" % tup[0]
    request = urllib2.Request(url, xml)
    response = urllib2.urlopen(request)
    return tup[1]


if __name__ == "__main__":
    lines = open(line_file, 'r')
    for line in lines:
        tup = build_request(line)
        msisdn = send_command(tup)
        write_to_file(msisdn, numbers)
from util import get_courses, get_course_info, write_to_file
import sys

building = sys.argv[1]
courses = get_courses(building)
info = [get_course_info(c) for c in courses]

rename = {'valley lsb': 'vlsb'}
if building in rename:
    building = rename[building]

write_to_file(info, building)
def main(args): """The main process of autocorrelation methods. :param args: an object of the arguments. """ file_list = args.inputfiles label_list = args.labels output_format = args.f if len(file_list) == 0: print('Input files not found.') return False if output_format == 'svm' and len(label_list) == 0: print('The labels of the input files should be set.') return False if output_format == 'svm' and len(file_list) != len(label_list): print( 'The number of labels should be the same as that of the input files.' ) return False if args.out is not None: outputfile_list = args.out if len(outputfile_list) != len(file_list): print( 'The number of output files should be the same as that of input files.' ) return False elif args.out is None: outputfile_list = [] if output_format == 'svm': for in_file_name in file_list: file_elem_list = list(os.path.splitext(in_file_name)) out_name = file_elem_list[0] + '_svm' + file_elem_list[1] outputfile_list.append(out_name) elif output_format == 'tab': for in_file_name in file_list: file_elem_list = list(os.path.splitext(in_file_name)) out_name = file_elem_list[0] + '_tab' + file_elem_list[1] outputfile_list.append(out_name) elif output_format == 'csv': for in_file_name in file_list: file_elem_list = list(os.path.splitext(in_file_name)) out_name = file_elem_list[0] + '_csv' + file_elem_list[1] outputfile_list.append(out_name) if output_format != 'svm': label_list = [0] * len(file_list) if args.method.upper() not in ['MAC', 'GAC', 'NMBAC', 'PDT']: for input_file, output_file, label in zip(file_list, outputfile_list, label_list): with open(input_file) as f: k = read_k(args.alphabet, args.method, 0) # Get index_list. if args.i is not None: from pse import read_index ind_list = read_index(args.i) else: ind_list = [] default_e = [] # Set Pse default index_list. if args.alphabet == 'DNA': alphabet_list = index_list.DNA if k == 2: default_e = const.DI_INDS_6_DNA elif k == 3: default_e = const.TRI_INDS_DNA elif args.alphabet == 'RNA': alphabet_list = index_list.RNA default_e = const.DI_INDS_RNA elif args.alphabet == 'Protein': alphabet_list = index_list.PROTEIN default_e = const.INDS_3_PROTEIN else: print('The alphabet should be DNA, RNA or Protein.') return False theta_type = 1 if args.method in const.METHODS_AC: theta_type = 1 elif args.method in const.METHODS_CC: theta_type = 2 elif args.method in const.METHODS_ACC: theta_type = 3 else: print("Method error!") # ACC. if args.e is None and len(ind_list) == 0 and args.a is False: # Default Pse. res = acc(f, k, args.lag, default_e, alphabet_list, extra_index_file=args.e, all_prop=args.a, theta_type=theta_type) else: res = acc(f, k, args.lag, ind_list, alphabet_list, extra_index_file=args.e, all_prop=args.a, theta_type=theta_type) write_to_file(res, output_format, label, output_file) if args.method.upper() in ['MAC', 'GAC', 'NMBAC']: if args.lamada < 0 or args.lamada > 10: print( 'The value of lamada should be larger than 0 and smaller than 10.' 
) return False if args.a is None: args.a == False elif args.alphabet == 'DNA': args.alphabet = index_list.DNA if args.oli == 0: if args.a == True: for input_file, output_file, label in zip( file_list, outputfile_list, label_list): res = autocorrelation(autoc=args.method, inputfile=input_file, props=const.ALL_DI_DNA_IND, k=2, l=args.lamada, alphabet=args.alphabet) write_to_file(res, output_format, label, output_file) elif args.a == False: for input_file, output_file, label in zip( file_list, outputfile_list, label_list): res = autocorrelation(autoc=args.method, inputfile=input_file, props=const.DEFAULT_DI_DNA_IND, k=2, l=args.lamada, alphabet=args.alphabet) write_to_file(res, output_format, label, output_file) if args.oli == 1: if args.a == True: for input_file, output_file, label in zip( file_list, outputfile_list, label_list): res = autocorrelation(autoc=args.method, inputfile=input_file, props=const.ALL_TRI_DNA_IND, k=3, l=args.lamada, alphabet=args.alphabet) write_to_file(res, output_format, label, output_file) elif args.a == False: for input_file, output_file, label in zip( file_list, outputfile_list, label_list): res = autocorrelation(autoc=args.method, inputfile=input_file, props=const.DEFAULT_TRI_DNA_IND, k=3, l=args.lamada, alphabet=args.alphabet) write_to_file(res, output_format, label, output_file) elif args.alphabet == 'RNA': args.alphabet = index_list.RNA if args.a == True: for input_file, output_file, label in zip( file_list, outputfile_list, label_list): res = autocorrelation(autoc=args.method, inputfile=input_file, props=const.ALL_RNA_IND, k=2, l=args.lamada, alphabet=args.alphabet) write_to_file(res, output_format, label, output_file) elif args.a == False: for input_file, output_file, label in zip( file_list, outputfile_list, label_list): res = autocorrelation(autoc=args.method, inputfile=input_file, props=const.DEFAULT_RNA_IND, k=2, l=args.lamada, alphabet=args.alphabet) write_to_file(res, output_format, label, output_file) if args.method.upper() == 'PDT': if args.alphabet != 'Protein': print('PDT method is only available for Protein sequences.') return False else: if args.lamada < 1 or args.lamada > 15: print( 'The value of -lamada should be larger than 0 and smaller than 16.' ) return False else: for input_file, output_file, label in zip( file_list, outputfile_list, label_list): res = pdt(input_file, args.lamada) write_to_file(res, output_format, label, output_file) if len(outputfile_list) != 0: for index, output_file in enumerate(outputfile_list): out_with_full_path = os.path.abspath(output_file) if os.path.isfile(out_with_full_path): if index == 0: print('The output file(s) can be found here:') print(out_with_full_path)
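# Hypothetical invocation sketch for the autocorrelation main() above: it builds
# an argparse-style Namespace with the attributes the function reads. The file
# name, method name, and parameter values here are illustrative assumptions,
# not taken from the original project.
from argparse import Namespace

example_args = Namespace(inputfiles=['example_dna.fasta'], labels=['+1'],
                         f='svm', out=None, method='DAC', alphabet='DNA',
                         i=None, e=None, a=False, lag=2, lamada=1, oli=0)
main(example_args)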
def set_running_time(self, t):
    """ Sets the total time this bot has been running. """
    util.write_to_file(self.running_time_file, str(int(t)))
def _apt_setup(self, text, mnt_pt):
    filename = "%s/etc/apt/sources.list" % mnt_pt
    util.write_to_file(filename, "w", text)
    util.run_command("chroot %s /usr/bin/apt-get update" % mnt_pt)
def kill_nash_hotplug(self, mnt_pt):
    filename = "%s/etc/rc2.d/S05kill-nash-hotplug" % mnt_pt
    util.write_to_file(filename, "w", "pkill nash-hotplug\n")
    os.chmod(filename, 0777)
def process_file(self):
    self.generate_ann_list()
    result_file = os.path.join(self.result_dir, 'format_' + self.result_fname + '.txt')
    util.write_to_file(result_file, self.word_list, self.pos_list, self.ann_list)
def main(args):
    file_list = args.inputfiles
    outputfile_list = []
    label_list = args.labels
    output_format = args.f
    if len(file_list) == 0:
        print('Input files not found.')
        return False
    if output_format == 'svm' and len(label_list) == 0:
        print('The labels of the input files should be set.')
        return False
    if output_format == 'svm' and len(file_list) != len(label_list):
        print('The number of labels should be the same as that of the input files.')
        return False

    if args.out is not None:
        outputfile_list = args.out
        if len(outputfile_list) != len(file_list):
            print('The number of output files should be the same as that of input files.')
            return False
    elif args.out is None:
        outputfile_list = []
        if output_format == 'svm':
            for in_file_name in file_list:
                file_elem_list = list(os.path.splitext(in_file_name))
                out_name = file_elem_list[0] + '_svm' + file_elem_list[1]
                outputfile_list.append(out_name)
        elif output_format == 'tab':
            for in_file_name in file_list:
                file_elem_list = list(os.path.splitext(in_file_name))
                out_name = file_elem_list[0] + '_tab' + file_elem_list[1]
                outputfile_list.append(out_name)
        elif output_format == 'csv':
            for in_file_name in file_list:
                file_elem_list = list(os.path.splitext(in_file_name))
                out_name = file_elem_list[0] + '_csv' + file_elem_list[1]
                outputfile_list.append(out_name)

    if output_format != 'svm':
        label_list = [0] * len(file_list)

    for input_file, output_file, label in zip(file_list, outputfile_list, label_list):
        with open(input_file) as f:
            # Get index_list.
            if args.i is not None:
                ind_list = read_index(args.i)
            else:
                ind_list = []

            default_e = []
            # Set Pse default index_list.
            if args.alphabet == 'DNA':
                alphabet_list = index_list.DNA
                if args.k == 2:
                    default_e = const.DI_INDS_6_DNA
                elif args.k == 3:
                    default_e = const.TRI_INDS_DNA
            elif args.alphabet == 'RNA':
                alphabet_list = index_list.RNA
                default_e = const.DI_INDS_RNA
            elif args.alphabet == 'Protein':
                alphabet_list = index_list.PROTEIN
                default_e = const.INDS_3_PROTEIN

            theta_type = 1
            if args.method in const.THETA_1_METHODS:
                theta_type = 1
            elif args.method in const.THETA_2_METHODS:
                theta_type = 2
            elif args.method == 'PseKNC':
                theta_type = 3
            else:
                print("Method error!")

            # PseKNC.
            if args.method != 'PseKNC':
                if args.e is None and len(ind_list) == 0 and args.a is False:
                    # Default Pse.
                    res = pseknc(f, args.k, args.w, args.lamada, default_e, alphabet_list,
                                 extra_index_file=args.e, all_prop=args.a,
                                 theta_type=theta_type)
                else:
                    res = pseknc(f, args.k, args.w, args.lamada, ind_list, alphabet_list,
                                 extra_index_file=args.e, all_prop=args.a,
                                 theta_type=theta_type)
            # iPseKNC.
            else:
                if args.e is None and len(ind_list) == 0 and args.a is False:
                    # Default iPse.
                    res = ipseknc(f, args.k, args.w, args.lamada, const.DI_INDS_6_DNA,
                                  alphabet_list, extra_index_file=args.e, all_prop=args.a)
                else:
                    res = ipseknc(f, args.k, args.w, args.lamada, ind_list, alphabet_list,
                                  extra_index_file=args.e, all_prop=args.a)
        write_to_file(res, output_format, label, output_file)

    if len(outputfile_list) != 0:
        for index, output_file in enumerate(outputfile_list):
            out_with_full_path = os.path.abspath(output_file)
            if os.path.isfile(out_with_full_path):
                if index == 0:
                    print('The output file(s) can be found here:')
                print(out_with_full_path)
def main(args):
    # TODO: args.method will be finished
    # TODO: args.inputfile, name
    file_list = args.inputfiles
    outputfile_list = []
    label_list = args.labels
    output_format = args.f
    if len(file_list) == 0:
        print 'Input files not found.'
        return False
    if output_format == 'svm' and len(label_list) == 0:
        print 'The labels of the input files should be set.'
        return False
    if output_format == 'svm' and len(file_list) != len(label_list):
        print 'The number of labels should be the same as that of the input files.'
        return False

    if args.out is not None:
        outputfile_list = args.out
        if len(outputfile_list) != len(file_list):
            print 'The number of output files should be the same as that of input files.'
            return False
    elif args.out is None:
        outputfile_list = []
        if output_format == 'svm':
            for in_file_name in file_list:
                file_elem_list = list(os.path.splitext(in_file_name))
                out_name = file_elem_list[0] + '_svm' + file_elem_list[1]
                outputfile_list.append(out_name)
        elif output_format == 'tab':
            for in_file_name in file_list:
                file_elem_list = list(os.path.splitext(in_file_name))
                out_name = file_elem_list[0] + '_tab' + file_elem_list[1]
                outputfile_list.append(out_name)
        elif output_format == 'csv':
            for in_file_name in file_list:
                file_elem_list = list(os.path.splitext(in_file_name))
                out_name = file_elem_list[0] + '_csv' + file_elem_list[1]
                outputfile_list.append(out_name)

    if output_format != 'svm':
        label_list = [0] * len(file_list)

    if args.alphabet == "RNA":
        for input_file, output_file, label in zip(file_list, outputfile_list, label_list):
            if args.method.upper() == 'TRIPLET':
                res = get_triplet_matrix(input_file)
                write_to_file(res, output_format, label, output_file)
            elif args.method.upper() == 'PSESSC':
                if args.k is None:
                    print "parameter k is required. The default value of k is 2."
                    args.k = 2
                if args.r is None:
                    print "parameter r is required. The default value of r is 2."
                    args.r = 2
                if args.w is None:
                    print "parameter w is required. The default value of w is 0.1."
                    args.w = 0.1
                res = get_psessc_matrix(input_file, args.k, args.r, args.w)
                write_to_file(res, output_format, label, output_file)
            elif args.method.upper() == 'PSEDPC':
                if args.n is None:
                    print "parameter n is required. The default value of n is 0."
                    args.n = 0
                if args.r is None:
                    print "parameter r is required. The default value of r is 2."
                    args.r = 2
                if args.w is None:
                    print "parameter w is required. The default value of w is 0.1."
                    args.w = 0.1
                res = get_psedpc_matrix(input_file, args.n, args.r, args.w)
                write_to_file(res, output_format, label, output_file)
            else:
                print("Method error!")
    else:
        print("sequence type error!")

    if len(outputfile_list) != 0:
        for index, output_file in enumerate(outputfile_list):
            out_with_full_path = os.path.abspath(output_file)
            if os.path.isfile(out_with_full_path):
                if index == 0:
                    print 'The output file(s) can be found here:'
                print out_with_full_path