def writeOutput(rows, ofile):
    """Write the Cybersource output CSV.

    Parameters
    ----------
    rows : dict
        Mapping of key -> row dict; each row dict is written as one CSV row.
        Rows are written in the dict's iteration order.
    ofile : str
        Path of the output file.
    """
    # open_csv is a project helper; given the writeheader()/writerow() calls
    # below it returns a DictWriter-like object configured with the
    # OrderGroove output columns — TODO confirm against its definition.
    # Renamed from `csv` so the stdlib csv module name is not shadowed.
    writer = open_csv(ofile, "w", config.get('OrderGroove', 'outputColumnNames').split(','))
    writer.writeheader()
    # Iterate the values directly instead of keys + per-key lookup.
    for row in rows.values():
        writer.writerow(row)
def get_qa_csv():
    """Sample 250 Tanakh and 250 Talmud citation links and write a QA CSV.

    Reads the unambiguous-links export, draws a random sample of
    segment-level citations from each category, and writes one QA row per
    link: the source snippet, the quoted text, a review URL, and an empty
    column for the reviewer's verdict.
    """
    with open(ROOT + "/unambiguous_links.json", "rb") as fin:
        link_rows = [r for r in unicodecsv.DictReader(fin)]

    def normalize(s):
        # Strip vowels/cantillation first, then remove any HTML tags.
        return re.sub(r"<[^>]+>", "", strip_cantillation(s, strip_vowels=True))

    def sample_links(category):
        # Segment-level citations whose quoted ref is in `category`.
        candidates = [
            r for r in link_rows
            if Ref(r['Quoted Ref']).primary_category == category
            and Ref(r['Quoting Ref']).is_segment_level()
        ]
        return random.sample(candidates, 250)

    # Draw Tanakh first, then Talmud, matching the original sampling order.
    sampled = sample_links("Tanakh") + sample_links("Talmud")

    qa_rows = []
    for r in sampled:
        quoting = Ref(r['Quoting Ref'])
        quoted = Ref(r['Quoted Ref'])
        qa_rows.append({
            "Found Text": normalize(quoted.text("he").ja().flatten_to_string()),
            "Source Text": "...".join(
                get_snippet_by_seg_ref(quoting.text('he'), quoted)),
            "URL": "https://sefaria.org/{}?p2={}".format(
                quoting.url(), quoted.url()),
            "Wrong segment (seg) / Wrong link (link)": ""
        })

    with open(ROOT + "/QA Section Links.csv", "wb") as fout:
        writer = unicodecsv.DictWriter(fout, [
            "Source Text", "Found Text", "URL",
            "Wrong segment (seg) / Wrong link (link)"
        ])
        writer.writeheader()
        writer.writerows(qa_rows)
i. Use the csvwriter to write the `loan.values()` to a row in the CSV file. Hint: Refer to the official documentation for the csv library. https://docs.python.org/3/library/csv.html#writer-objects """ # Set the output header header = [ "loan_price", "remaining_months", "repayment_interval", "future_value" ] # Set the output file path output_path = Path("inexpensive_loans.csv") # @TODO: Use the csv library and `csv.writer` to write the header row # and each row of `loan.values()` from the `inexpensive_loans` list. # YOUR CODE HERE! import csv with open('inexpensive_loans.csv', 'w') as new_file: header = [ "loan_price", "remaining_months", "repayment_interval", "future_value" ] csv_writer = csv.DictWriter(new_file, header=header, delimiter='_\t_') csv_writer = csv.writeheader() for line in csv_writer: print(line)
# Load the kenpom ratings; the whole JSON document sits on the first line
# of the file, so a single readline() is enough.
file = './data/kenpom2018.json'
with open(file) as f:
    jsonString = f.readline()
data = json.loads(jsonString)

# Read the ESPN->NCAA name dictionary and append a 'kenpom' column holding
# the cleaned NCAA name for each row.
fileName = './data/ESPN_NCAA_Dict.csv'
fields = []
records = []
with open(fileName) as f:
    reader = csv.DictReader(f)
    headers = reader.fieldnames
    fields = headers + ['kenpom']
    for row in reader:
        # Use a distinct name (`record`, not `data`) so the loop no longer
        # clobbers the JSON document loaded above.
        record = {h: row[h] for h in headers}
        # cleanName is a project helper; presumably it normalizes the NCAA
        # team name to kenpom's spelling — TODO confirm.
        record['kenpom'] = cleanName(record['NCAA'])
        records.append(record)

# Write the augmented rows out. The writer is named `writer` (not `csv`)
# so the stdlib csv module is not shadowed for any code below.
outCsv = 'nameMap.csv'
with open(outCsv, 'w') as f:
    writer = csv.DictWriter(f, fields)
    writer.writeheader()
    writer.writerows(records)
# NOTE(review): Python 2 code (print statement; len(filter(...)) relies on
# filter returning a list). The start of get_tx_leg() is outside this chunk;
# the lines below are the interior of its scraping loop, reconstructed at a
# plausible indentation — confirm nesting against the full file.
        # Build the record for one representative: the website URL plus the
        # fields scraped from that page by get_tx_rep().
        url = base_urls[body] + link.get('href')
        rep_info = {'Website': url}
        rep_info.update(get_tx_rep(url, body))
        # Skip entries with None values:
        if len(filter(lambda val: val is None, rep_info.values())) > 0:
            continue
        print str(rep_info) + '\n'
        dict_list.append(rep_info)
    return dict_list


if __name__ == '__main__':
    # Scrape all records, then write them to TXLeg.csv under writePath.
    dict_list = get_tx_leg()
    with open(os.path.join(writePath, 'TXLeg.csv'), 'w') as csv_file:
        # NOTE(review): this rebinds the name `csv` from the module to a
        # DictWriter instance; any later use of the csv module in this scope
        # would break. Works here only because nothing below needs the module.
        csv = csv.DictWriter(
            csv_file,
            [
                'District', 'Name', 'Party', 'Website', 'Phone', 'Address',
                'Email', 'Facebook', 'Twitter'
            ],
            restval='',
            lineterminator='\n'
        )
        csv.writeheader()
        for row in dict_list:
            csv.writerow(row)