def split_list(items, size):
    """Split *items* into *size* roughly equally sized pieces.

    Piece boundaries are placed at ``round(i * len(items) / size)``, so the
    resulting slices concatenate back to *items* and their lengths differ by
    at most one.

    Ported to Python 3 (print statements -> print()); note that Python 3
    ``round()`` uses banker's rounding, so boundaries landing exactly on .5
    may differ by one element from the Python 2 original.

    The parameter was renamed from ``list`` (which shadowed the builtin) to
    ``items``; positional callers are unaffected.

    :param items: sequence supporting ``len()`` and slicing
    :param size: number of pieces to produce (must be > 0)
    :return: list of ``size`` slices of *items*
    """
    splitsize = 1.0 / size * len(items)
    return [
        items[int(round(i * splitsize)):int(round((i + 1) * splitsize))]
        for i in range(size)
    ]


def main():
    """Read Flightroutes.csv and build the all_flights record list."""
    # NOTE(review): DataUtility is not imported anywhere in this fragment —
    # presumably `from DataUtility import DataUtility` sits above this chunk;
    # confirm against the full file.
    d = DataUtility()

    print('Reading the csv file...')
    csv_content = d.read_csv_file('Flightroutes.csv')
    print('Read {0} lines of data.'.format(len(csv_content)))

    all_flights = []
    # Original iterated with enumerate() but never used the index — dropped.
    for item in csv_content:
        all_flights.append({
            'startPosition': {
                'longitude': float(item['srcLongitude']),
                'latitude': float(item['srcLatitude']),
                'airportCode': item['srcAirport'],
            },
            'endPosition': {
                'longitude': float(item['destLongitude']),
                'latitude': float(item['destLatitude']),
                # NOTE(review): the source was truncated at this point; the
                # remainder of this entry is reconstructed by symmetry with
                # startPosition — confirm against the original file.
                'airportCode': item['destAirport'],
            },
        })
    return all_flights


if __name__ == '__main__':
    main()
# NOTE(review): this line is a whitespace-mangled Python 2 fragment that
# begins MID-LOOP — the `for` header binding `result`, and the definitions of
# `text`, `user`, `csvwriter`, `csvfile`, `outfile` and `result_count`, are
# all outside this view. It cannot be safely re-indented (it is unknowable
# which statements belong inside the loop body) or ported without the
# enclosing context, so the code is left byte-identical.
# What the visible code does: flattens newlines in the tweet text, reads
# latitude/longitude from result["geo"]["coordinates"], writes one CSV row
# per result and counts it, remembers the last seen id; then — presumably
# after the loop, TODO confirm — reports the count, closes the CSV file,
# re-reads it via DataUtility.read_csv_file, and dumps the rows to data.json.
# Also note the first `#` below comments out the entire rest of this physical
# line in its current collapsed form — further evidence of a mangled paste.
text = text.replace('\n',' ') # text = cgi.escape(text).encode('ascii','xmlcharrefreplace') latitude = result["geo"]["coordinates"][0] longitude = result["geo"]["coordinates"][1] tweetid = result["id"] created_at = result["created_at"] #write in csv row = [tweetid, user, text, latitude, longitude, created_at] csvwriter.writerow(row) result_count += 1 last_id = result["id"] print "got %d results" % result_count csvfile.close() print "written to %s" % outfile d = DataUtility() twitterjsondata = d.read_csv_file(outfile) with open('data.json', 'w') as fp: json.dump(twitterjsondata, fp)
"""Demo script: exercise DataUtility's CSV/XML/JSON readers and JSON writer.

Ported to Python 3 (print statements -> print()) and re-expanded from a
whitespace-collapsed paste; the inline comments were restored to their own
lines. Reads fixture files from example-data/ and writes output.json there.
"""
from DataUtility import DataUtility

d = DataUtility()

# Read an example CSV file
csv_file = d.read_csv_file('example-data/test.csv')
print(csv_file)

# Access a specific row and key
print(csv_file[0]['author'])

# Read an example XML file
xml_file = d.read_xml_file('example-data/test.xml')
print(xml_file)

# Access a specific entry from the XML tree
print(xml_file['cities']['stadt']['stadt'][1]['name'])

# Read an example JSON file
json_file = d.read_json_file('example-data/test.json')
print(json_file)

# Access a specific entry from the JSON tree
print(json_file['globe']['radius'])

# Write one of the dictionaries to a new JSON file
d.write_json_file(csv_file, 'example-data/output.json')