def main():
    """Collect a sample of YouTube video stats, parse them, and compute average views.

    Relies on the module-level ``youtube`` and ``TitleParser`` imports;
    progress messages go to stdout.
    """
    # Sample size 100 — assumes grabYouTubeSample returns a sized iterable
    # of stat records; TODO confirm against the youtube module.
    stats = youtube.grabYouTubeSample(100)
    # Python 3 print() calls: the rest of this file uses py3-only
    # open(..., encoding=..., newline=...), so py2 print statements
    # were a syntax error here.
    print(len(stats))
    print(stats)
    print("FINISHED COLLECTION")
    print("STARTING PARSING")
    TitleParser.parse_videos(stats)
    print("COMPUTING AVERAGES")
    TitleParser.compute_average_views()
    print("COMPLETE")
# NOTE(review): the first statements below are the tail of fixViewCount(),
# whose def header and earlier if/elif branches start before this chunk;
# the indentation here is reconstructed and should be verified.
            row['viewCount'] = '1,000,000+'
        data.append(row)
    # Write the re-bucketed rows out to a "Fixed<name>" copy of the CSV.
    with open("Fixed" + readFile, 'w', encoding='utf8', newline='') as output_file:
        keys = data[0].keys()
        writer = csv.DictWriter(output_file, keys)
        writer.writeheader()
        writer.writerows(data)

def fixNullByte():
    """Rewrite YouTubeData5.csv as NoNullYouTubeData5.csv with NUL bytes removed.

    Reads the whole file into memory and strips '\\x00' characters —
    presumably a one-off cleanup so the CSV parses; confirm with callers.
    """
    readFile = 'YouTubeData5.csv'
    data = ''
    with open(readFile) as fd:
        data = fd.read()
    with open("NoNull" + readFile, 'w') as fo:
        fo.write(data.replace('\x00', ''))

# One-off data-cleanup passes, left disabled after the data was fixed.
#fixNullByte()
#fixViewCount()

# Script entry: sample videos, parse them, then emit per-title,
# per-description, and per-tag average-view CSVs.
s = grabYouTubeSample()
TitleParser.parse_videos(s)
TitleParser.compute_average_views()
TitleParser.gen_compute_average_views(TitleParser.description_dict, 'DescriptionData.csv')
TitleParser.gen_compute_average_views(TitleParser.tag_dict, 'TagData.csv')
# NOTE(review): the first statements below are the tail of fixViewCount(),
# whose def header, CSV-reading loop, and earlier if-branch start before
# this chunk; the indentation here is reconstructed and should be verified.
# NOTE(review): this chunk largely duplicates the previous one — looks like
# two overlapping extracts of the same file; confirm against the original.
            row['viewCount'] = '10,001-100,000'
        # NOTE(review): viewCount is compared numerically here and then
        # overwritten with a bucket-label string — presumably it was cast to
        # int earlier in the (unseen) part of the loop; verify upstream.
        elif row['viewCount'] <= 1000000:
            row['viewCount'] = '100,001-1,000,000'
        else:
            row['viewCount'] = '1,000,000+'
        data.append(row)
    # Write the re-bucketed rows out to a "Fixed<name>" copy of the CSV.
    with open("Fixed"+readFile,'w',encoding='utf8',newline='') as output_file:
        keys = data[0].keys()
        writer = csv.DictWriter(output_file, keys)
        writer.writeheader()
        writer.writerows(data)

def fixNullByte():
    """Rewrite YouTubeData5.csv as NoNullYouTubeData5.csv with NUL bytes removed.

    Reads the whole file into memory and strips '\\x00' characters —
    presumably a one-off cleanup so the CSV parses; confirm with callers.
    """
    readFile = 'YouTubeData5.csv'
    data = ''
    with open(readFile) as fd:
        data = fd.read()
    with open("NoNull"+readFile, 'w') as fo:
        fo.write(data.replace('\x00', ''))

# One-off data-cleanup passes, left disabled after the data was fixed.
#fixNullByte()
#fixViewCount()

# Script entry: sample videos, parse them, then emit per-title,
# per-description, and per-tag average-view CSVs.
s = grabYouTubeSample()
TitleParser.parse_videos(s)
TitleParser.compute_average_views()
TitleParser.gen_compute_average_views(TitleParser.description_dict, 'DescriptionData.csv')
TitleParser.gen_compute_average_views(TitleParser.tag_dict, 'TagData.csv')