self.reco_type_mow[i], self.SourceLink, self.status, self.curated, self.raw])
            # NOTE(review): the opening `try:` and the start of the append call
            # above are outside this chunk -- indentation below is reconstructed.
            # Build the two secondary CSV payloads row-by-row: firms only, and
            # (company name, firm) pairs, one entry per parsed ticker.
            for i in range(0, len(self.ticker)):
                Csv_Data1.append([self.firm[i]])
            for i in range(0, len(self.ticker)):
                Csv_Data2.append([self.Company_Name[i], self.firm[i]])
        except Exception, e:
            # NOTE(review): broad catch that only prints -- any failure while
            # assembling the CSV rows is swallowed and processing continues.
            print str(e)


if __name__ == '__main__':
    # Start the controller: ask for the raw feed file, split it into
    # numbered chunk files, then run one FeedsParser pass per "*.1" chunk.
    filename = raw_input("Enter the Filename you want to parse: " )
    ## dividing the input file into multiple files
    make_files = divide_files.MakeFiles(filename)
    make_files.divide_into_files()
    files = make_files.count_files / 2 ## counting the number of the *.1 files
    # Create instances of Feed Parser class
    for i in xrange(files):
        # Integer progress percentage (Python 2 integer division).
        print "Total Complete " + str(i * 100 / files) + '%'
        # Chunk files are named "<n>.1" starting at "2.1".
        index = str(i + 2) + ".1"
        FParser = FeedsParser(index)
        # Skip chunks that fail the feed filter entirely.
        if (not (FParser.Filter_Feeds())):
            continue
        FParser.Clean_Ptags()
        FParser.Extract_Date()
        FParser.Extract_Ticker()
        FParser.Extract_Stock_Price()
        FParser.Extract_SourceLink()
def Make_file_for_source_code( self, Ticker ): ### storing the source code of the website into a file named "RecoAction_orig.txt" fo = open("RecoAction_orig.txt", "w") string1 = "http://www.benzinga.com/stock/" string2 = Ticker string3 = "/ratings" source_code = urllib2.urlopen(string1 + string2 + string3).read() fo.write(source_code) fo.close() if __name__ == "__main__": # Start the controller filename = raw_input("Enter the Filename you want to parse: ") make_files = divide_files.MakeFiles( filename) ## dividing the input file into multiple files make_files.divide_into_files() files = make_files.count_files / 2 ## counting the number of the *.1 files # Create instances of Feed Parser class for i in xrange(files): print "Total Complete " + str(i * 100 / files) + '%' index = str(i + 2) + ".1" FParser = FeedsParser(index) FParser.Extract_Date() FParser.Extract_Ticker() FParser.Extract_Stock_Price() FParser.Extract_Headline() FParser.Extract_SourceLink() FParser.Extract_Analyst() FParser.Output_of_RecoAction_orig_and_RecoType_orig()