Example #1
def create_data(path):
    file_utils = FileUtils()
    reader = file_utils.getCsvReader(path)
    data = []
    for row in reader:
        # Convert every column except the last to float; keep the label as-is.
        sub = [float(value) for value in row[:-1]]
        sub.append(row[-1])
        data.append(sub)
    return data
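
Every example here leans on a FileUtils helper that is never shown. The stub below is a minimal sketch of what it plausibly provides, assuming getCsvReader wraps the standard csv.reader and the remaining methods are thin wrappers over open(); the real class may differ.

import csv

class FileUtils:
    """Hypothetical reconstruction of the helper these examples depend on."""

    def getCsvReader(self, path):
        # Assumed: a csv.reader over the file (the reader keeps the handle alive).
        return csv.reader(open(path, newline=''))

    def read_csv(self, path):
        # Assumed: the whole file parsed into a list of rows.
        with open(path, newline='') as f:
            return list(csv.reader(f))

    def read(self, path):
        # Assumed: the raw file contents as a single string.
        with open(path) as f:
            return f.read()

    def save(self, path, data):
        # Assumed: write a text payload to disk.
        with open(path, 'w') as f:
            f.write(data)

With this stub in place, create_data('data.csv') returns rows whose leading columns are parsed to float and whose final column is kept as a string label.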
Example #2
def __init__(self, filename):
    file_utils = FileUtils()
    reader = file_utils.getCsvReader(filename)
    self.headers = next(reader)
    self.label_list = []
    self.feature_list = []
    for row in reader:
        # The last column is the label; columns 1..n-2 become named features
        # (column 0, presumably a row id, is skipped).
        self.label_list.append(row[-1])
        row_dict = {}
        for i in range(1, len(row) - 1):
            row_dict[self.headers[i]] = row[i]
        self.feature_list.append(row_dict)
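
For a small, hypothetical input the loader above would behave as follows (file name and contents invented for illustration):

# Hypothetical usage: given 'weather.csv' containing
#   id,outlook,temp,play
#   1,sunny,hot,no
#   2,rainy,mild,yes
# the constructor produces:
#   self.headers      -> ['id', 'outlook', 'temp', 'play']
#   self.label_list   -> ['no', 'yes']
#   self.feature_list -> [{'outlook': 'sunny', 'temp': 'hot'},
#                         {'outlook': 'rainy', 'temp': 'mild'}]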
Example #3
import urllib.request

# The main block below expects the FileUtils helper to be importable as `fu`;
# the module name here is a guess (see the sketch near the top of this listing).
import file_utils as fu


class Crawler:
    def read(self, url):
        # Fetch the page and decode the response body to text.
        page = urllib.request.urlopen(url, timeout=10)
        return page.read().decode('utf-8')

    # Load one cell from disk via the FileUtils CSV helper.
    def getRecord(self, filename, fileUtils, row, col):
        records = fileUtils.read_csv(filename)
        return records[row][col]

    # Load one cell from disk by reading the raw text and splitting it by hand.
    def getRecordByCSV(self, filename, fileUtils, row, col):
        records = fileUtils.read(filename)
        items = [line.strip().split(' ') for line in records.split('\n')]
        return items[row][col]


if __name__ == '__main__':
    me = Crawler()
    url = "http://www.stats202.com/stats202log.txt"
    data = me.read(url)
    # Raw string so the backslashes in the Windows path are not treated as escapes.
    filename = r'C:\Users\Administrator\Desktop\exp.csv'
    fileUtils = fu.FileUtils()
    # Save the downloaded contents to disk.
    fileUtils.save(filename, data)
    # Print the value at row 0, column 0 of the saved file.
    print(me.getRecord(filename, fileUtils, 0, 0))
    # The same lookup via the raw-read-and-split variant.
    print(me.getRecordByCSV(filename, fileUtils, 0, 0))
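
One caveat worth noting: stats202log.txt is space-separated, while read_csv (at least as sketched above) splits on commas, so the two lookups may not agree. A minimal illustration of the difference:

import csv
import io

# A space-separated line like those in a web log (contents invented).
line = '192.168.0.1 GET /index.html\n'
# csv.reader finds no commas, so the whole line is a single field.
print(next(csv.reader(io.StringIO(line)))[0])   # '192.168.0.1 GET /index.html'
# Splitting on spaces yields individual tokens.
print(line.strip().split(' ')[0])               # '192.168.0.1'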