class _Pollutant: def __init__(self, nc_path, csv_path, pol_name): ''' :param nc_path: netcdf file path :param csv_path: csv file path or None ''' self.nc = NcPollutantReader(nc_path, pol_name) self.csv = None if csv_path is not None: self.csv = CsvReader(csv_path) def getModeled(self, datetime): return self.nc.getRasterAtDateTime(datetime) def getMeasured(self, datetime, station): if self.csv is None: return None return self.csv.getConcentration(datetime, station) def getMeasuredForDay(self, datetime, station): if self.csv is None: return None return self.csv.getConcentrationsForDay(datetime, station) def getMaxDate(self): return self.nc.getMaxDate() def getMinDate(self): return self.nc.getMinDate()
def __init__(self, nc_path, csv_path, pol_name):
    """
    :param nc_path: netcdf file path
    :param csv_path: csv file path or None
    :param pol_name: pollutant name passed to the netCDF reader
    """
    self.nc = NcPollutantReader(nc_path, pol_name)
    self.csv = CsvReader(csv_path) if csv_path is not None else None
def test_division_method_calculator(self):
    """Verify division and the stored result against every fixture row."""
    rows = CsvReader('/src/Unit Test Division.csv').data
    for record in rows:
        expected = float(record['Result'])
        self.assertEqual(
            self.calculator.division(record['Value 1'], record['Value 2']),
            expected)
        self.assertEqual(self.calculator.result, expected)
def test_multiply_method_calculator(self):
    """Verify multiplication and the stored result against every fixture row."""
    rows = CsvReader('/src/Unit Test Multiplication.csv').data
    for record in rows:
        expected = int(record['Result'])
        self.assertEqual(
            self.calculator.multiply(record['Value 1'], record['Value 2']),
            expected)
        self.assertEqual(self.calculator.result, expected)
def test_subtract_method_calculator(self):
    """Verify subtraction and the stored result against every fixture row."""
    rows = CsvReader('/src/Unit Test Subtraction.csv').data
    for record in rows:
        expected = int(record['Result'])
        self.assertEqual(
            self.calculator.subtract(record['Value 1'], record['Value 2']),
            expected)
        self.assertEqual(self.calculator.result, expected)
def test_good_csv():
    """Open a valid CSV and print its header followed by every data row."""
    with CsvReader('../resources/good.csv') as handle:
        rows = handle.getdata()
        print(handle.getheader())
        for row in rows:
            print(row)
def setUp(self) -> None:
    # NOTE(review): every assignment below overwrites the previous one, so
    # only the last reader ('Unit Test Square Root.csv') survives as
    # self.csv_reader when the tests run — presumably the earlier lines are
    # leftovers or should use distinct attribute names; confirm intent.
    self.csv_reader = CsvReader('Unit Test Addition.csv')
    self.csv_reader = CsvReader('Unit Test Subtraction.csv')
    self.csv_reader = CsvReader('Unit Test Division.csv')
    self.csv_reader = CsvReader('Unit Test Multiplication.csv')
    self.csv_reader = CsvReader('Unit Test Square.csv')
    self.csv_reader = CsvReader('Unit Test Square Root.csv')
def main():
    """Run k-means on the solar-system census data set and plot the clusters.

    Exits with an error message when the CSV file cannot be read.
    """
    km = KmeansClustering(max_iter=100)
    with CsvReader('../resources/solar_system_census.csv') as file:
        # fixed: compare with None using 'is', not '==' (PEP 8)
        if file is None:
            exit("File is corrupted")
        dataset = np.array(file.getdata(), dtype='float')
        km.fit(dataset)
        print(km.predict(dataset))
        # enough colors for any reasonable number of clusters
        colors = 10 * ["r", "g", "c", "b", "k"]
        # mark each centroid with an 'x'
        for centroid in km.centroids:
            plt.scatter(centroid[0], centroid[1], s=130, marker="x")
        # plot every point colored by the cluster it was assigned to
        for classification in range(len(km.classes)):
            color = colors[classification]
            for citiziens in km.classes[classification]:
                plt.scatter(citiziens[0], citiziens[1], color=color, s=30)
        plt.show()
def test_sqrt_method_calculator(self):
    """Verify sqrt and the stored result against every fixture row."""
    rows = CsvReader('/src/Unit Test Square Root.csv').data
    for record in rows:
        expected = float(record['Result'])
        self.assertEqual(self.calculator.sqrt(record['Value 1']), expected)
        self.assertEqual(self.calculator.result, expected)
#import panda as pd import numpy as np import sys sys.path.insert(1, '../ex03') from mylinearregression import MyLinearRegression from csvreader import CsvReader import matplotlib.pyplot as plt # 1: import data with CsvReader("are_blue_pills_magics.csv", header=True, skip_top=0, skip_bottom=0) as csv_file: data = np.array(csv_file.getdata(), float) Xpill = data[:, 1:2] Yscore = data[:, 2:3] # 2: perform fit Xpill_ = (Xpill - 3) / 6 tr = MyLinearRegression([0, 0]) print(tr.fit_(Xpill_, Yscore, 2, 1000)) #print(tr.predict_(Xpill)) #print(tr.cost_(Xpill, Yscore)) # 3: check the MSE #linear_model1 = MyLinearRegression(np.array([[89.0], [-8]])) #linear_model2 = MyLinearRegression(np.array([[89.0], [-6]])) #Y_model1 = linear_model1.predict_(Xpill)
# This file is for reading csv file and send json api for the application app = Flask(__name__) CORS(app) return_default = { 'return_code': '200', 'return_info': 'success', 'result': False } coins = [ 'tezos', 'bitcoin', 'bnb', 'bitcoin-cash', 'cardano', 'eos', 'ethereum', 'litecoin', 'stellar', 'tether', 'xrp' ] file_reader = CsvReader() @app.route("/", methods=["GET"]) def getData(): result = [] for coin in coins: # get latest record for the coin latest = file_reader.getLatest(coin) # get 1 day price change percentage for the coin one_day_change = file_reader.getDayChange(coin, 1) # get 7 day price change percentage for the coin seven_day_change = file_reader.getDayChange(coin, 7) # get 30 day price change percentage for the coin one_month_change = file_reader.getDayChange(coin, 30) row_for_coin = {}
#end def #end def def fetch_kanji(self, kanji): encoded_char = urllib.parse.quote(kanji) image_url = WikicommonsResource.get_imageurl(encoded_char + '-j' + self.imgtype + '.' + self.filetype) # check for japan-specific file if image_url != None: print('Found Japan-specific image for kanji ' + kanji + '!') self._save_image(image_url) else: image_url = WikicommonsResource.get_imageurl(encoded_char + '-' + self.imgtype + '.' + self.filetype) if image_url != None: self._save_image(image_url) else: print('Cannot download image for kanji ' + kanji) #end if #end if #end def #end class hiraganas = HiraganaResource('/home/heartdisease/Documents/hiragana_animated/', 'stroke_order_animation', 'gif') katakanas = KatakanaResource('/home/heartdisease/Documents/katakana_animated/', 'stroke_order_animation', 'gif') kanjis = KanjiResource('/home/heartdisease/Documents/kanji_red/', 'red', 'png') # 'order', 'red' #hiraganas.fetch_hiraganas(HiraganaResource.get_hiraganas()) #katakanas.fetch_katakanas(KatakanaResource.get_katakanas()) kanjilist = CsvReader(',', '"') kanjis.fetch_kanjis([row[0] for row in kanjilist.parse('csv/kanji80.csv')])
# optional met sensor address/parameters from argv; only defaulted to None
# when no config object is in use
if len(sys.argv) > 8:
    met_addr = sys.argv[8]
elif not config:
    met_addr = None
if len(sys.argv) > 9:
    met_par = sys.argv[9]
elif not config:
    met_par = None
# load input data set
coo_filt = ['id', 'east', 'north', 'elev']
# pick readers by extension: GeoEasy (.geo/.coo) vs CSV dump (.dmp/.csv)
if ifname[-4:] in ('.geo', '.coo'):
    g = GeoReader(fname=ifname[:-4] + '.geo')
    f = GeoReader(fname=ifname[:-4] + '.coo', filt=coo_filt)
else:
    g = CsvReader(fname=ifname[:-4] + '.dmp')
    f = CsvReader(fname=ifname[:-4] + '.csv', filt=coo_filt)
directions = g.Load()
coordinates = f.Load()
# writers
if ofname[-4:] == '.dmp' or ofname[-4:] == '.csv' or ofname == 'stdout':
    # dmp/csv file or console output
    if ofname[-4:] == '.dmp' or ofname[-4:] == '.csv':
        ofname1 = ofname[:-4] + '.dmp'
        ofname2 = ofname[:-4] + '.csv'
    else:
        ofname1 = ofname2 = ofname
    # appends observations as DMS angles with 4-decimal distances
    dmp_wrt = CsvWriter(angle = 'DMS', dist = '.4f', \
        filt = ['station', 'id','hz','v','distance', 'datetime'], \
        fname = ofname1, mode = 'a', sep = ';')
best = i + 1 #print(best) return best if __name__ == "__main__": # constants data = None citizenships = [ "The flying cities of Venus", "United Nations of Earth", "Mars Republic", "Asteroids' Belt colonies" ] # Init k = KmeansClustering(max_iter=10, ncentroid=len(citizenships)) with CsvReader("../assets/solar_system_census.csv", header=True) as f: data = np.array(f.getdata()) workbench = np.array([list(elem.values()) for elem in data.copy()])[:, 1:].astype(np.float) # Fit k.fit(workbench, plot=True) # Prediction y_pred = k.predict(workbench) print(y_pred, y_pred.shape) # Reformatting res = list(data) for i, y in enumerate(y_pred): res[i]['citizenship'] = citizenships[y]
from csvreader import CsvReader

if __name__ == "__main__":
    # Open a CSV and dump its header and data; CsvReader yields None for a
    # corrupted file.
    with CsvReader('good.csv') as file:
        # fixed: identity comparison with None uses 'is', not '==' (PEP 8)
        if file is None:
            print("File is corrupted")
        else:
            data = file.getdata()
            header = file.getheader()
            print(header)
            print(data)
def test_file_not_found():
    """A missing/corrupted CSV should make the context manager yield None."""
    with CsvReader('bad.csv') as handle:
        if handle is None:
            print('File is corrupted')
def test_square_root_using_csv(self):
    """Check calculator.root against each fixture row, to 8 decimal places."""
    for row in CsvReader('src/root.csv').data:
        actual = round(calculator.root(int(row['Value 1'])), 8)
        self.assertEqual(actual, round(float(row['Result']), 8))
    # (reached when required arguments are missing — the opening of this
    # branch is outside the visible chunk)
    print(
        "Usage: freestation.py input_file gama_path station_id station_height"
    )
    sys.exit(-1)
# default to GeoEasy format when no known extension was given
if fname[-4:] not in ['.geo', '.coo', '.dmp', '.csv']:
    fname += '.geo'
if len(sys.argv) > 2:
    gama_path = sys.argv[2]
else:
    gama_path = '/home/siki/GeoEasy/gama-local'
# load observations and coordinates
fn = fname[:-4]    # remove extension
ext = fname[-4:]
# observation reader: GeoEasy .geo vs CSV .dmp
if ext in ['.geo', '.coo']:
    obs = GeoReader(fname=fn + '.geo')
else:
    obs = CsvReader(fname=fn + '.dmp')
# load observations
observations = obs.Load()
# load coordinates and add to adjustment
if ext in ['.geo', '.coo']:
    coo = GeoReader(fname=fn + '.coo')
else:
    coo = CsvReader(fname=fn + '.csv')
n = 0       # number of points
st = False  # station found
coords = coo.Load()
f = Freestation(observations, coords, gama_path)
print(f.Adjustment())
def test_bad_csv():
    """A corrupted CSV should make the context manager yield None."""
    with CsvReader('../resources/bad.csv') as handle:
        if handle is None:
            print('File is corrupted')
from csvreader import CsvReader

if __name__ == "__main__":
    # Print every row of a valid CSV; report corruption otherwise.
    with CsvReader('resources/good.csv', header=True) as file:
        # fixed: 'is None' instead of '== None', and the reads are now
        # guarded by an else — previously file.getdata() ran even when the
        # reader yielded None, raising AttributeError on a corrupted file
        if file is None:
            print("File is corrupted")
        else:
            data = file.getdata()
            header = file.getheader()
            for i in data:
                print(i)

if __name__ == "__main__":
    with CsvReader('resources/bad.csv') as file:
        if file is None:
            print("File is corrupted")
        # (fragment — the enclosing function/loop starts outside this chunk)
        # pick the centroid that is neither the most slender nor the tallest
        # but is beaten on slenderness by one neighbor and on height by the
        # other: presumably the "Earth" cluster — TODO confirm
        if (cent_cpy[i][3] != slender_min and cent_cpy[i][0] != height_max
                and ((cent_cpy[((i + 1) % 3)][3] < cent_cpy[i][3]
                      and cent_cpy[((i + 2) % 3)][0] > cent_cpy[i][0])
                     or (cent_cpy[((i + 2) % 3)][3] < cent_cpy[i][3]
                         and cent_cpy[((i + 1) % 3)][0] > cent_cpy[i][0]))):
            earth_height = cent_cpy[i][0]
            cent_cpy.remove(cent_cpy[i])
    print("Belt", Belt)
    # centroids = numpy.delete(centroids, Belt[0], axis=0)
    # print(centroids)
    # Venus = numpy.where(centroids[:,1] == numpy.amin(centroids[:,1]))
    # print(Venus)
    # centroids = numpy.delete(centroids, Venus[0], axis=0)
    # Mars = numpy.where(centroids[:,0] == numpy.amax(centroids[:,0]))
    # centroids = numpy.delete(centroids, Mars[0], axis=0)


if __name__ == "__main__":
    # fit and predict k-means with 4 centroids on the census data
    kmean = KmeansClustering(ncentroid=4)
    with CsvReader('solar_system_census.csv', header=True) as file:
        data = file.getdata()
        npc = NumPyCreator()
        data_array = npc.from_list(data, dtype=float)
        # print(data_array)
        kmean.fit(data_array)
        kmean.predict(data_array)
        pass
#!/usr/bin/python
from csvreader import CsvReader

# if __name__ == "__main__":
#     with CsvReader('good.csv') as file:
#         data = file.getdata()
#         header = file.getheader()
#         print(data)
#         print(header)

# if __name__ == "__main__":
#     with CsvReader('bad.csv') as file:
#         if file == None:
#             print("File is corrupted")

if __name__ == "__main__":
    # positional arguments: path, separator, header flag
    with CsvReader('good.csv', ',', True) as handle:
        rows = handle.getdata()
        head = handle.getheader()
        print(rows)
        print(head)
print("Enough Balance") # encoded_amount=encode_single('uint256', int(actual_amount)) txn_body = self.erc20.buildTransaction( {'from': self.source_addr, 'gas': 100000, 'gasPrice': self.web3.toWei(gas_price, 'gwei'), 'nonce': self.nonce}).transfer(address, int(actual_amount)) signed_txn_body = self.web3.eth.account.signTransaction(txn_body, private_key=self.private_key) self.web3.eth.sendRawTransaction(signed_txn_body.rawTransaction) else: print('代币余额不足!') raise Exception("Not Enough Balance for transfer!") if __name__ == '__main__': if len(sys.argv) != 2: print("Usage: ") print("python3 main.py [CSV_FILE_PATH]") print("Example: ") print("python3 main.py test.csv") exit(0) else: print("Start sending TXs.") batch_list = CsvReader(sys.argv[1]).parse() handler = ContributeTokens(api_endpoint=api_endpoint, contract_address=contract_address, private_key=private_key, source_addr=source_addr) for tx in batch_list: handler.transfer(tx[0], float(tx[1]), gas_price) print("All TX Sent.")
from csvreader import CsvReader

# Read a CSV while skipping the first five and the last data rows.
with CsvReader('good.csv', skip_top=5, skip_bottom=1) as handle:
    # a falsy handle means the reader rejected the file
    if not handle:
        print("File is corrupted")
        exit(1)
    rows = handle.getdata()
    print(*rows, sep="\n")
    head = handle.getheader()
from csvreader import CsvReader

if __name__ == "__main__":
    # CsvReader yields None for a corrupted/unreadable file
    with CsvReader('bad.csv') as file:
        # fixed: identity comparison with None uses 'is', not '==' (PEP 8)
        if file is None:
            print("File is corrupted")
from georeader import GeoReader
from csvreader import CsvReader

logging.getLogger().setLevel(logging.WARNING)
# input file name from the command line (mandatory)
if len(sys.argv) > 1:
    ifname = sys.argv[1]
else:
    #ifname = 'test.geo'
    print("Usage: blindorientation.py input_file totalstation port")
    sys.exit(-1)
# default to GeoEasy extension when none of the known ones was given
if ifname[-4:] != '.dmp' and ifname[-4:] != '.geo':
    ifname += '.geo'
# pick reader by extension
if ifname[-4:] == '.geo':
    g = GeoReader(fname=ifname)
else:
    g = CsvReader(fname=ifname)
data = g.Load()
stationtype = '1100'  # default total station type
if len(sys.argv) > 2:
    stationtype = sys.argv[2]
port = '/dev/ttyUSB0'  # default serial port
if len(sys.argv) > 3:
    port = sys.argv[3]
# select the measure unit driver matching the station type pattern
if re.search('120[0-9]$', stationtype):
    from leicatps1200 import LeicaTPS1200
    mu = LeicaTPS1200()
elif re.search('110[0-9]$', stationtype):
    from leicatcra1100 import LeicaTCRA1100
    mu = LeicaTCRA1100()
elif re.search('180[0-9]$', stationtype):
    # NOTE(review): chunk is truncated here — the instantiation and any
    # further branches are outside the visible span
    from leicatca1800 import LeicaTCA1800
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(1, '../ex03')  # make the ex03 module importable
from mylinearregression import MyLinearRegression
from csvreader import CsvReader

# 1: import data
with CsvReader("spacecraft_data.csv", header = True, skip_top = 0, skip_bottom = 0) as csv_file:
    data = np.array(csv_file.getdata(), float)
#Xage = data[:, 0:1]
Xage = data[:, 0:3]    # three feature columns
Yprice = data[:, 3:4]  # target: sell price

# 2: perform fit — print MSE before and after training to show improvement
#tr = MyLinearRegression([516, -1])
tr = MyLinearRegression([8., -10., 7., -2.])
print(tr.mse_(Xage, Yprice))
print(tr.fit_(Xage, Yprice, 1e-4, 1000))
#print(tr.cost_(Xage, Yprice))
print(tr.mse_(Xage, Yprice))

# 3: print plot — true vs predicted prices against the first feature (age)
plt.plot(Xage[:, 0:1], Yprice, 'bo', label = "Strue")
#plt.plot(Xage, tr.predict_(Xage), 'g')
plt.plot(Xage[:, 0:1], tr.predict_(Xage), 'go', label ="Spredict")
plt.ylabel('sell price')
plt.xlabel('age')
#!/usr/bin/python3
from csvreader import CsvReader
import re

# Compare two versions of a CSV document row by row and report rows whose
# latin column differs or whose translation text was altered.
reader = CsvReader(',', '"')
original = reader.parse('latin.gerald.original.csv')
modified = reader.parse('latin.gerald.modified.csv')

if len(original) != len(modified):
    print('Both documents don\'t have equal number of rows')
    exit(1)
else:
    # whitespace and punctuation ignored when comparing translations
    ignored = r'[ \n\t\.,;:\(\)]'
    for i in range(0, len(original)):
        normalized_original = re.sub(ignored, '', original[i][1].lower())
        combined = (modified[i][1] + modified[i][2]).lower()
        normalized_modified = re.sub(ignored, '', combined)
        if original[i][0] != modified[i][0]:
            print('row', (i + 1), ': latin column does not match (' + original[i][0] + ' <-> ' + modified[i][0] + ')')
        if normalized_original != normalized_modified:
            print('row', (i + 1), ': translation column was altered (' + original[i][1].replace('\n', '\\n') + ' <-> ' + modified[i][1].replace('\n', '\\n') + ' [' + modified[i][2].replace('\n', '\\n') + '])')
    print('Checked documents')
    exit(0)
# Subject tests
# if __name__ == "__main__":
#     with CsvReader('good.csv') as file:
#         data = file.getdata()
#         header = file.getheader()

# if __name__ == "__main__":
#     with CsvReader('bad.csv') as file:
#         if file is None:
#             print("File is corrupted")

# My tests
if __name__ == "__main__":
    # valid file, trimming eight rows from both ends
    with CsvReader('good.csv', header=True, skip_top=8, skip_bottom=8) as handle:
        if handle is None:
            print("File is corrupted")
        else:
            print("getheader: ", handle.getheader())
            print("getdata: ", *handle.getdata(), sep="\n")

if __name__ == "__main__":
    # Should be corrupted
    with CsvReader('bad.csv') as handle:
        if handle is None:
            print("File is corrupted")
        else:
            print("File is ~GOOD~")
# Should be good
# output target: second argument or console
if len(sys.argv) > 2:
    ofname = sys.argv[2]
else:
    ofname = 'stdout'
station_id = None
if len(sys.argv) > 3:
    station_id = sys.argv[3]
station_ih = 0  # instrument height
if len(sys.argv) > 4:
    station_ih = float(sys.argv[4])
# load input data set — GeoEasy .coo vs CSV coordinate list
if ifname[-4:] == '.coo':
    g = GeoReader(fname=ifname, filt=['id', 'east', 'north', 'elev'])
else:
    g = CsvReader(fname=ifname, filt=['id', 'east', 'north', 'elev'])
data = g.Load()
# writer matching the requested output format
if ofname[-4:] == '.geo':
    geo_wrt = GeoWriter(dist = '.4f', angle = 'RAD', fname = ofname, \
        filt = ['station', 'id', 'hz', 'v', 'distance', 'faces', 'ih', \
        'code'], mode = 'w')
else:
    geo_wrt = CsvWriter(dist = '.4f', angle = 'RAD', fname = ofname, \
        header = True, mode = 'w', \
        filt = ['station', 'id', 'hz', 'v', 'distance', 'faces', 'ih', \
        'code'])
og = ObsGen(data, station_id, station_ih)
if og.station_east is None or og.station_north is None or og.station_elev is None:
    # fixed: was a Python 2 print statement ('print "..."'), which is a
    # SyntaxError under Python 3 — converted to the print() function
    print("station coordinates not found: ", station_id)
    exit(-1)
observations = og.run()
def test_square_using_csv(self):
    """Check calculator.square against every fixture row."""
    for row in CsvReader('src/square.csv').data:
        self.assertEqual(calculator.square(int(row['Value 1'])),
                         int(row['Result']))
        # (fragment — the method these prints belong to starts outside this chunk)
        print(self.centroids)
        print('------------')

    def predict(self, X):
        """Predict from wich cluster each datapoint belongs to.
        Args:X: has to be an numpy.ndarray, a matrice of dimension m * n.
        Returns: the prediction has a numpy.ndarray, a vector of dimension m * 1.
        Raises: This function should not raise any Exception."""
        # one bucket per centroid; each sample goes into its nearest
        # centroid's bucket (nearest as defined by self.closest)
        belongs = [[] for _ in range(self.ncentroid)]
        for entry in X:
            belongs[self.closest(entry)].append(entry)
        return belongs


km = KmeansClustering(ncentroid=4)
with CsvReader('solar_system_census.csv', header=True) as citizens:
    if citizens is None:
        print('file is corrupt')
    else:
        data = citizens.get_data()
        header = citizens.get_header()
        # citizens.tabulate()
        npc = np.array(citizens.data).astype('longdouble')
        npc = npc[:, 1:]  # drop the index column
        # print(npc)
        km.fit(npc)
# NOTE(review): the triple-quoted note below is unterminated in this view —
# its closing quotes lie outside the visible span
""" Belt: max height, ? weight, min bone-den | bH, bW, bB
Mars: mH > eH , ? weight, ? bone-den | mH, mW, mB
Erth: eH < mH , eW > vW , ? bone-den | eH, eW, eB
Vnus: ? height, vW < eW , ? bone-den | vH, vW, vB