def read_sparda_ethik(self, infile, is_sparda=True):
    """Parse a Sparda / Ethikbank statement CSV into this statement object.

    Populates ``self.sbal`` (opening balance), ``self.ebal`` (closing
    balance), ``self.entries`` (one BankStatementEntry per transaction row)
    and ``self.iban`` (derived from the BLZ/Konto header rows).

    :param infile: input passed through to ``utils.get_csv``
    :param is_sparda: True for the Sparda layout; the Ethik layout has one
        extra leading column, so all data indexes are shifted by ``r``.
    """
    blz = None           # bank routing number ("Bankleitzahl"), from 'BLZ:' row
    baccount_no = None   # account number, from 'Konto:' row
    # Column offset: Ethik exports are shifted right by one column.
    r = 0 if is_sparda else 1
    for row in utils.get_csv('iso-8859-4', infile, replacenl=is_sparda):
        if not row:
            continue
        if row[0] == 'BLZ:':
            blz = int(row[1])
            continue
        if row[0] == 'Konto:':
            baccount_no = int(row[1])
            continue
        # Transaction rows start with a parseable date; skip everything else.
        date = utils.convert_date4(row[0])
        if not date:
            continue
        if row[9+r] == 'Anfangssaldo':   # opening balance row
            self.sbal = utils.read_float(row[11+r], row[12+r])
            continue
        if row[9+r] == 'Endsaldo':       # closing balance row
            self.ebal = utils.read_float(row[11+r], row[12+r])
            continue
        be = BankStatementEntry(self)
        be.posting_date = date
        be.purpose = row[8+r]
        be.partner = row[3+r]
        be.partner_iban = row[5+r]
        be.amount = utils.read_float(row[11+r], row[12+r])
        be.cleanup()
        self.entries.append(be)
    # Derive our own IBAN once both header values have been seen.
    if blz and baccount_no:
        self.iban = utils.iban_de(blz, baccount_no)
def main():
    """Fit the linear model on data.csv, plot it, and save the parameters."""
    columns = np.transpose(utils.get_csv('data.csv'))
    features, targets = columns[0], columns[1]
    intercept, slope = train(features, targets)
    plot_line(columns, intercept, slope)
    utils.put_csv('parameters.csv', intercept, slope)
def test_items_serializers(
        client,
        item_lib_martigny,  # on shelf
        item_lib_fully,  # on loan
        csv_header,
        json_header,
        rero_json_header,
        patron_martigny,
        librarian_martigny,
        librarian_sion,
        loan_pending_martigny):
    """Test record retrieval."""
    login_user(client, librarian_martigny)

    # JSON item detail: item type is still an unresolved $ref.
    item_url = url_for('invenio_records_rest.item_item',
                       pid_value=item_lib_fully.pid)
    response = client.get(item_url, headers=json_header)
    assert response.status_code == 200
    data = get_json(response)
    assert data['metadata'].get('item_type', {}).get('$ref')

    # Same check for the on-shelf item.
    item_url = url_for('invenio_records_rest.item_item',
                       pid_value=item_lib_martigny.pid)
    response = client.get(item_url, headers=json_header)
    assert response.status_code == 200
    data = get_json(response)
    assert data['metadata'].get('item_type', {}).get('$ref')

    # With resolve=1 the $ref is replaced by the resolved record (has a pid).
    item_url = url_for('invenio_records_rest.item_item',
                       pid_value=item_lib_fully.pid, resolve=1)
    response = client.get(item_url, headers=json_header)
    data = get_json(response)
    assert data['metadata'].get('item_type', {}).get('pid')
    # test if all key exist into response with a value
    for key in ['created', 'updated', 'id', 'links', 'metadata']:
        assert key in data
        assert data[key]

    # RERO JSON list serialization.
    list_url = url_for('invenio_records_rest.item_list')
    response = client.get(list_url, headers=rero_json_header)
    assert response.status_code == 200

    # CSV inventory export: status plus the exact header row.
    list_url = url_for('api_item.inventory_search')
    response = client.get(list_url, headers=csv_header)
    assert response.status_code == 200
    data = get_csv(response)
    assert data
    assert '"pid","document_pid","document_title","document_creator",' \
           '"document_main_type","document_sub_type","library_name",' \
           '"location_name","barcode","call_number","second_call_number",' \
           '"enumerationAndChronology","item_type","temporary_item_type",' \
           '"temporary_item_type_end_date","general_note","staff_note",' \
           '"checkin_note","checkout_note","loans_count","checkout_date",' \
           '"due_date","last_transaction_date","status","created",' \
           '"issue_status","issue_status_date","issue_claims_count",' \
           '"issue_expected_date","issue_regular"' in data
def download_fulfill():
    '''
    Will redirect the user to download the csv file that matches the
    given uid.

    Note: currently not secure, but the contents of the csv are not
    critical.
    '''
    uid = request.args.get('fname')
    # BUG FIX: the original passed the undefined name ``fname`` to
    # utils.get_csv, which raised NameError on every request; the value
    # read from the query string is bound to ``uid``.
    to_serve = utils.get_csv(uid, directory=DIRECTORY)
    return send_file(to_serve)
def institution_process(countries: Dict[str, Country]) -> List[Institution]:
    """Build Institution records from the GRID CSV tables.

    :param countries: mapping of country name -> Country, used to attach
        each institution to its country record.
    :returns: list of populated Institution instances.
    """
    rows = get_row(dbc.GRID_DATABASE_DIR / "institutes.csv")
    row_count = csv_size(dbc.GRID_DATABASE_DIR / "institutes.csv")
    attrs = ["addresses", "acronyms", "aliases", "labels", "links", "types"]

    # Group the GRID data tables by grid_id for better access.
    institution_attrs = [
        get_csv(dbc.GRID_DATABASE_DIR / f"{attr}.csv", "grid_id")
        for attr in attrs
    ]

    institutions_list: List[Institution] = []
    pbar = tqdm(total=row_count)
    for row in rows:
        nullify(row)

        # Get all the data related to the current institution.
        address, acronym, alias, label, link, type = [
            attr.get(row["grid_id"]) for attr in institution_attrs
        ]

        # Create 'soup' variable for fuzzy matching of institutions.
        soup = [row["name"]]

        if address:
            # Only the first address is used; its country links the record.
            country = dbc.country_name_mapper(address[0].pop("country"))
            institution = Institution(**{**row, **address[0]})
            institution.country = countries[country]
            soup.append(country)
        else:
            institution = Institution(**row)

        if acronym:
            institution.acronyms = [Acronym(**i) for i in acronym]
            soup.extend(i["acronym"] for i in acronym)
        if alias:
            institution.aliases = [Alias(**i) for i in alias]
            soup.extend(i["alias"] for i in alias)
        if label:
            institution.labels = [Label(**i) for i in label]
            soup.extend(i["label"] for i in label)
        if link:
            institution.links = [Link(**i) for i in link]
        if type:
            institution.types = [Type(**i) for i in type]

        # Joined name/country/acronym/alias/label text used for fuzzy search.
        institution.soup = " | ".join(i for i in soup)
        institutions_list.append(institution)
        pbar.update()
    pbar.close()

    del institution_attrs  # Free-up memory (~ 10^5 institutions).
    return institutions_list
def run():
    """Collect free-bike counts per station over time and plot them.

    Reads station names from ``stations.csv`` and the time series from
    ``extra/free_bikes_at_station.csv``, then hands everything to ``plot``.
    """
    station_ids_to_plot = None  # e.g. [508, 509, 510, 101, 102]
    stations = {}       # station_id -> list of free-bike counts over time
    timestamps = []     # distinct timestamps, in file order
    last_timestamp = None
    station_names = {}  # station_id -> display name
    for row in get_csv('stations.csv'):
        station_id = int(row['station_id:ID(Station)'])
        # Insert a newline after '/' so long names wrap in the plot legend.
        station_names[station_id] = row['name'].replace('/', '/\n')
    for row in get_csv('extra/free_bikes_at_station.csv'):
        # Hoisted lookup: the timestamp field is read up to three times.
        timestamp = row['timestamp:INT']
        # Rows are grouped by timestamp; record each distinct one once.
        if last_timestamp != timestamp:
            last_timestamp = timestamp
            timestamps.append(datetime.fromtimestamp(float(timestamp)))
        station_id = int(row['station_id:INT'])
        if station_ids_to_plot and station_id not in station_ids_to_plot:
            continue
        # setdefault replaces the manual membership-test-then-append idiom.
        stations.setdefault(station_id, []).append(int(row['free_bikes:INT']))
    plot(timestamps, stations, station_names)
def test_items_serializers(
        client,
        item_lib_martigny,  # on shelf
        item_lib_fully,  # on loan
        csv_header,
        json_header,
        rero_json_header,
        patron_martigny_no_email,
        librarian_martigny_no_email,
        librarian_sion_no_email,
        loan_pending_martigny):
    """Test record retrieval."""
    login_user(client, librarian_martigny_no_email)

    # JSON item detail: item type is still an unresolved $ref.
    item_url = url_for('invenio_records_rest.item_item',
                       pid_value=item_lib_fully.pid)
    response = client.get(item_url, headers=json_header)
    assert response.status_code == 200
    data = get_json(response)
    assert data['metadata'].get('item_type').get('$ref')

    # Same check for the on-shelf item.
    item_url = url_for('invenio_records_rest.item_item',
                       pid_value=item_lib_martigny.pid)
    response = client.get(item_url, headers=json_header)
    assert response.status_code == 200
    data = get_json(response)
    assert data['metadata'].get('item_type').get('$ref')

    # With resolve=1 the $ref is replaced by the resolved record (has a pid).
    item_url = url_for('invenio_records_rest.item_item',
                       pid_value=item_lib_fully.pid, resolve=1)
    response = client.get(item_url, headers=json_header)
    data = get_json(response)['metadata']
    assert data.get('item_type').get('pid')

    # RERO JSON list serialization.
    list_url = url_for('invenio_records_rest.item_list')
    response = client.get(list_url, headers=rero_json_header)
    assert response.status_code == 200

    # CSV list serialization: status plus the exact header row.
    list_url = url_for('invenio_records_rest.item_list')
    response = client.get(list_url, headers=csv_header)
    assert response.status_code == 200
    data = get_csv(response)
    assert data
    assert '"pid","document_pid","document_title","document_creator",' \
           '"document_type","location_name","barcode","call_number",' \
           '"second_call_number","loans_count","last_transaction_date",' \
           '"status","created"' in data
def read_sparkasse(self, infile):
    """Import a Sparkasse CSV statement export into this statement object.

    The first non-empty row is the column header and is skipped; every
    following non-empty row becomes one BankStatementEntry appended to
    ``self.entries``. ``self.iban`` is taken from column 0 of the data rows.
    """
    header_skipped = False
    for row in utils.get_csv('iso-8859-4', infile):
        if not row:
            continue
        if not header_skipped:
            # First non-empty row holds the column titles.
            header_skipped = True
            continue
        entry = BankStatementEntry(self)
        self.iban = row[0]
        entry.posting_date = utils.convert_date2(row[1])
        entry.purpose = row[4]
        entry.partner = row[11]
        entry.partner_iban = row[12]
        entry.amount = utils.read_float(row[14])
        entry.cleanup()
        self.entries.append(entry)
def test_stats_get(client, stats, csv_header):
    """Test record retrieval."""
    item_url = url_for('invenio_records_rest.stat_item', pid_value=stats.pid)
    res = client.get(item_url)
    assert res.status_code == 200
    assert res.headers['ETag']
    data = get_json(res)

    # Check metadata
    for k in ['created', 'updated', 'metadata', 'links']:
        assert k in data

    # Check self links
    res = client.get(to_relative_url(data['links']['self']))
    assert res.status_code == 200

    # CSV format: the serialized statistics must match exactly,
    # including the CRLF line endings emitted by the csv writer.
    item_url = url_for('invenio_records_rest.stat_item', pid_value=stats.pid,
                       format='csv')
    res = client.get(item_url, headers=csv_header)
    assert res.status_code == 200
    data = get_csv(res)
    assert data == (
        'library id,library name,number_of_active_patrons,'
        'number_of_checkins,number_of_checkouts,'
        'number_of_deleted_items,number_of_documents,'
        'number_of_items,number_of_librarians,number_of_libraries,'
        'number_of_new_items,number_of_new_patrons,'
        'number_of_order_lines,number_of_patrons,'
        'number_of_renewals,number_of_requests,'
        'number_of_satisfied_ill_request\r\n'
        'lib3,Library of Fully,0,0,0,0,1,1,0,2,1,0,0,0,0,0,0\r\n'
        'lib1,Library of Martigny-ville,0,0,0,0,1,1,0,2,1,0,0,0,0,0,0\r\n'
        'lib4,Library of Sion,0,0,0,0,1,1,0,1,1,0,0,0,0,0,0\r\n')

    # List endpoint returns at least one hit.
    list_url = url_for('invenio_records_rest.stat_list')
    res = client.get(list_url)
    assert res.status_code == 200
    data = get_json(res)
    assert data['hits']['hits']
def get_baccount(cls, infile):
    """Identify which known bank account an exported CSV belongs to.

    Scans the file until it finds either an explicit IBAN (a cell starting
    with 'DE') or both a 'BLZ:' and a 'Konto:' header row, from which the
    German IBAN is derived.

    :param infile: input passed through to ``utils.get_csv``
    :returns: ``(account, iban)`` where ``account`` is the matching entry in
        ``BankAccount.baccounts_by_iban`` or None, and ``iban`` is the
        detected IBAN or None.
    """
    blz = None           # bank routing number from the 'BLZ:' row
    baccount_no = None   # account number from the 'Konto:' row
    iban = None
    for row in utils.get_csv('iso-8859-4', infile):
        if not row:
            continue
        if row[0] == 'BLZ:':
            blz = int(row[1])
            continue
        if row[0] == 'Konto:':
            baccount_no = int(row[1])
            continue
        if row[0][0:2] == 'DE':
            # The row itself starts with a German IBAN — done.
            iban = row[0]
            break
        if blz and baccount_no:
            # Both header parts seen: derive the IBAN and stop scanning.
            iban = utils.iban_de(blz, baccount_no)
            break
    if iban and iban in BankAccount.baccounts_by_iban:
        return (BankAccount.baccounts_by_iban[iban], iban)
    else:
        return (None, iban)
import numpy as np
import utils as utils
import functools as ft
import train as t
from plot_line import plot_line

# Train/test evaluation script: hold out a random ~10% of the samples,
# fit the linear model on the rest, and report the mean absolute
# percentage error on the held-out split.
data = np.transpose(utils.get_csv('data.csv'))
X = data[0]
y = data[1]

sample_size = .1
mask = np.random.sample(y.size)
# BUG FIX: the original condition was
#   ((mask < sample_size) == True).size == 0
# but .size of a boolean array is always y.size (never 0), so the re-draw
# loop was dead code. Re-draw until at least one sample lands in the test
# split, which is the evident intent.
while (mask < sample_size).sum() == 0:
    mask = np.random.sample(y.size)
train = mask >= sample_size
test = mask < sample_size

theta0, theta1 = t.train(X[train], y[train])

# Mean absolute percentage error of the fitted line on the test split.
estimation = theta0 + theta1 * X[test]
error = abs((estimation - y[test]) / y[test])
error = sum(error) / error.size

print("sample size = {}".format(y[train].size))
print("test size = {}".format(y[test].size))
# BUG FIX: "{:2f}" is a width-2 fixed-point spec; "{:.2f}" prints the
# intended two decimal places.
print("error = {:.2f}%".format(error * 100))
import numpy as np
import utils as utils

# Interactive prediction script: ask for a mileage and estimate the price
# from the previously trained parameters stored in parameters.csv.
try:
    mileage = int(input("mileage: "))
    parameters = utils.get_csv('parameters.csv')
    theta0, theta1 = parameters[0], parameters[1]
    print("estimate: {:.2f}".format(theta0 + mileage * theta1))
except ValueError:
    # Raised when the typed mileage is not a valid integer.
    print("Please enter valid int input")
import matplotlib as mpl
mpl.use('TkAgg')  # select the Tk backend before pyplot is imported
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import utils as utils
from train import cost
import numpy as np
from functools import partial

# 3D surface plot of the linear-regression cost over a grid of
# (theta0, theta1) parameter values around the expected optimum.
fig = plt.figure()
ax = fig.gca(projection='3d')

data = utils.get_csv('data.csv')
data = np.transpose(data)
km = data[0]      # mileage column
price = data[1]   # price column

# Sample an m x m grid of parameter pairs.
m = 40
theta0 = np.linspace(7000, 9000, num=m)
theta1 = np.linspace(-0.05, 0.0, num=m)
theta0, theta1 = np.meshgrid(theta0, theta1)

def partial_cost(theta0, theta1):
    # Cost of the model (theta0, theta1) on the fixed dataset.
    return cost(km, price, theta0, theta1)

# vectorize lets the scalar cost function run over the whole meshgrid.
cost_fun = np.vectorize(partial_cost)
Z = cost_fun(theta0, theta1)

surf = ax.plot_surface(theta0, theta1, Z, rstride=1, cstride=1,
                       cmap=mpl.cm.coolwarm, linewidth=0,
                       antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=10)
plt.show()