def main():
    """Run the full leverage-efficiency pipeline end to end."""
    # Resolve which config file drives this run from the CLI arguments.
    config_file = leverage_efficiency.base.get_config_filename(sys.argv)

    # Stage 1: pull raw source data into a common format.
    import extract
    extract.main(config_file)

    # Optional data refresh; not connected to the rest of the pipeline yet.
    # import update
    # update.main(config_file)

    # Stage 2: derived quantities (e.g. returns) used by the calculations.
    import transform
    transform.main(config_file)

    # Stage 3: the leverage-efficiency calculations themselves.
    import analysis
    analysis.main(config_file)

    # Stage 4: figures — general plots, the paper-exact figures, and the
    # EE lecture-note figures, in that order.
    import plots
    plots.main(config_file)
    import paper_plots
    paper_plots.main(config_file)
    import lecture_plots
    lecture_plots.main(config_file)
def main():
    """Optionally crawl the train websites, then run the ETL steps."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--crawl')
    options = parser.parse_args()

    # Crawling is slow, so it runs only with --crawl=yes AND an explicit
    # interactive confirmation.
    if options.crawl == 'yes':
        answer = raw_input("This will crawl the websites and will take very long time. Do you want to continue? ")
        if answer == 'yes':
            crawl.crawl_trains()

    extract.main()
    transform.main()
    os.system('bash export.sh')
def test_main():
    """transform.main must rewrite legacy FactFinder URLs to data.census.gov."""
    # (legacy FactFinder URL, expected data.census.gov URL) pairs covering
    # ACS, decennial, nonemployer, SBO tables and a community-facts profile.
    cases = [
        (
            "https://factfinder.census.gov/bkmk/table/1.0/en/"
            "ACS/13_5YR/B07010/0100000US|0400000US01|0500000US01001",
            "https://data.census.gov/cedsci/table?g="
            "0100000US_0400000US01_0500000US01001&tid=ACSDT5Y2013.B07010&y=2013",
        ),
        (
            "https://factfinder.census.gov/bkmk/table/1.0/en/DEC/10_113/H1",
            "https://data.census.gov/cedsci/table?tid=DECENNIALCD1132010.H1&y=2010",
        ),
        (
            "http://factfinder.census.gov/bkmk/table/1.0/en/DEC/10_SF1/H10",
            "https://data.census.gov/cedsci/table?tid=DECENNIALSF12010.H10&y=2010",
        ),
        (
            "https://factfinder.census.gov/bkmk/table/1.0/en/NES/2016/00A1",
            "https://data.census.gov/cedsci/table?tid=NONEMP2016.NS1600NONEMP&y=2016",
        ),
        (
            "https://factfinder.census.gov/bkmk/table/1.0/en/SBO/2012/00CSA01",
            "https://data.census.gov/cedsci/table?tid=SBOCS2012.SB1200CSA01&y=2012",
        ),
        (
            "http://factfinder.census.gov/bkmk/cf/1.0/en/place/Chicago city, Illinois"
            "/POPULATION/DECENNIAL_CNT",
            "https://data.census.gov/cedsci/profile?q=Chicago+city%2C+Illinois",
        ),
    ]
    for legacy_url, expected_url in cases:
        assert transform.main(legacy_url) == expected_url
def main():
    """CLI entry point: crawl on request, then extract/transform/export."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--crawl')
    cli_args = parser.parse_args()

    if cli_args.crawl == 'yes':
        # Require a second, interactive "yes" before the long crawl starts.
        confirmation = raw_input(
            "This will crawl the websites and will take very long time. "
            "Do you want to continue? "
        )
        if confirmation == 'yes':
            crawl.crawl_trains()

    # The ETL steps always run, crawl or not.
    extract.main()
    transform.main()
    os.system('bash export.sh')
def go_to_images():
    """Render the directions page for a previously uploaded image.

    Looks the image up by the filename's stem (which doubles as its DB id),
    computes and caches its directions on first access, and reuses the
    cached value on later visits.
    """
    import ast  # stdlib; used only to parse the cached directions string

    filename = request.args.get("filename")
    filepath = "/static/uploads/" + filename

    # The DB id is the filename without its extension (e.g. "42.png" -> "42").
    file_id = filename.split(".")
    db_id = file_id[0]

    image = Image.query.get(db_id)

    if not image.directions:
        # First visit: derive the directions from the stored ws_rows value
        # and cache them (stringified) on the image row.
        directions = transform.main(image.ws_rows)
        image.directions = str(directions)
        model.session.commit()
    else:
        # Cached as str(<list>) above, so literal_eval round-trips it.
        # Unlike eval(), literal_eval only accepts Python literals and
        # cannot execute code if the DB value is ever tampered with.
        directions = ast.literal_eval(image.directions)

    return render_template("directions.html", filepath=filepath, directions=directions)
def predict():
    """Handle an answer-sheet upload: save the image and return graded JSON."""
    if request.method == 'POST':
        # Pull the uploaded image out of the multipart form data.
        upload = request.files['image']

        # Persist it under ./imgs with a sanitized filename.
        # basepath = os.path.dirname(__file__)
        safe_name = secure_filename(upload.filename)
        saved_path = os.path.join("imgs", safe_name)
        upload.save(saved_path)

        # Number of questions the grader expects on the sheet.
        questions = 20

        # imgPath =f"./imgs/(unknown)"
        sheet = cv2.imread(saved_path)
        result = main(sheet, saved_path, questions)
        return json.dumps(result)
# One import per line (PEP 8) instead of a combined import statement.
import anpocs43
import anpocs44
import transform

if __name__ == "__main__":
    # Run both ANPOCS scrapers, then the shared transform step.
    anpocs43.main()
    anpocs44.main()
    transform.main()
#!/usr/bin/env python3
"""Autotune matmul tile sizes by driving transform's search over DOMAIN."""
from subprocess import check_output, call, STDOUT
import os
import json

import transform

# Search space: each of the three tile dimensions ranges over [2, 128].
DOMAIN = [(2, 128), (2, 128), (2, 128)]


def codegen(filename, i, j, k):
    """Write the candidate tile sizes (i, j, k) to *filename* as JSON."""
    candidate = {"i": int(i), "j": int(j), "k": int(k)}
    with open(filename, "w") as handle:
        json.dump(candidate, handle)


def run(filename):
    """Compile matmul with the tile sizes in *filename* and benchmark it.

    Returns the measured rate (nodes/s) as a float.
    """
    with open(filename) as handle:
        tiles = json.load(handle)

    compile_cmd = (
        "clang++ -std=c++11 -O3 -DTILE0={i} -DTILE1={j} -DTILE2={k} "
        "matmul.cpp -o matmul"
    )
    os.system(compile_cmd.format(**tiles))

    # The benchmark prints "<rate> nodes/s ..."; keep the leading number.
    output = check_output(["./matmul", "1024"])
    rate = float(output.decode("utf-8").split("nodes/s")[0])
    print(tiles, rate)
    return rate


if __name__ == '__main__':
    transform.main(DOMAIN, codegen, run)