def run(travel_search_date):
    """Execute the configured searches and persist the results as a JSON report.

    travel_search_date: date handed to the travel-search executor.

    Side effects: prints progress, saves a JSON report through report_dao and,
    when ``upload_gcp`` is set, pushes the report to a GCP bucket and prunes
    old files there.
    """
    report = {
        "date": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        "travel_search_date": travel_search_date,
    }

    # Stop-time searches are optional: they only run when a file is configured.
    if stop_times_file is not None:
        stops = csv_loader.load_csv(stop_times_file)
        print("loaded {number_of_searches} stops from file".format(
            number_of_searches=len(stops)))
        report["stopTimes"] = stop_times_executor.run_stop_times_searches(stops)

    travel_searches = csv_loader.load_csv(travel_search_file)
    print("loaded {number_of_searches} searches from file".format(
        number_of_searches=len(travel_searches)))
    print("Running searches against endpoint " + graphql_endpoint)
    report["travelSearch"] = travel_search_executor.run_travel_searches(
        travel_searches, travel_search_date)

    serialized_report = json.dumps(report)
    saved_filename = report_dao.save_json_report(serialized_report)

    if upload_gcp:
        # Consider using constructor for gcp uploader
        gcp_uploader.upload_blob(os.environ[BUCKET_NAME_ENV],
                                 saved_filename,
                                 os.environ[DESTINATION_BLOB_NAME_ENV])
        gcp_uploader.remove_old_files(os.environ[BUCKET_NAME_ENV],
                                      os.environ[DESTINATION_BLOB_NAME_ENV])
def main():
    """Load a CSV file via numpy, wrap it with csv_loader, and probe one row.

    Consumes ``sys.argv[1]`` (the CSV filename) so that any later argv
    consumers only see the remaining arguments.
    """
    # pop(1) both reads and removes the filename argument in one step.
    fname = sys.argv.pop(1)
    raw_rows = np.loadtxt(fname, dtype='str', delimiter=',')
    data = csv_loader.load_csv(raw_rows, fname)
    # NOTE(review): the result is never used — presumably a smoke-test lookup
    # against fixed timestamp/value constants; confirm intent with the author.
    closest_row = data.find_closest_row(1481747621, 281100000)
# --- command-line entry: look up prices for a CSV of card names ---------------
# Usage: script.py SET_NAME_ID SET_NAME CSV_FILENAME
argCount = len(sys.argv)
if argCount != 4:
    # Parenthesized print: identical output on Python 2, valid on Python 3,
    # and consistent with the rest of the codebase.
    print("Wrong number of arguments")
    # sys.exit over the `site`-provided exit() builtin: always available.
    sys.exit(1)

SET_NAME_ID = sys.argv[1]  # XLN, M19, etc
SET_NAME = sys.argv[2]  # Ixalan, Core Set 19
CSV_FILENAME = sys.argv[3]  # "c:\\card\\csv\M19_Wanted.csv"

card_name_array = csv_loader.load_csv(CSV_FILENAME)

all_cards = []
for card in card_name_array:
    card_description = get_card_description(card.strip(), SET_NAME_ID,
                                            SET_NAME, False)
    if card_description is None:
        # Keep going on a miss so one bad row does not abort the whole run.
        print("ERROR : can't find card %s" % card.strip())
    else:
        all_cards.append(card_description)

browser.close()

# Cheapest cards first, by their "tix" (MTGO ticket) price.
sorted_cards = sorted(all_cards, key=lambda k: float(k["tix"]))
def load_csv(filename):
    """Thin pass-through: delegate CSV loading of *filename* to csv_loader."""
    return csv_loader.load_csv(filename)
# Build a search urls for cardhoarder.com
def build_mtgo_url(set_name_id, card_name):
    """Return the mtgotraders store URL for *card_name* in set *set_name_id*."""
    slug = card_name.replace(' ', '_')
    # Apostrophes and commas never appear in the store's URL slugs.
    for unwanted in ('\'', ','):
        slug = slug.replace(unwanted, '')
    return "https://www.mtgotraders.com/store/" + set_name_id + "_" + slug + ".html"


browser = webdriver.Chrome("c:\\workspace\\python\\chromedriver.exe")

SET_NAME_ID = "M19"
SET_NAME = "Core Set 2019"

card_name_array = csv_loader.load_csv("c:\\card\\csv\M19_Wanted.csv")
# card_name_array = ["Stitcher's Supplier"]

all_cards = []
for card in card_name_array:
    # Scrape the store page for each card and record its price.
    page_url = build_mtgo_url(SET_NAME_ID, card)
    html = download_webpage(page_url)
    price = get_price_from_webpage(html)
    all_cards.append({'name': card, 'price': float(price)})

# Cheapest cards first.
sorted_cards = sorted(all_cards, key=lambda k: k["price"])
# gradients at last layer dW2 = (1 / m_batch) * np.matmul(dZ2, cache["A1"].T) db2 = (1. / m_batch) * np.sum(dZ2, axis=1, keepdims=True) # back propagate trough first layer dA1 = np.matmul(params["W2"].T, dZ2) dZ1 = dA1 * sigmoid(cache["Z1"]) * (1 - sigmoid(cache["Z1"])) # gradients at first layer dW1 = (1 / m_batch) * np.matmul(dZ1, X.T) db1 = (1 / m_batch) * np.sum(dZ1, axis=1, keepdims=True) return {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2} train_images, train_labels, test_images = csv_loader.load_csv() X_train, X_test = train_images.T / 255, test_images.T / 255 # (784, 60000), (784, 10000) # one-hot encoding digits = 10 Y_train = np.eye(digits)[train_labels.T.astype('int32')] Y_train = Y_train.T.reshape(digits, train_labels.shape[0]) # (10, 60000) # Shuffle the training set # np.random.seed(138) # shuffle_index = np.random.permutation(X_train.shape[1]) # X_train, Y_train = X_train[:, shuffle_index], Y_train[:, shuffle_index] # hyperparameters n_1 = X_train.shape[0] # 784 n_2 = 64