def _fetch_league_records(self):
    divs = self.page.html.find('.block-content')
    league_records = divs[1].find('script')[0].text.split('(')[-1][1:-3]
    data = parse_json(league_records)
    # iterate through teams and insert matchdays
    for team in data.values():
        team_id = team.pop('id')
        team_title = team.pop('title')
        # get the matchday data
        matchdays = team['history']
        for i, md in enumerate(matchdays):
            md['ppda_att'] = md['ppda']['att']
            md['ppda_def'] = md['ppda']['def']
            md['ppda_allowed_att'] = md['ppda_allowed']['att']
            md['ppda_allowed_def'] = md['ppda_allowed']['def']
            del md['ppda']
            del md['ppda_allowed']
            md['id'] = team_id
            md['title'] = team_title
            md['league'] = self.competition
            md['season'] = self.year
            md['md'] = i + 1
    return data
def _fetch_rosters(self):
    divs = self.page.html.find('.block-content')
    rosters = divs[1].find('script')[0].text.split('(')[1].split(')')[0][1:-1]
    data = parse_json(rosters)
    return data
def _fetch_shot_data(self):
    divs = self.page.html.find('.block-content')
    sdata = divs[0].find('script')[0].text.split('(')[1].split(')')[0][1:-1]
    data = parse_json(sdata)
    return data
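Each of the fetchers above slices the JSON payload out of an inline JSON.parse('...') call with a slightly different chain of split and index operations. Below is a minimal sketch of a shared helper for that step, assuming the script text has the form JSON.parse('<escaped json>'); the helper name is an illustration, not part of the original scraper.

def _extract_script_payload(script_text):
    # Assumed helper: pull the single-quoted argument out of an inline
    # "JSON.parse('...')" call, i.e. everything between the first "('"
    # and the last "')".
    start = script_text.index("('") + 2
    end = script_text.rindex("')")
    return script_text[start:end]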
def output():
    fname = request.args.get('value').replace(' ', '_')
    fpath = app.config['UPLOAD_FOLDER'] + fname
    with open(fpath, 'r') as jsonfile:
        config = json.load(jsonfile)
    output = parse_json(config)
    return render_template('output.html', entries=output)
def test_1():
    data = ('{"C": 3}\n'
            '{"A": 1, "B": "Hello"}\n'
            '"Blabla"\n'
            '{"C": 76, "A": -14, "B": 7}\n'
            '{"C": "Bla", "D": 34, "B": "This is a string!"}\n'
            '{"B":"A", "A": 50.5}')
    fd = io.StringIO(data)
    assert parse_json(fd) == [-14, 'Hello']
def _fetch_player_data(self):
    divs = self.page.html.find('.block-content')
    player_data = divs[-1].text.split('(')[-1][1:-3]
    data = parse_json(player_data)
    for player in data:
        player['G'] = player.pop('goals')
        player['S'] = player.pop('shots')
        player['A'] = player.pop('assists')
        player['team'] = player.pop('team_title')
        player['name'] = player.pop('player_name')
    return data
def _fetch_match_info(self):
    divs = self.page.html.find('.block-content')
    mi = divs[0].find('script')[0].text.split('(')[2].split(')')[0][1:-1]
    data = parse_json(mi)
    translate = {'EPL': 'EPL',
                 'La liga': 'La_Liga',
                 'Bundesliga': 'Bundesliga',
                 'Serie A': 'Serie_A',
                 'Ligue 1': 'Ligue_1'}
    data['league'] = translate[data['league']]
    return data
def _fetch_match_data(self):
    divs = self.page.html.find('.block-content')
    mdata = divs[0].text.split('=')[1].split('(')[1].split(')')[0][1:-1]
    data = parse_json(mdata)
    lodicts = []
    # flatten with proper labels
    for match in data:
        if match['isResult']:
            temp_match = {}
            temp_match['league'] = self.competition
            temp_match['season'] = self.year
            temp_match['match_id'] = match['id']
            lodicts.append(temp_match)
    return lodicts
def _fetch_team_data(self):
    # get json data for team
    divs = self.page.html.find('.block-content')
    team_data_script = divs[1].text.split('(')[-1][1:-3]
    data = parse_json(team_data_script)
    # flattening dictionary and putting correct labels
    for k, v in data.items():
        for ki, vi in v.items():
            vi['SA'] = vi['against']['shots']
            vi['GA'] = vi['against']['goals']
            vi['xGA'] = vi['against']['xG']
            del vi['against']
            vi['S'] = vi.pop('shots')
            vi['G'] = vi.pop('goals')
            if 'stat' in vi:
                del vi['stat']
    return data
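For analysis it can help to flatten the nested dict returned by _fetch_team_data() into a table. The sketch below assumes the outer keys identify teams and the inner keys the game situation; that structure is inferred from the loop above, and the function name is illustrative only.

import pandas as pd

def team_data_to_frame(data):
    # Flatten {outer_key: {inner_key: stats}} into one row per (team, situation),
    # carrying the S/G/SA/GA/xGA labels set by _fetch_team_data().
    rows = []
    for team, situations in data.items():
        for situation, stats in situations.items():
            row = {'team': team, 'situation': situation}
            row.update(stats)
            rows.append(row)
    return pd.DataFrame(rows)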
def main():
    """Main function"""
    args = parse_arguments()

    # Load configuration
    try:
        file = args.config or DEFAULT_CONFIG
        config = parse_json(file)
    except IOError as ex:
        print("Failed to load configuration file: " + ex.filename)
        return 1
    except ValueError as ex:
        print("Invalid configuration in file '{0}': {1}".format(file, ex.args[0]))
        return 1

    # Create logger
    log = Logger(args.verbose, config["log_email"], config["min_email_period"])

    # Create WUG PWS for uploading data
    pws = WugPws(config["weather_underground"], log, args.noupload)

    read_sensors(config, log, pws)
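For reference, a plausible shape for the JSON configuration this main() expects, inferred only from the keys it reads ("log_email", "min_email_period", "weather_underground"); the nested fields under "weather_underground" and all values are placeholders, not taken from the original project.

# Illustrative config layout (values and nested WU keys are guesses)
EXAMPLE_CONFIG = {
    "log_email": "alerts@example.com",
    "min_email_period": 3600,
    "weather_underground": {
        "station_id": "KXXXXXXX0",   # hypothetical key
        "station_key": "secret"      # hypothetical key
    }
}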
# HOW TO HANDLE JSON DATA
import pandas as pd
import json
import parse_json

# Open json data
with open("Leagues of Legends/champion_info_2.json", "r+") as f:
    json_data = json.load(f)

champion = pd.DataFrame(json_data)
champion.reset_index(inplace=True)
champion.rename(columns={"index": "name"}, inplace=True)
champion.head()

# Convert the json data to DataFrame
first_row_data = champion.data[0]
champ_data = pd.DataFrame(first_row_data)
for i in range(1, champion.shape[0]):
    champ_data = champ_data.append(pd.DataFrame(champion.data[i]))

champ_data = pd.DataFrame(
    champ_data.groupby(['title', 'id', 'name', 'key'])['tags']
    .apply(lambda x: "%s" % ', '.join(x))
)
champ_data.reset_index(inplace=True)
champ_merge = pd.merge(champion, champ_data, how="left", on="name")
print(champion.head())

# General method to parse json and add data to the dataframe
champion = parse_json.parse_json(champion, 'data')
print(champion.head())
def main(inputFilename, outputFilename, additionalFilesetAbsDir):
    input_struct = parse_json(additionalFilesetAbsDir + "/../../" + inputFilename)
    populate_additional_filesets(input_struct, additionalFilesetAbsDir)
    write_tcl(input_struct, outputFilename)
from bagged_trees import BaggedTrees
from boosted_trees import BoostedTrees
from parse_json import parse_json
import sys
import numpy as np
import pandas as pd

if __name__ == "__main__":
    max_trees = range(1, 15)
    max_depth = [2, 4, 10]
    # max_trees = range(1,3)
    # max_depth = [2,4]
    train = "datasets/digits_train.json"
    test = "datasets/digits_test.json"

    # load in data
    X_train, y_train, meta_train = parse_json(train, method="Numpy")
    X_test, y_test, meta_test = parse_json(test, method="Numpy")

    for mt in max_trees:
        for md in max_depth:
            bag_dt = BaggedTrees(mt, md)
            bag_dt.fit(X_train, y_train, meta_train, verbose=False)
            y_pred = bag_dt.predict(X_test, None, verbose=False)
            bag_acc = (y_pred == y_test).mean()

            boost_dt = BoostedTrees(mt, md)
            boost_dt.fit(X_train, y_train, meta_train, verbose=False)
            y_pred = boost_dt.predict(X_test, None, verbose=False)
            boost_acc = (y_pred == y_test).mean()

            print("{},{},{},{}".format(mt, md, "bag", bag_acc))
            print("{},{},{},{}".format(mt, md, "boost", boost_acc))
        rc = re.sub(r' ', '', value)
        fc = re.sub(r' ', '', fetcher_text[key])
        l_len = len(rc)
        r_len = len(fc)
        retval = dif_content(rc, fc)
        retval_ground = 0
        results = dmp.diff_main(rc, fc)
        for res in results:
            if res[0] == 0:
                retval_ground += len(res[1])
        print cnt, ': ', l_len, r_len, retval, retval_ground
        real_ret = max(retval, retval_ground)
        rets.append((cnt, l_len, r_len, real_ret))
    with open('diff_result_1', 'w') as f:
        for res in rets:
            print >> f, res[0], ': ', res[1], res[2], res[3]


if __name__ == '__main__':
    parser = set_parser()
    ops = parser.parse_args()
    if not ops.run:
        read_json_filename = ops.file[0]
        fetcher_json_filename = ops.file[1]
        print read_json_filename, fetcher_json_filename
        parse_json(read_json_filename, fetcher_json_filename)
    run_compare()
def main(inputFilename, working_dir):
    if not working_dir.endswith("/"):
        working_dir += "/"
    input_struct = parse_json(inputFilename)
    execute_quartus_workflow(input_struct, working_dir)
        self.print_graph()
        self.print_predictions(y_pred, y, probs)
        print(np.equal(y_pred, y).sum())
        print("")

        if confidence:
            return posterior[:, 0]
        else:
            return y_pred


if __name__ == "__main__":
    if len(sys.argv) == 1:
        train = "datasets/lymphography_train.json"
        test = "datasets/lymphography_test.json"
        net_type = "n"
    else:
        train = str(sys.argv[1])
        test = str(sys.argv[2])
        net_type = str(sys.argv[3])

    # parse the json file for data
    X_train, y_train, meta_train = parse_json(train)
    X_test, y_test, meta_test = parse_json(test)

    bn = BayesNet(net_type)
    bn.fit(X_train, y_train, meta_train)
    bn.predict(X_test, y_test, verbose=True)
from bayes import BayesNet
from parse_json import parse_json
from accuracy import accuracy_score
import pandas as pd
import numpy as np
import math
from scipy import stats

if __name__ == "__main__":
    data = "datasets/tic-tac-toe.json"
    k = 10
    X, y, meta = parse_json(data)

    np.random.seed(8)
    random_index = [i for i in range(len(X))]
    np.random.shuffle(random_index)

    # split X and y into 10 folds
    X_split = np.array_split(X.ix[random_index, :].reset_index(drop=True), k)
    y_split = np.array_split(y.ix[random_index].reset_index(drop=True), k)

    deltas = np.zeros(k)
    for i in range(k):
        acc = []
        for net_type in ["n", "t"]:
            X_train = pd.concat(X_split[:i] + X_split[i + 1:]).reset_index(drop=True)
            y_train = pd.concat(y_split[:i] + y_split[i + 1:]).reset_index(drop=True)
            X_test = X_split[i].reset_index(drop=True)
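The snippet above is cut off inside the cross-validation loop. The following is a hypothetical continuation, based only on the imports it declares (accuracy_score, scipy.stats) and the BayesNet fit/predict usage shown in the previous snippet; the exact signatures of accuracy_score and predict, and the choice of a one-sample t-test, are assumptions, not part of the original.

            # Hypothetical continuation (original is truncated here)
            y_test_fold = y_split[i].reset_index(drop=True)
            bn = BayesNet(net_type)
            bn.fit(X_train, y_train, meta)
            y_pred = bn.predict(X_test, y_test_fold, verbose=False)
            acc.append(accuracy_score(y_test_fold, y_pred))
        # difference between TAN ("t") and naive ("n") accuracy on this fold
        deltas[i] = acc[1] - acc[0]

    # a paired comparison of the per-fold deltas could then use scipy.stats
    t_stat, p_value = stats.ttest_1samp(deltas, 0.0)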
)
parser.add_argument(
    '--make_bar_graphs',
    action='store_true',
    help='Produces bar graphs and analysis to show hour-of-day and day-of-week drop patterns. '
         'Call --reformat_json and --split_by_tripcode first.'
)
args = parser.parse_args()

reformat = args.reformat_json
bar_graphs = args.make_bar_graphs
tripcode = args.split_by_tripcode

if reformat:
    print("Reformatting json")
    parse_json()
elif bar_graphs:
    print("Making bar graphs")
    plot_dow()
    plot_hod()
elif tripcode:
    print("Splitting input by tripcode")
    split_by_tripcode()
else:
    print("Reformatting json")
    parse_json()
    print("Splitting by tripcode")
    split_by_tripcode()
def test_3():
    data = ('{"C": 3}\n'
            '{"A": "1", "B": 3}\n'
            '"Blabla"')
    fd = io.StringIO(data)
    assert parse_json(fd) == [None, None]
        ret["tmax"] = round(self.tmax_sum / self.count, 2)
        ret["tmean"] = round(self.tmean_sum / self.count, 2)
        return ret


if __name__ == "__main__":
    rulebased_statistics = {}
    # data_root_dir = '/home/inseo/Desktop/test_data'
    data_root_dir = '/home/inseo/DATA/fuse'
    data_dir_list = glob.glob(os.path.join(data_root_dir, 'fuse*'))
    prefix_json = 'json_rb'

    for data_dir in data_dir_list:
        for json_item in glob.glob(os.path.join(data_dir, prefix_json, '*.json')):
            data = parse_json(json_item)
            for obj in data:
                # unpack data
                class_name = obj['class']
                tmin = obj['tmin']
                tmax = obj['tmax']
                tmean = obj['tmean']

                # create a new class entry if it does not exist yet
                if class_name not in rulebased_statistics:
                    rulebased_statistics[class_name] = statistics(class_name)

                # add item to statistics
                rulebased_statistics[class_name].add_data(tmin, tmax, tmean)

    # write to json file
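The snippet stops at the final "write to json file" comment. One plausible completion is sketched below; the summary() method name, the output filename, and the assumption that json is imported are placeholders, since the statistics class definition is not shown in the original.

    # Hypothetical completion of the "write to json file" step
    summary = {name: st.summary() for name, st in rulebased_statistics.items()}
    with open('rulebased_statistics.json', 'w') as f:
        json.dump(summary, f, indent=2)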
__author__ = 'yingbozhan'
import json

test = """[{"data":{"2015-05-06":[1240.0,450.0],"2015-05-04":[1240.0,450.0],"2015-05-29":[1240.0,670.0],"2015-05-03":[1240.0,670.0],"2015-05-27":[1240.0,670.0],"2015-05-08":[1240.0,450.0],"2015-05-25":[1240.0,670.0],"2015-06-26":[1240.0,1200.0],"2015-06-07":[1240.0,670.0],"2015-06-05":[1240.0,670.0],"2015-05-13":[1240.0,450.0],"2015-05-31":[1240.0,670.0],"2015-05-10":[1240.0,450.0],"2015-05-11":[1240.0,670.0],"2015-04-19":[1240.0,1190.0],"2015-05-18":[1240.0,670.0],"2015-05-15":[1240.0,450.0],"2015-06-01":[1240.0,670.0],"2015-06-03":[1240.0,670.0],"2015-05-17":[1240.0,670.0],"2015-04-22":[1240.0,1190.0],"2015-05-22":[1240.0,670.0],"2015-04-20":[1240.0,1190.0],"2015-04-29":[1240.0,660.0],"2015-05-24":[1240.0,670.0],"2015-04-26":[1240.0,1190.0],"2015-04-27":[1240.0,660.0],"2015-04-24":[1240.0,1190.0],"2015-05-01":[1240.0,900.0],"2015-05-20":[1240.0,670.0]},"date":"2015-04-22","destCity":"NGB","destCityName":"","orgCity":"TSN","orgCityName":""}]"""

from parse_json import parse_json
from node import Node

node = Node(0)
nodes = []
parse_json(json.loads(test), 1, nodes, node)
node.node_print(0)