def test_BAANLoader(self):
    """Baan datasets loaded through Loader must yield non-empty cost and profit."""
    baan_names = (name for name in ALL_FILES_DICT if name.startswith('Baan'))
    for name in baan_names:
        cost, profit, _, _ = Loader(name).load()
        # both mappings must contain at least one entry
        assert cost and profit
def test_2(self):
    """Feeding a CUSTOM-format stream to the TOPCONS parser must be rejected."""
    raw = ('data:application/octet-stream;base64,TEVOIDE2OAoxIDEwIDEKMTAgMTUgMgo0MCA'
           '1NSA3CjU1IDg1IDgKODUgOTkgOQoxMDAgMTYwIDEwCg==')
    data, invalid = Loader(raw, 'TOPCONS')
    self.assertTrue(invalid)
    self.assertIsNone(data)
def create_player_lists(teams, year):
    '''
    create_player_lists: populates each team's per-date player_list with every
    player id found in the boxscore, and each team's per-date inactive_list
    with the ids of inactive players. Inactive players are those whose
    COMMENT column marks them DND (did not dress) or NWT (not with team).
    '''
    df_bs = Loader.load_unsorted_boxscore(year)

    # Split the boxscore: DND rows, then NWT rows among the remainder.
    is_dnd = df_bs['COMMENT'].str.contains('DND', na=False)
    df_dnd = df_bs[is_dnd]
    remaining = df_bs[~is_dnd]
    df_nwt = remaining[remaining['COMMENT'].str.contains('NWT', na=False)]

    # Record every player id that appears in the boxscore.
    df_bs.apply(
        (lambda r: teams[r['TEAM_ID']].player_list[r['Date']]
         .append(r['PLAYER_ID'])), axis=1)
    # Record DND player ids as inactive.
    df_dnd.apply(
        (lambda r: teams[r['TEAM_ID']].inactive_list[r['Date']]
         .append(r['PLAYER_ID'])), axis=1)
    # Record NWT player ids as inactive.
    df_nwt.apply(
        (lambda r: teams[r['TEAM_ID']].inactive_list[r['Date']]
         .append(r['PLAYER_ID'])), axis=1)
def test_RALICLoader_conversion(self):
    """Loader's converted RALIC data must line up with RALICLoader's raw data."""
    for name, files in ALL_FILES_DICT.items():
        if not name.startswith('RALIC'):
            continue
        neo_cost, neo_profit, _, _ = Loader(name).load()
        # load the original dataset directly
        raw_level, raw_cost = RALICLoader().load(files['sreq'], files['cost'])
        cost_map = {entry[0]: entry[2] for entry in raw_cost if len(entry) == 3}
        # converted cost covers exactly the raw cost entries
        assert len(neo_cost) == len(cost_map)
        # keep only level rows whose requirement has a known cost
        kept_levels = [entry for entry in raw_level if entry[1] in cost_map]
        assert len(kept_levels) == len(neo_profit)
def test_Motorola_conversion(self):
    """Loader's (cost, profit) pairs must equal MotorolaLoader's raw tuples."""
    for name, files in ALL_FILES_DICT.items():
        if not name.startswith('Motorola'):
            continue
        cost, profit, _, _ = Loader(name).load()
        # load the same dataset directly
        cost_revenue = MotorolaLoader().load(files)
        assert cost_revenue
        assert cost.keys() == profit.keys()
        # re-pairing cost/profit per requirement reproduces the raw list
        pairs = [(cost[req], profit[req]) for req in cost.keys()]
        assert pairs == cost_revenue
def test_XuanLoader_conversion(self):
    """Loader output for classic*/realistic* NRPs must agree with XuanLoader."""
    for name, files in ALL_FILES_DICT.items():
        if not (name.startswith('classic') or name.startswith('realistic')):
            continue  # only classic* and realistic* NRPs apply here
        content = Loader(name).load()
        # expect the 4-tuple (cost, profit, dependencies, requests)
        assert len(content) == 4
        cost, profit, dependencies, requests = content
        # load the pre-processed data directly
        xuan_cost, xuan_dependencies, xuan_customers = XuanLoader().load(files)
        # cost => xuan_cost: each converted entry appears in some raw line
        for req, req_cost in cost.items():
            assert any(
                req in dict(line) and dict(line)[req] == req_cost
                for line in xuan_cost)
        # xuan_cost => cost: every raw entry survived the conversion
        for line in xuan_cost:
            for req, req_cost in line:
                assert req in cost
                assert cost[req] == req_cost
        # profit <=> xuan_customers
        assert {c[0]: c[1] for c in xuan_customers} == profit
        # dependencies <=> xuan_dependencies
        assert set(xuan_dependencies) == set(dependencies)
        # requests => customers
        xuan_requests = {c[0]: c[2] for c in xuan_customers}
        for customer, req in requests:
            assert customer in xuan_requests
            assert req in xuan_requests[customer]
        # customers => requests
        for customer, req_list in xuan_requests.items():
            for req in req_list:
                assert (customer, req) in requests
def get_inception(c3dmodel, args):
    """Run the C3D model over the loader's clips and collect softmax outputs.

    Args:
        c3dmodel: chainer C3D model; queried for its 'prob' layer.
        args: namespace with at least `mean` (path to the .npy mean file) and
            `interpolation` (OpenCV interpolation flag); also consumed by
            `Loader` -- TODO confirm the full set of fields it reads.

    Returns:
        np.ndarray of shape (total_frames, 101) with per-frame class
        probabilities.
    """
    # C3D full mean: stored as (3, 1, 16, 128, 171); crop the width down to
    # 128 columns so it matches the resized frames below.
    loaded_mean = np.load(args.mean).astype('f')
    loaded_mean = loaded_mean.reshape((3, 1, 16, 128, 171))[:, :, :, :,
                                                            21:21 + 128]
    print(loaded_mean[:, 0, 0, 0, 0])
    mean = loaded_mean

    loader = Loader(args)()
    ys = []
    for _, (x, _) in tqdm(enumerate(loader), total=len(loader)):
        x = x.data.cpu().numpy()
        n, c, f, h, w = x.shape
        # flatten clips to individual frames: (n*f, h, w, c)
        x = x.transpose(0, 2, 3, 4, 1).reshape(n * f, h, w, c)
        x = x * 128 + 128  # de-normalize before resizing
        x_ = np.zeros((n * f, 128, 128, 3))
        for t in range(n * f):
            x_[t] = np.asarray(
                cv.resize(x[t], (128, 128),
                          interpolation=args.interpolation))
        x = x_.transpose(3, 0, 1, 2).reshape(3, n, f, 128, 128)
        x = x[::-1] - mean  # mean file is BGR-order while model outputs RGB-order
        # center-crop to the 112x112 spatial input C3D expects
        x = x[:, :, :, 8:8 + 112, 8:8 + 112].astype('f')
        x = x.transpose(1, 0, 2, 3, 4)
        # BUG FIX: the original wrote `with A and B:`, which evaluates the
        # boolean `and` and enters only ONE context manager (B). Use the
        # multi-manager form so both train=False and no_backprop_mode apply.
        with chainer.using_config('train', False), \
                chainer.no_backprop_mode():
            # C3D takes an image with BGR order
            y = c3dmodel(Variable(chainer.cuda.cupy.asarray(x)),
                         layers=['prob'])['prob'].data.get()
        ys.append(y)
    ys = np.asarray(ys).reshape((-1, 101))
    return ys
def test_3(self):
    """A valid EVFOLD stream should parse into the expected contact triples."""
    raw = ('data:application/octet-stream;base64,NTAgWCA1OCBYIDAgMC45OTk5MDg5CjUxIFggNTggWCAwIDAuOTk5ODk'
           '3ODQKMTQzIFggMTg1IFggMCAwLjk5OTg5NDMKMTQwIFggMTUyIFggMCAwLjk5OTg3Nzc1CjE0OCBYIDE5MCBYIDAgMC4'
           '5OTk4NzU0CjUwIFggMTUzIFggMCAwLjk5OTg2MjIKMTQ1IFggMTE3IFggMCAwLjk5OTg1NTY0CjEzOSBYIDEwMSBYIDA'
           'gMC45OTk4Mjc1CjE0MSBYIDE5MyBYIDAgMC45OTk4MjIzCgo=')
    expected_rows = [
        [58, 50, 0.9999089],
        [58, 51, 0.99989784],
        [185, 143, 0.9998943],
        [152, 140, 0.99987775],
        [190, 148, 0.9998754],
        [153, 50, 0.9998622],
        [145, 117, 0.99985564],
        [139, 101, 0.9998275],
        [193, 141, 0.9998223],
    ]
    data, invalid = Loader(raw, 'EVFOLD')
    self.assertFalse(invalid)
    self.assertEqual(data, compress_data(expected_rows))
def test_1(self):
    """A valid CUSTOM stream should expand its ranges into per-position values."""
    raw = ('data:application/octet-stream;base64,TEVOIDE2OAoxIDEwIDEKMTAgMTUgMgo0MCA'
           '1NSA3CjU1IDg1IDgKODUgOTkgOQoxMDAgMTYwIDEwCg==')
    # LEN 168: ranges expand to runs of their value, uncovered gaps are 'NAN'
    expected = ([1] * 9 + [2] * 6 + ['NAN'] * 24 + [7] * 15 + [8] * 30 +
                [9] * 15 + [10] * 61 + ['NAN'] * 8)
    data, invalid = Loader(raw, 'CUSTOM')
    self.assertFalse(invalid)
    self.assertEqual(data, compress_data(expected))
def withLoader(loadingMsg: str, loader: "Loader", func, *args, **kwargs):
    """Run `func(*args, **kwargs)` while showing a loading spinner to the user.

    Generalized (backward-compatibly) to forward keyword arguments to `func`.

    Args:
        loadingMsg: text shown next to the spinner while `func` runs.
        loader: spinner object exposing `text`, `complete_text`, `start()`
            and `stop()`.
        func: the long-running callable to execute.
        *args, **kwargs: forwarded to `func`.

    Returns:
        Whatever `func` returns.

    NOTE(review): the completion text is assigned in `finally`, so it reads
    "success" even when `func` raises (the exception still propagates) --
    preserved as-is; confirm this is intended.
    """
    try:
        loader.text = loadingMsg
        loader.start()
        return func(*args, **kwargs)
    finally:
        # pad the message to width 30 with dots, e.g. "Saving.....success";
        # shorthand for '{0:<30}{1}'.format(loadingMsg, "success")
        loader.complete_text = "{message:{fill}{align}{width}}success".format(
            message=loadingMsg, fill=".", align="<", width=30
        )
        loader.stop()
def create_player_lists(teams, year):
    '''
    create_player_lists: fills each team's per-date player_list with every
    player id in the boxscore and each team's per-date inactive_list with
    ids of inactive players, i.e. those marked DND (did not dress) or
    NWT (not with team) in the COMMENT column.
    '''
    df_bs = Loader.load_unsorted_boxscore(year)

    def _collect(frame, list_name):
        # append each row's PLAYER_ID onto the team's per-date list
        frame.apply(
            (lambda row: getattr(teams[row['TEAM_ID']], list_name)
             [row['Date']].append(row['PLAYER_ID'])), axis=1)

    dnd_mask = df_bs['COMMENT'].str.contains('DND', na=False)
    without_dnd = df_bs[~dnd_mask]
    nwt_rows = without_dnd[
        without_dnd['COMMENT'].str.contains('NWT', na=False)]

    _collect(df_bs, 'player_list')          # everyone in the boxscore
    _collect(df_bs[dnd_mask], 'inactive_list')  # DND players
    _collect(nwt_rows, 'inactive_list')     # NWT players
def __load(self, project_name: str) -> None:
    """Load the named project's data into this instance's private fields."""
    # delegate parsing to the generic Loader and unpack its 4-tuple
    loaded = Loader(project_name).load()
    self.__cost, self.__profit, self.__dependencies, self.__requests = loaded
# NOTE(review): this appears to be the body of a work-queue loop over
# `to_process` (Python 2 code: print statements, has_key, file()); the
# enclosing loop header is not visible in this chunk.
# Pop the next catalog path and load its JSON description file.
relpath = to_process.pop(0)
desc = os.path.join(DATA_ROOT, relpath[1:], DESCRIPTION_FILE )
print "description file %s" % desc
description = json.loads(file(desc).read())
path, slug = os.path.dirname(relpath), os.path.basename(relpath)
print ">>>>>>>",path,slug,description
# Register this catalog node in the DB.
DBLoader.new_item(path,slug,description)
if description.has_key('subcatalogs'):
    # Catalog node: queue every subcatalog for later processing.
    to_process.extend( [ os.path.join( relpath, x) for x in description['subcatalogs'] ] )
elif description.has_key('datafile'):
    # Leaf node: import its data file, but only if its stat signature
    # changed since the last recorded run.
    filename = os.path.join( DATA_ROOT, relpath[1:], description['datafile'] )
    filename = str(filename)
    if calc_stat(filename) != saved_stat.get(filename):
        fields = description['fields']
        loader = Loader.get_loader_for_filename(filename,fields)
        if loader != None:
            print "Processing %s, %s" % (relpath, description['datafile'] )
            # Replace the collection wholesale, then record the new stat
            # so an unchanged file is skipped next time.
            DBLoader.del_collection(relpath)
            for slug,rec in loader.get_processed_rows():
                DBLoader.new_item(relpath,slug,rec)
            saved_stat[filename] = calc_stat(filename)