def __init__(self, rand_seed, display=False, no_op_max=27, testing=False,
             show_trades=None):
    #self.ale = ALEInterface()
    np.random.seed(rand_seed)
    self._no_op_max = no_op_max
    self.sequence_length = SEQUENCE_LENGTH
    features_list = FEATURES_LIST
    self.features_length = len(FEATURES_LIST)
    path = STORE_PATH
    training_days = TRAINING_DAYS
    testing_days = TESTING_DAYS

    # Load the data
    training_store = DataStore(training_days=training_days,
                               features_list=features_list,
                               sequence_length=self.sequence_length)
    if testing:
        print("Set up for testing")
        testing_store = DataStore(training_days=training_days,
                                  testing_days=testing_days,
                                  features_list=features_list,
                                  sequence_length=self.sequence_length,
                                  mean=training_store.mean,
                                  std=training_store.std)
        self.environment = Trading(data_store=testing_store,
                                   sequence_length=self.sequence_length,
                                   features_length=self.features_length,
                                   testing=testing,
                                   show_trades=show_trades)
    else:
        self.environment = Trading(data_store=training_store,
                                   sequence_length=self.sequence_length,
                                   features_length=self.features_length,
                                   testing=testing,
                                   show_trades=show_trades)
    self.old_x = 0.

    # collect minimal action set
    #self.real_actions = self.ale.getMinimalActionSet()
    self.real_actions = [0, 1, 2]

    # height=210, width=160
    self.reset()
def __init__(self, str_id, df=None, rematch=False):
    self.df = df
    self.str_id = str_id
    self.initialize()
    self.ds = DataStore()
    if rematch:
        self.ds.rematch = True
def main():
    if len(sys.argv) > 1:
        if sys.argv[1] == "check":
            idl_path = parse(sys.argv[2:], 'i:', ["idl="])[0]
            # TODO: call the API
            dds = DDSConnector(idl_path)
            if dds.IsIDLValid():
                print("valid")
            else:
                print("invalid")
        elif sys.argv[1] == "run":
            config = configparser.ConfigParser()
            config.read_file(open(parse(sys.argv[2:], 'c:', ["config="])[0]))
            if config['common']['converter'] == 'dds':
                if config['dds']['mode'] == 'subscriber':
                    dtStore = DataStore(db_name=config['common']['package_id'])
                    dtStore.AddDataToCollection(
                        "file_idl",
                        json_obj={
                            "id": dtStore.AddFileToDB(config['dds']['file_idl'])
                        })
                    dtStore.AddDataToCollection(
                        "network_config",
                        json_obj={
                            "id": dtStore.AddFileToDB(
                                config['dds']['network_config'])
                        })
                    ckan = CKANConnector(config['common']['ckan_sever'],
                                         config['common']['api_token'],
                                         config['common']['resource_id'],
                                         config['common']['package_id'])
                    ckan.CreateResource(config['dds']['file_idl'],
                                        "The IDL file")
                    ckan.CreateResource(config['dds']['network_config'],
                                        "The network config file")
                    x = threading.Thread(target=ReceiveStreamData,
                                         args=[dtStore, ckan],
                                         daemon=True)
                    x.start()
                dds = DDSConnector(config['dds']['file_idl'],
                                   config['dds']['topic_name'],
                                   config['dds']['network_config'],
                                   config['dds']['mode'])
                dds.Build()
                dds.Run()
        elif sys.argv[1] == "publish_example":
            DDSConnector().PublishExample()
        else:
            print(sys.argv)
            print(USAGE)
    else:
        print(sys.argv)
        print(USAGE)
def __init__(self, rand_seed, display=False, no_op_max=27, testing=False,
             show_trades=None):
    np.random.seed(rand_seed)
    self._no_op_max = no_op_max
    self.sequence_length = SEQUENCE_LENGTH
    features_list = FEATURES_LIST
    self.features_length = len(FEATURES_LIST)
    path = STORE_PATH
    training_days = TRAINING_DAYS
    testing_days = TESTING_DAYS

    # Load the data
    training_store = DataStore(training_days=training_days,
                               sequence_length=self.sequence_length)
    if testing:
        print("Set up for testing")
        testing_store = DataStore(training_days=training_days,
                                  testing_days=testing_days,
                                  sequence_length=self.sequence_length,
                                  mean=training_store.mean,
                                  std=training_store.std)
        self.environment = Trading(data_store=testing_store,
                                   sequence_length=self.sequence_length,
                                   features_length=self.features_length,
                                   testing=testing,
                                   show_trades=show_trades)
    else:
        self.environment = Trading(data_store=training_store,
                                   sequence_length=self.sequence_length,
                                   features_length=self.features_length,
                                   testing=testing,
                                   show_trades=show_trades)
    self.old_x = 0.
    self.reset()
def randomize(graph):
    i = 0
    # Snapshot the node and neighbor lists before mutating the graph, so
    # the iteration is not invalidated by the add/remove operations below.
    for n in list(graph.nodes()):
        i += 1
        randomloc = float(random.randint(0, 999999999)) / 1000000000
        newnode = (randomloc, )
        neighbors = list(graph.neighbors(n))
        graph.add_node(newnode, id=i, ds=DataStore(100000))
        for neighbor in neighbors:
            graph.add_edge(newnode, neighbor)
        graph.remove_node(n)
def __init__(self):
    self.main_win = QMainWindow()
    self.ui = Ui_MainWindow()
    self.ds = DataStore()
    self.ui.setupUi(self.main_win)
    self.ui.stackedWidget.setCurrentWidget(self.ui.home)
    self.ui.bmr_batton.clicked.connect(self.showBMR)
    self.ui.bmi_button.clicked.connect(self.showBMI)
    self.ui.bmr_calculate_button.clicked.connect(self.calcBMR)
    self.ui.bmi_calculate_button.clicked.connect(self.calcBMI)
def __init__(self):
    # Instantiate the other functional modules
    self.urlgen = URLGenerator()
    self.downloader = Downloader()
    self.parser = DataParser()
    self.datastore = DataStore()
    # Three URL sets: not yet downloaded, currently downloading, downloaded
    self.beforedownloadset = set()
    self.beingdownloadset = set()
    self.afterdownloadset = set()
    # Seed URL
    self.seedurl = 'https://so.gushiwen.org/authors/'
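# A minimal sketch of how these pieces might be wired into a crawl loop.
# The method names on Downloader/DataParser/DataStore (download, parse,
# save) are assumptions for illustration only; just the three URL sets
# and the seed URL come from the class above.
def crawl(self, max_pages=100):
    self.beforedownloadset.add(self.seedurl)
    while self.beforedownloadset and len(self.afterdownloadset) < max_pages:
        url = self.beforedownloadset.pop()
        self.beingdownloadset.add(url)
        html = self.downloader.download(url)      # hypothetical API
        data, new_urls = self.parser.parse(html)  # hypothetical API
        self.datastore.save(data)                 # hypothetical API
        self.beingdownloadset.discard(url)
        self.afterdownloadset.add(url)
        # Only enqueue URLs not already seen in any of the three sets.
        for u in new_urls:
            if (u not in self.afterdownloadset
                    and u not in self.beingdownloadset):
                self.beforedownloadset.add(u)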
def __init__(self):
    data_sources = DataSources(authors="data/author.csv",
                               books="data/book.csv",
                               books_to_subjects="data/book2subjects.csv",
                               subjects="data/subject.csv",
                               sub_subjects="data/sub_subject.csv",
                               sub_sub_subjects="data/sub_sub_subject.csv")
    self._data_store = DataStore(data_sources)
    self._excel = ExcelReportGenerator(self._data_store)
    self._controller = Controller(self._data_store, self._excel)
    self._window = AppWindow(self._controller)
    # NOTE: self._app (the Qt application object) is expected to have
    # been created before this constructor runs.
    sys.exit(self._app.exec_())
def hungarian_main(self):
    global ds
    ds = DataStore()
    mentors, mentees = self.create_assignment_matrix()
    self.matrix_size = len(self.score_matrix)
    aux_row_vars = self.init_row_aux_vars()
    aux_col_vars = self.init_col_aux_vars()
    # Master method for the Hungarian algorithm / job assignment
    self.hungarian_algorithm(aux_row_vars, aux_col_vars, 1)
    matching_list = self.format_matching_result(mentors, mentees)
    # ds.save_matches(matching_list)
    show_matches(matching_list)
    return matching_list
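# For reference, the same assignment problem can be solved with SciPy's
# Hungarian-algorithm implementation. This is an independent sketch with
# toy data, not the project's own hungarian_algorithm; it assumes the
# score matrix holds scores to maximize, hence the negation to turn it
# into a cost matrix.
import numpy as np
from scipy.optimize import linear_sum_assignment

score_matrix = np.array([[8, 4, 7],
                         [5, 2, 3],
                         [9, 6, 7]])
rows, cols = linear_sum_assignment(-score_matrix)  # maximize total score
for r, c in zip(rows, cols):
    print("mentor", r, "-> mentee", c, "score", score_matrix[r, c])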
def choose_file_rating(folder="data/match_rating/"):
    ds = DataStore()
    files = os.listdir(folder)
    files.sort(key=lambda f: int(re.sub(r'\D', '', f)))
    print("Welcome to the analyzer. Which file would you like to analyze?")
    for index, file in enumerate(files):
        print("({}) {}".format(index, file))
    file_index = input(
        "Write the number to choose the file. Write anything else to cancel\n")
    # Accept any listed index (including 0); non-numeric input cancels.
    if file_index.isdigit() and int(file_index) in range(len(files)):
        print("File {} chosen".format(files[int(file_index)]))
        results = ds.load_rating_file(files[int(file_index)])
        return results
    else:
        print("Command not recognized. Returning.")
        return
def calculate_all_ratings(folder="data/match_rating/"):
    ds = DataStore()
    files = os.listdir(folder)
    files.sort(key=lambda f: int(re.sub(r'\D', '', f)))
    for file in files:
        print("File {} chosen".format(file))
        results = ds.load_rating_file(file)
        # Tally the ratings: 1 = good match, 0 = bad match, -1 = maybe
        count_maybe = 0
        count_yes = 0
        count_no = 0
        for i, result in results.iterrows():
            rating = result["match_rating"]
            if rating == 0:
                count_no += 1
            elif rating == 1:
                count_yes += 1
            elif rating == -1:
                count_maybe += 1
        # Append one summary row per file to the result CSV
        data = {"bad": count_no, "good": count_yes, "maybe": count_maybe}
        output_path = "data/rating_result.csv"
        now_id = file.replace("rating_", "").replace(".csv", "")
        data["id"] = now_id
        if os.path.exists(output_path):
            df = pd.read_csv(output_path)
            df2 = pd.DataFrame([data])
            # DataFrame.append is deprecated; concatenate instead
            df3 = pd.concat([df, df2], ignore_index=True)
            df3.to_csv(output_path, index=False, mode='w')
        else:
            df = pd.DataFrame([data])
            df.to_csv(output_path, index=False, mode='w')
        print("Good: {}\nBad: {}\nMaybe: {}".format(count_yes, count_no,
                                                    count_maybe))
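# The hand-rolled tally above can be expressed directly with pandas. A
# self-contained sketch with toy data, assuming the same match_rating
# column with values 1 / 0 / -1:
import pandas as pd

results = pd.DataFrame({"match_rating": [1, 1, 0, -1, 1]})
counts = results["match_rating"].value_counts()
data = {"good": int(counts.get(1, 0)),
        "bad": int(counts.get(0, 0)),
        "maybe": int(counts.get(-1, 0))}
print(data)  # {'good': 3, 'bad': 1, 'maybe': 1}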
def choose_file(folder="data/matching_result/"):
    ds = DataStore()
    mentors, mentees, person_dict = ds.load_data("initial_data_{}".format(
        global_vars.ROUND))
    files = os.listdir(folder)
    files.sort(key=lambda f: int(re.sub(r'\D', '', f)))
    print("Welcome to the analyzer. Which file would you like to analyze?")
    for index, file in enumerate(files):
        print("({}) {}".format(index, file))
    file_index = input(
        "Write the number to choose the file. Write anything else to cancel\n")
    # Accept any listed index (including 0); non-numeric input cancels.
    if file_index.isdigit() and int(file_index) in range(len(files)):
        print("File {} chosen".format(files[int(file_index)]))
        results = ds.load_result_file(files[int(file_index)])
        str_id = files[int(file_index)].replace("matching_result_",
                                                "").replace(".csv", "")
    else:
        print("Command not recognized. Returning.")
        return
    return person_dict, results, str_id
def setUp(self):
    self.ds = DataStore(location)
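# A minimal sketch of a full test case around this fixture, assuming
# `location` is a module-level path to a valid store. The smoke test
# below is hypothetical; the real suite's assertions are not shown here.
import unittest

class DataStoreTest(unittest.TestCase):
    def setUp(self):
        self.ds = DataStore(location)

    def test_construction(self):
        # The fixture should at least yield a store object.
        self.assertIsNotNone(self.ds)

if __name__ == "__main__":
    unittest.main()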
import pandas as pd
import numpy as np
from datetime import datetime
import _pickle as pk
import json

from DataStore import DataStore
# from FHMM import FHMM
# from HMM import HMM, HMM_MAD
from Preprocessing import Appliance, train_test_split, create_matrix

#with open('aws_keys.json') as f:
#    data = json.load(f)
#    ACCESS_KEY = data['access-key']
#    SECRET_ACCESS_KEY = data['secret-access-key']

DStore = DataStore('ngalvbucket1', 'house_1')
all_channels = [1, 12, 5, 3, 10, 6, 9, 43, 7, 8]
select_channels = [12, 5, 3, 10, 6, 9, 43, 8]
# select_channels = [12, 5, 6]
DStore.create_store(all_channels)
#top_10 = DStore.select_top_k(10, '2013-08-01', '2013-09-01')
combined = DStore.create_combined_df('2013-06-01 00:00:00',
                                     '2013-10-31 23:59:59',
                                     select_channels=select_channels,
                                     freq='1Min')

with open('combined.pkl', 'wb') as f:
    pk.dump(combined, f)
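# Mirror of the dump above: a later session can restore the combined
# frame without rebuilding the store. A sketch assuming combined.pkl
# was written as shown.
import _pickle as pk

with open('combined.pkl', 'rb') as f:
    combined = pk.load(f)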
def __init__(self):
    tok = "8051bf89-e115-4147-8e5a-ff9d6f39f0d7"
    url = "hermes.wha.la"
    headers = {
        'Content-type': 'application/json',
        'Accept': 'application/json',
    }

    def post(value):
        # POST a command to the game server and decode the JSON reply.
        conn = httplib.HTTPConnection(url, 80)
        conn.request('POST', '/api/hermes', json.dumps(value), headers)
        response = conn.getresponse()
        ret = json.loads(response.read())
        conn.close()
        return ret

    tiers = ('WEB', 'JAVA', 'DB')
    regions = ('NA', 'EU', 'AP')

    ret = post({"Command": "INIT", "Token": tok})

    DS = DataStore(
        ret["ServerState"]["ServerTiers"]["DB"]["ServerStartTurnTime"])
    ctrl = controller()
    # myFile = open('output.txt', 'w')
    # myFile.write(str(ret))
    # myFile.close()
    looptime = ret['ServerState']['ServerTiers']['DB']['ServerStartTurnTime']
    coef = (ret["ServerState"]["CostPerServer"] /
            ret["ServerState"]["ProfitConstant"])
    WebRefresh = ret["ServerState"]["ServerTiers"]["WEB"][
        "ServerStartTurnTime"]
    JavaRefresh = ret["ServerState"]["ServerTiers"]["JAVA"][
        "ServerStartTurnTime"]
    DBRefresh = ret["ServerState"]["ServerTiers"]["DB"][
        "ServerStartTurnTime"]
    DS.setCoef(coef)
    infra = False
    p = None
    research = None
    didGrid = False
    progression = [None, "GRID", "GREEN", None]

    #while ret['ServerState']['TurnNo'] < 10080:
    while True:
        ret = post({"Command": "PLAY", "Token": tok})
        turnnumber = ret['ServerState']['TurnNo']

        # Get the per-region demand (WEB tier) and the current node
        # counts for every tier/region from the server.
        demand = [
            ret['ServerState']['ServerTiers']['WEB']['ServerRegions'][r]
            ['NoOfTransactionsInput'] for r in regions
        ]
        config = [
            ret['ServerState']['ServerTiers'][t]['ServerRegions'][r]
            ['NodeCount'] for t in tiers for r in regions
        ]

        # if turnnumber % (DBRefresh * 2):
        #     DS.resetDemand(demand)
        # else:
        #     DS.avgDemand(demand)
        DS.runningDemand(demand)
        DS.setConfig(config)
        coef = (ret["ServerState"]["CostPerServer"] /
                ret["ServerState"]["ProfitConstant"])
        DS.setCoef(coef)

        lastProfit = ret["ServerState"]["ProfitEarned"]
        GridCost = ret["ServerState"]["ResearchUpgradeLevels"][1][
            "UpgradeCost"]
        GridTurns = ret["ServerState"]["ResearchUpgradeLevels"][1][
            "NoOfTurnsRequired"]
        GridTotalCost = GridTurns * GridCost

        # Kick off the GRID research once it is affordable.
        if (ret['ServerState']['TurnNo'] <= 2000
                and ret["ServerState"]["ProfitAccumulated"] >= GridTotalCost / 8
                and GridCost < (lastProfit - (lastProfit / 3))):
            didGrid = True
            try:
                if ret["ServerState"]["ResearchUpgradeState"]["GRID"] == -1:
                    #research = "GREEN"
                    pass
            except KeyError:
                research = "GRID"
        #p = research

        # Calculate free space. Target roughly 97% of capacity per tier:
        # the 100% limit (level 0) weighted twice against the 90% limit
        # (level 1). Variants tried before: plain mean of levels 0 and 1,
        # ~93% (levels 0+1+1 over 3), 100% (level 0 only), 90% (level 1
        # only).
        def tier_capacity(tier):
            levels = ret['ServerState']['ServerTiers'][tier][
                'ServerPerformance']['CapactityLevels']
            return (2 * levels[0]['UpperLimit'] + levels[1]['UpperLimit']) / 3

        capacity = [tier_capacity(t) for t in tiers]
        DS.setCapacity(capacity)

        # Recompute each tier's node counts on its own refresh cadence.
        webchanges = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        javachanges = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        dbchanges = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        if turnnumber % WebRefresh == 0:
            webchanges = ctrl.calcWeb(DS, WebRefresh * 1.5)
        if turnnumber % JavaRefresh == 0:
            javachanges = ctrl.calcJava(DS, JavaRefresh * 1.5)
        if turnnumber % DBRefresh == 0:
            dbchanges = ctrl.calcDB(DS, DBRefresh * 1.5)

        # Merge into one list to know what servers to bring up/down:
        # WEB in slots 0-2, JAVA in 3-5, DB in 6-8.
        changes = webchanges[0:3] + javachanges[3:6] + dbchanges[6:9]

        def region_counts(offset):
            return {
                "ServerRegions": {
                    "NA": {"NodeCount": changes[offset]},
                    "EU": {"NodeCount": changes[offset + 1]},
                    "AP": {"NodeCount": changes[offset + 2]},
                }
            }

        jsonchange = {
            "Servers": {
                "WEB": region_counts(0),
                "JAVA": region_counts(3),
                "DB": region_counts(6),
            },
            "UpgradeInfraStructure": infra,
            "UpgradeToResearch": research,
        }
        if research != None:
            research = None

        ret2 = post({
            "Command": "CHNG",
            "Token": tok,
            "ChangeRequest": jsonchange
        })

        # Print stuff
        print 'Turn: ' + str(ret['ServerState']['TurnNo'])
        print 'Total Profit: $' + str(ret["ServerState"]["ProfitAccumulated"])
        # print "WEB capacity: " + str(capacity[0])
        # print "JAVA capacity: " + str(capacity[1])
        # print "DB capacity: " + str(capacity[2])
        # print "ServerCost: " + str(ret["ServerState"]["CostPerServer"])
        #print didGrid
        #if didGrid:
        try:
            inf = ret["ServerState"]["InfraStructureUpgradeState"]["Value"]
            if inf >= 0:
                #print "INFRA value: " + str(inf)
                pass
        except KeyError:
            pass
        try:
            grid = str(ret["ServerState"]["ResearchUpgradeState"]["GRID"])
            if grid != "-1":
                print "---Researching: GRID---\nTurns Left: " + grid
                if 1430 <= int(grid) <= 1441:
                    #infra = True
                    pass
                else:
                    infra = False
            else:
                print "GRID UPGRADE COMPLETE"
        except KeyError:
            pass
        try:
            green = str(ret["ServerState"]["ResearchUpgradeState"]["GREEN"])
            if green != "-1":
                print "---Researching: GREEN---\nTurns Left: " + green
            else:
                print "GREEN UPGRADE COMPLETE"
        except KeyError:
            pass
        print demand
        print ('    ' + str(config[0]) + ' ' + str(config[1]) + ' ' +
               str(config[2]) + '\n    ' + str(config[3]) + ' ' +
               str(config[4]) + ' ' + str(config[5]) + '\n    ' +
               str(config[6]) + ' ' + str(config[7]) + ' ' + str(config[8]))
        print ''
import matplotlib.pyplot as plt
import numpy as np

from DataStore import DataStore
from ModelRes import ModelRes

data = DataStore()
# data.plotTrain()
print(data.X_train.shape)

# Fixed set of resolutions available
resolutions = (28, 14, 7)

model28 = ModelRes(28, data.train28x28, data.y_train, data.test28x28,
                   data.y_test)
model28.train()
model14 = ModelRes(14, data.train14x14, data.y_train, data.test14x14,
                   data.y_test)
model14.train()
model7 = ModelRes(7, data.train7x7, data.y_train, data.test7x7, data.y_test)
model7.train()

# Evaluate digits using softmax probability
model7x7 = model7.model
model14x14 = model14.model
model28x28 = model28.model
# uint8 wraps -1 to 255, so 255 marks "not yet classified".
found = np.zeros(len(data.y_test), dtype=np.uint8) - 1
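# A possible coarse-to-fine evaluation pass (a sketch, not the original
# script's logic): assume each .model is a Keras-style classifier whose
# predict() returns per-class softmax probabilities, and escalate to a
# higher resolution only when the cheaper model is unsure. The 0.95
# threshold is an assumption.
CONFIDENCE = 0.95
for model, x_test in ((model7x7, data.test7x7),
                      (model14x14, data.test14x14),
                      (model28x28, data.test28x28)):
    probs = model.predict(x_test)
    # Claim only still-unclassified samples (sentinel 255) whose top
    # softmax probability clears the threshold.
    take = (found == 255) & (probs.max(axis=1) >= CONFIDENCE)
    found[take] = probs[take].argmax(axis=1)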
        id=1234,
        gender="m",
        linkedin="https://www.linkedin.com/in/christian-bøgelund-92747799/",
        study_line="Cand. Alt",
        university="DTU",
        content=LOREM
    )
    mentees.append(mentee)
    mentor = Mentor(
        name="En helt anden mentor, der ikke er Nana",
        age=0,
        id=2345,
        gender="m",
        linkedin="https://www.linkedin.com/in/christian-bøgelund-92747799/",
        study_line="Cand. Alt",
        company="Microsoft",
        university="",
        company_type="",
        position="CEO",
        content=LOREM
    )
    mentors.append(mentor)
"""

# mentees, mentors = zip(*hungarian_main())
# show_matches(mentees, mentors)
ds = DataStore()
def __init__(self): """Default Constructor to initialize data/persistence layer and fetch records from csv.""" self.data_store = DataStore() self.list_records = self.data_store.get_all_records()
def re_match():
    # get global list
    TOGGLE_CMDLINE = 1
    ds = DataStore()
    mentors, mentees, person_dict = ds.load_data("initial_data_{}".format(
        global_vars.ROUND))
    # get rating list
    ratings = choose_file_rating()
    solo_mentors = []
    solo_mentees = []
    # Collect everyone whose previous match was not rated 1 (accepted).
    for mentor, mentee in zip(mentors, mentees):
        mentee_value = ratings.query('mentee_id == "' + str(mentee.id) + '"')
        mentor_value = ratings.query('mentor_id == "' + str(mentor.id) + '"')
        if not mentee_value.empty:
            mentee_index = mentee_value.index
            is_matched = ratings["match_rating"][mentee_index].values[0]
            if is_matched != 1:
                solo_mentees.append(mentee)
        if not mentor_value.empty:
            mentor_index = mentor_value.index
            is_matched = ratings["match_rating"][mentor_index].values[0]
            if is_matched != 1:
                solo_mentors.append(mentor)
    """
    for i, rating in ratings.iterrows():
        if rating["match_rating"] != 1:
            if rating["mentee_id"] in person_dict:
                solo_mentees.append(person_dict[rating["mentee_id"]])
            if rating["mentor_id"] in person_dict:
                solo_mentors.append(person_dict[rating["mentor_id"]])
    """
    print(len(solo_mentees))
    print(len(solo_mentors))
    # create a new MatchGenerator over the unmatched people only
    mg = MatchGenerator(mentors=solo_mentors,
                        mentees=solo_mentees,
                        person_dict=person_dict)
    mg.set_rematch(True)
    if TOGGLE_CMDLINE:
        score_function = get_score_function_from_user()
        if score_function is None:
            score_function = ScoreFunction5
    else:
        score_function = ScoreFunction5
    mg.calculate_score_object(score_function)
    filename = mg.run_hungarian()
    mg.save_hungarian_result()
    mg.save_matching_configuration()  # should we save it in a different way?
    str_id = mg.get_str_id()
    re_ratings = ds.load_rating_file(filename, prefix="")
    matching_list = []
    for _, row in re_ratings.iterrows():
        mentor = person_dict[row["mentor_id"]]
        mentee = person_dict[row["mentee_id"]]
        score = row["score"]
        matching_list.append((mentor, mentee, score))
    show_matches(matching_list, str_id)
def welcomeMessage():
    print("")
    print("")
    print(
        "Welcome to Movielens. An interactive movies database built with Apache Spark."
    )

############################################################################
printLicense()
welcomeMessage()

############################################################################
dataStore = DataStore(ratingsFile, moviesFile)
dataStore.loadData()

############################################################################
class Prompt(Cmd):
    def do_exit(self, args):
        print("Quitting.")
        raise SystemExit

    def do_quit(self, args):
        print("Quitting.")
        raise SystemExit

    # ----------------------------------------------------------------------
def __init__(self):
    tok = "8051bf89-e115-4147-8e5a-ff9d6f39f0d7"
    host = '107.20.243.77'
    headers = {
        'Content-type': 'application/json',
        'Accept': 'application/json',
    }

    def post(value):
        # POST a command to the game server and decode the JSON reply.
        conn = httplib.HTTPConnection(host, 80)
        conn.request('POST', '/api/hermes', json.dumps(value), headers)
        response = conn.getresponse()
        ret = json.loads(response.read())
        conn.close()
        return ret

    tiers = ('WEB', 'JAVA', 'DB')
    regions = ('NA', 'EU', 'AP')

    ret = post({"Command": "INIT", "Token": tok})

    DS = DataStore()
    ctrl = controller()
    myFile = open('output.txt', 'w')
    myFile.write(str(ret))
    myFile.close()
    looptime = ret['ServerState']['ServerTiers']['DB']['ServerStartTurnTime']
    coef = (ret["ServerState"]["CostPerServer"] /
            ret["ServerState"]["ProfitConstant"])
    DS.setCoef(coef)
    infra = False
    p = None
    research = None
    didGrid = False
    progression = [None, "GRID", "GREEN", None]

    #while ret['ServerState']['TurnNo'] < 10080:
    while True:
        # Play through one DB provisioning window, feeding the demand
        # average with the DB tier's per-region transaction input.
        for x in range(looptime):
            ret = post({"Command": "PLAY", "Token": tok})
            demand = [
                ret['ServerState']['ServerTiers']['DB']['ServerRegions'][r]
                ['NoOfTransactionsInput'] for r in regions
            ]
            config = [
                ret['ServerState']['ServerTiers'][t]['ServerRegions'][r]
                ['NodeCount'] for t in tiers for r in regions
            ]
            DS.avgDemand(demand)
            DS.setConfig(config)

        lastProfit = ret["ServerState"]["ProfitEarned"]
        GridCost = ret["ServerState"]["ResearchUpgradeLevels"][1][
            "UpgradeCost"]
        GridTurns = ret["ServerState"]["ResearchUpgradeLevels"][1][
            "NoOfTurnsRequired"]
        GridTotalCost = GridTurns * GridCost
        if (ret['ServerState']['TurnNo'] <= 9000
                and ret["ServerState"]["ProfitAccumulated"] >= GridTotalCost / 10
                and GridCost < (lastProfit - (lastProfit / 3))):
            didGrid = True
            try:
                if ret["ServerState"]["ResearchUpgradeState"]["GRID"] == -1:
                    #research = "GREEN"
                    pass
            except KeyError:
                research = "GRID"
        #p = research

        # Calculate free space. Target roughly 97% of capacity per tier:
        # the 100% limit (level 0) weighted twice against the 90% limit
        # (level 1). Variants tried before: plain mean of levels 0 and 1,
        # ~93% (levels 0+1+1 over 3), 100% (level 0 only), 90% (level 1
        # only).
        def tier_capacity(tier):
            levels = ret['ServerState']['ServerTiers'][tier][
                'ServerPerformance']['CapactityLevels']
            return (2 * levels[0]['UpperLimit'] + levels[1]['UpperLimit']) / 3

        capacity = [tier_capacity(t) for t in tiers]
        DS.setCapacity(capacity)

        changes = ctrl.calc(DS)

        def region_counts(offset):
            return {
                "ServerRegions": {
                    "NA": {"NodeCount": changes[offset]},
                    "EU": {"NodeCount": changes[offset + 1]},
                    "AP": {"NodeCount": changes[offset + 2]},
                }
            }

        jsonchange = {
            "Servers": {
                "WEB": region_counts(0),
                "JAVA": region_counts(3),
                "DB": region_counts(6),
            },
            "UpgradeInfraStructure": infra,
            "UpgradeToResearch": research,
        }
        if research != None:
            research = None

        ret = post({
            "Command": "CHNG",
            "Token": tok,
            "ChangeRequest": jsonchange
        })

        # (A commented-out variant here issued a second CHNG request that
        # only set "UpgradeToResearch": "Green" once research unlocked.)

        # Play one more turn, then reset the demand window.
        ret = post({"Command": "PLAY", "Token": tok})
        demand = [
            ret['ServerState']['ServerTiers']['DB']['ServerRegions'][r]
            ['NoOfTransactionsInput'] for r in regions
        ]
        config = [
            ret['ServerState']['ServerTiers'][t]['ServerRegions'][r]
            ['NodeCount'] for t in tiers for r in regions
        ]
        DS.resetDemand(demand)
        DS.setConfig(config)
        coef = (ret["ServerState"]["CostPerServer"] /
                ret["ServerState"]["ProfitConstant"])
        DS.setCoef(coef)
        # f = open('myfile.txt', 'w')
        # f.write(str(ret))  # python will convert \n to os.linesep
        # f.close()

        print 'Turn: ' + str(ret['ServerState']['TurnNo'])
        print "WEB capacity: " + str(capacity[0])
        print "JAVA capacity: " + str(capacity[1])
        print "DB capacity: " + str(capacity[2])
        print "ServerCost: " + str(ret["ServerState"]["CostPerServer"])
        #print didGrid
        #if didGrid:
        try:
            inf = ret["ServerState"]["InfraStructureUpgradeState"]["Value"]
            if inf >= 0:
                print "INFRA value: " + str(inf)
        except KeyError:
            pass
        try:
            grid = str(ret["ServerState"]["ResearchUpgradeState"]["GRID"])
            if grid != "-1":
                print "---Researching: GRID---\nTurns Left: " + grid
                if 1430 <= int(grid) <= 1441:
                    #infra = True
                    pass
                else:
                    infra = False
            else:
                print "GRID UPGRADE COMPLETE"
        except KeyError:
            pass
        try:
            green = str(ret["ServerState"]["ResearchUpgradeState"]["GREEN"])
            if green != "-1":
                print "---Researching: GREEN---\nTurns Left: " + green
            else:
                print "GREEN UPGRADE COMPLETE"
        except KeyError:
            pass
        print demand
        print ('    ' + str(config[0]) + ' ' + str(config[1]) + ' ' +
               str(config[2]) + '\n    ' + str(config[3]) + ' ' +
               str(config[4]) + ' ' + str(config[5]) + '\n    ' +
               str(config[6]) + ' ' + str(config[7]) + ' ' + str(config[8]))
        print ''
from DataStore import DataStore

# 'U3' is the unmodified column 3. This works only because 3 is in the
# unmodified list (see FDAXDataStore.py, search for unmodified_group).
# features_list is the list of features to extract from the input.
# TODO: the input for the unmodified group, and where bid and ask are
# located, should be added here as well.
#d = DataStore(sequence_length=500, features_list=[1, 2, 'U3', 4, 5, 6, 7],
#              path='./training_data_large/', filenames="/FDAX_*.csv",
#              colgroups=[[1, 4, 6], [5, 7]], unmodified_group=[3, 4, 6],
#              bid_col=4, ask_col=6, training_days=1, testing_days=0,
#              debug=True)
d = DataStore(sequence_length=500, training_days=1, testing_days=0, debug=True)
SC_list = SeparateCollects(path_to_directory_of_ast_files,
                           AGO.file_name[0:12])
print('Separated collect files for two collects: ', SC_list.collect_files)

# Example storing frame stacks, labels, and mean object
# centroids in a .hdf5 file. DataStore compresses the
# .hdf5 file to reduce the required disk space.
name_of_labels_csv = 'dataset_labels.csv'
path_to_directory_of_fits_files = '/home/jermws/Sample Raven Images/FITS'
name_of_hdf5_file_for_storage = 'labeled_datasets.hdf5'
path_to_directory_for_storage = \
    '/home/jermws/PycharmProjects/Machine_Learning_Updated/'

DS = DataStore(name_of_labels_csv, path_to_directory_of_fits_files,
               path_to_directory_of_ast_files, name_of_hdf5_file_for_storage,
               path_to_directory_for_storage)

# Save the list of data set names in the .hdf5 file
# so they can be recalled when it comes time to read
# the data back in.
with open('dataset_names.csv', 'w') as f:
    w = csv.writer(f)
    for row in DS.dataset_names:
        w.writerow([row])

# Read in the data set names so they can be used to
# label the chip stacks.
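# A minimal sketch of that read-back step (assumes the one-column CSV
# written above; this line is not from the original script):
with open('dataset_names.csv', 'r') as f:
    dataset_names = [row[0] for row in csv.reader(f) if row]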
def __init__(
        self,
        config,
        programs=None,
        cohorts=None,
        whitelist=None,
        ds=None,
        indicators_loc=None,
        grades_loc=None,
        histograms_loc=None,
):
    """Object Initialization

    Args:
        config(ReportConfig): A ReportConfig object
        programs(string or list(string)): A list of programs to generate
            indicators for. Defaults to using all programs. If only one
            string is passed in, it will get put into a size-one list.
        cohorts(int or list(int)): A list of cohorts to pull data from.
            Defaults to using all cohorts. If only one cohort is passed
            in, it will get put into a size-one list.
        whitelist(dictionary): A dictionary of lists, keyed by the only
            stuff to parse (e.g. 'course', 'indicator', etc.) and filled
            with the specific values to uniquely parse. Defaults to no
            whitelist. The lists in the dictionaries can be partial
            (i.e. if you pass 'KB' as part of the 'indicator' whitelist,
            it will pull all 'KB' indicators). If the lists contain only
            one string, that string gets put into a size-one list.
        ds(DataStore): A DataStore object. Defaults to generating one
            based on the whitelist entries for 'programs'
        indicators_loc(string): The location of the indicator sheets.
            Defaults to searching an "Indicators" folder in the
            directory above the project (see ReportConfig for more info)
        grades_loc(string): The location of the grades sheets. Defaults
            to searching a "Grades" folder in the directory above the
            project (see ReportConfig for more info)
        histograms_loc(string): The location to store histograms.
            Defaults to using a "Histograms" folder in the directory
            above the project (see ReportConfig for more info)
    """
    logging.info("Start of AutoGenerator initialization")
    self.config = config

    # logging.info("Initializing whitelist")
    self.whitelist = whitelist
    # Ensure that all whitelist entries are lists (if it exists, that is)
    if self.whitelist:
        logging.debug("Checking whitelist for list validity")
        for entry in self.whitelist.keys():
            self.whitelist[entry] = _check_list(self.whitelist[entry])

    # logging.info("Initializing cohorts list")
    self.cohorts = cohorts
    # Ensure that all cohorts entries are lists (if it exists, that is)
    if self.cohorts:
        logging.debug("Checking cohorts variable for list validity")
        self.cohorts = _check_list(self.cohorts)

    # logging.info("Initializing program list")
    self.programs = programs
    # Ensure list validity of programs
    if self.programs:
        logging.debug("Checking programs for list validity")
        self.programs = _check_list(self.programs)
    # Use all programs if none were provided
    if not self.programs:
        logging.debug(
            "Programs not passed as parameter. Using list of all programs")
        self.programs = globals.all_programs

    # If any of the file location parameters were passed in, overwrite
    # what ReportConfig has
    if indicators_loc:
        self.config.indicators_loc = indicators_loc
    if grades_loc:
        self.config.grades_loc = grades_loc
    if histograms_loc:
        self.config.histograms_loc = histograms_loc
    logging.debug("Indicators location is %s", self.config.indicators_loc)
    logging.debug("Grades location is %s", self.config.grades_loc)
    logging.debug("Histograms location is %s", self.config.histograms_loc)

    # Check to see if a DataStore was passed to init, create one if not
    if not ds:
        logging.debug(
            "No DataStore object was passed to init; creating one now")
        self.ds = DataStore(programs=self.programs,
                            indicators_loc=self.config.indicators_loc,
                            grades_loc=self.config.grades_loc)
    else:
        self.ds = ds

    # Make sure that the histograms folder exists
    logging.info("Setting up output directories (Missing Data & Histograms)")
    os.makedirs(os.path.dirname(__file__) + '/../Missing Data',
                exist_ok=True)
    os.makedirs(self.config.histograms_loc, exist_ok=True)
    logging.info("AutoGenerator initialization done!")
def __init__(self):
    self.data_store = DataStore()
    self.url_manager = UrlManager(self.data_store)
    self.strategy_container = ParserStrategyContainer()
    self.downloader = HtmlDownloader()
""" # Temp import for testing import sys import os sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + '/..') from Report import Report from ReportConfig import ReportConfig from DataStore import DataStore import pandas as pd import unsorted import datetime print("Loading ENCV indicators into DataStore") ds = DataStore(['ENCV']) print("Querying KB.1-D") ds.query_indicators(program='ENCV', indicator='KB.1', level='D') config = ReportConfig() ''' print("Removing the NDA threshold") config.NDA_threshold = 0.0 ''' for row in ds.last_query.iterrows(): print("Processing course", row[1]["Course #"]) indicator_data, bins = unsorted.parse_row(row[1], config.header_attribs) indicator_data['Program'] = 'ENCV'
def __init__(self):
    # Instantiate the sibling helper classes
    self.parser = DataParser()
    self.datastore = DataStore()
#: rough mean plus two sigma deviation for two tries (via bruteforcemindist.py)
m2s2 = .022
#: rough median plus two sigma deviation for two tries (via bruteforcemindist.py)
medtwosigma2 = 0.02
#: rough median plus two sigma deviation for four tries (via bruteforcemindist.py)
medtwosigma4 = 0.0125

small_world_network = navigable_small_world_graph(networksize, 4, 2, 1,
                                                  1).to_undirected()

# Change the node locations to lie in [0..1).
g = small_world_network
for n in g.nodes()[:]:
    newnode = (float(n[0]) / networksize, )
    neighbors = g.neighbors(n)
    g.add_node(newnode, id=n[0], ds=DataStore(100000))
    for neighbor in neighbors:
        g.add_edge(newnode, neighbor)
    g.remove_node(n)

random_network = navigable_small_world_graph(networksize, 4, 2, 1,
                                             1).to_undirected()
randomize(random_network)

clean_swap_network = random_network.copy()
attacked_network = random_network.copy()
sandberg_solution_network = random_network.copy()
sandberg_solution_network_minus = random_network.copy()
sandberg_solution_network_mean2 = random_network.copy()
sandberg_solution_network_median = random_network.copy()