def _load_models(self):
    """Load every Keras model and helper this agent needs onto the instance.

    Heavy, blocking work — intended to be run on a background thread so the
    agent can keep answering (with a no-op) while loading is in flight.
    """
    # One policy network per game phase (early / mid / late game).
    self.N = [Network(path) for path in ("n_60.h5", "n_180.h5", "n_800.h5")]
    # Communication model, loaded for inference only (no training config).
    self.CM = tf.keras.models.load_model("cn.h5", compile=False)
    # NOTE(review): TF1-era private API; presumably pre-builds the predict
    # graph so first inference is fast — confirm against the TF version used.
    self.CM._make_predict_function()
    self.FE = FeatureEngineer(self.CM)
    # MSGS[0] = my last message, MSGS[1] = teammate's last message.
    self.MSGS = [(0, 0), (0, 0)]
class LNAgent(BaseAgent):
    """Pommerman agent driven by a shared LN controller.

    Two LNAgent instances (a_id 1 and 2) share one controller obtained via
    ``get_controller``; each asks for its own prediction per step and tracks
    average decision time for reporting at shutdown.
    """

    def __init__(self, a_id, n_id, LN):
        super(LNAgent, self).__init__(characters.Bomber)
        self.feature_engineer = FeatureEngineer()  # per-episode observation encoder
        self.n_id = n_id
        self.LN_controller = get_controller(self.n_id, LN)
        self.a_id = a_id
        self._steps = 0  # decisions taken (for timing stats)
        self.tim = 0     # cumulative decision time, seconds

    def act(self, observation, action_space):
        """Encode the observation and return the controller's action (clamped to 0..5)."""
        tim = time.time()
        self.feature_engineer.update_features(observation)
        features = self.feature_engineer.get_features()
        # NOTE(review): any a_id other than 1 or 2 leaves `y` unbound and
        # raises NameError below — confirm only ids 1/2 are ever constructed.
        if self.a_id == 1:
            y = self.LN_controller.get_prediction_agent1(features)
        elif self.a_id == 2:
            y = self.LN_controller.get_prediction_agent2(features)
        self.tim += time.time() - tim
        self._steps += 1
        return int(min(y, 5))

    def episode_end(self, reward):
        """Reset shared controller state once per episode and drop feature state."""
        # BUG FIX: was `self.agent_id`, which this class never sets —
        # __init__ stores the id as `self.a_id`.
        if self.a_id == 1:
            self.LN_controller.reset_state()
        self.feature_engineer = FeatureEngineer()

    def shutdown(self):
        """Report timing stats and release the shared controller."""
        print("avg decision time: " + str(self.tim / self._steps))
        delete_controller(self.n_id)
def next_episode(self):
    """Start a fresh episode by discarding all per-agent feature state."""
    # One fresh FeatureEngineer per player (a Pommerman board has four agents).
    self.feature_engineers = [FeatureEngineer() for _ in range(4)]
def __init__(self, a_id, n_id, LN):
    """Register with the Bomber character and attach the shared LN controller.

    a_id -- this agent's id (1 or 2) within the shared controller
    n_id -- controller registry key, also used to release it later
    LN   -- controller payload forwarded to get_controller
    """
    super(LNAgent, self).__init__(characters.Bomber)
    self.a_id = a_id
    self.n_id = n_id
    self.LN_controller = get_controller(self.n_id, LN)
    self.feature_engineer = FeatureEngineer()  # per-episode observation encoder
    self._steps = 0  # decisions taken so far (timing stats)
    self.tim = 0     # cumulative decision time in seconds
def __init__(self, size, name="db"):
    """Pre-allocate a fixed-capacity training-example buffer.

    size -- maximum number of (features, label) pairs the buffer holds
    name -- identifier for this buffer (default "db")
    """
    self.name = name
    self.size = size
    self.step_index = 0  # next free slot in x / y
    # One 11x11 board of N_FEATURES planes per sample, scalar label per sample.
    self.x = np.zeros((size, 11, 11, N_FEATURES), dtype="float32")
    self.y = np.zeros((size,), dtype="float32")
    # One feature extractor per agent on the board.
    self.feature_engineers = [FeatureEngineer() for _ in range(4)]
def main():
    """Bootstrap one rendered Pommerman game with four TestAgents."""
    # Show every environment known to the Pommerman registry.
    print(pommerman.REGISTRY)

    # The environment requires exactly four agents.
    agent_list = [TestAgent(), TestAgent(), TestAgent(), TestAgent()]
    env = pommerman.make('PommeRadioCompetition-v2', agent_list)

    i = 0
    tim = time.time()
    # Run the episodes just like OpenAI Gym.
    for i_episode in range(1):
        state = env.reset()
        done = False
        feature_engineer = FeatureEngineer()
        while not done:
            i += 1
            env.render()
            actions = env.act(state)
            state, reward, done, info = env.step(actions)
            # Features for agent index 3 are computed each step
            # (value currently unused — kept for debugging probes).
            xd = feature_engineer.get_features(state[3])
        print('Episode {} finished'.format(i_episode))
    env.close()
    print(time.time() - tim)
def create_feature_label_mat(self):
    '''
    INPUT: None
    OUTPUT: None

    Creates a feature-matrix DataFrame, and deals with missing values.
    Builds self.feature_label_mat by engineering basic/advanced features
    per source DataFrame, then left-merging them all onto self.df_labels.
    '''
    self._limit_dates()
    ''' Engineers features'''
    for df_name, df in self.feature_dfs.items():
        if self.advanced_call_sms_bt_features:
            # Work on a copy so the original DataFrame is untouched.
            df_for_adv = df.copy()
            if df_name == 'df_BluetoothProximity':
                # Advanced BT features need a known counterpart participant.
                df_for_adv = df_for_adv[pd.notnull(df_for_adv['participantID.B'])]
        if self.basic_features:
            fe = FeatureEngineer(df, df_name)
            self.feature_dfs_forflmat[df_name] = fe.engineer()
        if self.advanced_call_sms_bt_features:
            # Available for CallLog, SMSLog, BluetoothProximity
            if (df_name == 'df_CallLog' or df_name == 'df_SMSLog'
                    or df_name == 'df_BluetoothProximity'):
                # Centrality characteristics only apply to Bluetooth proximity.
                if self.add_centrality_chars and df_name == 'df_BluetoothProximity':
                    fe = FeatureEngineer(df_for_adv, df_name, advanced=True,
                                         add_centrality_chars=True)
                else:
                    fe = FeatureEngineer(df_for_adv, df_name, advanced=True)
                df_newname = df_name + '_advanced'
                # Drop bookkeeping columns the engineer leaves behind.
                self.feature_dfs_forflmat[df_newname] = fe.engineer().drop(
                    ['index', 'cnt'], axis=1)
        print "ModelTester: Engineered basic and/or advanced for " + df_name + "\n"
    ''' Merges features and labels into one DataFrame'''
    for feature_df in self.feature_dfs_forflmat.itervalues():
        self.df_labels = self.df_labels.merge(feature_df, how='left',
                                              on=['participantID', 'date'])
    self.feature_label_mat = self.df_labels
    # Rows without a participant cannot be labeled; drop them.
    self.feature_label_mat = self.feature_label_mat[pd.notnull(
        self.feature_label_mat['participantID'])]
    self._fill_na()
    if list(self.feature_label_mat.columns).count('cnt nan') > 0:  #Drops 'cnt nan' column if it exists
        self.feature_label_mat.drop('cnt nan', axis=1, inplace=True)
    if self.create_demedianed:
        self._create_demedianed_cols()
    # Any NaNs surviving _fill_na become zeros.
    self.feature_label_mat.fillna(0, inplace=True)
    ''' Adds a dummy 'weekend', 1 for Sat/Sun (and Fri if Fri_weekend=True), 0 otherwise '''
    self._add_weekend_col()
    if list(self.feature_label_mat.columns).count('index') > 0:  #Drops 'index' column if it exists
        self.feature_label_mat.drop('index', axis=1, inplace=True)
def create_feature_label_mat(self):
    '''
    INPUT: None
    OUTPUT: None

    Creates a feature-matrix DataFrame, and deals with missing values.
    Builds self.feature_label_mat by engineering basic/advanced features
    per source DataFrame, then left-merging them all onto self.df_labels.
    '''
    self._limit_dates()
    ''' Engineers features'''
    for df_name, df in self.feature_dfs.items():
        if self.advanced_call_sms_bt_features:
            # Work on a copy so the original DataFrame is untouched.
            df_for_adv = df.copy()
            if df_name == 'df_BluetoothProximity':
                # Advanced BT features need a known counterpart participant.
                df_for_adv = df_for_adv[pd.notnull(
                    df_for_adv['participantID.B'])]
        if self.basic_features:
            fe = FeatureEngineer(df, df_name)
            self.feature_dfs_forflmat[df_name] = fe.engineer()
        if self.advanced_call_sms_bt_features:
            # Available for CallLog, SMSLog, BluetoothProximity
            if (df_name == 'df_CallLog' or df_name == 'df_SMSLog'
                    or df_name == 'df_BluetoothProximity'):
                # Centrality characteristics only apply to Bluetooth proximity.
                if self.add_centrality_chars and df_name == 'df_BluetoothProximity':
                    fe = FeatureEngineer(df_for_adv, df_name, advanced=True,
                                         add_centrality_chars=True)
                else:
                    fe = FeatureEngineer(df_for_adv, df_name, advanced=True)
                df_newname = df_name + '_advanced'
                # Drop bookkeeping columns the engineer leaves behind.
                self.feature_dfs_forflmat[df_newname] = fe.engineer().drop(
                    ['index', 'cnt'], axis=1)
        print "ModelTester: Engineered basic and/or advanced for " + df_name + "\n"
    ''' Merges features and labels into one DataFrame'''
    for feature_df in self.feature_dfs_forflmat.itervalues():
        self.df_labels = self.df_labels.merge(feature_df, how='left',
                                              on=['participantID', 'date'])
    self.feature_label_mat = self.df_labels
    # Rows without a participant cannot be labeled; drop them.
    self.feature_label_mat = self.feature_label_mat[pd.notnull(
        self.feature_label_mat['participantID'])]
    self._fill_na()
    if list(self.feature_label_mat.columns).count(
            'cnt nan') > 0:  #Drops 'cnt nan' column if it exists
        self.feature_label_mat.drop('cnt nan', axis=1, inplace=True)
    if self.create_demedianed:
        self._create_demedianed_cols()
    # Any NaNs surviving _fill_na become zeros.
    self.feature_label_mat.fillna(0, inplace=True)
    ''' Adds a dummy 'weekend', 1 for Sat/Sun (and Fri if Fri_weekend=True), 0 otherwise '''
    self._add_weekend_col()
    if list(self.feature_label_mat.columns).count(
            'index') > 0:  #Drops 'index' column if it exists
        self.feature_label_mat.drop('index', axis=1, inplace=True)
class NetworkAgent(BaseAgent):
    """Agent that delegates every action choice to a trained network."""

    def __init__(self, network):
        super(NetworkAgent, self).__init__(characters.Bomber)
        self.feature_engineer = FeatureEngineer()  # per-episode observation encoder
        self.network = network

    def act(self, observation, action_space):
        """Encode the observation and return the network's chosen action."""
        obs_features = self.feature_engineer.get_features(observation)
        chosen_action, _ = self.network.predict(obs_features)
        return chosen_action

    def episode_end(self, reward):
        """Drop accumulated feature state so the next episode starts clean."""
        self.feature_engineer = FeatureEngineer()
class NetworkAgent(BaseAgent):
    """Training-time agent: defers each action choice to the trainer T."""

    def __init__(self, a_id, T):
        super(NetworkAgent, self).__init__(characters.Bomber)
        self.feature_engineer = FeatureEngineer()  # per-episode observation encoder
        self.a_id = a_id  # which trainee this agent represents
        self.T = T        # trainer driving the learning step

    def act(self, observation, action_space):
        """Run one training step and return (action, msg0, msg1)."""
        feats = self.feature_engineer.get_features(observation)
        picked = self.T.training_step(feats, self.a_id, observation["position"])
        # Radio message slots are unused during training — always zero.
        return picked, 0, 0

    def episode_end(self, reward):
        """Discard feature state between episodes."""
        self.feature_engineer = FeatureEngineer()
class Agent:
    """Competition agent that loads its models on a background thread.

    While loading is still in flight, act() answers with a safe no-op
    (stand still, message (0, 0)) instead of blocking the match.
    """

    def _load_models(self):
        """Load all models and helpers (heavy; runs on the background thread)."""
        # One policy network per game phase (early / mid / late game).
        self.N = [Network("n_60.h5"), Network("n_180.h5"), Network("n_800.h5")]
        self.CM = tf.keras.models.load_model("cn.h5", compile=False)
        # NOTE(review): TF1-era private API; presumably pre-builds the
        # predict graph — confirm against the TF version in use.
        self.CM._make_predict_function()
        self.FE = FeatureEngineer(self.CM)
        # MSGS[0] = my last message, MSGS[1] = teammate's last message.
        self.MSGS = [(0, 0), (0, 0)]

    def __init__(self):
        # Load in the background so construction returns immediately.
        self.t = Thread(target=self._load_models)
        self.t.start()

    def init_agent(self, id_, game_type):
        # BUG FIX: Thread.isAlive() was removed in Python 3.9;
        # is_alive() is the supported spelling.
        # Best-effort grace period; act() still guards if loading is slow.
        if self.t.is_alive():
            time.sleep(0.1)

    def act(self, observation, action_space):
        """Return (action, msg0, msg1); no-op while models are still loading."""
        if self.t.is_alive():
            time.sleep(0.05)
            return 0, 0, 0
        # Remember the teammate's message for feature construction.
        # NOTE(review): a length-1 message stores a scalar, longer ones the
        # whole sequence — confirm FeatureEngineer accepts both shapes.
        self.MSGS[1] = observation["message"] if len(
            observation["message"]) > 1 else observation["message"][0]
        features = self.FE.get_features(observation, self.MSGS)
        # Pick the network trained for the current game phase.
        if observation["step_count"] < 60:
            n = 0
        elif observation["step_count"] < 180:
            n = 1
        else:
            n = 2
        a, m = self.N[n].predict(features, observation["position"])
        self.MSGS[0] = m
        return int(a), int(m[0]), int(m[1])

    def episode_end(self, reward):
        # Fresh feature state for the next episode; reuse the loaded CM model.
        self.FE = FeatureEngineer(self.CM)

    def shutdown(self):
        pass
class TestAgent(BaseAgent):
    """Debugging agent: traces the step counter and always stands still."""

    # Class-level (shared across instances); only referenced by debug probes.
    feature_engineer = FeatureEngineer()

    def act(self, observation, action_space):
        """Print the current step and return the no-op action (0)."""
        print(observation["step_count"])
        return 0

    def episode_end(self, reward):
        """Print the final reward for inspection."""
        print(reward)
def __init__(self, network):
    """Attach the decision network and set up a fresh feature extractor."""
    super(NetworkAgent, self).__init__(characters.Bomber)
    self.network = network
    self.feature_engineer = FeatureEngineer()  # per-episode observation encoder
def episode_end(self, reward):
    """End-of-episode hook: reset the shared controller once (agent 1 only)
    and discard per-episode feature state."""
    # BUG FIX: was `self.agent_id`; the matching __init__ stores the id as
    # `self.a_id`, so the old attribute was never set on this class.
    # NOTE(review): confirm the base class does not define `agent_id`.
    if self.a_id == 1:
        self.LN_controller.reset_state()
    self.feature_engineer = FeatureEngineer()
def episode_end(self, reward):
    """Discard per-episode feature state so the next game starts clean."""
    self.feature_engineer = FeatureEngineer()
def episode_end(self, reward):
    """Rebuild the feature extractor for the next episode, reusing the
    already-loaded communication model."""
    self.FE = FeatureEngineer(self.CM)
def main():
    """Build the training set: walk labeled audio folders, extract features,
    and save the feature matrix and labels as .npy files."""
    parser = argparse.ArgumentParser()
    # BUG FIX: the defaults called str.format() on strings containing no
    # placeholders, so the computed script directory was silently discarded.
    # The no-op calls are removed; the effective default values are unchanged.
    parser.add_argument('--load_path', default='./data')
    parser.add_argument('--save_path', default='./output/dataset/')
    parser.add_argument('--log_path', default='./')

    # Arguments
    args = parser.parse_args()
    load_path = os.path.normpath(args.load_path)
    save_path = os.path.normpath(args.save_path)
    log_path = os.path.normpath(args.log_path)

    # Set up logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%Y-%m-%d %I:%M:%S %p',
                        filename=os.path.join(log_path,
                                              'logs_pc_methods_feat_eng.log'),
                        filemode='w',
                        level=logging.INFO)

    # Sub-folders of load_path whose names start with a digit are the labels.
    regex = re.compile(r'^[0-9]')
    directory_list = [i for i in os.listdir(load_path) if regex.search(i)]

    # 18 features per clip. The first row is an uninitialized placeholder so
    # np.concatenate has a base to grow from; it is dropped after the loop.
    X = np.empty([1, 18])
    y = []

    logging.info('Creating training set...')
    start = timeit.default_timer()

    # One FeatureEngineer per label folder; one row per audio file.
    for directory in directory_list:
        feature_engineer = FeatureEngineer(label=directory)
        file_list = os.listdir(os.path.join(load_path, directory))
        for audio_file in file_list:
            file_reader = Reader(os.path.join(load_path, directory, audio_file))
            data, sample_rate = file_reader.read_audio_file()
            avg_features, label = feature_engineer.feature_engineer(
                audio_data=data)
            X = np.concatenate((X, avg_features), axis=0)
            y.append(label)

    # Drop the placeholder first row.
    X = X[1:, :]

    stop = timeit.default_timer()
    logging.info(
        'Time taken for reading files and feature engineering: {0}'.format(
            stop - start))

    # Save to numpy binary format
    logging.info('Saving training set...')
    np.save(os.path.join(save_path, 'dataset.npy'), X)
    np.save(os.path.join(save_path, 'labels.npy'), y)
    logging.info('Saved! {0}'.format(os.path.join(save_path, 'dataset.npy')))
    logging.info('Saved! {0}'.format(os.path.join(save_path, 'labels.npy')))
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time

from tabular_model import TabularModel
from feature_engineer import FeatureEngineer

# Raw NYC taxi-fare data; FeatureEngineer mutates this DataFrame in place.
df = pd.read_csv('~/MachineLearning/UdemyData/NYCTaxiFares.csv')

# Derive model inputs from the raw columns: a distance column (dist_km below)
# and time-of-day columns (Hour / AMorPM / Weekday below), per method names.
feature_engineer = FeatureEngineer(df)
feature_engineer.convert_coordinates_to_distance_travelled()
feature_engineer.convert_datetime_to_useful_time_info()

# divide into categorisation and continuous data
cat_cols = ['Hour', 'AMorPM', 'Weekday']
cont_cols = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude', 'passenger_count', 'dist_km'
]
y_col = ['fare_amount']  # regression target

# Encode the categorical columns as pandas categories so .cat.codes works.
for cat in cat_cols:
    df[cat] = df[cat].astype('category')

# Stack per-column code arrays into an (n_rows, n_cat_cols) matrix;
# embedding lookups require int64 indices.
cats = np.stack([df[col].cat.codes.values for col in cat_cols], 1)
cats = torch.tensor(cats, dtype=torch.int64)

# Continuous features as an (n_rows, n_cont_cols) float tensor.
conts = np.stack([df[col].values for col in cont_cols], 1)
conts = torch.tensor(conts, dtype=torch.float)
def __init__(self, a_id, T):
    """Remember this agent's id and its trainer, with a fresh feature extractor."""
    super(NetworkAgent, self).__init__(characters.Bomber)
    self.a_id = a_id  # which trainee this agent represents
    self.T = T        # trainer driving the learning step
    self.feature_engineer = FeatureEngineer()
def main():
    """Classify one audio file with a pickled model: read the signal, extract
    features, predict per segment, majority-vote, and write the verdict."""
    parser = argparse.ArgumentParser()
    base_dir = os.path.dirname(os.path.abspath(__file__))
    parser.add_argument('--load_path_data', default=base_dir)
    parser.add_argument('--load_path_model',
                        default='{}/../../../output/model/'.format(base_dir))
    parser.add_argument('--save_path',
                        default='{}/../../../output/prediction/'.format(base_dir))
    parser.add_argument('--file_name',
                        default='{}/../../..//files/temp.mp3'.format(base_dir))
    parser.add_argument('--log_path', default='{}/../../'.format(base_dir))

    # Arguments
    args = parser.parse_args()
    load_path_data = os.path.normpath(args.load_path_data)
    load_path_model = os.path.normpath(args.load_path_model)
    file_name = args.file_name
    save_path = os.path.normpath(args.save_path)
    log_path = os.path.normpath(args.log_path)

    # Set up logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%Y-%m-%d %I:%M:%S %p',
                        filename=os.path.join(
                            log_path, 'logs_prediction_test_test_model.log'),
                        filemode='w',
                        level=logging.INFO)

    # READ RAW SIGNAL (first 5 sec)
    logging.info('Reading {0}'.format(file_name))
    start = timeit.default_timer()
    file_reader = Reader(os.path.join(load_path_data, file_name))
    play_list = file_reader.read_audio_file()
    stop = timeit.default_timer()
    logging.info('Time taken for reading file: {0}'.format(stop - start))

    # FEATURE ENGINEERING
    logging.info('Starting feature engineering')
    start = timeit.default_timer()
    engineer = FeatureEngineer()
    play_list_processed = [engineer.feature_engineer(signal)
                           for signal in play_list]
    stop = timeit.default_timer()
    logging.info('Time taken for feature engineering: {0}'.format(stop - start))

    # MAKE PREDICTION
    logging.info('Predicting...')
    start = timeit.default_timer()
    # Suppress sklearn version-mismatch warnings while unpickling; see
    # https://stackoverflow.com/questions/41146759/check-sklearn-version-before-loading-model-using-joblib
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        with open((os.path.join(load_path_model, 'model.pkl')), 'rb') as fp:
            model = pickle.load(fp)

    predictor = BabyCryPredictor(model)
    predictions = [predictor.classify(signal)
                   for signal in play_list_processed]

    # MAJORITY VOTE
    majority_voter = MajorityVoter(predictions)
    majority_vote = majority_voter.vote()
    stop = timeit.default_timer()
    logging.info(
        'Time taken for prediction: {0}. Is it a baby cry?? {1}'.format(
            stop - start, majority_vote))

    # SAVE — map the numeric vote to a human-readable verdict (other values
    # pass through unchanged, matching the original if/elif chain).
    verdicts = {
        1: "Baby is Tired",
        2: "Baby is Crying Because Need Sleep",
        3: "Baby is Crying Because Hungry",
        0: "Baby is Silent Now",
    }
    majority_vote = verdicts.get(majority_vote, majority_vote)

    logging.info('Saving prediction...')
    # Save prediction result
    with open(os.path.join(save_path, 'prediction.txt'), 'w') as text_file:
        text_file.write("{}".format(majority_vote))
    logging.info('Saved! {}'.format(os.path.join(save_path, 'prediction.txt')))