def __init__(self,
             env,
             optim=Adam,
             policy_lr=0.001,
             value_lr=0.001,
             policy_hidden_size=None,
             value_hidden_size=None,
             gamma=0.9,
             batch_size=5000,
             epochs=50,
             update_every=50,
             render=False):
    """Set up an actor-critic style agent with separate policy and value networks.

    Args:
        env: environment exposing ``observation_space.shape`` and a discrete
            ``action_space.n`` (Gym-style API — confirm against caller).
        optim: optimizer factory called as ``optim(params, lr=...)``
            (e.g. ``torch.optim.Adam``).
        policy_lr: learning rate for the policy network optimizer.
        value_lr: learning rate for the value network optimizer.
        policy_hidden_size: list of hidden-layer widths for the policy MLP;
            defaults to ``[32]``.
        value_hidden_size: list of hidden-layer widths for the value MLP;
            defaults to ``[32]``.
        gamma: discount factor.
        batch_size: number of environment steps collected per update batch.
        epochs: number of training epochs.
        update_every: presumably the update interval in steps — TODO confirm
            against the training loop (not visible here).
        render: whether to render the environment during training.
    """
    # Avoid the mutable-default-argument pitfall: bind list defaults here.
    if policy_hidden_size is None:
        policy_hidden_size = [32]
    if value_hidden_size is None:
        value_hidden_size = [32]

    self.env = env
    self.batch_size = batch_size
    self.render = render
    self.epochs = epochs
    self.gamma = gamma
    self.update_every = update_every

    # Flatten the observation shape into a single input dimension.
    obs_size = np.prod(env.observation_space.shape)
    action_size = env.action_space.n
    # Policy head outputs one logit per discrete action.
    self.policy_mlp = MLP([obs_size] + policy_hidden_size + [action_size])
    self.policy_optim = optim(self.policy_mlp.parameters(), lr=policy_lr)
    # Value head outputs a single scalar state-value estimate.
    self.value_mlp = MLP([obs_size] + value_hidden_size + [1])
    self.value_optim = optim(self.value_mlp.parameters(), lr=value_lr)
Ejemplo n.º 2
0
    def __init__(self,
                 env,
                 optim=Adam,
                 policy_lr=0.01,
                 value_lr=0.1,
                 policy_hidden_size=None,
                 value_hidden_size=None,
                 batch_size=5000,
                 render=False):
        """Set up an agent with separate policy and value networks.

        Args:
            env: environment exposing ``observation_space.shape`` and a
                discrete ``action_space.n`` (Gym-style API — confirm
                against caller).
            optim: optimizer factory called as ``optim(params, lr=...)``
                (e.g. ``torch.optim.Adam``).
            policy_lr: learning rate for the policy network optimizer.
            value_lr: learning rate for the value network optimizer.
            policy_hidden_size: list of hidden-layer widths for the policy
                MLP; defaults to ``[32]``.
            value_hidden_size: list of hidden-layer widths for the value
                MLP; defaults to ``[32]``.
            batch_size: number of environment steps per update batch.
            render: whether to render the environment during training.
        """
        # Avoid the mutable-default-argument pitfall: bind list defaults here.
        if policy_hidden_size is None:
            policy_hidden_size = [32]
        if value_hidden_size is None:
            value_hidden_size = [32]

        self.env = env
        self.batch_size = batch_size
        self.render = render

        # Flatten the observation shape into a single input dimension.
        obs_size = np.prod(env.observation_space.shape)
        action_size = env.action_space.n
        # Policy head outputs one logit per discrete action.
        self.policy_mlp = MLP([obs_size] + policy_hidden_size + [action_size])
        self.policy_optim = optim(self.policy_mlp.parameters(), lr=policy_lr)
        # Value head outputs a single scalar state-value estimate.
        self.value_mlp = MLP([obs_size] + value_hidden_size + [1])
        self.value_optim = optim(self.value_mlp.parameters(), lr=value_lr)
Ejemplo n.º 3
0
    def __init__(self,
                 env,
                 optim=Adam,
                 lr=0.01,
                 hidden_size=None,
                 batch_size=5000,
                 n_episodes=2000,
                 render=False):
        """Set up a single-network policy-gradient agent.

        Args:
            env: environment exposing ``observation_space.shape`` and a
                discrete ``action_space.n`` (Gym-style API — confirm
                against caller).
            optim: optimizer factory called as ``optim(params, lr=...)``
                (e.g. ``torch.optim.Adam``).
            lr: learning rate for the policy optimizer.
            hidden_size: list of hidden-layer widths for the policy MLP;
                defaults to ``[64]``.
            batch_size: number of environment steps per update batch.
            n_episodes: total number of training episodes.
            render: whether to render the environment during training.
        """
        # Avoid the mutable-default-argument pitfall: bind the list default here.
        if hidden_size is None:
            hidden_size = [64]

        self.env = env
        self.batch_size = batch_size
        self.n_episodes = n_episodes
        self.lr = lr
        self.render = render

        # Flatten the observation shape into a single input dimension;
        # the network outputs one logit per discrete action.
        obs_size = np.prod(env.observation_space.shape)
        action_size = env.action_space.n
        self.mlp = MLP([obs_size] + hidden_size + [action_size])
        self.optim = optim(self.mlp.parameters(), lr=lr)
Ejemplo n.º 4
0
# Drop identifier / non-feature columns if present, instead of five copy-pasted
# `if "X" in list(data): data.pop("X")` checks. `data` is presumably a pandas
# DataFrame (it supports .pop/.values/.columns) — confirm against the loader.
for col in ("NRF", "POSTCR", "OpID", "PatID", "DOA"):
    if col in data.columns:
        data.pop(col)

# Target: haemofiltration label; remove it from the feature frame.
y = data.pop("HAEMOFIL")

# NOTE(review): `ros` is created but never used in this section — presumably
# oversampling happens (or was meant to happen) elsewhere; verify.
ros = RandomOverSampler(random_state=1)

# NOTE(review): `sklearn.preprocessing.scale` is a function, not an estimator —
# `scale()` with no arguments raises TypeError, and functions have no `.fit`.
# This almost certainly should be `StandardScaler()`; confirm the import and fix
# there (kept as-is because the import block is outside this view).
scaler = scale()
scaler.fit(data)

print("full")
# Hyperparameter grid for the MLP estimator.
param = {'layers': [2, 5], 'nodes': [5, 10], 'dropout': [0.4, 0.8], 'epochs': [50]}
# NOTE(review): the `iid` parameter was removed from GridSearchCV in
# scikit-learn 0.24 — this call only works on older versions.
gsearch = GridSearchCV(estimator=MLP(),
                       param_grid=param, scoring='roc_auc', iid=False,
                       cv=rkf_search, verbose=2)

# Search on scaled features, keep the best estimator, persist all CV results.
gsearch.fit(scaler.transform(data.values), y.values)
clf = gsearch.best_estimator_
pd.DataFrame(gsearch.cv_results_).to_csv("output/HF/MLPfull.csv")

# Re-evaluate the best estimator with repeated k-fold CV and persist the scores.
output = cross_validate(clf, scaler.transform(data.values), y.values,
                        scoring=metrics, cv=rkf, verbose=2,
                        return_train_score=True)
pd.DataFrame(output).to_csv('output/HF/performanceMLPfull.csv')