def test_positional_args():
    """Smoke-test that Cypher accepts X and y positionally.

    NOTE(review): despite the name, random_state is passed by keyword,
    and the last two seeds are identical — confirm that is intentional.
    """
    for seed in (False, 1, 1):
        optimizer = Cypher(X, y, random_state=seed)
        optimizer.search(search_config)
def test_random_state():
    """Smoke-test that searches run with random_state disabled (False)
    and with two distinct integer seeds."""
    for seed in (False, 0, 1):
        optimizer = Cypher(X, y, random_state=seed)
        optimizer.search(search_config)
def getKeys(self):
    """Build and return a Cypher from command-line arguments, or generate
    a fresh key pair when not enough arguments are given.

    argv[1]/argv[2] become the cypher parameters; argv[3], when present,
    is stored as the public key. Without arguments, an Encryption key
    pair is generated from two fixed primes and its parts are printed.
    """
    if len(sys.argv) > 2:
        cypher = Cypher(sys.argv[1], sys.argv[2])
        if len(sys.argv) > 3:
            self.pubKey = sys.argv[3]
        # Fixed: original had `return chyper` — a typo that raised
        # NameError whenever arguments were supplied.
        return cypher
    # generate keys
    enc = Encryption(79, 83)
    print("priv:" + str(enc.private))
    print("pub:" + str(enc.pub))
    print("mod:" + str(enc.n))
    cypher = Cypher(enc.private, enc.n)
    self.pubKey = enc.pub
    return cypher
def test_verbosity():
    """Run searches at every verbosity level, serially and with two jobs."""
    # (verbosity, n_jobs) pairs; None means use search()'s default job count.
    cases = ((0, None), (0, 2), (1, None), (1, None), (2, 2))
    for level, jobs in cases:
        optimizer = Cypher(X, y, verbosity=level)
        if jobs is None:
            optimizer.search(search_config)
        else:
            optimizer.search(search_config, n_jobs=jobs)
def meta_opt(para, X, y):
    """Meta-objective: run a ParticleSwarm-driven search with the swarm
    hyperparameters taken from *para* and return the best score found."""

    def model(para, X, y):
        # Score one decision-tree candidate via 3-fold cross-validation.
        tree = DecisionTreeClassifier(
            max_depth=para["max_depth"],
            min_samples_split=para["min_samples_split"],
            min_samples_leaf=para["min_samples_leaf"],
        )
        return cross_val_score(tree, X, y, cv=3).mean()

    search_config = {
        model: {
            "max_depth": range(2, 50),
            "min_samples_split": range(2, 50),
            "min_samples_leaf": range(1, 50),
        }
    }

    swarm_settings = {
        "inertia": para["inertia"],
        "cognitive_weight": para["cognitive_weight"],
        "social_weight": para["social_weight"],
    }
    opt = Cypher(
        search_config,
        optimizer={"ParticleSwarm": swarm_settings},
        verbosity=None,
    )
    opt.search(X, y)
    return opt.score_best
def test_sklearn():
    """Smoke-test Cypher with a scikit-learn DecisionTreeClassifier objective."""
    from sklearn.tree import DecisionTreeClassifier

    def model(para, X_train, y_train):
        # Score one hyperparameter candidate via 3-fold cross-validation.
        tree = DecisionTreeClassifier(
            criterion=para["criterion"],
            max_depth=para["max_depth"],
            min_samples_split=para["min_samples_split"],
            min_samples_leaf=para["min_samples_leaf"],
        )
        return cross_val_score(tree, X_train, y_train, cv=3).mean()

    search_config = {
        model: {
            "criterion": ["gini", "entropy"],
            "max_depth": range(1, 21),
            "min_samples_split": range(2, 21),
            "min_samples_leaf": range(1, 21),
        }
    }

    opt = Cypher(X, y)
    opt.search(search_config)
def meta_opt(para, X_list, y_list):
    """Meta-objective: average the best ParticleSwarm search score over
    every dataset, its augmentations, and several iteration budgets.

    Fixed a shadowing defect: the original rebound the parameters
    X_list/y_list (and the outer loop vars X/y) inside the loop bodies.
    zip() had already captured the originals so iteration was unaffected,
    but the names were misleading; the augmented data now has its own names.
    """
    scores = []
    for X, y in zip(X_list, y_list):
        aug_X_list, aug_y_list = data_aug(X, y, sample_multi=3, feature_multi=3)
        for X_aug, y_aug in zip(aug_X_list, aug_y_list):
            for n_iter in [10, 25, 50, 100]:
                opt = Cypher(
                    search_config_model,
                    optimizer={
                        "ParticleSwarm": {
                            "inertia": para["inertia"],
                            "cognitive_weight": para["cognitive_weight"],
                            "social_weight": para["social_weight"],
                        }
                    },
                    n_iter=n_iter,
                    verbosity=None,
                )
                opt.search(X_aug, y_aug)
                scores.append(opt.score_best)
    return np.array(scores).mean()
def auth_access(self, key):
    """Authenticate *key* against the stored credential, or register it.

    EAFP: a KeyError raised anywhere in the try body (missing PASS_FIELD
    or TOKEN_FIELD — i.e. first run with no stored credential) falls
    through to the except branch, which registers *key* as the new
    credential. Returns True on success; raises PassManagerException when
    the key does not match the stored hash.
    """
    try:
        stored_pass = self.file[PASS_FIELD]
        token = self.file[TOKEN_FIELD]
        if stored_pass == self.__hash(key, token):
            # Key matches: remember it and build the cypher for this session.
            self.key = key
            self.cypher = Cypher(self.key)
            return True
        else:
            raise PassManagerException('Wrong Key')
    except KeyError:
        # No credential stored yet: register this key with a fresh token.
        self.key = key
        self.cypher = Cypher(self.key)
        token = self.__generate_key()
        self.file[TOKEN_FIELD] = token
        self.file[PASS_FIELD] = self.__hash(self.key, token)
        return True
def test_TabuOptimizer():
    """TabuSearch should find a better score with more iterations."""
    results = []
    for iterations in (n_iter_min, n_iter_max):
        opt = Cypher(
            search_config,
            optimizer="TabuSearch",
            n_iter=iterations,
            random_state=random_state,
            warm_start=warm_start,
        )
        opt.search(X, y)
        results.append(opt._optimizer_.score_best)
    assert results[0] < results[1]
def test_StochasticHillClimbing():
    """StochasticHillClimbing should find a better score with more iterations."""
    results = []
    for iterations in (n_iter_min, n_iter_max):
        opt = Cypher(
            search_config,
            optimizer="StochasticHillClimbing",
            n_iter=iterations,
            random_state=random_state,
            warm_start=warm_start,
        )
        opt.search(X, y)
        results.append(opt._optimizer_.score_best)
    assert results[0] < results[1]
def test_EvolutionStrategy():
    """EvolutionStrategy should find a better score with more iterations."""
    results = []
    for iterations in (n_iter_min, n_iter_max):
        opt = Cypher(
            search_config,
            optimizer="EvolutionStrategy",
            n_iter=iterations,
            random_state=random_state,
            warm_start=warm_start,
        )
        opt.search(X, y)
        results.append(opt._optimizer_.score_best)
    assert results[0] < results[1]
def test_SimulatedAnnealing():
    """SimulatedAnnealing should find a better score with more iterations."""
    results = []
    for iterations in (n_iter_min, n_iter_max):
        opt = Cypher(
            search_config,
            optimizer="SimulatedAnnealing",
            n_iter=iterations,
            random_state=random_state,
            warm_start=warm_start,
        )
        opt.search(X, y)
        results.append(opt._optimizer_.score_best)
    assert results[0] < results[1]
def test_ParallelTempering():
    """ParallelTempering should find a better score with more iterations."""
    results = []
    for iterations in (n_iter_min, n_iter_max):
        opt = Cypher(
            search_config,
            optimizer="ParallelTempering",
            n_iter=iterations,
            random_state=random_state,
            warm_start=warm_start,
        )
        opt.search(X, y)
        results.append(opt._optimizer_.score_best)
    assert results[0] < results[1]
def connect(self):
    """Open a TCP connection to the peer, fetch cypher keys if we have
    none yet, run the conversation loop, then close the socket."""
    self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.s.connect((self.host, self.port))
    if not self.isKeySet:
        # Ask the peer for a key pair, delivered as "key1,key2".
        self.s.sendall('need key'.encode())
        reply = self.s.recv(1024).decode()
        key_pair = reply.split(',')
        self.cypher = Cypher(key_pair[0], key_pair[1])
        self.isKeySet = True
        print(key_pair)
    self.startConversation()
    self.s.close()
def test_keras():
    """Smoke-test Cypher on a tiny CNN over a 1000-sample CIFAR-10 subset."""
    from keras.models import Sequential
    from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten
    from keras.datasets import cifar10
    from keras.utils import to_categorical

    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    X_train = X_train[0:1000]
    y_train = y_train[0:1000]
    # Fixed: the original sliced the *training* data for the test split
    # (X_test = X_train[0:1000]), so evaluation measured train accuracy.
    X_test = X_test[0:1000]
    y_test = y_test[0:1000]

    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)

    def cnn(para, X_train, y_train):
        # Build, train (1 epoch), and score one CNN candidate.
        model = Sequential()
        model.add(
            Conv2D(
                filters=para["filters.0"],
                kernel_size=para["kernel_size.0"],
                activation="relu",
            ))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(10, activation="softmax"))
        model.compile(optimizer="adam",
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])
        model.fit(X_train, y_train, epochs=1)
        _, score = model.evaluate(x=X_test, y=y_test)
        return score

    search_config = {cnn: {"filters.0": [32, 64], "kernel_size.0": [3, 4]}}

    opt = Cypher(X_train, y_train)
    opt.search(search_config)
def test_lightgbm():
    """Smoke-test Cypher with a LightGBM classifier objective."""
    from lightgbm import LGBMClassifier

    def model(para, X_train, y_train):
        # Score one candidate via 3-fold cross-validation.
        clf = LGBMClassifier(
            num_leaves=para["num_leaves"],
            learning_rate=para["learning_rate"],
        )
        return cross_val_score(clf, X_train, y_train, cv=3).mean()

    search_config = {
        model: {
            "num_leaves": range(2, 20),
            "learning_rate": [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1],
        }
    }

    opt = Cypher(X, y)
    opt.search(search_config)
def test_xgboost():
    """Smoke-test Cypher with an XGBoost classifier objective."""
    from xgboost import XGBClassifier

    def model(para, X_train, y_train):
        # Score one candidate via 3-fold cross-validation.
        clf = XGBClassifier(
            n_estimators=para["n_estimators"],
            max_depth=para["max_depth"],
        )
        return cross_val_score(clf, X_train, y_train, cv=3).mean()

    search_config = {
        model: {
            "n_estimators": range(2, 20),
            "max_depth": range(1, 11),
        }
    }

    opt = Cypher(X, y)
    opt.search(search_config)
def test_catboost():
    """Smoke-test Cypher with a CatBoost classifier objective."""
    from catboost import CatBoostClassifier

    def model(para, X_train, y_train):
        # Score one candidate via 3-fold cross-validation.
        clf = CatBoostClassifier(
            iterations=para["iterations"],
            depth=para["depth"],
            learning_rate=para["learning_rate"],
        )
        return cross_val_score(clf, X_train, y_train, cv=3).mean()

    search_config = {
        model: {
            "iterations": [1],
            "depth": range(2, 10),
            "learning_rate": [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1],
        }
    }

    opt = Cypher(X, y)
    opt.search(search_config)
def test_func_return():
    """Objective functions may return a (score, model) tuple."""

    def model1(para, X, y):
        tree = DecisionTreeClassifier(
            criterion=para["criterion"],
            max_depth=para["max_depth"],
            min_samples_split=para["min_samples_split"],
            min_samples_leaf=para["min_samples_leaf"],
        )
        score = cross_val_score(tree, X, y, cv=3).mean()
        # Return the fitted-candidate pair, not just the score.
        return score, tree

    search_config1 = {
        model1: {
            "criterion": ["gini", "entropy"],
            "max_depth": range(1, 21),
            "min_samples_split": range(2, 21),
            "min_samples_leaf": range(1, 21),
        }
    }

    opt = Cypher(X, y)
    opt.search(search_config1)
import ray

# Breast-cancer dataset as the benchmark problem.
data = load_breast_cancer()
X, y = data.data, data.target


def gbc_(para, X, y):
    """Objective: mean cross-validation score of one
    GradientBoostingClassifier hyperparameter candidate."""
    clf = GradientBoostingClassifier(
        n_estimators=para["n_estimators"],
        max_depth=para["max_depth"],
        min_samples_split=para["min_samples_split"],
    )
    return cross_val_score(clf, X, y).mean()


search_config = {
    gbc_: {
        "n_estimators": range(1, 20, 1),
        "max_depth": range(2, 12),
        "min_samples_split": range(2, 12),
    }
}

# Distribute the search over 4 CPUs via ray.
ray.init(num_cpus=4)

opt = Cypher(X, y)
opt.search(search_config, n_jobs=4)
import numpy as np

from cypher import Cypher


def himmelblau(para, X, y):
    """Himmelblau's function, negated so that larger is better."""
    a = para["x"]
    b = para["y"]
    return -((a ** 2 + b - 11) ** 2 + (a + b ** 2 - 7) ** 2)


# Search both coordinates over [0, 10) in 0.1 steps.
x_range = np.arange(0, 10, 0.1)
search_config = {himmelblau: {"x": x_range, "y": x_range}}

opt = Cypher(search_config, n_iter=1000000)
opt.search(0, 0)
def collect_data(runs, X, y, opt_list, search_config, n_iter, opt_dict):
    """Benchmark every optimizer in *opt_list* over *runs* repetitions and
    write two CSV files (total time and eval time, one column per optimizer).

    Output files are named "total_time_<model>" / "eval_time_<model>" after
    the first objective function in *search_config*.
    """
    time_c = time.time()

    data_runs_1 = []  # per-run total times
    data_runs_2 = []  # per-run eval times

    for run in tqdm.tqdm(range(runs)):
        print("\nRun nr.", run, "\n")
        total_time_list = []
        eval_time_list = []

        for key in opt_list:
            print("optimizer:", key)
            n_iter_temp = n_iter
            opt_dict_temp = opt_dict

            # Population-based optimizers get a 10x smaller iteration
            # budget (was three identical if-blocks in the original).
            if key in ("ParallelTempering", "ParticleSwarm", "EvolutionStrategy"):
                n_iter_temp = int(n_iter / 10)

            opt_obj = Cypher(search_config,
                             optimizer=key,
                             n_iter=n_iter_temp,
                             **opt_dict_temp)
            opt_obj.search(X, y)

            total_time_list.append(opt_obj.get_total_time())
            eval_time_list.append(opt_obj.get_eval_time())

        data_runs_1.append(np.array(total_time_list))
        data_runs_2.append(np.array(eval_time_list))

    data_runs_1 = np.array(data_runs_1)
    data_runs_2 = np.array(data_runs_2)

    print("\nCreate Dataframe\n")
    print("data_runs_1", data_runs_1, data_runs_1.shape)

    model_name = list(search_config.keys())[0].__name__

    data = pd.DataFrame(data_runs_1, columns=opt_list)
    data.to_csv("total_time_" + model_name, index=False)

    data = pd.DataFrame(data_runs_2, columns=opt_list)
    data.to_csv("eval_time_" + model_name, index=False)

    print("data collecting time:", time.time() - time_c)
# model0 and model1 (tree-ensemble classifiers) share one search grid;
# model2 (a boosting model) has its own with learning_rate/subsample.
_forest_grid = {
    "n_estimators": range(10, 200, 10),
    "criterion": ["gini", "entropy"],
    "max_features": np.arange(0.05, 1.01, 0.05),
    "min_samples_split": range(2, 21),
    "min_samples_leaf": range(1, 21),
    "bootstrap": [True, False],
}

search_config = {
    model0: dict(_forest_grid),
    model1: dict(_forest_grid),
    model2: {
        "n_estimators": range(10, 200, 10),
        "learning_rate": [1e-3, 1e-2, 1e-1, 0.5, 1.0],
        "max_depth": range(1, 11),
        "min_samples_split": range(2, 21),
        "min_samples_leaf": range(1, 21),
        "subsample": np.arange(0.05, 1.01, 0.05),
        "max_features": np.arange(0.05, 1.01, 0.05),
    },
}

opt = Cypher(search_config, n_iter=30, n_jobs=4)
opt.search(X, y)
from sklearn.model_selection import cross_val_score
from lightgbm import LGBMRegressor
from sklearn.datasets import load_breast_cancer
from cypher import Cypher

# Breast-cancer dataset as the benchmark problem.
data = load_breast_cancer()
X, y = data.data, data.target


def model(para, X, y):
    """Objective: mean 3-fold CV score of one LGBMRegressor candidate."""
    reg = LGBMRegressor(
        num_leaves=para["num_leaves"],
        bagging_freq=para["bagging_freq"],
        learning_rate=para["learning_rate"],
    )
    return cross_val_score(reg, X, y, cv=3).mean()


search_config = {
    model: {
        "num_leaves": range(2, 20),
        "bagging_freq": range(2, 12),
        "learning_rate": [1e-3, 1e-2, 1e-1, 0.5, 1.0],
    }
}

opt = Cypher(search_config, n_iter=30)
opt.search(X, y)
# Fetch the codenation challenge payload, decrypt it, and submit the answer.
from cypher import Cypher
# NOTE(review): `token` shadows the stdlib module of the same name — this
# presumably imports a project-local token.py holding CONFIG; verify.
from token import CONFIG

token = CONFIG['token']

# Challenge endpoints, parameterized by the API token.
get_url = 'https://api.codenation.dev/v1/challenge/dev-ps/generate-data?token={}'.format(token)
post_url = 'https://api.codenation.dev/v1/challenge/dev-ps/submit-solution?token={}'.format(token)

filename = 'answer.json'

c = Cypher(get_url, post_url, filename)
print(c.decrypt())
c.update()
from cypher import Cypher
from encryption import Encryption

# Generate a key pair from two small primes and display its components.
print('---enc---')
enc = Encryption(79, 83)
print("Pub:" + str(enc.pub))
print("Priv:" + str(enc.private))
print("mod:" + str(enc.n))
print("phiN:" + str(enc.phiN))

# Encrypt a sample message with the public key, then decrypt it with the
# private key to demonstrate the round trip.
print('---text---')
text = 'ik ben joeri'  # message
encrypter = Cypher(enc.pub, enc.n)
decrypter = Cypher(enc.private, enc.n)

crypt = encrypter.chyperString(text)
print("plain:" + str(text))
print("encrypt:")
print(crypt)
print("decrypt:" + str(decrypter.deChyperList(crypt.split(','))))
def test_ParallelTemperingOptimizer():
    """A ParallelTempering search completes without error."""
    optimizer = Cypher(X, y)
    optimizer.search(search_config, optimizer="ParallelTempering", n_iter=n_iter)
def test_BayesianOptimizer():
    """A Bayesian search completes without error."""
    optimizer = Cypher(X, y)
    optimizer.search(search_config, optimizer="Bayesian", n_iter=n_iter)
def test_EvolutionStrategyOptimizer():
    """An EvolutionStrategy search completes without error."""
    optimizer = Cypher(X, y)
    optimizer.search(search_config, optimizer="EvolutionStrategy", n_iter=n_iter)
def test_ParticleSwarmOptimizer():
    """A ParticleSwarm search completes without error."""
    optimizer = Cypher(X, y)
    optimizer.search(search_config, optimizer="ParticleSwarm", n_iter=n_iter)