Example #1
def meta_opt(para, X_list, y_list):
    scores = []

    for X, y in zip(X_list, y_list):
        # Augment each dataset; new names avoid shadowing the outer lists.
        X_aug_list, y_aug_list = data_aug(X, y, sample_multi=3, feature_multi=3)

        for X_aug, y_aug in zip(X_aug_list, y_aug_list):

            for n_iter in [10, 25, 50, 100]:
                opt = Cypher(
                    search_config_model,
                    optimizer={
                        "ParticleSwarm": {
                            "inertia": para["inertia"],
                            "cognitive_weight": para["cognitive_weight"],
                            "social_weight": para["social_weight"],
                        }
                    },
                    n_iter=n_iter,
                    verbosity=None,
                )
                opt.search(X_aug, y_aug)
                score = opt.score_best
                scores.append(score)

    return np.array(scores).mean()
Example #2
def meta_opt(para, X, y):
    def model(para, X, y):
        model = DecisionTreeClassifier(
            max_depth=para["max_depth"],
            min_samples_split=para["min_samples_split"],
            min_samples_leaf=para["min_samples_leaf"],
        )
        scores = cross_val_score(model, X, y, cv=3)

        return scores.mean()

    search_config = {
        model: {
            "max_depth": range(2, 50),
            "min_samples_split": range(2, 50),
            "min_samples_leaf": range(1, 50),
        }
    }

    opt = Cypher(
        search_config,
        optimizer={
            "ParticleSwarm": {
                "inertia": para["inertia"],
                "cognitive_weight": para["cognitive_weight"],
                "social_weight": para["social_weight"],
            }
        },
        verbosity=None,
    )
    opt.search(X, y)

    return opt.score_best
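
Note: meta_opt returns a single score for one set of ParticleSwarm hyperparameters, so it can itself be optimized with Cypher. Below is a minimal, hypothetical sketch of such an outer search; the value ranges for inertia, cognitive_weight and social_weight are illustrative assumptions, and the Cypher(search_config, n_iter=...) / search(X, y) calling convention is assumed to match Examples #1 and #2.

import numpy as np

# Hypothetical outer search space over the PSO hyperparameters used by meta_opt.
# These ranges are illustrative assumptions, not values taken from the source.
meta_search_config = {
    meta_opt: {
        "inertia": np.arange(0.1, 1.0, 0.1),
        "cognitive_weight": np.arange(0.5, 2.5, 0.25),
        "social_weight": np.arange(0.5, 2.5, 0.25),
    }
}

meta_opt_search = Cypher(meta_search_config, n_iter=25)
meta_opt_search.search(X, y)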
Example #3
def test_sklearn():
    from sklearn.tree import DecisionTreeClassifier

    def model(para, X_train, y_train):
        model = DecisionTreeClassifier(
            criterion=para["criterion"],
            max_depth=para["max_depth"],
            min_samples_split=para["min_samples_split"],
            min_samples_leaf=para["min_samples_leaf"],
        )
        scores = cross_val_score(model, X_train, y_train, cv=3)

        return scores.mean()

    search_config = {
        model: {
            "criterion": ["gini", "entropy"],
            "max_depth": range(1, 21),
            "min_samples_split": range(2, 21),
            "min_samples_leaf": range(1, 21),
        }
    }

    opt = Cypher(X, y)
    opt.search(search_config)
Example #4
def getKeys(self):
    if len(sys.argv) > 2:
        cypher = Cypher(sys.argv[1], sys.argv[2])
        if len(sys.argv) > 3:
            self.pubKey = sys.argv[3]
        return cypher
    # generate keys
    enc = Encryption(79, 83)
    print("priv:" + str(enc.private))
    print("pub:" + str(enc.pub))
    print("mod:" + str(enc.n))
    cypher = Cypher(enc.private, enc.n)
    self.pubKey = enc.pub
    return cypher
Example #5
def auth_access(self, key):
    try:
        stored_pass = self.file[PASS_FIELD]
        token = self.file[TOKEN_FIELD]
        if stored_pass == self.__hash(key, token):
            self.key = key
            self.cypher = Cypher(self.key)
            return True
        else:
            raise PassManagerException('Wrong Key')
    except KeyError:
        self.key = key
        self.cypher = Cypher(self.key)
        token = self.__generate_key()
        self.file[TOKEN_FIELD] = token
        self.file[PASS_FIELD] = self.__hash(self.key, token)
        return True
Example #6
def test_keras():
    from keras.models import Sequential
    from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten
    from keras.datasets import cifar10
    from keras.utils import to_categorical

    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    X_train = X_train[0:1000]
    y_train = y_train[0:1000]

    X_test = X_test[0:1000]
    y_test = y_test[0:1000]

    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)

    def cnn(para, X_train, y_train):
        model = Sequential()

        model.add(
            Conv2D(
                filters=para["filters.0"],
                kernel_size=para["kernel_size.0"],
                activation="relu",
            ))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(10, activation="softmax"))

        model.compile(optimizer="adam",
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])
        model.fit(X_train, y_train, epochs=1)

        _, score = model.evaluate(x=X_test, y=y_test)

        return score

    search_config = {cnn: {"filters.0": [32, 64], "kernel_size.0": [3, 4]}}

    opt = Cypher(X_train, y_train)
    opt.search(search_config)
Example #7
def test_lightgbm():
    from lightgbm import LGBMClassifier

    def model(para, X_train, y_train):
        model = LGBMClassifier(num_leaves=para["num_leaves"],
                               learning_rate=para["learning_rate"])
        scores = cross_val_score(model, X_train, y_train, cv=3)

        return scores.mean()

    search_config = {
        model: {
            "num_leaves": range(2, 20),
            "learning_rate": [0.001, 0.005, 00.01, 0.05, 0.1, 0.5, 1],
        }
    }

    opt = Cypher(X, y)
    opt.search(search_config)
Example #8
def test_xgboost():
    from xgboost import XGBClassifier

    def model(para, X_train, y_train):
        model = XGBClassifier(n_estimators=para["n_estimators"],
                              max_depth=para["max_depth"])
        scores = cross_val_score(model, X_train, y_train, cv=3)

        return scores.mean()

    search_config = {
        model: {
            "n_estimators": range(2, 20),
            "max_depth": range(1, 11)
        }
    }

    opt = Cypher(X, y)
    opt.search(search_config)
Example #9
def connect(self):
    self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.s.connect((self.host, self.port))
    if not self.isKeySet:
        self.s.sendall('need key'.encode())
        data = self.s.recv(1024).decode()
        keys = data.split(',')
        self.cypher = Cypher(keys[0], keys[1])
        self.isKeySet = True
        print(keys)
    self.startConversation()
    self.s.close()
Example #10
def test_catboost():
    from catboost import CatBoostClassifier

    def model(para, X_train, y_train):
        model = CatBoostClassifier(
            iterations=para["iterations"],
            depth=para["depth"],
            learning_rate=para["learning_rate"],
        )
        scores = cross_val_score(model, X_train, y_train, cv=3)

        return scores.mean()

    search_config = {
        model: {
            "iterations": [1],
            "depth": range(2, 10),
            "learning_rate": [0.001, 0.005, 00.01, 0.05, 0.1, 0.5, 1],
        }
    }

    opt = Cypher(X, y)
    opt.search(search_config)
Example #11
def test_func_return():
    def model1(para, X, y):
        model = DecisionTreeClassifier(
            criterion=para["criterion"],
            max_depth=para["max_depth"],
            min_samples_split=para["min_samples_split"],
            min_samples_leaf=para["min_samples_leaf"],
        )
        scores = cross_val_score(model, X, y, cv=3)

        return scores.mean(), model

    search_config1 = {
        model1: {
            "criterion": ["gini", "entropy"],
            "max_depth": range(1, 21),
            "min_samples_split": range(2, 21),
            "min_samples_leaf": range(1, 21),
        }
    }

    opt = Cypher(X, y)
    opt.search(search_config1)
Example #12
def test_TabuOptimizer():
    opt0 = Cypher(
        search_config,
        optimizer="TabuSearch",
        n_iter=n_iter_min,
        random_state=random_state,
        warm_start=warm_start,
    )
    opt0.search(X, y)

    opt1 = Cypher(
        search_config,
        optimizer="TabuSearch",
        n_iter=n_iter_max,
        random_state=random_state,
        warm_start=warm_start,
    )
    opt1.search(X, y)

    assert opt0._optimizer_.score_best < opt1._optimizer_.score_best
Example #13
def test_StochasticHillClimbing():
    opt0 = Cypher(
        search_config,
        optimizer="StochasticHillClimbing",
        n_iter=n_iter_min,
        random_state=random_state,
        warm_start=warm_start,
    )
    opt0.search(X, y)

    opt1 = Cypher(
        search_config,
        optimizer="StochasticHillClimbing",
        n_iter=n_iter_max,
        random_state=random_state,
        warm_start=warm_start,
    )
    opt1.search(X, y)

    assert opt0._optimizer_.score_best < opt1._optimizer_.score_best
Example #14
def test_EvolutionStrategy():
    opt0 = Cypher(
        search_config,
        optimizer="EvolutionStrategy",
        n_iter=n_iter_min,
        random_state=random_state,
        warm_start=warm_start,
    )
    opt0.search(X, y)

    opt1 = Cypher(
        search_config,
        optimizer="EvolutionStrategy",
        n_iter=n_iter_max,
        random_state=random_state,
        warm_start=warm_start,
    )
    opt1.search(X, y)

    assert opt0._optimizer_.score_best < opt1._optimizer_.score_best
Example #15
def test_ParallelTempering():
    opt0 = Cypher(
        search_config,
        optimizer="ParallelTempering",
        n_iter=n_iter_min,
        random_state=random_state,
        warm_start=warm_start,
    )
    opt0.search(X, y)

    opt1 = Cypher(
        search_config,
        optimizer="ParallelTempering",
        n_iter=n_iter_max,
        random_state=random_state,
        warm_start=warm_start,
    )
    opt1.search(X, y)

    assert opt0._optimizer_.score_best < opt1._optimizer_.score_best
Example #16
def test_SimulatedAnnealing():
    opt0 = Cypher(
        search_config,
        optimizer="SimulatedAnnealing",
        n_iter=n_iter_min,
        random_state=random_state,
        warm_start=warm_start,
    )
    opt0.search(X, y)

    opt1 = Cypher(
        search_config,
        optimizer="SimulatedAnnealing",
        n_iter=n_iter_max,
        random_state=random_state,
        warm_start=warm_start,
    )
    opt1.search(X, y)

    assert opt0._optimizer_.score_best < opt1._optimizer_.score_best
Example #17
search_config = {
    model0: {
        "n_estimators": range(10, 200, 10),
        "criterion": ["gini", "entropy"],
        "max_features": np.arange(0.05, 1.01, 0.05),
        "min_samples_split": range(2, 21),
        "min_samples_leaf": range(1, 21),
        "bootstrap": [True, False],
    },
    model1: {
        "n_estimators": range(10, 200, 10),
        "criterion": ["gini", "entropy"],
        "max_features": np.arange(0.05, 1.01, 0.05),
        "min_samples_split": range(2, 21),
        "min_samples_leaf": range(1, 21),
        "bootstrap": [True, False],
    },
    model2: {
        "n_estimators": range(10, 200, 10),
        "learning_rate": [1e-3, 1e-2, 1e-1, 0.5, 1.0],
        "max_depth": range(1, 11),
        "min_samples_split": range(2, 21),
        "min_samples_leaf": range(1, 21),
        "subsample": np.arange(0.05, 1.01, 0.05),
        "max_features": np.arange(0.05, 1.01, 0.05),
    },
}

opt = Cypher(search_config, n_iter=30, n_jobs=4)
opt.search(X, y)
Example #18
from cypher import Cypher

data = load_breast_cancer()
X, y = data.data, data.target


def model(para, X, y):
    rgf = RGFClassifier(
        max_leaf=para["max_leaf"],
        reg_depth=para["reg_depth"],
        min_samples_leaf=para["min_samples_leaf"],
        algorithm="RGF_Sib",
        test_interval=100,
        verbose=False,
    )
    scores = cross_val_score(rgf, X, y, cv=3)

    return scores.mean()


search_config = {
    model: {
        "max_leaf": range(1000, 10000, 100),
        "reg_depth": range(1, 21),
        "min_samples_leaf": range(1, 21),
    }
}

opt = Cypher(search_config, n_iter=5)
opt.search(X, y)
Example #19
def collect_data(runs, X, y, opt_list, search_config, n_iter, opt_dict):
    time_c = time.time()

    data_runs_1 = []
    data_runs_2 = []
    for run in tqdm.tqdm(range(runs)):
        print("\nRun nr.", run, "\n")
        total_time_list = []
        eval_time_list = []

        for key in opt_list:
            print("optimizer:", key)

            n_iter_temp = n_iter
            opt_dict_temp = opt_dict

            if key == "ParallelTempering":
                n_iter_temp = int(n_iter / 10)

            if key == "ParticleSwarm":
                n_iter_temp = int(n_iter / 10)

            if key == "EvolutionStrategy":
                n_iter_temp = int(n_iter / 10)

            opt_obj = Cypher(search_config,
                             optimizer=key,
                             n_iter=n_iter_temp,
                             **opt_dict_temp)
            opt_obj.search(X, y)
            total_time = opt_obj.get_total_time()
            eval_time = opt_obj.get_eval_time()

            total_time_list.append(total_time)
            eval_time_list.append(eval_time)

        total_time_list = np.array(total_time_list)
        eval_time_list = np.array(eval_time_list)

        data_runs_1.append(total_time_list)
        data_runs_2.append(eval_time_list)

    data_runs_1 = np.array(data_runs_1)
    data_runs_2 = np.array(data_runs_2)

    print("\nCreate Dataframe\n")

    print("data_runs_1", data_runs_1, data_runs_1.shape)

    data = pd.DataFrame(data_runs_1, columns=opt_list)

    model_name = list(search_config.keys())[0]

    calc_optimizer_time_name = "total_time_" + model_name.__name__

    file_name = str(calc_optimizer_time_name)
    data.to_csv(file_name, index=False)

    data = pd.DataFrame(data_runs_2, columns=opt_list)

    calc_optimizer_time_name = "eval_time_" + model_name.__name__

    file_name = str(calc_optimizer_time_name)
    data.to_csv(file_name, index=False)

    print("data collecting time:", time.time() - time_c)
Example #20
from cypher import Cypher
from token import CONFIG

token = CONFIG['token']
get_url = 'https://api.codenation.dev/v1/challenge/dev-ps/generate-data?token={}'.format(token)
post_url = 'https://api.codenation.dev/v1/challenge/dev-ps/submit-solution?token={}'.format(token)
filename = 'answer.json'

c = Cypher(get_url, post_url, filename)
print(c.decrypt())
c.update()
Example #21
    model = para["conv_layer.0"](model)
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(para["neurons.0"]))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation("softmax"))

    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.fit(X_train, y_train, epochs=25, batch_size=128)

    _, score = model.evaluate(x=X_test, y=y_test)

    return score


search_config = {
    cnn: {
        "conv_layer.0": [conv1, conv2, conv3],
        "neurons.0": range(100, 1000, 100)
    }
}

opt = Cypher(search_config, n_iter=5)
opt.search(X_train, y_train)
Example #22
import ray

data = load_breast_cancer()
X, y = data.data, data.target


def gbc_(para, X, y):
    model = GradientBoostingClassifier(
        n_estimators=para["n_estimators"],
        max_depth=para["max_depth"],
        min_samples_split=para["min_samples_split"],
    )
    scores = cross_val_score(model, X, y)

    return scores.mean()


search_config = {
    gbc_: {
        "n_estimators": range(1, 20, 1),
        "max_depth": range(2, 12),
        "min_samples_split": range(2, 12),
    }
}

ray.init(num_cpus=4)

opt = Cypher(X, y)
opt.search(search_config, n_jobs=4)
Example #23
from cypher import Cypher
from encryption import Encryption

print('---enc---')
enc = Encryption(79, 83)

print("Pub:" + str(enc.pub))
print("Priv:" + str(enc.private))
print("mod:" + str(enc.n))
print("phiN:" + str(enc.phiN))

print('---text---')
text = 'ik ben joeri'  #message
c = Cypher(enc.pub, enc.n)
c2 = Cypher(enc.private, enc.n)
crypt = c.chyperString(text)
print("plain:" + str(text))
print("encrypt:")
print(crypt)
print("decrypt:" + str(c2.deChyperList(crypt.split(','))))
Example #24
def test_BayesianOptimizer():
    opt = Cypher(X, y)
    opt.search(search_config, n_iter=n_iter, optimizer="Bayesian")
Example #25
def test_EvolutionStrategyOptimizer():
    opt = Cypher(X, y)
    opt.search(search_config, n_iter=n_iter, optimizer="EvolutionStrategy")
Example #26
def test_ParticleSwarmOptimizer():
    opt = Cypher(X, y)
    opt.search(search_config, n_iter=n_iter, optimizer="ParticleSwarm")
Example #27
def test_ParallelTemperingOptimizer():
    opt = Cypher(X, y)
    opt.search(search_config, n_iter=n_iter, optimizer="ParallelTempering")
Example #28
def test_StochasticTunnelingOptimizer():
    opt = Cypher(X, y)
    opt.search(search_config, n_iter=n_iter, optimizer="StochasticTunneling")
Example #29
import numpy as np
from cypher import Cypher


def himmelblau(para, X, y):
    """Himmelblau's function"""

    return -((para["x"]**2 + para["y"] - 11)**2 +
             (para["x"] + para["y"]**2 - 7)**2)


x_range = np.arange(0, 10, 0.1)

search_config = {himmelblau: {"x": x_range, "y": x_range}}

opt = Cypher(search_config, n_iter=1000000)
# The objective ignores X and y, so dummy placeholders are passed to search().
opt.search(0, 0)
Example #30
def test_SimulatedAnnealingOptimizer():
    opt = Cypher(X, y)
    opt.search(search_config, n_iter=n_iter, optimizer="SimulatedAnnealing")