Example #1
def nn_plotter():
    learning_rate_list = [0.5, 0.1, 0.01, 0.001]
    epochs_list = [500]
    regularisation_list = [0, 1, 2]
    reg_param_list = [0.0001, 0.001, 0.01, 0.1, 0.2, 0.5, 1.0]
    
    for i in epochs_list:
        for j in learning_rate_list:
            for l in regularisation_list:
                for m in reg_param_list:
                    ds = hp.load_ds(hp.PATH, hp.FIXED)
                    X, y = hp.split(ds)

                    y_size = len(y)

                    # train on 100% of the data (the 100/100 factor); evaluation uses 10-fold CV
                    training_size = int((100 / 100) * y_size)

                    train_X, train_y, test_X, test_y = hp.random_train_test(X, y, training_size)
                    
                    reg_str = ""
                    if l != 0:
                        reg_str = "Regularisation=L" + str(l) + ", Scale=" + str(m)
                    print("\nStarting Neural Network, Epochs=" + str(i) + ", Learning Rate=" + str(j) + reg_str + "\n10-fold Cross Validation")
                    x, y, tx, ty = nn.neural_network(train_X, train_y, test_X, test_y, i, j, [l, m], True, 0)
                    filename = "../figs/RED_NN_E" + str(i) + "_LR" + str(j) + "_R" + str(l) + "_S" + str(m) + ".png"
                    title = "Red Wine\nNeural Network 10-fold Cross Validation, Epochs=" + str(i) + ", Learning Rate=" + str(j) +"\n Hidden ReLU, Output ReLU\n" + reg_str
                    hp.plotter(title, x, y, tx, ty, 90, filename, True)
                    print("Finished Neural Network.\n")
Example #2
  def worker(line):
    try:
      result = execCommand(split(line), True)
      print '' if result is None else '%s\n' % str(result)

    except HandledException as e:
      err('%s\n' % e)
Example #3
 def reinit_chromecasts(self, update, context):
     found_chromecasts = self._cast.get_available_chromecasts()
     keyboard = helpers.split(found_chromecasts, 3)
     update.message.reply_text('Choose',
                               reply_markup=ReplyKeyboardMarkup(
                                   keyboard, one_time_keyboard=True),
                               parse_mode='Markdown')
     return self._choose_chromecast
Example #4
def feature_loop_script(data, filter_feature, feature_value):
    # Script for the whole feature-creation process
    feature_value = avg_by_feature(data, filter_feature, feature_value)
    date_features = make_date_features(feature_value)
    X_train, X_valid = helpers.split(feature_value, settings.train_size)
    X_historical, date_avg_features = average_date_features(X_train, X_valid, date_features)
    X_final, features_final = create_features(data, X_historical, date_avg_features)
    return X_final, features_final
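
helpers.split is called here with a dataset and settings.train_size; Examples #8 and #14 use the same form with fractions such as .7. A minimal sketch, assuming a time-ordered pandas DataFrame cut into a training prefix and a validation suffix:

def split(df, train_size):
    # Hypothetical sketch: cut a time-ordered DataFrame at the given
    # fraction, e.g. train_size=0.7 -> first 70% train, last 30% validation.
    cut = int(len(df) * train_size)
    return df.iloc[:cut], df.iloc[cut:]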
Example #5
def block_decryption(ct_block, key):
    # Generate keystream
    keystream = get_keystream(key)
    # w0-w3 according to the document
    word_split = helpers.split(ct_block)
    # k0-k3 according to the document
    key_split = helpers.split(key)
    # r0-r3 according to the document
    r = [0] * 4
    # storage for undoing final swap
    y = [0] * 4
    # storage for ciphertext after whitening
    c = [0] * 4
    # Round counter - set to maximum
    round_number = 45
    # Whiten input
    for n in range(4):
        r[n] = word_split[n] ^ key_split[n]
    # 16 rounds of F(); avoid shadowing the built-in round()
    for rnd in range(constants.ROUNDS):
        f0, f1 = F(r[0], r[1], keystream[15 - rnd], round_number)
        tmp_r0 = r[0]
        tmp_r1 = r[1]
        r[0] = r[2] ^ f0
        r[1] = r[3] ^ f1
        r[2] = tmp_r0
        r[3] = tmp_r1
        round_number = round_number - 3
    # Undo final swap
    y[0] = r[2]
    y[1] = r[3]
    y[2] = r[0]
    y[3] = r[1]
    # Whiten output
    for n in range(4):
        c[n] = y[n] ^ key_split[n]
        # Shift left so the 16-bit words can be OR-ed back together; the shift decreases by 16 for each word
        c[n] = c[n] << (constants.KEYSIZE - (16 * (n + 1)))
    # Return encrypted blocks
    return c[0] | c[1] | c[2] | c[3]
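
helpers.split is applied here to both the ciphertext block and the key, and the whitening loop consumes exactly four 16-bit words. A minimal sketch, assuming 64-bit values (KEYSIZE = 64 is an assumption inferred from the recombination shifts above):

KEYSIZE = 64  # assumed block/key width in bits

def split(block):
    # Hypothetical sketch: break a 64-bit value into four 16-bit words,
    # most significant first, mirroring c[n] << (KEYSIZE - 16 * (n + 1)).
    return [(block >> (KEYSIZE - 16 * (n + 1))) & 0xFFFF for n in range(4)]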
Example #6
    def provide_names(self, update, context):
        self._current_watching = mal.get_current_watching()
        message = ''
        for some_anime in self._current_watching:
            message += f'[{some_anime["name"]}]({self.mal_prefix + some_anime["url"]}) - ' \
                 f'*{some_anime["num_watched_episodes"]}*/{some_anime["anime_num_episodes"]}\n'

        reply_keyboard = helpers.split(
            [i['name'] for i in self._current_watching], n=2)

        update.message.reply_text(message,
                                  reply_markup=ReplyKeyboardMarkup(
                                      reply_keyboard, one_time_keyboard=True),
                                  parse_mode='Markdown')
Example #7
def linr_plotter():
    learning_rate_list = [0.05]
    epochs_list = [500]
    cost_fn_list = [1, 2, 4, 5]
    # cost_fn_list = [3]

    regularisation_list = [3]
    # reg_param_list = [0.0001, 0.001, 0.01, 0.1]
    lamb = [0.001, 0.01, 0.1, 0.3, 0.5, 0.7, 1.0]
    alpha = [0.001, 0.01, 0.1, 0.3, 0.5, 0.7, 1.0]

    for i in epochs_list:
        for j in learning_rate_list:
            for k in cost_fn_list:
                for l in lamb:
                    for m in alpha:

                        ds = hp.load_ds(hp.PATH, hp.WHITEFIXED)
                        X, y = hp.split(ds)

                        y_size = len(y)

                        # train on 100% of the data (the 100/100 factor); evaluation uses 10-fold CV
                        training_size = int((100 / 100) * y_size)

                        train_X, train_y, test_X, test_y = hp.random_train_test(X, y, training_size)

                        loss_str = "L1"
                        if k == 1:
                            loss_str = "L1"
                        elif k == 2:
                            loss_str = "L2"
                        elif k == 3:
                            loss_str = "Elastic Net"
                        elif k == 4:
                            loss_str = "SVR"
                        elif k == 5:
                            loss_str = "Huber"
                        
                        reg_str = " Elastic Net: " + "α=" + str(m) + " λ=" + str(l)
                        # if l != 0:
                            # reg_str = ", Regularisation=L" + str(l) + ", Scale=" + str(m)
                        el = [l, m]
                        reg = [3,0.0]
                        print("\nStarting Linear Regression, Epochs=" + str(i) + ", Learning Rate=" + str(j) + ", Loss Function=" + loss_str + reg_str + "\n10-fold Cross Validation")
                        x, y, tx, ty = linr.linear_regression(train_X, train_y, test_X, test_y, i, j, k, reg, True, el)
                        filename = "../figs/WHITE_ELASTIC_LINR_E" + str(i) + "_LR" + str(j) + "_LF" + str(k) + "_R" + str(l) + "_S" + str(m) + ".png"
                        title = "White Wine\nLinear Regression, Epochs=" + str(i) + ", Learning Rate=" + str(j) +"\n Loss Function=" + loss_str + reg_str
                        hp.plotter(title, x, y, tx, ty, 90, filename, True)
                        print("Finished Linear Regression.\n")
Example #8
def TestingModel_PlotGrid(model="linear_regression"):
    '''Choose either `linear_regression` or `XGB` (TODO: make more general).
    Loop over the values of the chosen feature, training, testing and
    plotting the results of each one in a grid.'''
    sales = annotate.read()
    #get a list of the unique values for the feature
    feature = settings.filter_by_feature
    feature_list = list(sales[feature].unique())
    fig, axs = plt.subplots(math.ceil(len(feature_list) / 2),
                            2,
                            figsize=(15, 15),
                            sharex='col',
                            sharey='row')
    fig.autofmt_xdate()
    #plt.xticks(rotation=70)
    axs = axs.ravel()
    for i, item in enumerate(feature_list):
        print(item)
        X_final, features_final = annotate.feature_loop_script(
            sales, feature, item)
        X_train_final, X_valid_final = split(X_final, .7)
        if model == "linear_regression":
            X_LinRegOut, predictions, model_linreg, error = predict.train_test_LinearReg(
                X_train_final, X_valid_final, features_final)
        elif model == "XGB":
            X_out, gbm, predictions, error = predict.train_test_XGboost(
                X_train_final, X_valid_final, features_final,
                settings.XGBparams, settings.XGBnum_boost_round)
            save('feature_importance_' + str(item))

        else:
            print("not a valid model")
            break

        axs[i].plot(X_train_final['sales'], label="train")
        axs[i].plot(X_valid_final['sales'], label="validate")
        axs[i].plot(predictions, label='predictions')
        axs[i].legend(loc='best')
        axs[i].set_title(item + " rmse: " + str(error))
    fig.text(0.09,
             0.55,
             'Sales',
             fontsize=20,
             ha='center',
             va='center',
             rotation='vertical')

    save(feature + "_using_" + model + "_grid")
Example #9
    def induce_decision_tree(self, dataset):
        current_shares = hp.get_probabilities(hp.get_frequency(dataset),
                                              self.init_freq)
        max_share = max(current_shares.values())

        best_split = hp.find_best_split(dataset)
        # stop: no more information gain, or the pre-pruning hyperparameter applies
        if (best_split.attribute is None
                or max_share < self.max_share_hyperparameter):
            node = nd.LeafNode(hp.get_frequency(dataset), self.init_freq)
        else:
            true_data, false_data = hp.split(dataset, best_split)
            child_true = self.induce_decision_tree(true_data)
            child_false = self.induce_decision_tree(false_data)
            node = nd.DecisionNode(best_split, child_true, child_false)
        return node
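
Here hp.split partitions the dataset on the chosen rule. A minimal sketch, assuming rows are indexable and the rule exposes attribute (a column index) and value (a threshold); value is an assumption, since only attribute appears above:

def split(dataset, rule):
    # Hypothetical sketch: partition rows on the split rule.
    true_data = [row for row in dataset if row[rule.attribute] <= rule.value]
    false_data = [row for row in dataset if row[rule.attribute] > rule.value]
    return true_data, false_data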
Example #10
def init():
  if Settings.get('helper_tasks', True):
    import helper_tasks
    helper_tasks.main()

  while True:
    try:
      line = raw_input('>')

      if line:
        result = execCommand(split(line), True)
        print '' if result is None else '%s\n' % str(result)

    except HandledException as e:
      err('%s\n' % e)

    except EOFError: # end of input (Ctrl-D, or Ctrl-Z on Windows)
      exit()
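
In this example (and in worker() from Example #2), split is applied to a raw command line before dispatch to execCommand. A plausible minimal stand-in is shlex-style tokenization; this is an assumption, and the project may simply use str.split:

import shlex

def split(line):
    # Hypothetical sketch: tokenize a command line into argv-style words,
    # honouring quotes, before handing it to execCommand.
    return shlex.split(line)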
Example #11
def _make_hints_text(digits, lookup):
    c = 0
    for triplet in split(digits, 3):
        s = "".join(str(d) for d in triplet)
        try:
            hint = lookup[s]
        except KeyError:
            raise Exception("We don't have a hint for %s." % (s, ))
        line = "%s - %s" % (s, hint)
        yield line

        c = c + 1
        if not c % 2:
            yield ""
        if not c % 10:
            yield "---"
            yield ""
    yield ""
Example #12
 def test_50_element_list(self):
     input_s = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcedfghijklmnopqrstuvwx"
     expected = [input_s[:30], input_s[30:]]
     self.assertEqual(list(split(input_s, 30)), expected)
Example #13
def print_digits(l):
    for r in split(l, 30):
        print(make_line(r))
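
Examples #3, #6, #11, #12 and #13 all use the chunking form of split, and the test in Example #12 pins down its semantics: pieces of length n, with a shorter final piece. A minimal generator sketch consistent with that test:

def split(seq, n):
    # Chunk seq into pieces of length n; the last piece may be shorter.
    # Per Example #12: list(split(s, 30)) == [s[:30], s[30:]] for a 50-char s.
    for i in range(0, len(seq), n):
        yield seq[i:i + n]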
Example #14
        X_project['predictions'] = model.predict(X_project[features])
    #plot the results
    plt.figure(figsize=(20, 10))
    plt.xticks(rotation=70)
    plt.plot(data.sales, label="historical sales data")
    plt.plot(X_project.predictions, label="future sales projections")
    plt.plot(40 * X_project.promo1 + 50, label="promo1 state")
    plt.plot(40 * X_project.promo2 + 50, label="promo2 state")
    plt.title("projected sales")
    plt.ylabel("Sales")
    plt.legend(loc='best')
    save("projections_" + str(settings.feature_value))

    return X_project


if __name__ == "__main__":
    sales, filtered_data, features = read(filename=str(settings.feature_value))
    X_train, X_valid = split(filtered_data, settings.train_size)
    X_out, gbm, yhat_valid, error = train_test_XGboost(
        X_train,
        X_valid,
        features,
        settings.XGBparams,
        settings.XGBnum_boost_round,
        verbose=False)
    save('feature_importance_' + settings.feature_value)
    X_out, yhat, linreg, error = train_test_LinearReg(X_train, X_valid,
                                                      features)
    X_project = forecast_future(filtered_data, gbm, features, use_XGBOOST=1)
Example #15
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras

import numpy as np
import matplotlib.pyplot as plt

from helpers import split, fetch_data_set, shuffle_data

save = True

x, y, m = fetch_data_set(percent=100)

x, y = shuffle_data(x, y)

x, x_test = split(x, [80])
y, y_test = split(y, [80])

print(x.shape, y.shape, x_test.shape, y_test.shape)

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(400, 1)),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10),
])

model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
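
Here split takes yet another signature: split(x, [80]) unpacks into two arrays, so the second argument reads as a list of percentage boundaries. A minimal sketch, assuming NumPy arrays and numpy.split semantics (the helper itself is not shown):

import numpy as np

def split(arr, percents):
    # Hypothetical sketch: split arr along axis 0 at percentage boundaries,
    # e.g. split(x, [80]) -> (first 80% of rows, remaining 20%).
    idx = [int(len(arr) * p / 100) for p in percents]
    return np.split(arr, idx)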