Example #1
def query_handler(call):
    typeS = call.data

    if typeS == "title":
        bot.send_message(call.message.chat.id,
                         load_result(numberOfSentences=1, typeS="title"))
    elif typeS == "sentence":
        bot.send_message(call.message.chat.id, "Сколько предложений?")  # "How many sentences?"
    elif typeS == "wiki":
        send_wiki(bot, call.message.chat.id)
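
The handler above branches on call.data values "title", "sentence" and "wiki", so the menu that triggered the callback presumably used those values. A minimal sketch with pyTelegramBotAPI, assuming the same global bot instance as the snippet; the send_menu helper and button labels are hypothetical and not part of the original code:

from telebot import types

def send_menu(chat_id):
    # Inline keyboard whose callback_data values match the branches
    # handled by query_handler above.
    markup = types.InlineKeyboardMarkup()
    markup.add(types.InlineKeyboardButton("Title", callback_data="title"),
               types.InlineKeyboardButton("Sentences", callback_data="sentence"),
               types.InlineKeyboardButton("Wiki", callback_data="wiki"))
    bot.send_message(chat_id, "What should I generate?", reply_markup=markup)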
Example #2
def numberator(message):
    print("Numberator. USER: "******"sentence"))
        else:
            bot.send_message(
                message.from_user.id,
                "У меня не такая большая фантазия( Попробуй попросить у меня текст длиной меньше 25 предложений."
            )
    else:
        bot.send_message(message.from_user.id,
                         "Пишите цифрами! Если хотите начать напишите /start")
Example #3
def selection(P_t, Q_t):
    shared_code_acc = utils.load_result()

    # print(f'P_t: {P_t}')
    # print(f'Q_t: {Q_t}')
    # print(f'f: {shared_code_acc}')

    def select_p1(select_pool):
        two = random.sample(range(len(select_pool)), 2)
        a1 = '-'.join([str(i) for i in select_pool[two[0]]])
        a2 = '-'.join([str(i) for i in select_pool[two[1]]])
        p1 = select_pool[two[0]] if shared_code_acc[a1] > shared_code_acc[
            a2] else select_pool[two[1]]
        return p1

    P_t1 = []
    Pt_Qt = P_t + Q_t
    while len(P_t1) < len(P_t):
        p = select_p1(Pt_Qt)
        P_t1.append(p)

    # If the best individual is not in P_t1, replace the worst one with the best.
    max_code = []
    for k, v in shared_code_acc.items():
        if v == max(shared_code_acc.values()):
            max_code_str = k
            max_code = k.strip().split('-')
            max_code = [int(i) for i in max_code]
        if v == min(shared_code_acc.values()):
            min_code_str = k

    is_max = False
    for i, v in enumerate(P_t1):
        v_str = utils.list2str(v)
        if v_str == max_code_str:
            is_max = True
            break
    if not is_max:
        min_i = 0
        for i, v in enumerate(P_t1):
            v_str = utils.list2str(v)
            if v_str == min_code_str:
                min_i = i
                break
        P_t1[min_i] = max_code
    return P_t1
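
selection() assumes utils.load_result() returns a dict mapping a '-'-joined encoding to its fitness (accuracy), and utils.list2str() builds the same key. A minimal, self-contained sketch of the binary-tournament step, with fabricated encodings and accuracies for illustration only:

import random

fitness = {
    "2-3-6-1-1": 0.91,  # made-up fitness values
    "2-5-4-2-1": 0.87,
    "1-4-4-2-1": 0.83,
}
pool = [[2, 3, 6, 1, 1], [2, 5, 4, 2, 1], [1, 4, 4, 2, 1]]

def tournament(pool, fitness):
    # Draw two distinct candidates and keep the fitter one,
    # mirroring select_p1() above.
    i, j = random.sample(range(len(pool)), 2)
    key_i = '-'.join(str(x) for x in pool[i])
    key_j = '-'.join(str(x) for x in pool[j])
    return pool[i] if fitness[key_i] > fitness[key_j] else pool[j]

print(tournament(pool, fitness))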
Example #4
def populate(alphas, save_training=False):
    """Populate R1, R2 and R3"""
    result = utils.load_result(parse_key=True)

    if save_training:
        save_dir = os.path.join(config.analysis_dir, "training_result")
        utils.result_to_table(result, save_dir)

    # populate R1
    result_vanilla = group_by_max(result)
    populate_relation(result_vanilla, "R1", alphas=alphas)

    # populate R2
    result_best_model = group_by_best_model(result)
    populate_relation(result_best_model, "R2", alphas=alphas)

    # populate R3
    result_best_model_clean = group_by_best_model_clean(result_best_model)
    populate_relation(result_best_model_clean, "R3", alphas=alphas)
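
populate() relies on project helpers that are not shown here. Judging from the key format built in Example #9 below ("data_dir/v<version>/<error>/<train_file>/<model>/<seed>"), a reduction in the spirit of group_by_max might keep the best run per key prefix; the function and metric name below are guesses, not the project's implementation:

def group_by_max_sketch(result, metric="val_acc"):
    # Keep, for every key prefix without the seed, the run with the best metric.
    best = {}
    for key, res in result.items():
        group = "/".join(key.split("/")[:-1])  # drop the seed component
        if group not in best or res[metric] > best[group][metric]:
            best[group] = res
    return best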
Example #5
def gen_offspring(P_t):
    shared_code_acc = utils.load_result()

    # print(shared_code_acc)
    # 1. Crossover
    def select_p():
        two = random.sample(range(len(P_t)), 2)
        a1 = '-'.join([str(i) for i in P_t[two[0]]])
        a2 = '-'.join([str(i) for i in P_t[two[1]]])
        p1 = P_t[two[0]] if shared_code_acc[a1] > shared_code_acc[a2] else P_t[
            two[1]]
        return p1

    Q_t = []
    while len(Q_t) < len(P_t):
        p1 = select_p()
        p2 = select_p()
        while '-'.join(str(i) for i in p1) == '-'.join(str(i) for i in p2):
            p2 = select_p()
        o1, o2 = crossover(p_1=p1,
                           p_2=p2,
                           crossover_rate=paras['crossover_rate'])
        Q_t.append(o1)
        Q_t.append(o2)
    # 2. Mutation
    Q_tt = []
    for p in Q_t:
        p1 = mutation(p1=p, mutation_rate=paras['mutation_rate'])
        Q_tt.append(p1)
    Q_t = Q_tt
    return Q_t


# crossover(p1=[2,3,6,1,1], p2=[2,5,4,2,1], crossover_rate=1.0)
# mutation(p=[2, 3, 4, 1, 1], mutation_rate=1.0)
# p = quchong(p=[2, 3, 3, 4, 4, 1, 2, 3, 4])
# print(p)
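
crossover() and mutation() are project helpers whose bodies are not shown; the sketch below only mirrors the signatures used by gen_offspring(). Single-point crossover, per-gene resetting, and the gene_values alphabet are assumptions:

import random

def crossover(p_1, p_2, crossover_rate=1.0):
    # With probability crossover_rate, swap the tails after a random cut point.
    if random.random() > crossover_rate:
        return list(p_1), list(p_2)
    point = random.randint(1, len(p_1) - 1)
    return p_1[:point] + p_2[point:], p_2[:point] + p_1[point:]

def mutation(p1, mutation_rate=0.1, gene_values=(1, 2, 3, 4, 5, 6)):
    # Replace each gene with a random value with probability mutation_rate.
    return [random.choice(gene_values) if random.random() < mutation_rate else g
            for g in p1]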
Example #6
def send_random_text_question(message):
    bot.send_message(message.from_user.id,
                     load_result(numberOfSentences=1, typeS="title"))
Example #7
def ensamble(inputs, baseline_result):
    pred = np.zeros((200000, len(inputs)), dtype=int)
    for i, result in enumerate(inputs):
        pred[:, i] = load_result(result)
    baseline = load_result(baseline_result)
    return vote(pred, baseline)
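
vote() is not shown above; a plausible reading of ensamble() is a per-row majority vote over the stacked predictions, falling back to the baseline on ties. A hedged sketch under that assumption:

import numpy as np

def vote(pred, baseline):
    # pred: (n_samples, n_models) integer predictions; baseline: (n_samples,).
    out = np.empty(pred.shape[0], dtype=int)
    for i in range(pred.shape[0]):
        values, counts = np.unique(pred[i], return_counts=True)
        winners = values[counts == counts.max()]
        # Use the baseline prediction when the vote is tied.
        out[i] = baseline[i] if len(winners) > 1 else winners[0]
    return out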
Example #8
args = parser.parse_args()

if __name__ == '__main__':
    x = np.arange(1, 1 + epochs, 1)
    xlim = [0, epochs + 1]
    ylim = [0.4, 1.4]
    xlabel = 'number of epochs'
    ylabel = 'root mean square error'
    plots = []
    val_plots = []
    plots_labels = []
    if args.exp == 'default':
        result_dir = './result'
        rmse_train, mae_train, rmse_test, mae_test = load_result(
            os.path.join(
                result_dir,
                ("best_result_" if args.best else "result_") + args.activation +
                "_" + str(args.hidden) + "_" + args.optimizer))
        title = "Activation function of hidden layer: " + args.activation + "\n Number of hidden units: " + str(
            args.hidden) + "\n Optimization method: " + args.optimizer
        if args.best:
            print(title)
            print('root mean square error of best model on training data: ',
                  rmse_train)
            print('root mean square error of best model on test data: ',
                  rmse_test)
        else:
            plots = [rmse_train, rmse_test]
            plots_labels = ['training', 'test']
            fig_file = args.activation + "_" + str(
                args.hidden) + "_" + args.optimizer
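
The snippet above prepares x, plots, plots_labels, fig_file and the axis settings, but the plotting call itself is cut off. A minimal matplotlib continuation consistent with those variables; the function name and output format are assumptions:

import matplotlib.pyplot as plt

def plot_curves(x, plots, plots_labels, title, xlabel, ylabel, xlim, ylim, fig_file):
    # Draw one curve per entry in plots, labelled by plots_labels.
    for curve, label in zip(plots, plots_labels):
        plt.plot(x, curve, label=label)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.legend()
    plt.savefig(fig_file + ".png")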
Example #9
def one_split_experiment(dataset,
                         n_retrain=5,
                         seed=1,
                         n_jobs=1,
                         nosave=True,
                         error_type=None):
    """Run experiments on one dataset for one split.

    Args:
        dataset (dict): dataset dict in config.py
        models (list): list of model dict in model.py
        nosave (bool): whether not save results
        seed: experiment seed
        n_retrain: times of repeated experiments
    """
    # generate seeds for n experiments
    np.random.seed(seed)
    seeds = np.random.randint(10000, size=n_retrain)

    # load result dict
    result = utils.load_result(dataset['data_dir'])
    result2019 = utils.load_result2019(dataset['data_dir'])

    # run experiments
    for error in dataset["error_types"]:
        if error_type is not None and error != error_type:
            continue

        for train_file in utils.get_train_files(error):
            for model in config.models:
                for seed in seeds:
                    version = utils.get_version(
                        utils.get_dir(dataset, error, train_file))
                    key = "/".join(
                        (dataset['data_dir'], 'v' + str(version), error,
                         train_file, model['name'], str(seed)))

                    if key in result.keys():
                        print(
                            "Ignore experiment {} that has been completed before."
                            .format(key))
                        continue

                    if key in result2019.keys():
                        hyperparams = result2019[key]["best_params"]
                        # Strip the "_test_acc" suffix; rstrip("_test_acc")
                        # would strip characters, not the suffix.
                        skip_test_files = [
                            k[:-len("_test_acc")]
                            for k in result2019[key].keys() if "_test_acc" in k
                        ]
                    else:
                        hyperparams = None
                        skip_test_files = []

                    print("{} Processing {}".format(datetime.datetime.now(),
                                                    key))
                    res = one_search_experiment(
                        dataset,
                        error,
                        train_file,
                        model,
                        seed,
                        n_jobs=n_jobs,
                        hyperparams=hyperparams,
                        skip_test_files=skip_test_files)

                    if key in result2019.keys():
                        res = {**result2019[key], **res}

                    if not nosave:
                        utils.save_result(dataset['data_dir'], key, res)
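
Hypothetical invocation of one_split_experiment: the concrete values below are made up, but the dict keys ("data_dir", "error_types") are the ones the function actually reads:

if __name__ == "__main__":
    dataset = {"data_dir": "example_dataset",
               "error_types": ["missing_values", "outliers"]}
    one_split_experiment(dataset, n_retrain=3, seed=1, n_jobs=4, nosave=False)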