Example #1
    # Snippet from inside a larger function: assumes numpy as np,
    # tensorflow as tf, and the project's data_set / net objects in scope.
    # Drop the input columns flagged invalid from both networks.
    drops = np.arange(len(data_set.column_valid))[~data_set.column_valid]
    net.drop_inputs(drops)
    net1.drop_inputs(drops)

    n_batch = 16
    n_epochs = 10
    n_al_epochs = 10
    only_real = True
    # Default to every target column, then override with a hand-picked subset.
    y_columns = list(range(data_set.y.shape[1]))
    y_columns = [2, 11, 23]
    # Two identically configured regressors form a two-member committee.
    net.build_multi_reg_k(len(y_columns),
                          activation=tf.keras.layers.LeakyReLU())
    net1.build_multi_reg_k(len(y_columns),
                           activation=tf.keras.layers.LeakyReLU())
    # Start with an empty training set: 10% validation, 90% left unused as
    # the pool that active learning will draw new samples from.
    data_set.split_dataset_random(train_perc=0.0,
                                  val_perc=0.1,
                                  notused_perc=0.9)
    data_set.make_dataset_tensors(y_columns=y_columns, only_real=only_real)
    # Fixed validation generator; steps cover the whole validation split.
    val_sample_size = data_set.val_labels.shape[0]
    validation_steps = val_sample_size // n_batch
    val_gen = data_set.dataset_generator_multi(data_set.val_datas,
                                               data_set.val_labels, n_batch)

    for al in range(n_al_epochs):
        # Query-by-committee: score the same samples with both networks and
        # rank them by how much the two sets of predictions disagree.
        al_res = dataset_predict(net.pre_model, data_set, role=0)[:, 1]
        al_res = al_res.reshape((-1, len(y_columns)))
        al_res1 = dataset_predict(net1.pre_model, data_set, role=0)[:, 1]
        al_res1 = al_res1.reshape((-1, len(y_columns)))
        delta = np.abs(al_res - al_res1).mean(axis=1)
        al_as = delta.argsort()[::-1]  # sample indices, most disagreement first
        # Take at most 200 new samples, only where disagreement exceeds 0.02.
        new_train_num = min(np.sum(delta > 0.02), 200)
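
The example is truncated here, right after computing how many samples to promote. Below is a minimal, self-contained sketch of the disagreement-based selection step this loop is building toward, assuming only that the ranked indices get truncated to the budget; the function name, threshold, and toy data are illustrative, not part of the original API:

    import numpy as np

    def select_by_disagreement(preds_a, preds_b, threshold=0.02, cap=200):
        # Mean absolute disagreement between the two committee members,
        # one score per candidate sample.
        delta = np.abs(preds_a - preds_b).mean(axis=1)
        order = delta.argsort()[::-1]  # most uncertain first
        n_new = min(int(np.sum(delta > threshold)), cap)
        return order[:n_new]

    # Toy pool: two models that mostly agree, plus some noise.
    rng = np.random.default_rng(0)
    a = rng.random((1000, 3))
    b = a + rng.normal(scale=0.05, size=a.shape)
    picked = select_by_disagreement(a, b)
    print(len(picked))  # <= 200 indices to label and add to the training set
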
Example #2
    # Snippet from inside a larger function: assumes tensorflow as tf,
    # datetime, and the project's args / data_set / net objects in scope.
    # Select regression targets by result type: specific named targets for
    # 'cct', the first column for 'sst', otherwise every target column.
    y_columns = list(range(data_set.y.shape[1]))
    if args.res_type == 'cct':
        targets = ['东北.青北一线', '东北.燕董一线', '东北.丰徐二线']
        y_columns = data_set.get_y_indices(targets)
    elif args.res_type == 'sst':
        y_columns = [0]
    column_names = data_set.y.columns[y_columns]
    print("targets:", column_names)
    net.build_multi_reg(len(y_columns), activation=tf.keras.layers.LeakyReLU())

    n_batch = 16
    n_epochs = 10
    only_real = True
    if args.task_type == 'train':
        if args.dataset_split == 'random':
            # 90% train, 5% validation, 5% test.
            ids = data_set.split_dataset_random(ratios=[0.9, 0.05, 0.05])
        else:
            # Chronological split: train on 2018-08-01 through 2018-08-25,
            # test on 2018-08-26 through 2018-08-31; subtracting one second
            # makes the end bounds inclusive.
            dt_train_begin = datetime.datetime(2018, 8, 1)
            dt_train_end = datetime.datetime(2018, 8, 26) - datetime.timedelta(seconds=1)
            dt_test_begin = datetime.datetime(2018, 8, 26)
            dt_test_end = datetime.datetime(2018, 9, 1) - datetime.timedelta(seconds=1)
            ids = data_set.split_dataset_dt(dt_train_begin=dt_train_begin,
                                            dt_train_end=dt_train_end,
                                            dt_test_begin=dt_test_begin,
                                            dt_test_end=dt_test_end,
                                            val_perc=0.1)
        # Sanity check: the split produced samples for both roles 0 and 1.
        assert (0 in ids) and (1 in ids)
        data_set.make_dataset_tensors(y_columns=y_columns, only_real=only_real)
        train_data, train_labels, _ = data_set.get_dataset(0)  # role 0 = train
        train_sample_size = train_labels.shape[0]
        steps_per_epoch = train_sample_size // n_batch  # one full pass per epoch
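
The snippet stops after computing steps_per_epoch. For readers unfamiliar with generator-driven Keras training, here is a self-contained toy sketch of the pattern this sets up; the model, generator, and data below are stand-ins, not the original project's API:

    import numpy as np
    import tensorflow as tf

    # Toy stand-ins for train_data / train_labels.
    x = np.random.rand(320, 8).astype('float32')
    y = np.random.rand(320, 3).astype('float32')

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(32, activation=tf.keras.layers.LeakyReLU()),
        tf.keras.layers.Dense(3),
    ])
    model.compile(optimizer='adam', loss='mse')

    def batch_gen(data, labels, n_batch):
        # Yield random fixed-size batches forever, as Keras expects
        # from a generator passed to fit().
        n = data.shape[0]
        while True:
            idx = np.random.randint(0, n, n_batch)
            yield data[idx], labels[idx]

    n_batch, n_epochs = 16, 10
    steps_per_epoch = x.shape[0] // n_batch  # 320 // 16 = 20 batches per epoch
    model.fit(batch_gen(x, y, n_batch),
              steps_per_epoch=steps_per_epoch,
              epochs=n_epochs)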