def rate_coding(self):
        print("start")
        self.delta_x_r, self.delta_y_r, self.delta_z_r = self.read_input_file(
            'input_draw_coord.txt')

        ## Uncomment the following line to encode the groundtruth data
        # self.delta_x_r, self.delta_y_r, self.delta_z_r = self.read_spike_file('input_true_coord.txt')

        data_in_total = [self.delta_x_r, self.delta_y_r, self.delta_z_r]
        data_in_total_nor = normalization(data_in_total)
        input_data_in_total = np.multiply(
            np.array(data_in_total_nor, dtype=float), 20e-4)
        self.neuron_output1, self.neuron_output2, self.neuron_output3 = population_coding(
            input_data_in_total)
        return self.neuron_output1, self.neuron_output2, self.neuron_output3  #6*3
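# `normalization` is imported from the project's preprocessing helpers and is not
# shown in this snippet; here it appears to act as a per-channel min-max scaler.
# The function below is only a minimal sketch under that assumption, not the
# original implementation.
import numpy as np

def rowwise_minmax_sketch(data):
    """Scale each row (channel) of `data` into the [0, 1] range."""
    data = np.asarray(data, dtype=float)
    mins = data.min(axis=1, keepdims=True)
    maxs = data.max(axis=1, keepdims=True)
    return (data - mins) / (maxs - mins + 1e-12)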
Example #2
EPOCHS = 200
BATCH_SIZE = 24

# 1. Read the data
data = pd.read_csv("../data/denoise.csv")

time_data = pd.read_excel("../data/time.xlsx").values
time_data = time_data[N_TRAIN_WEEKS + TIME_STEPS + 1:]
time_data = time_data.reshape(len(time_data), )
values = data.iloc[:, 1:]

# for NH3-N prediction
# values[['0', '3']] = values[['3', '0']]

# 2. Normalize
scaled, scaler = normalization(values)

# 3. Reshape the data and split into train/test sets
train_X, train_y, test_X, test_y = trian_test_split(scaled, N_TRAIN_WEEKS,
                                                    TIME_STEPS)

mae_array = []
rmse_array = []


def main(test_X, test_y):
    # Predict pH. To predict NH3-N instead, change the function to cnn_lstm_ph2 and change the 0 on line 73 of data_processing to 3.
    m = cnn_lstm_ph3(TIME_STEPS, INPUT_DIMS)
    optimizer = tf.optimizers.Adam(learning_rate=0.001)
    m.compile(optimizer=optimizer, loss='mae')
    # t0 = time.time()
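    # The example stops here. A plausible continuation (an assumption, not the
    # original code; numpy is assumed to be imported as np) fits the model and
    # records MAE/RMSE for the run:
    m.fit(train_X, train_y,
          epochs=EPOCHS, batch_size=BATCH_SIZE,
          validation_data=(test_X, test_y), verbose=2)
    pred_y = m.predict(test_X).reshape(-1)
    true_y = test_y.reshape(-1)
    mae = np.mean(np.abs(pred_y - true_y))
    rmse = np.sqrt(np.mean((pred_y - true_y) ** 2))
    mae_array.append(mae)
    rmse_array.append(rmse)
    return mae, rmse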
Example #3
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
import torch.utils.data as Data
from data_processing import (load_data, normalization, split_train_valid_set,
                             transform_data)
from torch import nn
from torch.nn import init

# 1. Load the data
data = load_data()
train_data, valid_data = split_train_valid_set(data)
train_set = transform_data(train_data, data_path="../../Data/train_set.csv")
valid_set = transform_data(valid_data, data_path="../../Data/valid_set.csv")
train_X = torch.tensor(normalization(
    train_set[train_set.columns[:-1]].to_numpy()),
                       dtype=torch.float)
train_Y = torch.tensor(train_set[train_set.columns[-1]].to_numpy().reshape(
    -1, 1),
                       dtype=torch.float)
valid_X = torch.tensor(normalization(
    valid_set[valid_set.columns[:-1]].to_numpy()),
                       dtype=torch.float)
valid_Y = torch.tensor(valid_set[valid_set.columns[-1]].to_numpy().reshape(
    -1, 1),
                       dtype=torch.float)

# 2. Batch the data
batch_size = 128
# Combine the feature tensor and the label vector
dataset = Data.TensorDataset(train_X, train_Y)
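# The snippet ends after building the TensorDataset. A minimal continuation
# (an assumption, not the original code) wraps it in a DataLoader and trains a
# single linear layer, mirroring the Gluon example further below:
data_loader = Data.DataLoader(dataset, batch_size, shuffle=True)

net = nn.Sequential(nn.Linear(train_X.shape[1], 1))
init.normal_(net[0].weight, mean=0.0, std=0.1)
init.constant_(net[0].bias, 0.0)

loss_fn = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.01)

for epoch in range(10):
    for X, y in data_loader:
        optimizer.zero_grad()
        loss = loss_fn(net(X), y)
        loss.backward()
        optimizer.step()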
Example #4
from typing import Tuple, Union

import numpy as np
import pandas as pd
from data_processing import (load_data, normalization, split_train_valid_set,
                             transform_data)

import keras.applications as kapp
from keras.layers import Activation, Dense
from keras.models import Sequential
from keras.optimizers import SGD

# 1. Load the data
data = load_data()
train_data, valid_data = split_train_valid_set(data)
train_set = transform_data(train_data, data_path="../../Data/train_set.csv")
valid_set = transform_data(valid_data, data_path="../../Data/valid_set.csv")
train_X = normalization(train_set[train_set.columns[:-1]].to_numpy())
train_Y = train_set[train_set.columns[-1]].to_numpy().reshape(-1, 1)
valid_X = normalization(valid_set[valid_set.columns[:-1]].to_numpy())
valid_Y = valid_set[valid_set.columns[-1]].to_numpy().reshape(-1, 1)


# 2. Batch the data
def data_iter(
    X: Union[pd.DataFrame, np.ndarray],
    Y: Union[pd.Series, np.ndarray],
    batch_size: int = 10,
    if_shuffle=True,
) -> Tuple[np.ndarray, np.ndarray]:
    """Mini-batch data iterator.

    Args:
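# The example above is cut off inside the docstring. A self-contained sketch of
# such a batch iterator (the name and body are assumptions, not the original
# code) could look like this:
def data_iter_sketch(X, Y, batch_size=10, if_shuffle=True):
    """Yield (X_batch, Y_batch) slices, optionally in shuffled order."""
    X, Y = np.asarray(X), np.asarray(Y)
    indices = np.arange(len(X))
    if if_shuffle:
        np.random.shuffle(indices)
    for start in range(0, len(X), batch_size):
        batch = indices[start:start + batch_size]
        yield X[batch], Y[batch]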
Example #5
import argparse
import os

import pandas as pd

# load encoded DataFrame
'''
In order to run this program, encoded data needs to be obtained first.
'''
encoded_df_cohesive = pd.read_csv(dir_path + "/data/descriptors/cohesive/encoded_compounds.csv", index_col=0)
encoded_df_ltc = pd.read_csv(dir_path + "/data/descriptors/ltc/encoded_compounds.csv", index_col=0)
encoded_df_mp = pd.read_csv(dir_path + "/data/descriptors/mp/encoded_compounds.csv", index_col=0)

parser = argparse.ArgumentParser(description="normalize descriptors")
parser.add_argument("--property", required=True, help="property of a dataset")
parser.add_argument("--file_path", required=True,
                    help="path to a target file. Make sure that property of the \
                    target file is the same with --property param.")
args = parser.parse_args()


if __name__ == "__main__":
    if args.property == "cohesive":
        encoded_df = encoded_df_cohesive
    elif args.property == "ltc":
        encoded_df = encoded_df_ltc
    elif args.property == "mp":
        encoded_df = encoded_df_mp
    else:
        assert False, 'please choose a valid property name'

    df = pd.read_csv(args.file_path, index_col=0)
    save_path = os.path.splitext(args.file_path)[0] + '_norm.csv'
    normalization(df, encoded_df, save_path)
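# `normalization(df, encoded_df, save_path)` comes from the project's own module
# and is not shown here. A minimal sketch of what such a helper might do (an
# assumption, not the project's implementation) is to standardise the target
# descriptors against the encoded reference data and write the result:
def normalize_descriptors_sketch(df, encoded_df, save_path):
    cols = encoded_df.columns
    normed = (df[cols] - encoded_df[cols].mean()) / encoded_df[cols].std()
    normed.to_csv(save_path)
    return normed

# Typical invocation of the script above (script and CSV names are illustrative):
#   python normalize.py --property cohesive --file_path ../data/target.csv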
Example #6
        return epoch_data


if __name__ == '__main__':
    # Command-line setup for plotting
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--graph_curves',
        help='True to plot the accuracy and error curves',
        required=False,
        default=False)
    args = parser.parse_args()

    # Build the dataframe from the data
    df_seeds = data_processing.init_data('seeds_dataset.txt')
    data_processing.normalization(df_seeds)

    # Split the data
    training_data = df_seeds.iloc[:int(0.7 * len(df_seeds))]
    test_data = df_seeds.iloc[int(0.7 * len(df_seeds)):]

    print('Training data: {}   -   Test data: {}\n'.format(
        len(training_data), len(test_data)))

    # Train the neural network
    x = training_data[[
        'area', 'perimeter', 'compactness', 'length', 'width', 'asymmetry',
        'length_groove'
    ]].to_numpy()
    y = training_data['label'].to_numpy()
    test_x = test_data[[
Example #7
import sys

sys.path.append("..")

from data_processing import (load_data, normalization, split_train_valid_set,
                             transform_data)

from mxnet import autograd, gluon, init, nd
from mxnet.gluon import data as gdata
from mxnet.gluon import loss as gloss
from mxnet.gluon import nn

# 1. Load the data
data = load_data("../../Data/train.csv")
train_data, valid_data = split_train_valid_set(data)
train_set = transform_data(train_data, data_path="../../Data/train_set.csv")
valid_set = transform_data(valid_data, data_path="../../Data/valid_set.csv")
features = nd.array(normalization(
    train_set[train_set.columns[:-1]].to_numpy()),
                    dtype="float32")
labels = nd.array(train_set[train_set.columns[-1]].to_numpy(), dtype="float32")

# 2. Batch the data
batch_size = 128
dataset = gdata.ArrayDataset(features, labels)
data_iter = gdata.DataLoader(dataset, batch_size, shuffle=False)

# 3. Define the model
net = nn.Sequential()
net.add(nn.Dense(1))
net.initialize(init.Normal(sigma=0.1))

# 4. Optimizer
# trainer = gluon.Trainer(net.collect_params(), "adam", {"learning_rate": 0.2})
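# The trainer line above is commented out in the original. A minimal training
# loop using it (an assumption, not the original code; the learning rate is
# illustrative) could look like this:
trainer = gluon.Trainer(net.collect_params(), "adam", {"learning_rate": 0.2})
loss = gloss.L2Loss()

for epoch in range(10):
    for X, y in data_iter:
        with autograd.record():
            l = loss(net(X), y)
        l.backward()
        trainer.step(batch_size)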