Example #1
    class_num = 2
    feature_num = pima.shape[1] - 1
    x_train = torch.from_numpy(pima[:, 0:feature_num]).float().cuda()
    y_train = torch.from_numpy(pima[:, feature_num]).cuda()
    # feature_num features, so feature_num groups of encoders are needed
    enc_layer = []
    x_train_min = x_train.min(0)[0]
    x_train_max = x_train.max(0)[0]
    print(x_train_min)
    print(x_train_max)
    enc_neuron_num = 512
    per_class_neuron_num = 1

    for i in range(feature_num):
        enc_layer.append(
            encoder.GaussianEncoder(x_train_min[i], x_train_max[i],
                                    enc_neuron_num, 'cuda:0'))
    # class_num classes in total, so class_num * per_class_neuron_num tempotron neurons are needed
    dec_layer = []
    for i in range(class_num * per_class_neuron_num):
        dec_layer.append(
            node.LIFNode(tau=15.0,
                         tau_s=15.0 / 4,
                         v_rest=0,
                         T=500,
                         N=enc_neuron_num * feature_num,
                         device='cuda:0'))  # fully connected: every element of enc_layer connects to this node

    W = torch.rand(
        size=[class_num * per_class_neuron_num, enc_neuron_num *
              feature_num]).cuda()  # W[i] holds the connection weights from enc_layer to dec_layer[i]
    B = torch.rand(
        size=[class_num * per_class_neuron_num, enc_neuron_num *
              feature_num]).cuda()
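Both examples build one `encoder.GaussianEncoder` per feature, but its implementation is not shown in these snippets. Below is a minimal sketch of Gaussian receptive-field (population) coding, assuming each of the `enc_neuron_num` neurons has a Gaussian tuning curve with evenly spaced centers over [min, max] and fires earlier the closer the input is to its preferred value; the class name, signature, and width heuristic are illustrative, not the actual `encoder` module API.

    import torch

    class GaussianPopulationEncoder:
        """Illustrative Gaussian receptive-field encoder (assumed behaviour,
        not the real encoder.GaussianEncoder)."""

        def __init__(self, x_min, x_max, neuron_num, device, T=500):
            # evenly spaced preferred values covering the feature's observed range
            self.centers = torch.linspace(float(x_min), float(x_max), neuron_num, device=device)
            # width chosen so neighbouring tuning curves overlap (a common heuristic)
            self.sigma = (float(x_max) - float(x_min)) / (neuron_num - 1) * 1.5
            self.T = T  # length of the simulation window in time steps

        def encode(self, x):
            # response in (0, 1]: maximal when x equals a neuron's preferred value
            response = torch.exp(-0.5 * ((float(x) - self.centers) / self.sigma) ** 2)
            # stronger response -> earlier spike; weak responses spike near the end of the window
            return ((1.0 - response) * self.T).round()

With enc_neuron_num = 512 per feature, each scalar input expands into 512 candidate spike times, which is why the decoder neurons above are sized N=enc_neuron_num * feature_num.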
Example #2
    iris = datasets.load_iris()  # dict-like Bunch: iris.data holds the features, iris.target the class labels
    # print(iris['data'].shape)  # [150,4]
    # print(iris['target'].shape) # [150]
    x_train = torch.from_numpy(iris['data']).float().cuda()
    y_train = torch.from_numpy(iris['target']).cuda()
    # 4 features, so 4 groups of encoders are needed
    enc_layer = []
    x_train_min = x_train.min(0)[0]
    x_train_max = x_train.max(0)[0]
    print(x_train_min)
    print(x_train_max)
    enc_neuron_num = 4096
    per_class_neuron_num = 16
    # indices [0, per_class_neuron_num-1] correspond to class 0, [per_class_neuron_num, 2*per_class_neuron_num-1] to class 1, [2*per_class_neuron_num, 3*per_class_neuron_num-1] to class 2
    for i in range(4):
        enc_layer.append(encoder.GaussianEncoder(x_train_min[i], x_train_max[i], enc_neuron_num, 'cuda:' + sys.argv[2]))
    # 3 classes in total, so 3 * per_class_neuron_num tempotron neurons are needed
    dec_layer = []
    for i in range(3*per_class_neuron_num):
        dec_layer.append(node.LIFNode(tau=15.0, tau_s=15.0/4, v_rest=0, T=500, N=enc_neuron_num*4, device='cuda:' + sys.argv[2]))  # fully connected: every element of enc_layer connects to this node

    W = torch.rand(size=[3*per_class_neuron_num, enc_neuron_num*4]).cuda()  # W[i] holds the connection weights from enc_layer to dec_layer[i]
    B = torch.rand(size=[3*per_class_neuron_num, enc_neuron_num*4]).cuda()
    learn_rate = 0.1
    stdp_learn_rate = 1
    A_LTD = 0.02656
    C_ = 0.0001
    A_LTP = C_ * A_LTD
    tau_LTP = 16.8
    tau_LTD = 33.7
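The constants at the end (A_LTP, A_LTD, tau_LTP, tau_LTD) parameterize a pair-based STDP window, but the training loop that applies them is outside this snippet. A minimal sketch of the usual exponential pair-based rule, computed from the spike-time difference dt = t_post - t_pre, follows; the function, its sign convention, and the way stdp_learn_rate scales the update are assumptions for illustration, not the examples' actual training code.

    import math

    def stdp_delta_w(t_pre, t_post, A_LTP=0.0001 * 0.02656, A_LTD=0.02656,
                     tau_LTP=16.8, tau_LTD=33.7):
        """Pair-based STDP sketch: potentiate when the presynaptic spike precedes
        the postsynaptic spike, depress otherwise (illustrative only)."""
        dt = t_post - t_pre
        if dt >= 0:
            # pre fires before post -> long-term potentiation (LTP)
            return A_LTP * math.exp(-dt / tau_LTP)
        # post fires before pre -> long-term depression (LTD)
        return -A_LTD * math.exp(dt / tau_LTD)

    # e.g. a single-synapse update: W[i, j] += stdp_learn_rate * stdp_delta_w(t_pre=12.0, t_post=20.0)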