Code Example #1
File: run.py  Project: jinpoon/SimpleNeuralNetwork
def main():
    train_loader = dataloader(train_data_path,
                              train_label_path,
                              val_data_path,
                              val_label_path,
                              test_data_path,
                              test_label_path,
                              batchsize=32)
    net = Net()

    nn_input_size = train_loader.traindata.shape[1]
    num_of_class = 10

    l1 = Linear(nn_input_size, 100)
    b1 = BatchNorm(100)
    a1 = Activation('sigmoid', 100)
    l2 = Linear(100, num_of_class)
    b2 = BatchNorm(num_of_class)
    a2 = Activation('sigmoid', num_of_class)

    lm = Linear(100, 100)
    bm = BatchNorm(100)
    am = Activation('sigmoid', 100)

    net.add(l1)
    net.add(b1)  # add batch normalization
    net.add(a1)

    net.add(lm)
    net.add(bm)
    net.add(am)

    net.add(l2)
    net.add(b2)  # add batch normalization
    net.add(a2)

    loss = Softmax_Cross_entropy()

    trainer = Trainer(train_loader, None, loss, net)

    x = []
    tl = []
    vl = []
    te = []
    ve = []

    for i in range(epoch):
        print "epoch %d" % i
        training_loss = trainer.train()
        x.append(i + 1)

        tl.append(training_loss)
        vl.append(trainer.get_val_loss())
        te.append(1 - trainer.get_train_acc())
        ve.append(1 - trainer.get_val_acc())
        print "val loss: %f" % vl[i]
        print "train error: %f " % te[i]
        print "val error: %f" % ve[i]
Code Example #2
def main():

    numn = [20, 100, 200, 500]
    plt.figure(num=0)

    for j in range(len(numn)):
        train_loader = dataloader(train_data_path,
                                  train_label_path,
                                  val_data_path,
                                  val_label_path,
                                  batchsize=1)
        net = Net()

        nn_input_size = train_loader.traindata.shape[1]
        num_of_class = 10

        l1 = Linear(nn_input_size, numn[j], lr=0.01, momentum=0.5)
        a1 = Activation('sigmoid', numn[j])
        l2 = Linear(numn[j], num_of_class, lr=0.01, momentum=0.5)
        a2 = Activation('sigmoid', num_of_class)

        net.add(l1)
        #net.add(b1) #add batch normalization
        net.add(a1)
        net.add(l2)
        #net.add(b2) #add batch normalization
        net.add(a2)

        loss = Softmax_Cross_entropy()

        trainer = Trainer(train_loader, None, loss, net)

        x = []
        vl = []
        ve = []

        for i in range(epoch):
            print "epoch %d" % i
            training_loss = trainer.train()
            x.append(i + 1)
            ve.append(1 - trainer.get_val_acc())
        plt.plot(x, ve, color=colorstr[j], linewidth=1.0, linestyle='--')

    plt.show()
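As in the previous example, epoch and the plotting colors come from globals not shown here; a plausible stand-in (hypothetical values):

colorstr = ['r', 'g', 'b', 'k']  # one line color per hidden-layer size in numn
epoch = 50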
    """
Code Example #3
def __init__(self, feature_columns, cin_size, hidden_units, out_dim=1, activation='relu', dropout=0.0):
    super(xDeepFM, self).__init__()
    self.dense_feature_columns, self.sparse_feature_columns = feature_columns
    self.embed_layers = [Embedding(feat['feat_onehot_dim'], feat['embed_dim'])
                         for feat in self.sparse_feature_columns]
    self.linear = Linear()
    self.dense_layer = Dense_layer(hidden_units, out_dim, activation, dropout)
    self.cin_layer = CIN(cin_size)
    self.out_layer = Dense(1, activation=None)
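The constructor wires up the three xDeepFM branches: a linear term, a CIN over the field embeddings, and a plain DNN. The forward pass is not shown in this listing; a minimal Keras-style sketch of what a matching call() might look like, assuming the inputs arrive as a (dense, sparse) pair (hypothetical, not the project's actual code):

import tensorflow as tf

def call(self, inputs):
    dense_inputs, sparse_inputs = inputs
    # one embedding vector per sparse field: (batch, n_fields, embed_dim)
    embed = tf.stack([emb(sparse_inputs[:, i])
                      for i, emb in enumerate(self.embed_layers)], axis=1)
    linear_out = self.linear(sparse_inputs)    # memorization branch
    cin_out = self.cin_layer(embed)            # explicit feature interactions
    dnn_in = tf.concat([dense_inputs,
                        tf.reshape(embed, (tf.shape(embed)[0], -1))], axis=-1)
    dnn_out = self.dense_layer(dnn_in)         # implicit interactions
    # combine the branches and squash to a probability
    return tf.nn.sigmoid(self.out_layer(
        tf.concat([linear_out, cin_out, dnn_out], axis=-1)))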
Code Example #4
def train_classifier(learning_rate: float,
                     epochs: int,
                     batch_size: int,
                     print_every: int = 50) -> None:
    data_loader = DataLoader(batch_size)

    loss = CrossEntropy()
    model = Model([Linear(784, 50), ReLU(), Linear(50, 10)])

    for i in range(epochs):
        # One training loop
        training_data = data_loader.get_training_data()
        validation_data = data_loader.get_validation_data()
        for j, batch in enumerate(training_data):
            input, target = batch
            y = model(input)
            loss(y, target)
            gradient = loss.gradient()
            model.backward(gradient)
            model.update(learning_rate)
            if j % print_every == 0:
                print(
                    f"Epoch {i+1}/{epochs}, training iteration {j+1}/{len(training_data)}"
                )

        accuracy_values = []
        loss_values = []
        # One validation loop
        for j, batch in enumerate(validation_data):
            input, target = batch
            y = model(input)
            loss_value = loss(y, target)
            accuracy = calculate_accuracy(y, target)
            accuracy_values.append(accuracy)
            loss_values.append(loss_value)

        print(
            f"Epoch {i+1}: loss {np.round(np.average(loss_values), 2)}, accuracy {np.round(np.average(accuracy_values), 2)}"
        )
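calculate_accuracy is used above but not defined in the listing; presumably something along these lines (an assumption):

import numpy as np

def calculate_accuracy(y: np.ndarray, target: np.ndarray) -> float:
    # fraction of samples whose argmax prediction matches the target class
    return float(np.mean(np.argmax(y, axis=1) == np.argmax(target, axis=1)))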
Code Example #5
def train_variational_autoencoder(
    learning_rate: float,
    epochs: int,
    batch_size: int,
    latent_variables: int = 10,
    print_every: int = 50,
) -> None:
    print(
        f"Training a variational autoencoder for {epochs} epochs with batch size {batch_size}"
    )
    data_loader = DataLoader(batch_size)
    image_loss = CrossEntropy()
    divergence_loss = KLDivergenceStandardNormal()
    encoder_mean = Model([Linear(784, 50), ReLU(), Linear(50, latent_variables)])
    encoder_variance = Model(
        [Linear(784, 50), ReLU(), Linear(50, latent_variables), Exponential()]
    )
    reparameterization = Reparameterization()
    decoder = Model([Linear(latent_variables, 50), ReLU(), Linear(50, 784)])

    for i in range(epochs):
        # One training loop
        training_data = data_loader.get_training_data()

        for j, batch in enumerate(training_data):
            input, target = batch
            # Forward pass
            mean = encoder_mean(input)
            variance = encoder_variance(input)
            z = reparameterization(mean=mean, variance=variance)
            generated_samples = decoder(z)
            # Loss calculation
            divergence_loss_value = divergence_loss(mean, variance)
            generation_loss = image_loss(generated_samples, input)
            if j % print_every == 0:
                print(
                    f"Epoch {i+1}/{epochs}, "
                    f"training iteration {j+1}/{len(training_data)}"
                )
                print(
                    f"KL loss {np.round(divergence_loss_value, 2)}\t"
                    f"Generation loss {np.round(generation_loss, 2)}"
                )

            # Backward pass
            decoder_gradient = image_loss.gradient()
            decoder_gradient = decoder.backward(decoder_gradient)
            decoder_mean_gradient, decoder_variance_gradient = reparameterization.backward(
                decoder_gradient
            )
            encoder_mean_gradient, encoder_variance_gradient = (
                divergence_loss.gradient()
            )
            encoder_mean.backward(decoder_mean_gradient + encoder_mean_gradient)
            encoder_variance.backward(
                decoder_variance_gradient + encoder_variance_gradient
            )
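The Reparameterization layer is the one component the listing leaves out. The trick itself is standard: sample z = mean + sqrt(variance) * eps with eps ~ N(0, I), so gradients can flow through mean and variance. A minimal NumPy sketch consistent with how it is called above (an assumption about this project's class, not its actual code):

import numpy as np

class Reparameterization:
    def __call__(self, mean, variance):
        # sample z = mean + std * eps; the randomness sits in eps, so the
        # sampling step stays differentiable w.r.t. mean and variance
        self.epsilon = np.random.standard_normal(mean.shape)
        self.std = np.sqrt(variance)
        return mean + self.std * self.epsilon

    def backward(self, gradient):
        # dz/dmean = 1 and dz/dvariance = eps / (2 * sqrt(variance))
        return gradient, gradient * self.epsilon / (2.0 * self.std)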
Code Example #6
File: main.py  Project: ChengyaoWang/myTorch
from tensor import Tensor
from optimizer import SGD
from layer import MSELoss, Linear, Tanh, Sigmoid
from model import Sequential

import numpy as np

# Toy example of using the Tensor class
np.random.seed(0)
data = Tensor(np.array([[0, 0], [0, 1], [1, 0], [1, 1]]), requires_grad=True)
target = Tensor(np.array([[0], [1], [0], [1]]), requires_grad=True)
# Every parameter of the model is a Tensor object representing a weight matrix
model = Sequential(
    Linear(2, 3),
    Tanh(),
    Linear(3, 3),
    Tanh(),
    Linear(3, 1),
)
optim = SGD(parameters=model.get_parameters(), lr=0.1)
criterion = MSELoss()
for i in range(10):
    pred = model(data)
    loss = criterion(pred, target)
    loss.backward(Tensor(np.ones_like(loss.data), is_grad=True))
    optim.step()
    print(loss.data)
print("------------------------------------------------------------------------")
Code Example #7
File: neuralnet.py  Project: EmanueleBoa/PyNeuralNet
def build_network(self, *d, **types):
    """
    Method to build a neural network structure
    """
    layer_types = {"tanh": Tanh, "sigmoid": Sigmoid, "linear": Linear,
                   "softmax": Softmax, "softsign": SoftSign, "relu": ReLU}
    # check the number of layers
    Nlayers = len(d)
    if Nlayers < 2:
        print("ERROR: A neural network needs at least an input and an output layer!")
        exit(1)
    if "verbose" in types:
        self.verbose = types.get("verbose")
    # if the user did not specify the scope of the network,
    # default to classification
    self.scope = types.get("scope", "classification")
    # if the user did not specify the layer types, fall back to defaults
    if "out_type" in types:
        self.out_type = types.get("out_type")
    elif d[Nlayers - 1] == 1:
        self.out_type = "linear"
    else:
        self.out_type = "softmax"
    if "hidden_type" in types:
        self.hidden_type = types.get("hidden_type")
    elif Nlayers > 2:
        self.hidden_type = "tanh"
    # add the input layer
    self.layers.append(Layer(d[0]))
    # if present, add the hidden layers
    if Nlayers > 2:
        if self.hidden_type not in layer_types:
            print("ERROR: no layer of type " + str(self.hidden_type) + " exists!")
            exit(1)
        hidden = layer_types[self.hidden_type]
        for i in range(1, Nlayers - 1):
            self.layers.append(hidden(d[i], d[i - 1]))
            self.layers[i].xavier_init_weights()
    # add the output layer
    if self.out_type not in layer_types:
        print("ERROR: no layer of type " + str(self.out_type) + " exists!")
        exit(1)
    self.layers.append(layer_types[self.out_type](d[Nlayers - 1], d[Nlayers - 2]))
    self.layers[Nlayers - 1].xavier_init_weights()
    # save the number of layers
    self.Nlayers = Nlayers
    if self.verbose:
        self.print_network_structure()
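Hypothetical usage of build_network, with illustrative sizes for a 784-input, 10-class classifier:

# 784 -> 100 (tanh, the hidden default) -> 10 (softmax, the output default)
net.build_network(784, 100, 10, scope="classification", verbose=True)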
Code Example #8
File: neuralnet.py  Project: EmanueleBoa/PyNeuralNet
def add_layer(self, type, dim):
    """
    Method that adds to the network a layer
    of dimension dim and type type
    """
    layer_types = {"linear": Linear, "tanh": Tanh, "relu": ReLU,
                   "softsign": SoftSign, "sigmoid": Sigmoid, "softmax": Softmax}
    if type == "input":
        if self.Nlayers != 0:
            print("ERROR: the network already has an input layer!")
            exit(1)
        self.layers.append(Layer(dim))
        self.Nlayers = len(self.layers)
    elif type in layer_types:
        if self.Nlayers == 0:
            print("ERROR: the network needs an input layer first!")
            exit(1)
        # connect the new layer to the last layer added so far
        self.layers.append(layer_types[type](dim, self.layers[self.Nlayers - 1].n))
        self.Nlayers = len(self.layers)
        self.layers[self.Nlayers - 1].xavier_init_weights()
    else:
        print("ERROR: no such layer available!")
        exit(1)
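The same structure can be assembled incrementally with add_layer (hypothetical usage, matching the build_network call sketched above):

net.add_layer("input", 784)
net.add_layer("tanh", 100)
net.add_layer("softmax", 10)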
Code Example #9
from typing import List

import numpy as np

# We ultimately want predictions for 1..100 (the set under consideration),
# so we can't train on those numbers; we train on the larger numbers
# 101..1023 instead. NeuralNet, Linear, Tanh and train come from the
# project's own modules, as in Code Example #12.
def binary_encode(x: int) -> List[int]:
    """
    10-digit binary encoding of x
    """
    return [x >> i & 1 for i in range(10)]


# train on numbers bigger than 100
inputs = np.array([binary_encode(x) for x in range(101, 1024)])

targets = np.array([fizz_buzz_encode(x) for x in range(101, 1024)])

net = NeuralNet([
    Linear(input_size=10, output_size=50),
    Tanh(),
    Linear(input_size=50, output_size=4),
])

train(net, inputs, targets, num_epochs=5000)

for x in range(1, 101):
    predicted = net.forward(binary_encode(x))
    predicted_idx = np.argmax(predicted)
    actual_idx = np.argmax(fizz_buzz_encode(x))
    labels = [str(x), "fizz", "buzz", "fizzbuzz"]
    print(x, labels[predicted_idx], labels[actual_idx])
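fizz_buzz_encode is used but not defined in this listing. Given the label order [str(x), "fizz", "buzz", "fizzbuzz"], it is presumably the usual one-hot encoding (an assumption):

def fizz_buzz_encode(x: int) -> List[int]:
    # one-hot target matching the label order used above
    if x % 15 == 0:
        return [0, 0, 0, 1]  # "fizzbuzz"
    if x % 5 == 0:
        return [0, 0, 1, 0]  # "buzz"
    if x % 3 == 0:
        return [0, 1, 0, 0]  # "fizz"
    return [1, 0, 0, 0]      # the number itself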
Code Example #10
File: CNN.py  Project: daisuke0728/daisuke-mech
RMSprop_list = [0.01, 0.99, 1e-8]  # lr, p, eps
Adadelta_list = [0.95, 1e-6]  # p, eps
Adam_list = [0.001, 0.9, 0.999, 1e-8]  # lr, p1, p2, eps
RMSpropGraves_list = [0.0001, 0.95, 1e-4]  # lr, p, eps
SMORMS3_list = [0.001, 1e-8]  # lr, eps

# Define the layer structure
model_list = [
    Convolution(1, 3, 3, 3, stride=1, pad=1),
    ReLU(),
    MaxPooling(2, 2, stride=1, pad=0),
    Convolution(3, 10, 3, 3, stride=1, pad=1),
    ReLU(),
    #MaxPooling(2,2,stride=2,pad=0),
    #Convolution(10,20,3,3,stride=1,pad=0),
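    # 7290 below = 10 channels * 27 * 27 spatial positions left by the
    # active layers above (assuming 28x28 single-channel input images)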
    Linear(7290, 100, init_weight='HeNormal'),
    ReLU(),
    Linear(100, 10, init_weight='HeNormal'),
    Softmax()
]

# Training hyperparameters
n_epoch = 20
batchsize = 100 * 2**(int(aug_flag) * aug_num)

#########################################################################
############# Function and class definitions ############################
#########################################################################


def learning(model, optimizer, n_epoch=20, batchsize=100):
Code Example #11
File: models.py  Project: foreversunx/GCN
def __init__(self, nfeat, nhop, nclass, dropout):
    super(DCNN, self).__init__()
    self.dcnn = DiffusionConvolution(nhop, nfeat)
    self.fc = Linear(nhop * nfeat, nclass)
    self.dropout = dropout
    self.nhop = nhop
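The matching forward pass is not shown; a sketch of the usual DCNN wiring, flattening the nhop x nfeat diffusion features before the linear classifier (hypothetical; the project's forward may differ):

import torch.nn.functional as F

def forward(self, x, transition):
    z = self.dcnn(x, transition)   # diffused features, assumed shape (N, nhop, nfeat)
    z = z.reshape(z.shape[0], -1)  # flatten to (N, nhop * nfeat) for the fc layer
    z = F.dropout(z, self.dropout, training=self.training)
    return F.log_softmax(self.fc(z), dim=1)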
Code Example #12
File: xor.py  Project: upstarter/python_algorithms
"""
XOR Cannot be learned with linear model
"""

import numpy as np

from train import train
from nn import NeuralNet
from layer import Linear, Tanh

inputs = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])

targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])

net = NeuralNet([Linear(input_size=2, output_size=2)])

train(net, inputs, targets)

for x, y in zip(inputs, targets):
    predicted = net.forward(x)
    print(x, predicted, y)

# now try a hidden layer
net = NeuralNet([
    Linear(input_size=2, output_size=2),
    Tanh(),
    Linear(input_size=2, output_size=2)
])

train(net, inputs, targets)
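The listing ends here; presumably it closes by repeating the evaluation loop from above to show that the hidden layer fixes the XOR predictions:

for x, y in zip(inputs, targets):
    predicted = net.forward(x)
    print(x, predicted, y)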