Example #1
    def gradient(self, network, x_vec, y_vec):
        # Pick a random mini-batch of indices
        batch_mask = np.random.choice(len(x_vec), self.batch_size)
        # Input data for the mini-batch
        x_batch = x_vec[batch_mask]
        # Ground-truth labels for the mini-batch
        y_batch = y_vec[batch_mask]
        #         y_batch = y_batch[:, np.newaxis]

        z1, z2, y = self.forward(network, x_batch)
        grad = self.backward(x_batch, y_batch, z1, z2, y)

        # Set up the optimizer; momentum-based Adam is used
        opt = optimizer.Adam(self.learning_rate)
        opt.update(network, grad)

        return y_batch, y
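
A caveat for this example: optimizer.Adam is constructed inside gradient(), so its internal moment estimates are reset on every call and the adaptive behaviour never accumulates across batches. A minimal sketch of the usual fix, assuming the class defines __init__ (the attribute names mirror those used above):

    def __init__(self, batch_size, learning_rate):
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        # Create the optimizer once so Adam's moment estimates persist
        self.opt = optimizer.Adam(self.learning_rate)

    def gradient(self, network, x_vec, y_vec):
        ...  # batch sampling and forward/backward as above
        self.opt.update(network, grad)
        return y_batch, y
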
                            'filter_num': 10,
                            'filter_size': 7,
                            'pad': 1,
                            'stride': 1
                        },
                        conv_param_2={
                            'filter_num': 20,
                            'filter_size': 3,
                            'pad': 1,
                            'stride': 1
                        },
                        hidden_size=100,
                        output_size=10,
                        weight_init_std=0.01)
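
As a sanity check on the two conv_param blocks above: the output spatial size of a convolution is (in + 2*pad - filter) // stride + 1. Assuming a 28x28 MNIST-style input (the input size is not shown in this snippet):

def conv_out(size, filt, pad, stride):
    # Standard convolution output-size formula
    return (size + 2 * pad - filt) // stride + 1

h1 = conv_out(28, 7, 1, 1)  # conv_param_1: (28 + 2 - 7) // 1 + 1 = 24
h2 = conv_out(h1, 3, 1, 1)  # conv_param_2: (24 + 2 - 3) // 1 + 1 = 24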

optimizer = optimizer.Adam()  # note: this rebinding shadows the optimizer module from here on

# Set to 100 because training takes a long time
iters_num = 100
# iters_num = 1000
train_size = x_train.shape[0]
batch_size = 100

train_loss_list = []
accuracies_train = []
accuracies_test = []

plot_interval = 10

for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)
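
The loop body is cut off after the batch sampling. A sketch of how such a loop typically continues in this style of codebase (the variable names t_train, x_test, t_test, and network, plus the network.gradient/loss/accuracy methods, are assumptions not shown in the snippet):

    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Backprop the gradients and apply the Adam update
    grads = network.gradient(x_batch, t_batch)
    optimizer.update(network.params, grads)

    # Record the loss, and the accuracies every plot_interval iterations
    train_loss_list.append(network.loss(x_batch, t_batch))
    if (i + 1) % plot_interval == 0:
        accuracies_train.append(network.accuracy(x_train, t_train))
        accuracies_test.append(network.accuracy(x_test, t_test))
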
Example #3
		correct += check
	return correct/len(data)
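
Only the tail of the accuracy helper survives above. A minimal sketch of the full function it likely belongs to, assuming one-hot labels and a model.predict forward pass (both are assumptions in this toy framework):

import numpy as np  # assumed available, as in the other examples

def test(model, data):
	correct = 0
	for x, t in data:
		pred = model.predict(x)  # hypothetical forward pass
		check = int(np.argmax(pred) == np.argmax(t))
		correct += check
	return correct/len(data)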


def run(model, train_set, vali_set, test_set):
	for epoch in range(1, 300):
		train_loss = train(model, train_set)
		vali_loss = validation(model, vali_set)
		accuracy = test(model, test_set)

		print("epoch:", epoch, "\ttrain_loss:", train_loss, "\tvali_loss:", vali_loss, "\taccuracy:", accuracy)


lr = 0.01

model = net.model(optimizer.Adam(lr=lr))  # 66% at epoch 30
#model = net.model(optimizer.GradientDescent(lr=lr))  # 32% at epoch 30 with 128 samples each for train/validation/test

model.add(nn.conv2d(filters=32, kernel_size=[3,3], strides=[1,1], w_init=init.he))
model.add(nn.relu())
model.add(nn.maxpool2d(kernel_size=[2,2], strides=[2,2]))
model.add(nn.dropout(0.6))

model.add(nn.conv2d(filters=64, kernel_size=[3,3], strides=[1,1], w_init=init.he))
model.add(nn.relu())
model.add(nn.maxpool2d(kernel_size=[2,2], strides=[2,2]))
model.add(nn.dropout(0.6))

model.add(nn.conv2d(filters=128, kernel_size=[3,3], strides=[1,1], w_init=init.he))
model.add(nn.relu())
model.add(nn.maxpool2d(kernel_size=[2,2], strides=[2,2]))
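
The model definition stops mid-stack here. A plausible completion in this snippet's style before handing everything to run() (nn.flatten, nn.dense, and nn.softmax are hypothetical layer names in the same toy API; the three dataset variables are assumed to be prepared earlier):

model.add(nn.dropout(0.6))

model.add(nn.flatten())                        # hypothetical layer
model.add(nn.dense(units=10, w_init=init.he))  # hypothetical layer
model.add(nn.softmax())                        # hypothetical layer

run(model, train_set, vali_set, test_set)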
Example #4
                    },
                    hidden_size_1=120,
                    hidden_size_2=84,
                    output_size=10,
                    weight_init_std=0.01)
    # for layer in network.layers.values():
    #     print(layer)
    # print(network.lossLayer)
    # print('****** Print structure without values: OK ******')

    train_x_batch, train_y_batch = get_one_batch(train_x,
                                                 train_y,
                                                 batch_size=10)
    show_structure(network, train_x_batch, train_y_batch)

    op = optimizer.Adam(lr=0.001)
    epoch = 100  # evaluation interval in iterations, not a true epoch
    for i in range(5000):
        train_x_batch, train_y_batch = get_one_batch(train_x,
                                                     train_y,
                                                     batch_size=30)
        grads = network.gradient(train_x_batch, train_y_batch)
        try:
            op.update(network.params, grads)
        except ZeroDivisionError as e:
            print('Handling run-time error:', e)

        if i % epoch == 0:
            # calculate accuracy
            train_acc = network.accuracy(train_x_batch, train_y_batch)
            train_acc_list.append(train_acc)
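
All three examples drive learning through an optimizer.Adam exposing update(params, grads). Its internals are not shown anywhere above, so for reference, here is a minimal self-contained sketch of such a class, following the standard Adam update rule (Kingma & Ba, 2015):

import numpy as np

class Adam:
    def __init__(self, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
        self.lr, self.beta1, self.beta2, self.eps = lr, beta1, beta2, eps
        self.iter = 0
        self.m, self.v = None, None

    def update(self, params, grads):
        # Lazily allocate first/second moment buffers per parameter
        if self.m is None:
            self.m = {k: np.zeros_like(v) for k, v in params.items()}
            self.v = {k: np.zeros_like(v) for k, v in params.items()}
        self.iter += 1
        # Fold both bias corrections into an effective step size
        lr_t = self.lr * np.sqrt(1.0 - self.beta2 ** self.iter) \
               / (1.0 - self.beta1 ** self.iter)
        for k in params.keys():
            self.m[k] += (1.0 - self.beta1) * (grads[k] - self.m[k])
            self.v[k] += (1.0 - self.beta2) * (grads[k] ** 2 - self.v[k])
            params[k] -= lr_t * self.m[k] / (np.sqrt(self.v[k]) + self.eps)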