コード例 #1
0
ファイル: __init__.py プロジェクト: nel215/dogs-vs-cats
        def train(X_train, X_test, y_train, y_test):
            """Train a freshly created model with SGD and return predictions
            on ``X_test``.

            Parameters
            ----------
            X_train, y_train : training inputs/labels (format expected by
                ``self._train_once`` — can't tell the exact shape from here).
            X_test, y_test : evaluation inputs/labels; ``y_test`` is only used
                when ``self.cv`` is truthy, to report a validation loss.

            Returns
            -------
            Predictions from the final epoch, moved to CPU when a GPU is in
            use. NOTE(review): if the closed-over ``n_epoch`` is 0, ``pred``
            is never assigned and this raises NameError — confirm callers
            always pass n_epoch >= 1.
            """
            xp = self.xp

            model = self._create_model()

            optimizer = SGD(lr=0.001)
            optimizer.setup(model)

            if self.cv:
                # sigmoid_cross_entropy expects labels shaped (N, 1)
                y_test = xp.array(y_test).reshape((-1, 1))
            for epoch in range(n_epoch):
                print('epoch:', epoch)
                # BUG FIX: chainer.using_config(...) returns a context manager;
                # calling it without `with` was a no-op, so 'train' mode was
                # never actually enabled/disabled. Enter it properly.
                with chainer.using_config('train', True):
                    self._train_once(model, optimizer, X_train, y_train)

                # Evaluation: no gradient bookkeeping, test-mode config.
                with chainer.no_backprop_mode(), \
                        chainer.using_config('train', False):
                    pred = self._predict(model, X_test)
                    if self.cv:
                        loss = F.sigmoid_cross_entropy(pred, y_test)
                        print("test loss:", loss.data)

            if self.gpu is not None:
                pred = chainer.cuda.to_cpu(pred)
            return pred
コード例 #2
0
def setup_optimizer(cfg):
    """Build the optimizer named by ``cfg.solver.optimizer``.

    Supported values are ``'SGD'`` and ``'MomentumSGD'``; anything else
    raises ``ValueError``.

    Parameters
    ----------
    cfg : config object with a ``solver`` namespace providing ``optimizer``,
        ``base_lr`` and (for MomentumSGD) ``momentum``.

    Returns
    -------
    A constructed optimizer instance.
    """
    if cfg.solver.optimizer == 'SGD':
        # BUG FIX: was cfg.optimizer.base_lr — every other access in this
        # function goes through cfg.solver.*, so this branch raised
        # AttributeError on a normal config.
        optimizer = SGD(cfg.solver.base_lr)
    elif cfg.solver.optimizer == 'MomentumSGD':
        optimizer = MomentumSGD(cfg.solver.base_lr, cfg.solver.momentum)
    else:
        raise ValueError('Not support `optimizer`: {}.'.format(
            cfg.solver.optimizer))
    return optimizer
コード例 #3
0
    # NOTE(review): interior fragment of a larger training function — the
    # enclosing def and the loop body after L72 are not visible here.
    # Written for Python 2 (print statement, true-division semantics below).
    classes = np.unique(t_train)  # distinct class labels present in t_train
    num_classes = len(classes)  # number of classes
    dim_features = x_train.shape[-1]  # input feature dimensionality

    # Hyperparameter definitions
    learning_rate = 0.5  # SGD learning rate
    max_iteration = 100      # number of training epochs
    batch_size = 200       # samples per mini-batch
    dim_hidden = 200         # hidden-layer width

    # Two-layer perceptron: features -> hidden -> class scores
    linear_1 = F.Linear(dim_features, dim_hidden)
    linear_2 = F.Linear(dim_hidden, num_classes)
    model = FunctionSet(linear_1=linear_1,
                        linear_2=linear_2)

    optimizer = SGD(learning_rate)
    optimizer.setup(model)

    # Per-epoch history buffers for later plotting/inspection (presumably).
    loss_history = []
    train_accuracy_history = []
    loss_valid_history = []
    valid_accuracy_history = []

    # Best-so-far trackers for model selection on the validation split.
    valid_accuracy_best = 0
    valid_loss_best = 10
    # Python 2 integer division — assumes num_train / num_valid are
    # multiples of batch_size, otherwise trailing samples are dropped.
    num_batches = num_train / batch_size  # number of mini-batches
    num_valid_batches = num_valid / batch_size

    # Training loop (body continues beyond this fragment)
    for epoch in range(max_iteration):
        print "epoch:", epoch
コード例 #4
0
        # NOTE(review): tail of an unseen method — maps hidden state h to
        # output y via a linear layer; the method header is outside this view.
        y = self.h2y(h)
        return y


# --- Script-level training setup for a character-level RNN ---
max_epoch = 100
hidden_size = 512  # earlier value: 100
bptt_length = 30
batch_size = 100
lr = 0.05  # earlier value: 1e-4

# Load the Shakespeare corpus as character-id indices plus both vocab maps.
indices, char_to_id, id_to_char = load_shakespear()
iterator = RnnIterator(indices, batch_size)
vocab_size = len(char_to_id)
# Input and output sizes both equal the vocabulary size (char in, char out).
rnn = SimpleRNN(vocab_size, hidden_size, vocab_size)
model = L.Classifier(rnn)
optimizer = SGD(lr=lr)
optimizer.setup(model)


def generate_sample(n=30, init_char=' '):
    # Sample n characters greedily from the RNN, seeded with init_char.
    # NOTE(review): the function body is truncated here — a `return s`
    # presumably follows in the original file.
    rnn.reset_state()

    s = ''
    x = np.array([char_to_id[init_char]])
    for i in range(n):
        y = rnn(x)
        m = y.data.argmax()  # greedy decode: most likely next character id
        c = id_to_char[m]
        s += c
        x = np.array([m])  # feed the prediction back in as next input
コード例 #5
0
ファイル: test_sgd.py プロジェクト: nihohi0428/chainer
 def setUp(self):
     # Test fixture: an SGD optimizer with lr=0.1 attached to a fresh
     # LinearModel. NOTE(review): indentation here looks mangled by the
     # scrape (1/5 spaces) — preserved byte-for-byte.
     self.optimizer = SGD(0.1)
     self.model = LinearModel(self.optimizer)