예제 #1
0
    def transform(self, data, test=False):
        """Encode a batch of images into the latent space.

        Args:
            data: Image data as a raw ndarray or a chainer ``Variable``.
                A single 3d image is promoted to a batch of one; channel-last
                (NHWC) input is transposed to channel-first (NCHW).
            test (bool): Forwarded to the encoder (e.g. to switch off
                training-only behavior). -- assumed from the ``test=`` kwarg;
                confirm against ``_encode``.

        Returns:
            The encoded latent array (moved to CPU).

        Raises:
            TypeError: If the data cannot be coerced to a 4d array whose
                channel axis matches ``self.color_channels``.
        """
        # Make sure that data has the right shape.
        if not type(data) == Variable:
            if len(data.shape) < 4:
                # Single image: add the batch axis.
                data = data[np.newaxis]
            if len(data.shape) != 4:
                raise TypeError("Invalid dimensions for image data. Dim = %s.                     Must be 4d array." % str(data.shape))
            if data.shape[1] != self.color_channels:
                if data.shape[-1] == self.color_channels:
                    # NHWC input: convert to NCHW.
                    data = data.transpose(0, 3, 1, 2)
                else:
                    raise TypeError("Invalid dimensions for image data. Dim = %s"
                                    % str(data.shape))
            data = Variable(data)
        else:
            # Already a Variable: normalize the wrapped array in place.
            if len(data.data.shape) < 4:
                data.data = data.data[np.newaxis]
            if len(data.data.shape) != 4:
                raise TypeError("Invalid dimensions for image data. Dim = %s.                     Must be 4d array." % str(data.data.shape))
            if data.data.shape[1] != self.color_channels:
                if data.data.shape[-1] == self.color_channels:
                    data.data = data.data.transpose(0, 3, 1, 2)
                else:
                    # Consistency fix: this branch works on data.data, so
                    # report data.data.shape (was data.shape).
                    raise TypeError("Invalid dimensions for image data. Dim = %s"
                                    % str(data.data.shape))

        # Actual transformation.
        if self.flag_gpu:
            data.to_gpu()
        z = self._encode(data, test=test)[0]

        z.to_cpu()

        return z.data
예제 #2
0
파일: main.py 프로젝트: sweetrabh/testeeg
    def forward_eye_states(self, x_batch_curr, y_batch_curr, volatile):
        """Forward pass of the 4-layer sigmoid MLP for eye-state labels.

        Args:
            x_batch_curr: Input feature batch (first axis is the batch axis).
            y_batch_curr: Binary labels for the batch.
            volatile: Chainer volatility flag (disables graph construction
                for inference when true).

        Returns:
            tuple: ``(accuracy, loss, y)`` where ``loss`` is the sigmoid
            cross-entropy and ``y`` the raw network output.
        """
        current_sample = Variable(x_batch_curr, volatile=volatile)

        # Generalized: derive the batch size from the input instead of
        # hard-coding 32 (identical behavior for the original 32-row batches).
        batch_size = len(x_batch_curr)
        y_batch_curr = np.asarray(y_batch_curr).reshape(batch_size, -1)
        current_output = Variable(y_batch_curr, volatile=volatile)

        # Four stacked fully-connected layers with sigmoid activations.
        h1_current = F.sigmoid(self.model_to_use.x_h1(current_sample))
        h2_current = F.sigmoid(self.model_to_use.h1_h2(h1_current))
        h3_current = F.sigmoid(self.model_to_use.h2_h3(h2_current))
        h4_current = F.sigmoid(self.model_to_use.h3_h4(h3_current))
        y = self.model_to_use.h4_y(h4_current)

        y.data = y.data.reshape(batch_size, -1)
        loss = F.sigmoid_cross_entropy(y, current_output)
        # F.accuracy expects labels without the trailing singleton axis.
        current_output.data = np.squeeze(current_output.data)

        accuracy = F.accuracy(y, current_output)

        return accuracy, loss, y
예제 #3
0
    def inverse_transform(self, data, test=False):
        """Decode latent vectors back into data space.

        Args:
            data: Latent batch as a raw ndarray or a chainer ``Variable``;
                a 1d vector is promoted to a batch of one.
            test (bool): Forwarded to the decoder.

        Returns:
            Decoded array; transposed to channel-last layout unless
            ``self.mode == 'linear'``.

        Raises:
            TypeError: If the input cannot be coerced to a 2d array.
        """
        if type(data) == Variable:
            # Variable input: fix up the wrapped array in place.
            if len(data.data.shape) < 2:
                data.data = data.data[np.newaxis]
            if len(data.data.shape) != 2:
                raise TypeError("Invalid dimensions for latent data. Dim = %s.                     Must be a 2d array." % str(data.data.shape))
        else:
            # Raw array input: validate, then wrap.
            if len(data.shape) < 2:
                data = data[np.newaxis]
            if len(data.shape) != 2:
                raise TypeError("Invalid dimensions for latent data. Dim = %s.                     Must be a 2d array." % str(data.shape))
            data = Variable(data)
        assert data.data.shape[-1] == self.latent_width,            "Latent shape %d != %d" % (data.data.shape[-1], self.latent_width)

        if self.flag_gpu:
            data.to_gpu()
        out = self._decode(data, test=test)

        out.to_cpu()

        # Linear mode keeps the raw layout; otherwise convert NCHW -> NHWC.
        final = out.data if self.mode == 'linear' else out.data.transpose(0, 2, 3, 1)

        return final
예제 #4
0
파일: main.py 프로젝트: sweetrabh/testeeg
    def forward(self, x_batch_curr, y_batch_curr, volatile=False):
        """Forward pass of the 4-layer sigmoid MLP for a single-row batch.

        Args:
            x_batch_curr: Input features for the current sample/batch.
            y_batch_curr: Binary labels; reshaped to a single row (1, -1).
            volatile (bool): Chainer volatility flag for inference.

        Returns:
            tuple: ``(accuracy, loss, y)`` with sigmoid cross-entropy loss.
        """
        # (Removed large blocks of commented-out previous/next-sample
        # difference computations that were dead code.)
        current_sample = Variable(x_batch_curr, volatile=volatile)

        y_batch_curr = np.asarray(y_batch_curr).reshape(1, -1)
        current_output = Variable(y_batch_curr, volatile=volatile)

        # Four stacked fully-connected layers with sigmoid activations.
        h1_current = F.sigmoid(self.model_to_use.x_h1(current_sample))
        h2_current = F.sigmoid(self.model_to_use.h1_h2(h1_current))
        h3_current = F.sigmoid(self.model_to_use.h2_h3(h2_current))
        h4_current = F.sigmoid(self.model_to_use.h3_h4(h3_current))
        y = self.model_to_use.h4_y(h4_current)

        loss = F.sigmoid_cross_entropy(y, current_output)
        # F.accuracy expects labels without the leading singleton axis, and
        # predictions as a column vector.
        current_output.data = np.squeeze(current_output.data)
        y.data = y.data.reshape(-1, 1)
        accuracy = F.accuracy(y, current_output)

        return accuracy, loss, y
예제 #5
0
    # Forward pass (NOTE(review): the enclosing training loop header is
    # outside this view; `batch_x`, `batch_y`, `W`, `b`, `batch_idx` are
    # defined there).
    y_pred = model(batch_x, W, b)

    # Loss function: MSE (mean squared error).
    loss = F.mean_squared_error(y_pred, batch_y)

    # Manually zero the gradients after updating weights.
    # (Important: Chainer accumulates gradients across backward passes.)
    W.cleargrad()
    b.cleargrad()

    # Backward pass
    loss.backward()

    # Apply gradients: plain SGD step on the raw parameter arrays.
    learning_rate = 0.1
    W.data = W.data - learning_rate * W.grad
    b.data = b.data - learning_rate * b.grad

    # Stop criterion: converged once the loss is small enough.
    if loss.data < 1.e-3:
        break

# Print the final loss and the learned vs. actual linear function.
print('Loss: {:>8.4f} after {:d} batches'.format(float(loss.data), batch_idx))
print('==> Learned function:\t' +
      'y = {:>8.4f} x + {:>8.4f}'.format(float(W.data), float(b.data)))
print('==> Actual function:\t' +
      'y = {:>8.4f} x + {:>8.4f}'.format(float(W_target), float(b_target)))
예제 #6
0
    test_acc = []

    # Learning loop: truncated BPTT over the training stream.
    # NOTE(review): enclosing def and the definitions of jump/n_epoch/
    # whole_len/state/accum_loss are outside this view.
    for i in xrange(jump * n_epoch):
        # Each batch row reads the stream at an offset of `jump` elements.
        x_batch = np.array([
            train_data[(jump * j + i) % whole_len]
            for j in six.moves.range(batchsize)
        ]).astype(np.float32)
        # Targets are the next element in the stream.
        # NOTE(review): `train_arget` looks like a typo for `train_target`;
        # it must be defined under this exact name elsewhere — confirm.
        y_batch = np.array([
            train_arget[(jump * j + i + 1) % whole_len]
            for j in six.moves.range(batchsize)
        ]).astype(np.int32)

        state, loss = model(x_batch, y_batch, state)

        # Accumulate the loss across timesteps for truncated backprop.
        accum_loss.data = accum_loss.data.astype(np.float32)
        accum_loss += loss

        if i % 1000 == 0:
            print('epoch = {} \n\ttrain loss: {}'.format(i, accum_loss.data))

        # Every bprop_len steps: backprop the accumulated loss, truncate the
        # history, then apply a clipped gradient update.
        if (i + 1) % bprop_len == 0:
            optimizer.zero_grads()
            accum_loss.backward()
            accum_loss.unchain_backward()  # truncate
            accum_loss = Variable(mod.zeros(()))
            optimizer.clip_grads(grad_clip)
            optimizer.update()

        epoch += 1
예제 #7
0
    train_loss = []
    train_acc = []
    test_loss = []
    test_acc = []

    # Learning loop: truncated BPTT over the training stream.
    # NOTE(review): enclosing def and the definitions of jump/n_epoch/
    # whole_len/state/accum_loss are outside this view.
    for i in xrange(jump * n_epoch):   
        x_batch = np.array([train_data[(jump * j+i) % whole_len]
                        for j in six.moves.range(batchsize)]).astype(np.float32)
        # NOTE(review): `train_arget` looks like a typo for `train_target`;
        # it must be defined under this exact name elsewhere — confirm.
        y_batch = np.array([train_arget[(jump * j + i+1) % whole_len]
                        for j in six.moves.range(batchsize)]).astype(np.int32)

        state, loss = model(x_batch, y_batch, state)

        # Accumulate the loss across timesteps for truncated backprop.
        accum_loss.data =  accum_loss.data.astype(np.float32)
        accum_loss += loss

        if i % 1000 ==0:
            print('epoch = {} \n\ttrain loss: {}'.format(i,accum_loss.data))

        # Every bprop_len steps: backprop the accumulated loss, truncate the
        # history, then apply a clipped gradient update.
        if (i + 1) % bprop_len == 0:
            optimizer.zero_grads()
            accum_loss.backward()
            accum_loss.unchain_backward()  # truncate
            accum_loss = Variable(mod.zeros(()))
            optimizer.clip_grads(grad_clip)
            optimizer.update()

        epoch += 1
    """

    xp = cuda.get_array_module(W.data)
    u = xp.random.normal(size=(1, W.shape[0])).astype(dtype="f")

    for _ in range(Ip):
        _v = F.normalize(F.matmul(u, W), eps=1e-12)
        _u = F.normalize(F.matmul(_v, F.transpose(W)), eps=1e-12)
    sigma = F.sum(F.linear(_u, F.transpose(W)) * _v)
    return sigma


# Sanity-check script: compare W scaled by its top singular value against the
# SVD reconstruction with all singular values replaced by 1.
W = 10 * np.random.normal(size=(2, 3)).astype(dtype="f")
W = Variable(W)
U, s, V = np.linalg.svd(W.data, full_matrices=True)
print(U.shape)
print(s.shape)
print(V.shape)
# Build S with identity singular values (same shape as W).
S = np.zeros(W.data.shape)
s_num = min(W.data.shape)
S[:s_num, :s_num] = np.identity(s.shape[0])
W_hat = np.dot(U, np.dot(S, V))
# Squared error between W / sigma_max and the unit-singular reconstruction.
error = np.sum(np.square(W.data / max(s) - W_hat))
print(np.allclose(W.data / max(s), W_hat))
print(error)
print(W.data / max(s))
# Overwrite W with the reconstruction and re-check its singular values.
W.data = W_hat
print(W.data)
U, s, V = np.linalg.svd(W.data, full_matrices=True)
print(s)
            loss = F.softmax_cross_entropy(a_y, t)

            # Backpropagation: clear stale gradients first (Chainer
            # accumulates them across backward passes).
            w_1.zerograd()
            w_2.zerograd()
            b_1.zerograd()
            b_2.zerograd()

            loss.backward(retain_grad=True)
            grad_w_1 = w_1.grad
            grad_w_2 = w_2.grad
            grad_b_1 = b_1.grad
            grad_b_2 = b_2.grad

            # Plain SGD update on the raw parameter arrays.
            w_1.data = w_1.data - learning_rate * grad_w_1
            w_2.data = w_2.data - learning_rate * grad_w_2
            b_1.data = b_1.data - learning_rate * grad_b_1
            b_2.data = b_2.data - learning_rate * grad_b_2

        time_finish = time.time()
        time_elapsed = time_finish - time_start
        print "time_elapsed:", time_elapsed

        # Error:
        # E (KxK) from 0.5 * (y-t)(y-t).T; dimensions {0.5 * (1xK)(Kx1)}
        # E = sum(t * log(y)) over (1xK)
        # Report cross-entropy error and accuracy on the training dataset.
        train_error, train_accuracy = error_and_accuracy(
            w_1, w_2, b_1, b_2, x_train, t_train)
        print "[train] Error:", train_error
            loss = F.softmax_cross_entropy(a_y, t)

            # Backpropagation: clear stale gradients first (Chainer
            # accumulates them across backward passes).
            w_1.zerograd()
            w_2.zerograd()
            b_1.zerograd()
            b_2.zerograd()

            loss.backward(retain_grad=True)
            grad_w_1 = w_1.grad
            grad_w_2 = w_2.grad
            grad_b_1 = b_1.grad
            grad_b_2 = b_2.grad

            # Plain SGD update on the raw parameter arrays.
            w_1.data = w_1.data - learning_rate * grad_w_1
            w_2.data = w_2.data - learning_rate * grad_w_2
            b_1.data = b_1.data - learning_rate * grad_b_1
            b_2.data = b_2.data - learning_rate * grad_b_2

        time_finish = time.time()
        time_elapsed = time_finish - time_start
        print "time_elapsed:", time_elapsed

        # Error:
        # E (KxK) from 0.5 * (y-t)(y-t).T; dimensions {0.5 * (1xK)(Kx1)}
        # E = sum(t * log(y)) over (1xK)
        # Report cross-entropy error and accuracy on the training dataset.
        train_error, train_accuracy = error_and_accuracy(w_1, w_2, b_1, b_2, 
                                                         x_train, t_train)
        print "[train] Error:", train_error