Code example #1
File: linear.py Project: kevinsfr/class
 def forward(self, X, dump_chunks=-1):
     A = self.A
     b = self.b
     """ Layer 1: linear activation """
     layer1 = linear_forward(X, A, b)
     if dump_chunks > 0:
         dump_big_matrix(layer1, "lin_l1_mat", dump_chunks)
     return [layer1]
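
None of these examples define dump_big_matrix. A plausible minimal sketch, assuming it splits a matrix row-wise into dump_chunks pieces and saves each piece to disk; the .npy format and the file naming are assumptions, not taken from the source:

import numpy as np

def dump_big_matrix(M, name, chunks):
    # Hypothetical helper: split M row-wise into `chunks` pieces and
    # save each as <name>_<i>.npy (format and naming are assumed).
    M = np.asarray(M)
    for i, piece in enumerate(np.array_split(M, chunks, axis=0)):
        np.save("%s_%d.npy" % (name, i), piece)
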
Code example #2
File: linear.py Project: YuguangTong/spark-training
 def forward(self, X, dump_chunks = -1):
   A = self.A
   b = self.b
   """ Layer 1: linear activation """
   layer1 = linear_forward(X, A, b)
   if dump_chunks > 0:
     dump_big_matrix(layer1, "lin_l1_mat", dump_chunks)
   return [layer1]
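
Both versions assume a linear_forward(X, A, b) helper computing an affine map. A minimal NumPy sketch, assuming X has one row per sample (any trailing dimensions flattened), A is (D, K), and b is a length-K bias; the shape convention is an assumption:

import numpy as np

def linear_forward(X, A, b):
    # Affine layer: flatten each sample, then scores = X . A + b.
    return X.reshape(X.shape[0], -1).dot(A) + b
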
Code example #3
File: linear.py Project: YuguangTong/spark-training
  def backward(self, X, layers, Y, dump_chunks = -1):
    A = self.A
    b = self.b
    layer1 = layers[-1]

    """ softmax classification """
    L, dLdl1 = softmax_loss(layer1, Y)
    if dump_chunks > 0:
      dump_big_matrix(dLdl1, "lin_dLdl1_mat", dump_chunks)

    """ regularization: loss = 1/2 * lam * sum_nk(A_nk * A_nk) """
    L += 0.5 * self.lam * np.sum(A * A) 

    """ backpropagation for Layer 1 """
    dLdX, dLdA, dLdb = linear_backward(dLdl1, X, A)
    if dump_chunks > 0:
      dump_big_matrix(dLdX, "lin_dLdX_mat", dump_chunks)
      dump_big_matrix(dLdA, "lin_dLdA_mat", 1)
      dump_big_matrix(dLdb, "lin_dLdb_mat", 1)

    """ regularization gradient """
    dLdA = dLdA.reshape(A.shape)
    dLdA += self.lam * A

    """ tune the parameter """
    self.v = self.mu * self.v - self.rho * dLdA
    self.A += self.v
    self.b += -self.rho * dLdb

    return L
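
softmax_loss returns both the mean cross-entropy loss L and its gradient with respect to the scores. A standard sketch, assuming Y is a vector of integer class labels (the label format is an assumption):

import numpy as np

def softmax_loss(scores, Y):
    # Numerically stable softmax followed by mean cross-entropy.
    N = scores.shape[0]
    shifted = scores - np.max(scores, axis=1, keepdims=True)
    probs = np.exp(shifted)
    probs /= np.sum(probs, axis=1, keepdims=True)
    L = -np.sum(np.log(probs[np.arange(N), Y])) / N
    dscores = probs.copy()
    dscores[np.arange(N), Y] -= 1.0   # gradient of the loss w.r.t. scores
    return L, dscores / N
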
Code example #4
File: linear.py Project: kevinsfr/class
    def backward(self, X, layers, Y, dump_chunks=-1):
        A = self.A
        b = self.b
        layer1 = layers[-1]
        """ softmax classification """
        L, dLdl1 = softmax_loss(layer1, Y)
        if dump_chunks > 0:
            dump_big_matrix(dLdl1, "lin_dLdl1_mat", dump_chunks)
        """ regularization: loss = 1/2 * lam * sum_nk(A_nk * A_nk) """
        L += 0.5 * self.lam * np.sum(A * A)
        """ backpropagation for Layer 1 """
        dLdX, dLdA, dLdb = linear_backward(dLdl1, X, A)
        if dump_chunks > 0:
            dump_big_matrix(dLdX, "lin_dLdX_mat", dump_chunks)
            dump_big_matrix(dLdA, "lin_dLdA_mat", 1)
            dump_big_matrix(dLdb, "lin_dLdb_mat", 1)
        """ regularization gradient """
        dLdA = dLdA.reshape(A.shape)
        dLdA += self.lam * A
        """ tune the parameter """
        self.v = self.mu * self.v - self.rho * dLdA
        self.A += self.v
        self.b += -self.rho * dLdb

        return L
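
linear_backward is the reverse of the affine map. The callers reshape dLdA back to A.shape afterwards, so the real helper may return a flat gradient; the sketch below returns the standard shapes directly (dLdX = dOut . A.T, dLdA = X.T . dOut, dLdb = column sums), under the same shape assumptions as the linear_forward sketch above:

import numpy as np

def linear_backward(dOut, X, A):
    # Gradients of scores = X . A + b w.r.t. input, weights, and bias.
    Xf = X.reshape(X.shape[0], -1)          # flatten any trailing dims
    dLdX = dOut.dot(A.T).reshape(X.shape)
    dLdA = Xf.T.dot(dOut)
    dLdb = np.sum(dOut, axis=0)
    return dLdX, dLdA, dLdb
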
Code example #5
    def forward(self, X, dump_chunks=-1):
        A1 = self.A1
        b1 = self.b1
        A3 = self.A3
        b3 = self.b3
        """
    Layer 1 : linear
    Layer 2 : ReLU
    Layer 3 : linear
    """
        layer1 = linear_forward(X, A1, b1)
        layer2 = ReLU_forward(layer1)
        layer3 = linear_forward(layer2, A3, b3)

        if dump_chunks > 0:
            dump_big_matrix(layer1, "nn_l1_mat", dump_chunks)
            dump_big_matrix(layer2, "nn_l2_mat", dump_chunks)
            dump_big_matrix(layer3, "nn_l3_mat", dump_chunks)

        return [layer1, layer2, layer3]
Code example #6
File: nn.py Project: bofie123/proj4_starter
  def forward(self, X, dump_chunks = -1):
    A1 = self.A1
    b1 = self.b1
    A3 = self.A3
    b3 = self.b3

    """
    Layer 1 : linear
    Layer 2 : ReLU
    Layer 3 : linear
    """
    layer1 = linear_forward(X, A1, b1)
    layer2 = ReLU_forward(layer1)
    layer3 = linear_forward(layer2, A3, b3)

    if dump_chunks > 0:
      dump_big_matrix(layer1, "nn_l1_mat", dump_chunks)
      dump_big_matrix(layer2, "nn_l2_mat", dump_chunks)
      dump_big_matrix(layer3, "nn_l3_mat", dump_chunks)

    return [layer1, layer2, layer3]
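
ReLU_forward and ReLU_backward (Layer 2 here) are elementwise and have no parameters; a minimal sketch:

import numpy as np

def ReLU_forward(X):
    # Elementwise max(0, x).
    return np.maximum(0, X)

def ReLU_backward(dOut, X):
    # Gradient passes through only where the forward input was positive.
    return dOut * (X > 0)
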
Code example #7
    def backward(self, X, layers, Y, dump_chunks=-1):
        A1 = self.A1
        b1 = self.b1
        A3 = self.A3
        b3 = self.b3
        layer1, layer2, layer3 = layers
        """ softmax classification """
        L, dLdl3 = softmax_loss(layer3, Y)
        """ backpropagation for Layer 3 """
        dLdl2, dLdA3, dLdb3 = linear_backward(dLdl3, layer2, A3)
        """ backpropagation for Layer 2 """
        dLdl1 = ReLU_backward(dLdl2, layer1)
        """ backpropagation for Layer 1 """
        dLdX, dLdA1, dLdb1 = linear_backward(dLdl1, X, A1)
        """ regularization """
        L += 0.5 * self.lam * (np.sum(A1 * A1) + np.sum(A3 * A3))
        """ regularization gradient """
        dLdA3 = dLdA3.reshape(A3.shape)
        dLdA1 = dLdA1.reshape(A1.shape)
        dLdA3 += self.lam * A3
        dLdA1 += self.lam * A1
        """ tune the parameter """
        self.v1 = self.mu * self.v1 - self.rho * dLdA1
        self.v3 = self.mu * self.v3 - self.rho * dLdA3
        self.A1 += self.v1
        self.A3 += self.v3
        self.b1 += -self.rho * dLdb1
        self.b3 += -self.rho * dLdb3
        """ dump """
        if dump_chunks > 0:
            dump_big_matrix(dLdl3, "nn_dLdl3_mat", dump_chunks)
            dump_big_matrix(dLdl2, "nn_dLdl2_mat", dump_chunks)
            dump_big_matrix(dLdl1, "nn_dLdl1_mat", dump_chunks)
            dump_big_matrix(dLdX, "nn_dLdX_mat", dump_chunks)
            dump_big_matrix(dLdA3, "nn_dLdA3_mat", 1)
            dump_big_matrix(dLdb3, "nn_dLdb3_mat", 1)
            dump_big_matrix(dLdA1, "nn_dLdA1_mat", 1)
            dump_big_matrix(dLdb1, "nn_dLdb1_mat", 1)

        return L
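
The parameter update at the end of backward is classical momentum SGD for the weight matrices (velocity v, momentum mu, learning rate rho) and plain gradient descent for the biases. Isolated as a standalone step, with names mirroring the examples, it is just:

def momentum_step(param, v, grad, mu, rho):
    # v <- mu * v - rho * grad;  param <- param + v
    v = mu * v - rho * grad
    return param + v, v
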
Code example #8
    def backward(self, X, layers, Y, dump_chunks=-1):
        A1 = self.A1
        b1 = self.b1
        S1 = self.S1
        P1 = self.P1

        F3 = self.F3
        S3 = self.S3

        A4 = self.A4
        b4 = self.b4
        S4 = self.S4
        P4 = self.P4

        F6 = self.F6
        S6 = self.S6

        A7 = self.A7
        b7 = self.b7
        S7 = self.S7
        P7 = self.P7

        F9 = self.F9
        S9 = self.S9

        A10 = self.A10
        b10 = self.b10

        (layer1, X_col1), layer2, (layer3, X_idx3), \
        (layer4, X_col4), layer5, (layer6, X_idx6), \
        (layer7, X_col7), layer8, (layer9, X_idx9), layer10 = layers

        s = time()
        L, dLdl10 = softmax_loss(layer10, Y)
        e = time()
        if self.verbose:
            print """ Softmax loss calc done : %.2f sec """ % (e - s)

        s = time()
        dLdl9, dLdA10, dLdb10 = linear_backward(dLdl10, layer9, A10)
        e = time()
        if self.verbose:
            print """ Layer10: FC (1 x 1 x 10) backward done: %.2f sec """ % (
                e - s)
        """ Pool (4 x 4 x 20) Backward """
        s = time()
        dLdl8 = max_pool_backward(dLdl9, layer8, X_idx9, F9, S9)
        e = time()
        if self.verbose:
            print """ Layer9: Pool (4 x 4 x 20) backward done: %.2f sec """ % (
                e - s)

        s = time()
        dLdl7 = ReLU_backward(dLdl8, layer7)
        e = time()
        if self.verbose:
            print """ Layer8: ReLU (8 x 8 x 20) backward done: %.2f sec """ % (
                e - s)

        s = time()
        dLdl6, dLdA7, dLdb7 = conv_backward(dLdl7, layer6, X_col7, A7, S7, P7)
        e = time()
        if self.verbose:
            print """ Layer7: Conv (8 x 8 x 20) backward done: %.2f sec """ % (
                e - s)

        s = time()
        dLdl5 = max_pool_backward(dLdl6, layer5, X_idx6, F6, S6)
        e = time()
        if self.verbose:
            print """ Layer6: Pool (8 x 8 x 20) backward done: %.2f sec """ % (
                e - s)

        s = time()
        dLdl4 = ReLU_backward(dLdl5, layer4)
        e = time()
        if self.verbose:
            print """ Layer5: ReLU (16 x 16 x 20) backward done: %.2f sec """ % (
                e - s)

        s = time()
        dLdl3, dLdA4, dLdb4 = conv_backward(dLdl4, layer3, X_col4, A4, S4, P4)
        e = time()
        if self.verbose:
            print """ Layer4: Conv (16 x 16 x 20) backward done: %.2f sec """ % (
                e - s)

        s = time()
        dLdl2 = max_pool_backward(dLdl3, layer2, X_idx3, F3, S3)
        e = time()
        if self.verbose:
            print """ Layer3: Pool (16 x 16 x 16) backward done: %.2f sec """ % (
                e - s)

        s = time()
        dLdl1 = ReLU_backward(dLdl2, layer1)
        e = time()
        if self.verbose:
            print """ Layer2: ReLU (32 x 32 x 16) backward done: %.2f sec """ % (
                e - s)

        s = time()
        dLdX, dLdA1, dLdb1 = conv_backward(dLdl1, X, X_col1, A1, S1, P1)
        e = time()
        if self.verbose:
            print """ Layer1: Conv (32 x 32 x 16) backward done: %.2f sec """ % (
                e - s)
        """ regularization """
        L += 0.5 * self.lam * np.sum(A1 * A1)
        L += 0.5 * self.lam * np.sum(A4 * A4)
        L += 0.5 * self.lam * np.sum(A7 * A7)
        L += 0.5 * self.lam * np.sum(A10 * A10)
        """ regularization gradient """
        dLdA10 = dLdA10.reshape(A10.shape)
        dLdA7 = dLdA7.reshape(A7.shape)
        dLdA4 = dLdA4.reshape(A4.shape)
        dLdA1 = dLdA1.reshape(A1.shape)
        dLdA10 += self.lam * A10
        dLdA7 += self.lam * A7
        dLdA4 += self.lam * A4
        dLdA1 += self.lam * A1
        """ tune the parameter """
        self.v1 = self.mu * self.v1 - self.rho * dLdA1
        self.v4 = self.mu * self.v4 - self.rho * dLdA4
        self.v7 = self.mu * self.v7 - self.rho * dLdA7
        self.v10 = self.mu * self.v10 - self.rho * dLdA10
        self.A1 += self.v1
        self.A4 += self.v4
        self.A7 += self.v7
        self.A10 += self.v10
        self.b1 += -self.rho * dLdb1
        self.b4 += -self.rho * dLdb4
        self.b7 += -self.rho * dLdb7
        self.b10 += -self.rho * dLdb10
        """ dump """
        if dump_chunks > 0:
            dump_big_matrix(dLdl10, "cnn_dLdl10_mat", dump_chunks)
            dump_big_matrix(dLdl9, "cnn_dLdl9_mat", dump_chunks)
            dump_big_matrix(dLdl8, "cnn_dLdl8_mat", dump_chunks)
            dump_big_matrix(dLdl7, "cnn_dLdl7_mat", dump_chunks)
            dump_big_matrix(dLdl6, "cnn_dLdl6_mat", dump_chunks)
            dump_big_matrix(dLdl5, "cnn_dLdl5_mat", dump_chunks)
            dump_big_matrix(dLdl4, "cnn_dLdl4_mat", dump_chunks)
            dump_big_matrix(dLdl3, "cnn_dLdl3_mat", dump_chunks)
            dump_big_matrix(dLdl2, "cnn_dLdl2_mat", dump_chunks)
            dump_big_matrix(dLdl1, "cnn_dLdl1_mat", dump_chunks)
            dump_big_matrix(dLdX, "cnn_dLdX_mat", dump_chunks)
            dump_big_matrix(dLdA10, "cnn_dLdA10_mat", 1)
            dump_big_matrix(dLdb10, "cnn_dLdb10_mat", 1)
            dump_big_matrix(dLdA7, "cnn_dLdA7_mat", 1)
            dump_big_matrix(dLdb7, "cnn_dLdb7_mat", 1)
            dump_big_matrix(dLdA4, "cnn_dLdA4_mat", 1)
            dump_big_matrix(dLdb4, "cnn_dLdb4_mat", 1)
            dump_big_matrix(dLdA1, "cnn_dLdA1_mat", 1)
            dump_big_matrix(dLdb1, "cnn_dLdb1_mat", 1)

        return L
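
max_pool_forward/max_pool_backward carry argmax indices (the X_idx* values) so the backward pass can route each gradient to the element that won the max. A naive loop-based sketch, assuming NCHW inputs, square F x F windows, and stride S; the real helpers' index encoding is unknown, so raw per-window argmax positions are stored here:

import numpy as np

def max_pool_forward(X, F, S):
    # Returns the pooled map plus the argmax index inside each window.
    N, C, H, W = X.shape
    Ho, Wo = (H - F) // S + 1, (W - F) // S + 1
    out = np.zeros((N, C, Ho, Wo))
    idx = np.zeros((N, C, Ho, Wo), dtype=int)
    for i in range(Ho):
        for j in range(Wo):
            win = X[:, :, i*S:i*S+F, j*S:j*S+F].reshape(N, C, -1)
            idx[:, :, i, j] = np.argmax(win, axis=2)
            out[:, :, i, j] = np.max(win, axis=2)
    return out, idx

def max_pool_backward(dOut, X, idx, F, S):
    # Scatter each window's gradient back to its argmax position.
    dX = np.zeros_like(X)
    Ho, Wo = dOut.shape[2], dOut.shape[3]
    for n in range(X.shape[0]):
        for c in range(X.shape[1]):
            for i in range(Ho):
                for j in range(Wo):
                    k = idx[n, c, i, j]
                    dX[n, c, i*S + k // F, j*S + k % F] += dOut[n, c, i, j]
    return dX
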
Code example #9
    def forward(self, X, dump_chunks=-1):
        A1 = self.A1
        b1 = self.b1
        S1 = self.S1
        P1 = self.P1

        F3 = self.F3
        S3 = self.S3

        A4 = self.A4
        b4 = self.b4
        S4 = self.S4
        P4 = self.P4

        F6 = self.F6
        S6 = self.S6

        A7 = self.A7
        b7 = self.b7
        S7 = self.S7
        P7 = self.P7

        F9 = self.F9
        S9 = self.S9

        A10 = self.A10
        b10 = self.b10

        s = time()
        layer1, X_col1 = conv_forward(X, A1, b1, S1, P1)
        e = time()
        if self.verbose:
            print """ Layer1: Conv (32 x 32 x 16) forward doen: %.2f sec """ % (
                e - s)

        s = time()
        layer2 = ReLU_forward(layer1)
        e = time()
        if self.verbose:
            print """ Layer2: ReLU (32 x 32 x 16) forward done: %.2f sec """ % (
                e - s)

        s = time()
        layer3, X_idx3 = max_pool_forward(layer2, F3, S3)
        e = time()
        if self.verbose:
            print """ Layer3: Pool (16 x 16 x 16) forward done: %.2f sec """ % (
                e - s)

        s = time()
        layer4, X_col4 = conv_forward(layer3, A4, b4, S4, P4)
        e = time()
        if self.verbose:
            print """ Layer4: Conv (16 x 16 x 20) forward done: %.2f sec """ % (
                e - s)

        s = time()
        layer5 = ReLU_forward(layer4)
        e = time()
        if self.verbose:
            print """ Layer5: ReLU (16 x 16 x 20) forward done: %.2f sec """ % (
                e - s)

        s = time()
        layer6, X_idx6 = max_pool_forward(layer5, F6, S6)
        e = time()
        if self.verbose:
            print """ Layer6: Pool (8 x 8 x 20) forward done: %.2f sec """ % (
                e - s)

        s = time()
        layer7, X_col7 = conv_forward(layer6, A7, b7, S7, P7)
        e = time()
        if self.verbose:
            print """ Layer7: Conv (8 x 8 x 20) forward: %.2f sec """ % (e - s)

        s = time()
        layer8 = ReLU_forward(layer7)
        e = time()
        if self.verbose:
            print """ Layer8: ReLU (8 x 8 x 20) forward done: %.2f sec """ % (
                e - s)

        s = time()
        layer9, X_idx9 = max_pool_forward(layer8, F9, S9)
        e = time()
        if self.verbose:
            print """ Layer9: Pool (4 x 4 x 20) forward done: %.2f sec """ % (
                e - s)

        s = time()
        layer10 = linear_forward(layer9, A10, b10)
        e = time()
        if self.verbose:
            print """ Layer10: FC (1 x 1 x 10) forward done: %.2f sec """ % (
                e - s)

        if dump_chunks > 0:
            dump_big_matrix(layer1, "cnn_l1_mat", dump_chunks)
            dump_big_matrix(layer2, "cnn_l2_mat", dump_chunks)
            dump_big_matrix(layer3, "cnn_l3_mat", dump_chunks)
            dump_big_matrix(layer4, "cnn_l4_mat", dump_chunks)
            dump_big_matrix(layer5, "cnn_l5_mat", dump_chunks)
            dump_big_matrix(layer6, "cnn_l6_mat", dump_chunks)
            dump_big_matrix(layer7, "cnn_l7_mat", dump_chunks)
            dump_big_matrix(layer8, "cnn_l8_mat", dump_chunks)
            dump_big_matrix(layer9, "cnn_l9_mat", dump_chunks)
            dump_big_matrix(layer10, "cnn_l10_mat", dump_chunks)

        return [(layer1, X_col1), layer2, (layer3, X_idx3), (layer4, X_col4),
                layer5, (layer6, X_idx6), (layer7, X_col7), layer8,
                (layer9, X_idx9), layer10]
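
conv_forward returns the output plus an im2col buffer (the X_col* values) that conv_backward reuses; ignoring that buffer, the arithmetic is plain cross-correlation with stride S and zero-padding P. A naive sketch, assuming NCHW input and (K, C, F, F) filters (a real implementation would vectorize via im2col):

import numpy as np

def conv_forward_naive(X, A, b, S, P):
    # Direct convolution; the real helper also returns an im2col buffer,
    # omitted here for clarity.
    N, C, H, W = X.shape
    K, _, F, _ = A.shape
    Xp = np.pad(X, ((0, 0), (0, 0), (P, P), (P, P)), mode='constant')
    Ho, Wo = (H + 2*P - F) // S + 1, (W + 2*P - F) // S + 1
    out = np.zeros((N, K, Ho, Wo))
    for n in range(N):
        for k in range(K):
            for i in range(Ho):
                for j in range(Wo):
                    patch = Xp[n, :, i*S:i*S+F, j*S:j*S+F]
                    out[n, k, i, j] = np.sum(patch * A[k]) + b[k]
    return out
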
Code example #10
File: nn.py Project: bofie123/proj4_starter
  def backward(self, X, layers, Y, dump_chunks = -1):
    A1 = self.A1
    b1 = self.b1
    A3 = self.A3
    b3 = self.b3
    layer1, layer2, layer3 = layers

    """ softmax classification """
    L, dLdl3 = softmax_loss(layer3, Y)

    """ backpropagation for Layer 3 """
    dLdl2, dLdA3, dLdb3 = linear_backward(dLdl3, layer2, A3)

    """ backpropagation for Layer 2 """
    dLdl1 = ReLU_backward(dLdl2, layer1)

    """ backpropagation for Layer 1 """
    dLdX, dLdA1, dLdb1 = linear_backward(dLdl1, X, A1)

    """ regularization """
    L += 0.5 * self.lam * (np.sum(A1*A1) + np.sum(A3*A3))

    """ regularization gradient """
    dLdA3 = dLdA3.reshape(A3.shape)
    dLdA1 = dLdA1.reshape(A1.shape)
    dLdA3 += self.lam * A3
    dLdA1 += self.lam * A1

    """ tune the parameter """
    self.v1 = self.mu * self.v1 - self.rho * dLdA1
    self.v3 = self.mu * self.v3 - self.rho * dLdA3
    self.A1 += self.v1
    self.A3 += self.v3
    self.b1 += - self.rho * dLdb1
    self.b3 += - self.rho * dLdb3

    """ dump """
    if dump_chunks > 0:
      dump_big_matrix(dLdl3, "nn_dLdl3_mat", dump_chunks)
      dump_big_matrix(dLdl2, "nn_dLdl2_mat", dump_chunks)
      dump_big_matrix(dLdl1, "nn_dLdl1_mat", dump_chunks)
      dump_big_matrix(dLdX, "nn_dLdX_mat", dump_chunks)
      dump_big_matrix(dLdA3, "nn_dLdA3_mat", 1)
      dump_big_matrix(dLdb3, "nn_dLdb3_mat", 1)
      dump_big_matrix(dLdA1, "nn_dLdA1_mat", 1)
      dump_big_matrix(dLdb1, "nn_dLdb1_mat", 1)

    return L
Code example #11
File: cnn.py Project: cs61c-spring2015/proj4_starter
  def backward(self, X, layers, Y, dump_chunks = -1):
    A1 = self.A1
    b1 = self.b1
    S1 = self.S1
    P1 = self.P1

    F3 = self.F3
    S3 = self.S3

    A4 = self.A4
    b4 = self.b4
    S4 = self.S4
    P4 = self.P4

    F6 = self.F6
    S6 = self.S6

    A7 = self.A7
    b7 = self.b7
    S7 = self.S7
    P7 = self.P7

    F9 = self.F9
    S9 = self.S9

    A10 = self.A10
    b10 = self.b10

    (layer1, X_col1), layer2, (layer3, X_idx3), \
    (layer4, X_col4), layer5, (layer6, X_idx6), \
    (layer7, X_col7), layer8, (layer9, X_idx9), layer10 = layers

    s = time()
    L, dLdl10 = softmax_loss(layer10, Y)
    e = time()
    if self.verbose:
      print """ Softmax loss calc done : %.2f sec """ % (e - s)

    s = time()
    dLdl9, dLdA10, dLdb10 = linear_backward(dLdl10, layer9 , A10)
    e = time()
    if self.verbose:
      print """ Layer10: FC (1 x 1 x 10) backward done: %.2f sec """ % (e - s)

    """ Pool (4 x 4 x 20) Backward """
    s = time()
    dLdl8 = max_pool_backward(dLdl9, layer8, X_idx9, F9, S9)
    e = time()
    if self.verbose:
      print """ Layer9: Pool (4 x 4 x 20) backward done: %.2f sec """ % (e - s)

    s = time()
    dLdl7 = ReLU_backward(dLdl8, layer7)
    e = time()
    if self.verbose:
      print """ Layer8: ReLU (8 x 8 x 20) backward done: %.2f sec """ % (e - s)

    s = time()
    dLdl6, dLdA7, dLdb7 = conv_backward(dLdl7, layer6, X_col7, A7, S7, P7)
    e = time()
    if self.verbose:
      print """ Layer7: Conv (8 x 8 x 20) backward done: %.2f sec """ % (e - s)

    s = time()
    dLdl5 = max_pool_backward(dLdl6, layer5, X_idx6, F6, S6)
    e = time()
    if self.verbose:
      print """ Layer6: Pool (8 x 8 x 20) backward done: %.2f sec """ % (e - s)

    s = time()
    dLdl4 = ReLU_backward(dLdl5, layer4)
    e = time()
    if self.verbose:
      print """ Layer5: ReLU (16 x 16 x 20) backward done: %.2f sec """ % (e - s)

    s = time()
    dLdl3, dLdA4, dLdb4 = conv_backward(dLdl4, layer3, X_col4, A4, S4, P4)
    e = time()
    if self.verbose:
      print """ Layer4: Conv (16 x 16 x 20) backward done: %.2f sec """ % (e - s)

    s = time()
    dLdl2 = max_pool_backward(dLdl3, layer2, X_idx3, F3, S3)
    e = time()
    if self.verbose:
      print """ Layer3: Pool (16 x 16 x 16) backward done: %.2f sec """ % (e - s)

    s = time()
    dLdl1 = ReLU_backward(dLdl2, layer1)
    e = time()
    if self.verbose:
      print """ Layer2: ReLU (32 x 32 x 16) backward done: %.2f sec """ % (e - s)

    s = time()
    dLdX, dLdA1, dLdb1 = conv_backward(dLdl1, X, X_col1, A1, S1, P1)
    e = time()
    if self.verbose:
      print """ Layer1: Conv (32 x 32 x 16) backward done: %.2f sec """ % (e - s)

    """ regularization """
    L += 0.5 * self.lam * np.sum(A1*A1)
    L += 0.5 * self.lam * np.sum(A4*A4)
    L += 0.5 * self.lam * np.sum(A7*A7)
    L += 0.5 * self.lam * np.sum(A10*A10)

    """ regularization gradient """
    dLdA10 = dLdA10.reshape(A10.shape)
    dLdA7 = dLdA7.reshape(A7.shape)
    dLdA4 = dLdA4.reshape(A4.shape)
    dLdA1 = dLdA1.reshape(A1.shape)
    dLdA10 += self.lam * A10
    dLdA7 += self.lam * A7
    dLdA4 += self.lam * A4
    dLdA1 += self.lam * A1

    """ tune the parameter """
    self.v1 = self.mu * self.v1 - self.rho * dLdA1
    self.v4 = self.mu * self.v4 - self.rho * dLdA4
    self.v7 = self.mu * self.v7 - self.rho * dLdA7
    self.v10 = self.mu * self.v10 - self.rho * dLdA10
    self.A1 += self.v1
    self.A4 += self.v4 
    self.A7 += self.v7
    self.A10 += self.v10
    self.b1 += -self.rho * dLdb1
    self.b4 += -self.rho * dLdb4
    self.b7 += -self.rho * dLdb7
    self.b10 += -self.rho * dLdb10

    """ dump """
    if dump_chunks > 0:
      dump_big_matrix(dLdl10, "cnn_dLdl10_mat", dump_chunks)
      dump_big_matrix(dLdl9, "cnn_dLdl9_mat", dump_chunks)
      dump_big_matrix(dLdl8, "cnn_dLdl8_mat", dump_chunks)
      dump_big_matrix(dLdl7, "cnn_dLdl7_mat", dump_chunks)
      dump_big_matrix(dLdl6, "cnn_dLdl6_mat", dump_chunks)
      dump_big_matrix(dLdl5, "cnn_dLdl5_mat", dump_chunks)
      dump_big_matrix(dLdl4, "cnn_dLdl4_mat", dump_chunks)
      dump_big_matrix(dLdl3, "cnn_dLdl3_mat", dump_chunks)
      dump_big_matrix(dLdl2, "cnn_dLdl2_mat", dump_chunks)
      dump_big_matrix(dLdl1, "cnn_dLdl1_mat", dump_chunks)
      dump_big_matrix(dLdX, "cnn_dLdX_mat", dump_chunks)
      dump_big_matrix(dLdA10, "cnn_dLdA10_mat", 1)
      dump_big_matrix(dLdb10, "cnn_dLdb10_mat", 1)
      dump_big_matrix(dLdA7, "cnn_dLdA7_mat", 1)
      dump_big_matrix(dLdb7, "cnn_dLdb7_mat", 1)
      dump_big_matrix(dLdA4, "cnn_dLdA4_mat", 1)
      dump_big_matrix(dLdb4, "cnn_dLdb4_mat", 1)
      dump_big_matrix(dLdA1, "cnn_dLdA1_mat", 1)
      dump_big_matrix(dLdb1, "cnn_dLdb1_mat", 1)

    return L
Code example #12
File: cnn.py Project: cs61c-spring2015/proj4_starter
  def forward(self, X, dump_chunks = -1):
    A1 = self.A1
    b1 = self.b1
    S1 = self.S1
    P1 = self.P1

    F3 = self.F3
    S3 = self.S3

    A4 = self.A4
    b4 = self.b4
    S4 = self.S4
    P4 = self.P4

    F6 = self.F6
    S6 = self.S6

    A7 = self.A7
    b7 = self.b7
    S7 = self.S7
    P7 = self.P7

    F9 = self.F9
    S9 = self.S9

    A10 = self.A10
    b10 = self.b10

    s = time()
    layer1, X_col1 = conv_forward(X, A1, b1, S1, P1)
    e = time()
    if self.verbose:
      print """ Layer1: Conv (32 x 32 x 16) forward doen: %.2f sec """ % (e - s)
 
    s = time()
    layer2 = ReLU_forward(layer1)
    e = time()
    if self.verbose:
      print """ Layer2: ReLU (32 x 32 x 16) forward done: %.2f sec """ % (e - s)

    s = time()
    layer3, X_idx3 = max_pool_forward(layer2, F3, S3)
    e = time()
    if self.verbose:
      print """ Layer3: Pool (16 x 16 x 16) forward done: %.2f sec """ % (e - s)

    s = time()
    layer4, X_col4 = conv_forward(layer3, A4, b4, S4, P4)
    e = time()
    if self.verbose:
      print """ Layer4: Conv (16 x 16 x 20) forward done: %.2f sec """ % (e - s)

    s = time()
    layer5  = ReLU_forward(layer4)
    e = time()
    if self.verbose:
      print """ Layer5: ReLU (16 x 16 x 20) forward done: %.2f sec """ % (e - s)

    s = time()
    layer6, X_idx6 = max_pool_forward(layer5, F6, S6)
    e = time()
    if self.verbose:
      print """ Layer6: Pool (8 x 8 x 20) forward done: %.2f sec """ % (e - s)

    s = time()
    layer7, X_col7 = conv_forward(layer6, A7, b7, S7, P7)
    e = time()
    if self.verbose:
      print """ Layer7: Conv (8 x 8 x 20) forward: %.2f sec """ % (e - s)

    s = time()
    layer8 = ReLU_forward(layer7)
    e = time()
    if self.verbose:
      print """ Layer8: ReLU (8 x 8 x 20) forward done: %.2f sec """ % (e - s)

    s = time()
    layer9, X_idx9 = max_pool_forward(layer8, F9, S9)
    e = time()
    if self.verbose:
      print """ Layer9: Pool (4 x 4 x 20) forward done: %.2f sec """ % (e - s)

    s = time()
    layer10 = linear_forward(layer9, A10, b10)
    e = time()
    if self.verbose:
      print """ Layer10: FC (1 x 1 x 10) forward done: %.2f sec """ % (e - s)

    if dump_chunks > 0:
      dump_big_matrix(layer1, "cnn_l1_mat", dump_chunks)
      dump_big_matrix(layer2, "cnn_l2_mat", dump_chunks)
      dump_big_matrix(layer3, "cnn_l3_mat", dump_chunks)
      dump_big_matrix(layer4, "cnn_l4_mat", dump_chunks)
      dump_big_matrix(layer5, "cnn_l5_mat", dump_chunks)
      dump_big_matrix(layer6, "cnn_l6_mat", dump_chunks)
      dump_big_matrix(layer7, "cnn_l7_mat", dump_chunks)
      dump_big_matrix(layer8, "cnn_l8_mat", dump_chunks)
      dump_big_matrix(layer9, "cnn_l9_mat", dump_chunks)
      dump_big_matrix(layer10, "cnn_l10_mat", dump_chunks)

    return [
      (layer1, X_col1), layer2, (layer3, X_idx3),
      (layer4, X_col4), layer5, (layer6, X_idx6),
      (layer7, X_col7), layer8, (layer9, X_idx9), layer10]