Example #1
    def forward_pass(self):
        # sample_size, num_samples = self.X.shape

        X_layer_out = np.zeros((self._sample_size, self._num_samples, self._num_layers + 1))
        X_layer_out[:, :, 0] = self.X  # slice 0 holds the input layer

        for k in range(self._num_layers):
            W, b = self.get_layer_weights(k, self._sample_size)

            prev_x = X_layer_out[:, :, k]

            X_layer_out[:, :, k + 1] = th.tanh(prev_x, W, b)

        loss_weights_idx = self._num_layers * ((self._sample_size ** 2) + self._sample_size)
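        # Theta layout: num_layers blocks of (b, W), each of size
        # sample_size + sample_size**2, followed by the output-layer b and W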
        b = self.Theta[loss_weights_idx: loss_weights_idx + self._num_labels]
        W = self.Theta[
            loss_weights_idx + self._num_labels: loss_weights_idx + self._num_labels + self._sample_size * self._num_labels]
        W = W.reshape(self._sample_size, self._num_labels)

        # Compute loss and probabilities
        all_prob = sm.softmax(X_layer_out[:, :, self._num_layers], W, b)
        relevant_prob = self.C * np.log(all_prob + self.eps)
        loss = -np.sum(relevant_prob)

        return all_prob, loss, X_layer_out
Example #2
    def calculate_loss(self, x, y):

        ## input and label sequences must be the same length
        assert len(y) == len(x)
        
        output = Softmax()
        layers = self.forward_propogation(x)

        ## initialize loss to 0
        loss = 0

        ## accumulate the softmax loss over the layers' outputs
        for i, layer in enumerate(layers):
            loss += output.loss(layer.mulya, y[i])

        return loss/float(len(y))
Example #3
    def backward_pass(self, X_layer_out):
        # sample_size, num_samples = self.X.shape
        # num_labels, _ = self.C.shape

        # Extract loss weights from Theta
        loss_weights_idx = self._num_layers * ((self._sample_size ** 2) + self._sample_size)
        loss_weights_end_loc = loss_weights_idx + self._num_labels + self._sample_size * self._num_labels

        b_loss = self.Theta[loss_weights_idx: loss_weights_idx + self._num_labels]
        W_loss = self.Theta[
                 loss_weights_idx + self._num_labels: loss_weights_idx + self._num_labels + self._sample_size * self._num_labels]
        W_loss = W_loss.reshape(self._sample_size, self._num_labels)

        grad_theta = np.zeros(self.Theta.shape)

        grad_w_loss = self.grad_last_layer(X_layer_out[:, :, self._num_layers], W_loss, b_loss)
        grad_theta[loss_weights_idx: loss_weights_end_loc] = grad_w_loss

        grad_x_loss = sm.grad_x(X_layer_out[:, :, self._num_layers], W_loss, b_loss, self.C)

        for k in range(self._num_layers - 1, -1, -1):
            W, b = self.get_layer_weights(k, self._sample_size)
            grad_x_loss = W * grad_x_loss

            curr_x = X_layer_out[:, :, k]

            grad_w_layer = th.grad_tanh(curr_x, W, b)

            vl = grad_x_loss * grad_w_layer
            der_ce_b = np.mean(vl, axis=1)
            der_ce_w = vl * np.transpose(curr_x)
            grad_theta[k * self._theta_layer_size: (k + 1) * self._theta_layer_size] = np.hstack(
                (der_ce_b, der_ce_w.flatten()))

        return grad_theta
Example #4
def MultiClass(W1, W2, X, D):
    alpha = 0.9
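    # two-layer net: sigmoid hidden layer + softmax output, trained one sample at a time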

    N = 5
    for k in range(N):
        x = np.reshape(X[:, :, k], (25, 1))
        d = D[k, :].T

        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)
        v = np.matmul(W2, y1)
        y = Softmax(v)

        e = d - y
        delta = e

        e1 = np.matmul(W2.T, delta)
        delta1 = y1 * (1 - y1) * e1

        dW1 = alpha * delta1 * x.T
        W1 = W1 + dW1

        dW2 = alpha * delta * y1.T
        W2 = W2 + dW2

    return W1, W2
Example #5
def RealMultiClass():
    W1, W2 = TestMultiClass()

    X = np.zeros((5, 5, 5))
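    # five slightly corrupted 5x5 test images (variants of the training digits)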

    X[:, :, 0] = [[0, 0, 1, 1, 0], [0, 0, 1, 1, 0], [0, 1, 0, 1, 0],
                  [0, 0, 0, 1, 0], [0, 1, 1, 1, 0]]

    X[:, :, 1] = [[1, 1, 1, 1, 0], [0, 0, 0, 0, 1], [0, 1, 1, 1, 0],
                  [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]]

    X[:, :, 2] = [[1, 1, 1, 1, 0], [0, 0, 0, 0, 1], [0, 1, 1, 1, 0],
                  [1, 0, 0, 0, 1], [1, 1, 1, 1, 0]]

    X[:, :, 3] = [[0, 1, 1, 1, 0], [0, 1, 0, 0, 0], [0, 1, 1, 1, 0],
                  [0, 0, 0, 1, 0], [0, 1, 1, 1, 0]]

    X[:, :, 4] = [[0, 1, 1, 1, 1], [0, 1, 0, 0, 0], [0, 1, 1, 1, 0],
                  [0, 0, 0, 1, 0], [1, 1, 1, 1, 0]]

    N = 5
    for k in range(N):
        x = np.reshape(X[:, :, k], (25, 1))
        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)
        v = np.matmul(W2, y1)
        y = Softmax(v)

        print("N = {}: ".format(k + 1))
        print(y)
Example #6
    def do_POST(self):
        try:
            global path
            os.chdir(path)
            content_len = int(self.headers['content-length'])
            post_body = self.rfile.read(content_len)
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            # body is expected to look like b'data=<url-encoded JSON>'
            if post_body[0:5] == b'data=':
                j_data = urllib.parse.unquote(post_body[5:].decode('utf-8'))

                # Method 1
                import Softmax
                import CNN
                data = json.loads(j_data)
                rate, code = Softmax.predic(data)
                rate_CNN, code_CNN = CNN.predic(data)
                out_text = '["%f","%f","%f","%s","%s","%s","%f","%f","%f","%s","%s","%s"]' % (
                    rate_CNN[0], rate_CNN[1], rate_CNN[2], code_CNN[0], code_CNN[1], code_CNN[2],
                    rate[0], rate[1], rate[2], code[0], code[1], code[2])
                self.wfile.write(out_text.encode(encoding='utf_8', errors='strict'))
            else:
                res = 'error'
                self.wfile.write(res.encode(encoding='utf_8', errors='strict'))
        except IOError:
            self.send_error(404)
Example #7
    def grad_last_layer(self, x_last_layer, W, b):
        o = sm.softmax(x_last_layer, W, b)

        der_c_e = np.transpose(self.C) - np.transpose(o)
        der_b = np.mean(der_c_e, axis=1)
        der_w = (x_last_layer * der_c_e) / self._num_samples

        return np.concatenate((der_b, der_w.flatten()))
Example #8
def DeepDropout(W1, W2, W3, W4, X, D):
    alpha = 0.01
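    # deep sigmoid net; each hidden activation is multiplied elementwise by a
    # dropout mask (see the sketch after this function)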

    N = 5
    for k in range(N):
        x = np.reshape(X[:, :, k], (25, 1))

        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)
        y1 = y1 * Dropout(y1, 0.2)

        v2 = np.matmul(W2, y1)
        y2 = Sigmoid(v2)
        y2 = y2 * Dropout(y2, 0.2)

        v3 = np.matmul(W3, y2)
        y3 = Sigmoid(v3)
        y3 = y3 * Dropout(y3, 0.2)

        v = np.matmul(W4, y3)
        y = Softmax(v)

        d = D[k, :].T
        e = d - y
        delta = e

        e3 = np.matmul(W4.T, delta)
        delta3 = y3 * (1 - y3) * e3

        e2 = np.matmul(W3.T, delta3)
        delta2 = y2 * (1 - y2) * e2

        e1 = np.matmul(W2.T, delta2)
        delta1 = y1 * (1 - y1) * e1

        dW4 = alpha * delta * y3.T
        W4 = W4 + dW4

        dW3 = alpha * delta3 * y2.T
        W3 = W3 + dW3

        dW2 = alpha * delta2 * y1.T
        W2 = W2 + dW2

        dW1 = alpha * delta1 * x.T
        W1 = W1 + dW1

    return W1, W2, W3, W4
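Dropout is not defined in this listing; the calls above imply it returns a random mask with the same shape as its input. A minimal sketch, assuming inverted dropout (kept units rescaled by 1/(1 - ratio)):

import numpy as np

def Dropout(y, ratio):
    # Bernoulli mask, same shape as y; kept entries are scaled by
    # 1/(1 - ratio) so activations keep their expected magnitude
    return (np.random.rand(*y.shape) > ratio) / (1.0 - ratio)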
Example #9
def TestDeepDropout():
    X = np.zeros((5, 5, 5))

    X[:, :, 0] = [[0, 1, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0],
                  [0, 0, 1, 0, 0], [0, 1, 1, 1, 0]]

    X[:, :, 1] = [[1, 1, 1, 1, 0], [0, 0, 0, 0, 1], [0, 1, 1, 1, 0],
                  [1, 0, 0, 0, 0], [1, 1, 1, 1, 1]]

    X[:, :, 2] = [[1, 1, 1, 1, 0], [0, 0, 0, 0, 1], [0, 1, 1, 1, 0],
                  [0, 0, 0, 0, 1], [1, 1, 1, 1, 0]]

    X[:, :, 3] = [[0, 0, 0, 1, 0], [0, 0, 1, 1, 0], [0, 1, 0, 1, 0],
                  [1, 1, 1, 1, 1], [0, 0, 0, 1, 0]]

    X[:, :, 4] = [[1, 1, 1, 1, 1], [1, 0, 0, 0, 0], [1, 1, 1, 1, 0],
                  [0, 0, 0, 0, 1], [1, 1, 1, 1, 0]]

    D = np.array([[[1, 0, 0, 0, 0]], [[0, 1, 0, 0, 0]], [[0, 0, 1, 0, 0]],
                  [[0, 0, 0, 1, 0]], [[0, 0, 0, 0, 1]]])

    W1 = 2 * np.random.random((20, 25)) - 1
    W2 = 2 * np.random.random((20, 20)) - 1
    W3 = 2 * np.random.random((20, 20)) - 1
    W4 = 2 * np.random.random((5, 20)) - 1

    for _epoch in range(20000):
        W1, W2, W3, W4 = DeepDropout(W1, W2, W3, W4, X, D)

    N = 5
    for k in range(N):
        x = np.reshape(X[:, :, k], (25, 1))

        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)

        v2 = np.matmul(W2, y1)
        y2 = Sigmoid(v2)

        v3 = np.matmul(W3, y2)
        y3 = Sigmoid(v3)

        v = np.matmul(W4, y3)
        y = Softmax(v)

        print("Y = ", k + 1, ": ")
        print(y)
Example #10
def DeepReLU(W1, W2, W3, W4, X, D):
    alpha = 0.01
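    # same architecture with ReLU activations; (v > 0) below is the ReLU
    # derivative used in backprop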

    N = 5
    for k in range(N):
        x = np.reshape(X[:, :, k], (25, 1))

        v1 = np.matmul(W1, x)
        y1 = ReLU(v1)

        v2 = np.matmul(W2, y1)
        y2 = ReLU(v2)

        v3 = np.matmul(W3, y2)
        y3 = ReLU(v3)

        v = np.matmul(W4, y3)
        y = Softmax(v)

        d = D[k, :].T
        e = d - y
        delta = e

        e3 = np.matmul(W4.T, delta)
        delta3 = (v3 > 0) * e3

        e2 = np.matmul(W3.T, delta3)
        delta2 = (v2 > 0) * e2

        e1 = np.matmul(W2.T, delta2)
        delta1 = (v1 > 0) * e1

        dW4 = alpha * delta * y3.T
        W4 = W4 + dW4

        dW3 = alpha * delta3 * y2.T
        W3 = W3 + dW3

        dW2 = alpha * delta2 * y1.T
        W2 = W2 + dW2

        dW1 = alpha * delta1 * x.T
        W1 = W1 + dW1

    return W1, W2, W3, W4
Example #11
def TestMnistConv():
    # Learn
    #
    Images, Labels = LoadMnistData('MNIST\\t10k-images-idx3-ubyte.gz',
                                   'MNIST\\t10k-labels-idx1-ubyte.gz')
    Images = np.divide(Images, 255)

    W1 = 1e-2 * np.random.randn(9, 9, 20)
    W5 = np.random.uniform(-1, 1,
                           (100, 2000)) * np.sqrt(6) / np.sqrt(360 + 2000)
    Wo = np.random.uniform(-1, 1, (10, 100)) * np.sqrt(6) / np.sqrt(10 + 100)
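    # Glorot-style uniform initialization: scale sqrt(6) / sqrt(fan_in + fan_out)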

    X = Images[0:8000, :, :]
    D = Labels[0:8000]

    for _epoch in range(3):
        print(_epoch)
        W1, W5, Wo = MnistConv(W1, W5, Wo, X, D)

    # Test
    #
    X = Images[8000:10000, :, :]
    D = Labels[8000:10000]

    acc = 0
    N = len(D)
    for k in range(N):
        x = X[k, :, :]

        y1 = Conv(x, W1)
        y2 = ReLU(y1)
        y3 = Pool(y2)
        y4 = np.reshape(y3, (-1, 1))
        v5 = np.matmul(W5, y4)
        y5 = ReLU(v5)
        v = np.matmul(Wo, y5)
        y = Softmax(v)

        i = np.argmax(y)
        if i == D[k][0]:
            acc = acc + 1

    acc = acc / N
    print("Accuracy is : ", acc)
Example #12
def TestMultiClass():
    X = np.zeros((5, 5, 5))
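    # five 5x5 binary images, one per class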

    X[:, :, 0] = [[0, 1, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0],
                  [0, 0, 1, 0, 0], [0, 1, 1, 1, 0]]

    X[:, :, 1] = [[1, 1, 1, 1, 0], [0, 0, 0, 0, 1], [0, 1, 1, 1, 0],
                  [1, 0, 0, 0, 0], [1, 1, 1, 1, 1]]

    X[:, :, 2] = [[1, 1, 1, 1, 0], [0, 0, 0, 0, 1], [0, 1, 1, 1, 0],
                  [0, 0, 0, 0, 1], [1, 1, 1, 1, 0]]

    X[:, :, 3] = [[0, 0, 0, 1, 0], [0, 0, 1, 1, 0], [0, 1, 0, 1, 0],
                  [1, 1, 1, 1, 1], [0, 0, 0, 1, 0]]

    X[:, :, 4] = [[1, 1, 1, 1, 1], [1, 0, 0, 0, 0], [1, 1, 1, 1, 0],
                  [0, 0, 0, 0, 1], [1, 1, 1, 1, 0]]

    D = np.array([[[1, 0, 0, 0, 0]], [[0, 1, 0, 0, 0]], [[0, 0, 1, 0, 0]],
                  [[0, 0, 0, 1, 0]], [[0, 0, 0, 0, 1]]])

    W1 = 2 * np.random.random((50, 25)) - 1
    W2 = 2 * np.random.random((5, 50)) - 1

    for _epoch in range(10000):
        W1, W2 = MultiClass(W1, W2, X, D)

    N = 5
    for k in range(N):
        x = np.reshape(X[:, :, k], (25, 1))
        v1 = np.matmul(W1, x)
        y1 = Sigmoid(v1)
        v = np.matmul(W2, y1)
        y = Softmax(v)

        print("Y = {}: ".format(k + 1))
        print(y)

    return W1, W2
Example #13
def CNN(W1, W5, Wo, x_train, ans_train):

    # weights are updated once per mini-batch of batchSize images
    batchSize = 100
    #batchSize = 50
    totalToTest = x_train.shape[0]
    numberOfBatches = int(totalToTest / batchSize)
    learningRate = .01  #alpha
    beta = .90  #momentum

    batchList2 = np.arange(0, totalToTest, batchSize)

    mm1 = np.zeros_like(W1)
    mm5 = np.zeros_like(W5)
    mm0 = np.zeros_like(Wo)

    print('Number of Batches: ', numberOfBatches)

    for batch in range(len(batchList2)):
        dW1 = np.zeros_like(W1)
        dW5 = np.zeros_like(W5)
        dWo = np.zeros_like(Wo)

        begin = batchList2[batch]
        print('Batch number: ', batch)
        for k in range(begin, begin + batchSize):

            image = x_train[k, :, :]  # input: 28x28
            ConvOut1 = Convv2(image, W1)  # in: 28x28 image, 9x9x20 kernels; out: 20x20x20
            ReLuOut2 = ReLU(ConvOut1)  # in: 20x20x20; out: 20x20x20
            PoolOut3 = Pool(ReLuOut2)  # in: 20x20x20; out: 10x10x20
            FlattenPool4 = np.reshape(PoolOut3, (-1, 1))  # "Flatten": 10x10x20 -> 2000x1
            Dense = np.matmul(W5, FlattenPool4)  # "Dense": 100x2000 @ 2000x1 -> 100x1
            ReLuOut5 = ReLU(Dense)  # in: 100x1; out: 100x1
            x = np.matmul(Wo, ReLuOut5)  # 10x100 @ 100x1 -> 10x1
            ans = Softmax(x)

            # one-hot encoding
            output = np.zeros((10, 1))
            output[ans_train[k]][0] = 1

            #####__Back-Prop__#######

            # error: one-hot target minus the network's 10x1 softmax output
            e = output - ans
            delta = e
            # propagate the error back to the dims of ReLuOut5:
            # Wo.T (100x10) @ delta (10x1) -> 100x1
            e5 = np.matmul(Wo.T, delta)

            delta5 = (ReLuOut5 > 0) * e5  # ReLU derivative gates the error
            # (ReLuOut5 > 0) is 1 for nodes that were active in the forward
            # pass and 0 otherwise; multiplying by e5 routes the error only
            # through the active nodes
            e4 = np.matmul(W5.T, delta5)

            e3 = np.reshape(e4, PoolOut3.shape)  # undo the flatten done after the pooling layer

            e2 = np.zeros_like(ReLuOut2)  # shape of the Convv layer output
            W3 = np.ones_like(ReLuOut2) / (2 * 2)  # 20x20x20 mean-pooling weights

            for c in range(e2.shape[2]):
                # takes a 1x1 in e3 and copies it into a 2x2
                # thereby expanding a 10x10x20 back into a 20x20x20
                # then multiplies each pixel by 1/4 (W3) 20x20x20 * 20x20x20
                e2[:, :, c] = np.kron(e3[:, :, c], np.ones(
                    (2, 2))) * W3[:, :, c]

            delta2 = (ReLuOut2 > 0) * e2

            delta1_x = np.zeros_like(W1)

            for c in range(20):
                delta1_x[:, :, c] = sp.convolve2d(image[:, :],
                                                  np.rot90(delta2[:, :, c], 2),
                                                  'valid')

            dW1 = dW1 + delta1_x
            dW5 = dW5 + np.matmul(delta5, FlattenPool4.T)
            dWo = dWo + np.matmul(delta, ReLuOut5.T)

        # average the accumulated gradients over the batch and apply one
        # momentum update per batch
        dW1 = dW1 / batchSize
        dW5 = dW5 / batchSize
        dWo = dWo / batchSize

        mm1 = learningRate * dW1 + beta * mm1
        W1 = W1 + mm1

        mm5 = learningRate * dW5 + beta * mm5
        W5 = W5 + mm5

        mm0 = learningRate * dWo + beta * mm0
        Wo = Wo + mm0

    return W1, W5, Wo
Example #14
import BasicMaths
import Bayes
import BIQUAD
import ComplexMaths
import Controller
import Convolutions
import Decimate
import Distance
import FastMath
import FIR
import Interpolate
import Matrix
import Softmax
import Stats
import Support
import SVM
import Transform

BasicMaths.generatePatterns()
Bayes.generatePatterns()
BIQUAD.generatePatterns()
ComplexMaths.generatePatterns()
Controller.generatePatterns()
Convolutions.generatePatterns()
Decimate.generatePatterns()
Distance.generatePatterns()
FastMath.generatePatterns()
FIR.generatePatterns()
Interpolate.generatePatterns()
Matrix.generatePatterns()
Softmax.generatePatterns()
Stats.generatePatterns()
Support.generatePatterns()
SVM.generatePatterns()
Transform.generatePatterns()
Example #15
    def grad_x_loss(self, X, W, b):
        o = sm.softmax(X, W, b)
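        # cross-entropy/softmax gradient w.r.t. the layer input:
        # predicted probabilities minus the one-hot labels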

        return o - self.C
Example #17
    def predict(self, x):
        output = Softmax()
        layers = self.forward_propogation(x)
        return [np.argmax(output.predict(layer.mulya)) for layer in layers]
Example #18
def MnistConv(W1, W5, Wo, X, D):
    alpha = 0.01
    beta = 0.95
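    # mini-batch gradient descent with momentum: alpha is the learning rate,
    # beta the momentum coefficient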

    momentum1 = np.zeros_like(W1)
    momentum5 = np.zeros_like(W5)
    momentumo = np.zeros_like(Wo)

    N = len(D)

    bsize = 100
    blist = np.arange(0, N, bsize)

    for batch in range(len(blist)):
        dW1 = np.zeros_like(W1)
        dW5 = np.zeros_like(W5)
        dWo = np.zeros_like(Wo)

        begin = blist[batch]

        for k in range(begin, begin + bsize):
            # Forward pass = inference
            x = X[k, :, :]
            y1 = Conv(x, W1)
            y2 = ReLU(y1)
            y3 = Pool(y2)
            y4 = np.reshape(y3, (-1, 1))
            v5 = np.matmul(W5, y4)
            y5 = ReLU(v5)
            v = np.matmul(Wo, y5)
            y = Softmax(v)

            # one-hot encoding
            d = np.zeros((10, 1))
            d[D[k][0]][0] = 1

            # Backpropagation
            e = d - y
            delta = e

            e5 = np.matmul(Wo.T, delta)  # Hidden(ReLU)
            delta5 = (y5 > 0) * e5

            e4 = np.matmul(W5.T, delta5)  # Pooling layer

            e3 = np.reshape(e4, y3.shape)

            e2 = np.zeros_like(y2)  # pooling
            W3 = np.ones_like(y2) / (2 * 2)
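            # undo 2x2 mean pooling: expand each error entry with np.kron
            # and weight it by 1/4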
            for c in range(20):
                e2[:, :, c] = np.kron(e3[:, :, c], np.ones(
                    (2, 2))) * W3[:, :, c]

            delta2 = (y2 > 0) * e2

            delta1_x = np.zeros_like(W1)
            for c in range(20):
                delta1_x[:, :,
                         c] = signal.convolve2d(x[:, :],
                                                np.rot90(delta2[:, :, c], 2),
                                                'valid')

            dW1 = dW1 + delta1_x
            dW5 = dW5 + np.matmul(delta5, y4.T)
            dWo = dWo + np.matmul(delta, y5.T)

        dW1 = dW1 / bsize
        dW5 = dW5 / bsize
        dWo = dWo / bsize

        momentum1 = alpha * dW1 + beta * momentum1
        W1 = W1 + momentum1

        momentum5 = alpha * dW5 + beta * momentum5
        W5 = W5 + momentum5

        momentumo = alpha * dWo + beta * momentumo
        Wo = Wo + momentumo

    return W1, W5, Wo
Example #19
    def loss(self, x_batch, y_batch, reg):
        return Softmax.softmax_loss_vectorized(self.w, x_batch, y_batch, reg)
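For context: Softmax.softmax_loss_vectorized is not shown in this listing; a minimal sketch of the CS231n-style (W, X, y, reg) -> (loss, grad) contract the call above assumes:

import numpy as np

def softmax_loss_vectorized(W, X, y, reg):
    # X: (N, D) batch, y: (N,) integer labels, W: (D, C) weights
    scores = X.dot(W)
    scores -= scores.max(axis=1, keepdims=True)  # numerical stability
    probs = np.exp(scores)
    probs /= probs.sum(axis=1, keepdims=True)
    N = X.shape[0]
    loss = -np.log(probs[np.arange(N), y]).mean() + reg * np.sum(W * W)
    dscores = probs
    dscores[np.arange(N), y] -= 1
    grad = X.T.dot(dscores) / N + 2 * reg * W
    return loss, grad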
Example #20
    #     sa, tt = subAlign(S, T, Xs, Xt_noisy)
    #
    #     # softmax part; epsilon is 0.5
    #     all_theta = multi_class.one_vs_all(sa, Ys, 10, 1, 0.5)
    #
    #     # prediction
    #     y_pred = multi_class.predict_all(tt, all_theta)
    #
    #     # accuracy
    #     acc7 = sklearn.metrics.accuracy_score(y_pred, Yt)
    #
    #     worksheet.write(i, 1, acc7)

    for i in range(0, 20):
        # the noise-adding part follows
        predict_list = Softmax.begging_by_tree3(S, T, Ys, 10, 600, Yt)
        predict_label = calc_error(predict_list)
        predict_label = tuple(predict_label)
        acc8 = sklearn.metrics.accuracy_score(Yt, predict_label[0])
        worksheet.write(i, 1, acc8)

    #
    # # second group
    # # D to C
    # src, tar = 'data/webcam_SURF_L10', 'data/Caltech10_SURF_L10'
    # src_domain, tar_domain = scipy.io.loadmat(src), scipy.io.loadmat(tar)
    # S, Ys = src_domain['fts'], src_domain['labels']
    # T, Yt = tar_domain['fts'], tar_domain['labels']
    #
    # # data preprocessing
    # S = np.mat(S)  # loaded as an array; convert it to matrix form
Example #21
    with tensorflow.Session(graph=graph) as sessao:

        # Creating the JPEG decoders
        tensor_dados_jpeg, tensor_decodificador_imagem = Adiciona_Decodificadores_jpeg(
            model_info)

        # Create the bottleneck cache
        if sys.argv[1] == 'rebuild_bottleneck':
            Bottleneck.Refaz_Todo_Bottleneck(sessao, lista_imagens,
                                             tensor_dados_jpeg,
                                             tensor_decodificador_imagem,
                                             tensor_entrada_redimensionado,
                                             bottleneck_tensor)

        # Create the Softmax layer
        entropia_cruzada_mean, entradas_bottleneck, ground_truth_input, tensor_final, evaluation_step, prediction = Softmax.Cria_Softmax(
            sessao, lista_imagens, bottleneck_tensor, model_info)

        # Save the summaries
        merged = tensorflow.summary.merge_all()
        escritor_treinamento = tensorflow.summary.FileWriter(
            'summaries', sessao.graph)

        init = tensorflow.global_variables_initializer()
        sessao.run(init)

        # Run the retraining.
        if sys.argv[1] == 'retrain':
            sumarios_treinamento = Treinamento.Retreinamento_Por_BatchSize(
                sessao, lista_imagens, ground_truths, entradas_bottleneck,
                ground_truth_input, tensor_dados_jpeg,
                tensor_decodificador_imagem, tensor_entrada_redimensionado,