Example #1
    def stochasticgradientdescent(self,
                                  training_data,
                                  epochs,
                                  mini_batch_size,
                                  learning_rate,
                                  test_data=None):
        # training data is given as a list of tuples (x, y), i.e. training input and desired output
        if test_data:
            n_test = len(test_data)
        n = len(training_data)
        # in each epoch (one full pass of the data through the model)
        for j in xrange(epochs):
            # randomly shuffle the training data
            random.shuffle(training_data)
            # partition it into mini-batches of the appropriate size
            mini_batches = [
                training_data[k:k + mini_batch_size]
                for k in xrange(0, n, mini_batch_size)
            ]
            for mini_batch in mini_batches:
                # apply a single gradient-descent step for each mini-batch
                self.update_mini_batch(mini_batch, learning_rate)
            if test_data:
                print "Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test)
            else:
                print "Epoch {0} complete".format(j)
Example #2
def least_squares_cg(Cui, X, Y, lambda_val, cg_steps=3):
    users, features = X.shape

    YtY = Y.T.dot(Y) + lambda_val * np.eye(features)

    for u in xrange(users):

        x = X[u]
        r = -YtY.dot(x)

        for i, confidence in nonzeros(Cui, u):
            r += (confidence - (confidence - 1) * Y[i].dot(x)) * Y[i]

        p = r.copy()
        rsold = r.dot(r)

        for it in xrange(cg_steps):
            Ap = YtY.dot(p)
            for i, confidence in nonzeros(Cui, u):
                Ap += (confidence - 1) * Y[i].dot(p) * Y[i]

            alpha = rsold / p.dot(Ap)
            x += alpha * p
            r -= alpha * Ap

            rsnew = r.dot(r)
            p = r + (rsnew / rsold) * p
            rsold = rsnew

        X[u] = x
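The nonzeros helper used by least_squares_cg is not shown on this page. For a scipy CSR matrix it is commonly written as below (an assumption about the original code):

def nonzeros(m, row):
    # yield (column, value) pairs for the nonzero entries of one CSR row
    for index in xrange(m.indptr[row], m.indptr[row + 1]):
        yield m.indices[index], m.data[index]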
Example #3
    def recalculate_center(self):
        # TODO 1: implement computing the cluster center
        # the cluster center is computed as the mean of all data points in the cluster
        new_center = [0 for i in xrange(len(self.center))]
        for d in self.data:
            for i in xrange(len(d)):
                new_center[i] += d[i]

        n = len(self.data)
        if n != 0:
            self.center = [x / n for x in new_center]
Example #4
def prediction_process(prediction_array):
    success = 0
    for i in xrange(len(prediction_array)):
        if predict(prediction_array[i][0]) == test_data_y[i][0]:
            success += 1

    return success
Example #5
    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * \
            sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below is used a little
        # differently from the notation in Chapter 2 of the book.  Here,
        # l = 1 means the last layer of neurons, l = 2 is the
        # second-last layer, and so on.  It's a renumbering of the
        # scheme in the book, used here to take advantage of the fact
        # that Python can use negative indices in lists.
        for l in xrange(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return nabla_b, nabla_w
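The sigmoid and sigmoid_prime helpers referenced above are not included in this example; the usual definitions (a sketch, assuming numpy is imported as np) are:

def sigmoid(z):
    # the logistic function, applied elementwise
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_prime(z):
    # derivative of the logistic function
    return sigmoid(z) * (1.0 - sigmoid(z))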
Example #6
def all_process(x, w, y, nb_of_iterations, learning_rate):
    global min_error
    global syn1
    for iter in xrange(nb_of_iterations):
        # forward propagation
        l0 = x
        l1 = nn(l0, w)
        # dimension l1 : (1,5)
        print("output ********************************\n", l1)
        print("***************************************")
        # how much did we miss?
        l1_error = loss(l1, y)

        print(
            "average iteration error *************************************** \n",
            l1_error)
        print("***************************************")

        # multiply how much we missed by the
        # slope of the sigmoid at the values in l1
        # l1_delta = l1_error * nonlin(l1,True)
        # keep the weight matrix with the lowest error seen so far in syn1
        if l1_error < min_error:
            print("found a new minimum error \n")
            min_error = l1_error
            syn1 = w.copy()

        l1_delta = delta_w(w, l0, y, learning_rate)
        # dimension l1_delta (1,5)
        print("delta *************************************** \n", l1_delta)
        print("***************************************")
        print("****************************************MATRIX READJUSTMENT \n")
        w += np.dot(l0.T, l1_delta)
Example #7
def all_process(x, w, y, nb_of_iterations, learning_rate):
    global min_error
    global syn1
    for iter in xrange(nb_of_iterations):
        # forward propagation
        l0 = x
        l1 = nn(l0, w)
        # dimension l1 : (1,5)
        print("output ********************************\n", l1)
        print("***************************************")
        # how much did we miss?
        l1_error = loss(l1, y)

        print(
            "average iteration error *************************************** \n",
            l1_error)
        print("***************************************")

        # look for the matrix with the minimal error and keep it in syn1
        if l1_error < min_error:
            print("found a new minimum error \n")
            min_error = l1_error
            syn1 = w.copy()

        l1_delta = delta_w(w, l0, y, learning_rate)
        # dimension l1_delta (1,5)
        print("delta *************************************** \n", l1_delta)
        print("***************************************")
        print("****************************************MATRIX READJUSTMENT \n")
        w += np.dot(l0.T, l1_delta)
Example #8
    def xls_to_csv(self):
        num_user = 0

        x = xlrd.open_workbook('../data/jester-data-1/jester-data-1.xls')
        x1 = x.sheet_by_name('jester-data-1-new')

        list_item_seem_by_user_test = []
        list_item_seem_by_user_train = []
        for rownum in xrange(x1.nrows):  # x1.nrows is the total number of rows

            self.check = 0
            for idx, val in enumerate(x1.row_values(rownum)):
                if idx == 0 and val >= 50:
                    break
                # take users from idx = 1 onward that have a rating (!= 99)
                if idx != 0 and val != 99 and num_user <= 50:
                    list_item_seem_by_user_train.append([rownum, idx, val])
                    self.check = 1
                elif idx != 0 and val != 99:
                    list_item_seem_by_user_test.append([rownum, idx, val])

            if (self.check == 1):
                num_user += 1

        WriteFile(out_test_file, list_item_seem_by_user_test).write()
        WriteFile(out_train_file, list_item_seem_by_user_train).write()
Example #9
    def minimumDeleteSum(self, s1, s2):
        dp = [[0] * (len(s2) + 1) for _ in xrange(len(s1) + 1)]

        for i in xrange(len(s1) - 1, -1, -1):
            dp[i][len(s2)] = dp[i + 1][len(s2)] + ord(s1[i])
        for j in xrange(len(s2) - 1, -1, -1):
            dp[len(s1)][j] = dp[len(s1)][j + 1] + ord(s2[j])

        for i in xrange(len(s1) - 1, -1, -1):
            for j in xrange(len(s2) - 1, -1, -1):
                if s1[i] == s2[j]:
                    dp[i][j] = dp[i + 1][j + 1]
                else:
                    dp[i][j] = min(dp[i + 1][j] + ord(s1[i]),
                                   dp[i][j + 1] + ord(s2[j]))

        return dp[0][0]
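A quick worked example of the DP above (this is LeetCode 712; the enclosing class name Solution is an assumption): for "sea" and "eat", deleting "s" (115) and "t" (116) gives the minimum ASCII delete sum.

print(Solution().minimumDeleteSum("sea", "eat"))  # -> 231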
Example #10
def getColorsInPlate(source):
    percentage, colors = find_histogram(source)
    bgIndex, fgIndex = heapq.nlargest(2,
                                      xrange(len(percentage)),
                                      key=percentage.__getitem__)
    bg = colors[bgIndex]
    fg = colors[fgIndex]
    return [bg, fg]
Example #11
    def minFallingPathSum2(self, A):
        '''idea from the reference solution'''
        while len(A) >= 2:
            row = A.pop()
            # fold the popped row into the row above: each entry adds the
            # min over its neighboring window in the row below
            for i in xrange(len(row)):
                A[-1][i] += min(row[max(0, i - 1):min(len(row), i + 2)])
        return min(A[0])
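As a check (this is LeetCode 931; the class name Solution is again an assumption), the minimum falling path 2 -> 5 -> 6... actually 1 -> 5 -> 7 gives 13:

print(Solution().minFallingPathSum2([[2, 1, 3], [6, 5, 4], [7, 8, 9]]))  # -> 13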
Example #12
def sBoxLayer_dec(state):
    """Inverse SBox function for decryption

    Input:  64-bit integer
    Output: 64-bit integer"""
    output = 0
    for i in xrange(16):
        output += Sbox_inv[(state >> (i * 4)) & 0xF] << (i * 4)
    return output
Example #13
def prediction_process(test_data_x, test_data_y, w, b):
    success = 0

    # Prediction phase
    for i in xrange(len(test_data_x)):
        prediction = predict(test_data_x[i], w, b)
        if test_data_y[i] == prediction:
            success += 1
    return success
Example #14
def pLayer_dec(state):
    """Permutation layer for decryption

    Input:  64-bit integer
    Output: 64-bit integer"""
    output = 0
    for i in xrange(64):
        output += ((state >> i) & 0x01) << PBox_inv[i]
    return output
Example #15
    def fit(self, data, normalize=True):
        self.data = data  # list of N-dimensional data points
        self.klaster_indeksi = [0] * len(self.data)
        # print(self.klaster_indeksi)
        # TODO 4: normalize the data before applying k-means
        if normalize:
            self.data = self.normalize_data(self.data)

        # TODO 1: implement the K-means clustering algorithm
        # when the algorithm finishes, self.clusters should contain "n_clusters" clusters (of type Cluster)
        dimensions = len(self.data[0])

        # create N random points and set them as the cluster centers
        for i in xrange(self.n_clusters):
            point = [random.random() for x in xrange(dimensions)]
            self.clusters.append(Cluster(point))

        iter_no = 0
        not_moves = False
        while iter_no <= self.max_iter and (
                not not_moves):  # as long as not_moves is False
            # empty each cluster's data, since after every iteration the members of a group/cluster may change
            for cluster in self.clusters:
                cluster.data = []

            # iterate through the N-dimensional data points
            for i, d in enumerate(self.data, start=0):
                # index of the cluster the point belongs to
                cluster_index = self.predict(d)
                self.klaster_indeksi[i] = cluster_index
                # add the point to the cluster so its center can be recomputed
                self.clusters[cluster_index].data.append(d)

            # TODO (homework): extend K-means to stop if the cluster centers did not move during an iteration
            # recompute the centers
            not_moves = True
            for cluster in self.clusters:
                old_center = copy.deepcopy(cluster.center)
                cluster.recalculate_center()

                not_moves = not_moves and (cluster.center == old_center)

            iter_no += 1
Example #16
def trans_normal(array):
    max_cols = array.max(axis=0)
    min_cols = array.min(axis=0)
    data_shape = array.shape
    data_rows = data_shape[0]
    data_cols = data_shape[1]
    normal_array = np.empty((data_rows, data_cols))
    for i in xrange(data_cols):
        normal_array[:, i] = (array[:, i] - min_cols[i]) / (max_cols[i] - min_cols[i])
    return normal_array
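A usage sketch (assuming numpy is imported as np): each column is min-max scaled into [0, 1].

data = np.array([[1., 10.], [2., 20.], [3., 30.]])
print(trans_normal(data))  # each column becomes [0., 0.5, 1.]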
Example #17
def sBoxLayer(state):
    """SBox function for encryption

    Input:  64-bit integer
    Output: 64-bit integer"""

    output = 0
    for i in xrange(16):
        output += Sbox[(state >> (i * 4)) & 0xF] << (i * 4)
    return output
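Since the decryption counterpart sBoxLayer_dec appears in Example #12, a quick round-trip sanity check ties the two together (a sketch; it assumes both functions and the Sbox/Sbox_inv tables are in scope):

state = 0x0123456789ABCDEF
assert sBoxLayer_dec(sBoxLayer(state)) == state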
Example #18
def prediction_process(test_data_x, test_data_y, w, b):
    success = 0

    # Prediction phase
    for i in xrange(len(test_data_x)):
        prediction = predict(test_data_x[i], w, b)
        if test_data_y[i] == prediction:
            success += 1

    print("Accuracy is: " + str((success / len(test_data_x)) * 100.0) + " %")
    return success
Example #19
    def train(self, inputs, outputs, training_iterations):
        for iteration in xrange(training_iterations):
            # Pass the training set through the network.
            output = self.learn(inputs)

            # Calculate the error
            error = outputs - output

            # Adjust the weights by a factor
            factor = dot(inputs.T, error * self.__sigmoid_derivative(output))
            self.synaptic_weights += factor
Example #20
def training_process(fx, fy, alpha, cycles):
    # the dot in the first element makes numpy create a float array
    w = np.array([0., 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])  # 13 weights for 13 features
    b = 0

    # Training phase
    for iteration in xrange(cycles):
        gradient_b = np.mean(1 * ((sigmoid(fx, w, b)) - fy))
        gradient_w = np.dot((sigmoid(fx, w, b) - fy), fx) * 1 / len(fy)
        b -= alpha * gradient_b
        w -= alpha * gradient_w

    return w, b
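The sigmoid and predict helpers that Examples #13, #18, and #20 rely on are not shown; a minimal sketch consistent with those call sites (the exact names and the 0.5 threshold are assumptions) is:

def sigmoid(x, w, b):
    # logistic function of the affine score x.w + b
    return 1.0 / (1.0 + np.exp(-(np.dot(x, w) + b)))

def predict(x, w, b):
    # classify as 1 when the predicted probability crosses 0.5
    return 1 if sigmoid(x, w, b) >= 0.5 else 0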
Example #21
    def decrypt(self, block):
        """Decrypt 1 block (8 bytes)

        Input:  ciphertext block as raw string
        Output: plaintext block as raw string
        """
        state = string2number(block)
        for i in xrange(self.rounds - 1):
            state = addRoundKey(state, self.roundkeys[-i - 1])
            state = pLayer_dec(state)
            state = sBoxLayer_dec(state)
        decipher = addRoundKey(state, self.roundkeys[0])
        return number2string_N(decipher, 8)
Example #22
def implicit_als(sparse_data,
                 alpha_val=40,
                 iterations=10,
                 lambda_val=0.1,
                 features=10):
    confidence = sparse_data * alpha_val
    user_size, item_size = sparse_data.shape
    X = sparse.csr_matrix(np.random.normal(size=(user_size, features)))
    Y = sparse.csr_matrix(np.random.normal(size=(item_size, features)))
    X_I = sparse.eye(user_size)
    Y_I = sparse.eye(item_size)

    I = sparse.eye(features)
    lI = lambda_val * I
    for i in xrange(iterations):
        print('iteration %d of %d' % (i + 1, iterations))
        yTy = Y.T.dot(Y)
        xTx = X.T.dot(X)
        for u in xrange(user_size):
            u_row = confidence[u, :].toarray()
            p_u = u_row.copy()
            p_u[p_u != 0] = 1.0
            CuI = sparse.diags(u_row, [0])
            Cu = CuI + Y_I
            yT_CuI_y = Y.T.dot(CuI).dot(Y)
            yT_Cu_pu = Y.T.dot(Cu).dot(p_u.T)
            X[u] = spsolve(yTy + yT_CuI_y + lI, yT_Cu_pu)

        # solve for each item's factors (named v to avoid shadowing the outer loop index i)
        for v in xrange(item_size):
            i_row = confidence[:, v].T.toarray()
            p_i = i_row.copy()
            p_i[p_i != 0] = 1.0
            CiI = sparse.diags(i_row, [0])
            Ci = CiI + X_I
            xT_CiI_x = X.T.dot(CiI).dot(X)
            xT_Ci_pi = X.T.dot(Ci).dot(p_i.T)
            Y[v] = spsolve(xTx + xT_CiI_x + lI, xT_Ci_pi)
    return X, Y
Example #23
def implicit_als_cg(Cui, features=20, iterations=20, lambda_val=0.1):
    user_size, item_size = Cui.shape

    X = np.random.rand(user_size, features) * 0.01
    Y = np.random.rand(item_size, features) * 0.01

    Cui, Ciu = Cui.tocsr(), Cui.T.tocsr()

    for iteration in xrange(iterations):
        print 'iteration %d of %d' % (iteration + 1, iterations)
        least_squares_cg(Cui, X, Y, lambda_val)
        least_squares_cg(Ciu, Y, X, lambda_val)

    return sparse.csr_matrix(X), sparse.csr_matrix(Y)
Example #24
    def predict(self, datum):
        # TODO 1: determine which cluster the given data point belongs to
        # a point belongs to the cluster whose center is nearest to it (by Euclidean distance)
        # return the index of that cluster as the result
        min_distance = None
        cluster_index = None

        # iterate through all clusters
        for index in xrange(len(self.clusters)):
            distance = self.euclidean_distance(datum,
                                               self.clusters[index].center)
            if min_distance is None or distance < min_distance:
                cluster_index = index
                min_distance = distance

        return cluster_index
Example #25
def normalizacija(data):
    # mean-std (z-score) normalization
    cols = len(data[0])

    for col in xrange(cols):
        column_data = []
        for row in data:
            column_data.append(row[col])

        mean = numpy.mean(column_data)
        std = numpy.std(column_data)

        for row in data:
            row[col] = (row[col] - mean) / std

    return data
Example #26
    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(NetworkImageNet, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary

        self.stem0 = nn.Sequential(
            nn.Conv2d(3,
                      C // 2,
                      kernel_size=3,
                      stride=2,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(C // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )

        self.stem1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(C),
        )

        C_prev_prev, C_prev, C_curr = C, C, C

        self.cells = nn.ModuleList()
        reduction_prev = True
        for i in xrange(layers):
            if i in [layers // 3, 2 * layers // 3]:
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction,
                        reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
            if i == 2 * layers // 3:
                C_to_auxiliary = C_prev

        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadImageNet(
                C_to_auxiliary, num_classes)
        self.global_pooling = nn.AvgPool2d(7)
        self.classifier = nn.Linear(C_prev, num_classes)
Example #27
def generateRoundkeys128(key, rounds):
    """Generate the roundkeys for a 128-bit key

    Input:
            key:    the key as a 128-bit integer
            rounds: the number of rounds as an integer
    Output: list of 64-bit roundkeys as integers"""
    roundkeys = []
    for i in xrange(1, rounds + 1):  # (K1 ... K32)
        # rawkey: used in comments to show what happens at bitlevel
        roundkeys.append(key >> 64)
        # 1. Shift
        key = ((key & (2**67 - 1)) << 61) + (key >> 67)
        # 2. SBox
        key = ((Sbox[key >> 124] << 124) + (Sbox[(key >> 120) & 0xF] << 120) +
               (key & (2**120 - 1)))
        # 3. Salt
        # rawKey[62:67] ^ i
        key ^= i << 62
    return roundkeys
Example #28
def lcs(X, Y):
    # find the length of the strings
    m = len(X)
    n = len(Y)

    # declaring the array for storing the dp values
    L = [[None] * (n + 1) for i in xrange(m + 1)]
    """Following steps build L[m + 1][n + 1] in bottom up fashion 
    Note: L[i][j] contains length of LCS of X[0..i-1] 
    and Y[0..j-1]"""
    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0 or j == 0:
                L[i][j] = 0
            elif X[i - 1] == Y[j - 1]:
                L[i][j] = L[i - 1][j - 1] + 1
            else:
                L[i][j] = max(L[i - 1][j], L[i][j - 1])

    # L[m][n] contains the length of the LCS of X[0..n-1] & Y[0..m-1]
    return L[m][n]
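For instance, "AGGTAB" and "GXTXAYB" share the longest common subsequence "GTAB", so:

print(lcs("AGGTAB", "GXTXAYB"))  # -> 4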
Example #29
def generateRoundkeys80(key, rounds):
    """Generate the roundkeys for a 80-bit key

    Input:
            key:    the key as a 80-bit integer
            rounds: the number of rounds as an integer
    Output: list of 64-bit roundkeys as integers"""
    roundkeys = []
    for i in xrange(1, rounds + 1):  # (K1 ... K32)
        # rawkey: used in comments to show what happens at bitlevel
        # rawKey[0:64]
        roundkeys.append(
            key >> 16)  # the key is 80 bits, so the round key is its leftmost 64 bits
        # 1. Shift
        # rawKey[19:len(rawKey)]+rawKey[0:19]
        key = ((key & (2**19 - 1)) << 61) + (key >> 19)
        # 2. SBox
        # rawKey[76:80] = S(rawKey[76:80])
        key = (Sbox[key >> 76] << 76) + (key & (2**76 - 1))
        # 3. Salt
        # rawKey[15:20] ^ i
        key ^= i << 15
    return roundkeys
Example #30
    def decrypt(self, block):
        """Decrypt 1 block (8 bytes)

        Input:  ciphertext block as raw string
        Output: plaintext block as raw string
        """
        state = string2number(block)
        for i in xrange(self.rounds - 1):
            state = addRoundKey(state, self.roundkeys[-i - 1])
            state = pLayer_dec(state)
            state = sBoxLayer_dec(state)
        decipher = addRoundKey(state, self.roundkeys[0])
        return number2string_N(decipher, 8)

    def get_block_size(self):
        return 8


#        0   1   2   3   4   5   6   7   8   9   a   b   c   d   e   f
Sbox = [
    0xC, 0x5, 0x6, 0xB, 0x9, 0x0, 0xA, 0xD, 0x3, 0xE, 0xF, 0x8, 0x4, 0x7, 0x1,
    0x2
]
Sbox_inv = [Sbox.index(x) for x in xrange(16)]
PBox = [
    0, 16, 32, 48, 1, 17, 33, 49, 2, 18, 34, 50, 3, 19,
    19,