Example no. 1
from math import sqrt

import numpy as np

import helper


def design_thetas(nodes):

    return_obj = {
        'L': 0,
        'dimensions': [],
        'thetas': [],
        # 'empty_DIJ' : []
    }

    r6 = sqrt(6)
    # thetas[0] is a placeholder so that thetas[l] lines up with theta(l)
    thetas = [np.array([0])]
    # empty_DIJ = [np.array([0])]

    # dimensions for the theta(l) matrices
    dimen_mat = [[y, x + 1] for x, y in zip(nodes, nodes[1:])]
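    # e.g. nodes = [3, 5, 2] -> dimen_mat = [[5, 4], [2, 6]]
    # (rows = units in the next layer, cols = units in the current layer + bias)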
    return_obj['dimensions'] = dimen_mat
    return_obj['L'] = len(dimen_mat)

    for i in dimen_mat:
        epsilon = r6 / sqrt(i[0] + (i[-1] - 1))
        thetas.append(-epsilon + (2 * epsilon) * np.random.rand(i[0], i[-1]))
        # empty_DIJ.append(np.zeros((i[0], i[-1])))

    return_obj['thetas'] = thetas
    # return_obj['empty_DIJ'] = empty_DIJ
    return helper.unpack(return_obj)
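
A minimal usage sketch (assuming helper.unpack simply returns its dict argument unchanged; the helper module is not shown in these examples):

# Sketch only: assumes helper.unpack(d) returns d unchanged.
layout = design_thetas([3, 5, 2])   # 3 inputs, 5 hidden units, 2 outputs
print(layout['L'])                  # 2 weight matrices
print(layout['dimensions'])         # [[5, 4], [2, 6]]
print(layout['thetas'][1].shape)    # (5, 4): theta(1), bias column included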
Example no. 2
def parse(buff):
    mh = MinidumpHeader()
    mh.Signature = buff.read(4).decode()[::-1]
    if mh.Signature != 'PMDM':
        raise MinidumpHeaderSignatureMismatchException(mh.Signature)
    mh.Version = unpack(buff.read(4))
    mh.NumberOfStreams = unpack(buff.read(4))
    mh.StreamDirectoryRva = unpack(buff.read(4))
    mh.CheckSum = unpack(buff.read(4))
    mh.TimeDateStamp = unpack(buff.read(4))
    try:
        mh.Flags = unpack(buff.read(8))
    except Exception:
        raise MinidumpHeaderFlagsException('Could not parse header flags!')
    return mh
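
The unpack helper used throughout the minidump examples is not shown. A plausible minimal stand-in, assuming the little-endian unsigned-integer layout that minidump fields use, might look like this:

import struct

# Hypothetical stand-in for the unpack helper assumed by the parse
# functions: read a little-endian unsigned integer from raw bytes.
def unpack(data):
    if len(data) == 4:
        return struct.unpack('<I', data)[0]
    if len(data) == 8:
        return struct.unpack('<Q', data)[0]
    return int.from_bytes(data, byteorder='little')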
Example no. 3
def parse(buff):
    ms = MINIDUMP_STRING()
    ms.Length = unpack(buff.read(4))
    ms.Buffer = buff.read(ms.Length)
    return ms
Example no. 4
def parse(buff):
    mld = MINIDUMP_LOCATION_DESCRIPTOR64()
    mld.DataSize = unpack(buff.read(8))
    mld.Rva = unpack(buff.read(8))
    return mld
Example no. 5
def parse(buff):
    md = MINIDUMP_DIRECTORY()
    md.StreamType = unpack(buff.read(4))
    md.Location = MINIDUMP_LOCATION_DESCRIPTOR.parse(buff)
    return md
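
Putting the pieces together, a caller might walk the stream directory like this (a sketch only; it assumes the parse functions above are exposed as static methods on their classes and that buff supports seek):

def read_directories(buff):
    # Parse the header, seek to the stream directory, then read one
    # MINIDUMP_DIRECTORY entry per stream.
    header = MinidumpHeader.parse(buff)
    buff.seek(header.StreamDirectoryRva)
    return [MINIDUMP_DIRECTORY.parse(buff)
            for _ in range(header.NumberOfStreams)]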
Example no. 6
import socket

import helper

# AF_PACKET raw socket (Linux only); 0x0800 filters for the IPv4 ethertype
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0800))

print('[*] capturing packets')

while True:
    pkt = s.recvfrom(65565)
    unpack = helper.unpack()

    print('\nEthernet Header')
    for key, value in unpack.eth_h(pkt[0][0:14]).items():
        print('{} : {}'.format(key, value))

    print('\nIP Header')
    for key, value in unpack.ip_h(pkt[0][14:34]).items():
        print('{} : {}'.format(key, value))

    print('\nTCP Header')
    for key, value in unpack.tcp_h(pkt[0][34:54]).items():
        print('{} : {}'.format(key, value))
Example no. 7
def __init__(self, reader):
    self.value = unpack(reader.read(4))
Example no. 8
import numpy as np

import helper


def sigmoid(z):
    # standard logistic activation; assumed here because the original
    # source does not show where sigmoid is defined
    return 1 / (1 + np.exp(-z))


def calc_gradient(thetas, raw_data, lambd=0):

    return_obj = {'Dij': [], 'cost': 0, 'lambda': lambd}

    # shallow-copying the dataset to prevent accidental mutation,
    # and initializing useful params
    dataset = list(raw_data)
    Dij = []
    is_empty = True
    cost = 0
    m = len(dataset)

    # iterating over the dataset, performing
    # forward propagation (FP) and backpropagation (BP)
    for i in dataset:

        # cached activations reused during BP; index 0 is a placeholder
        # so that activation_layers[l] lines up with a(l)
        activation_layers = [[]]

        # initial layer taken from the dataset
        initial_layer = np.matrix([1] + i[0])
        yi = np.matrix(i[-1]).T  # expected output

        # initial activation value set to input data
        current_activation = initial_layer.T

        # adding initial activation a(1) -> input
        activation_layers.append(current_activation)

        # iterating over the layers performing FP
        for theta_L in thetas[1:]:

            # raw, non-activated output z(l+1) = theta(l) * a(l)
            current_raw = theta_L.dot(current_activation)

            # applying the sigmoid before the bias unit is added
            current_activated = sigmoid(current_raw)

            # adding bias unit
            next_layer = np.r_[[[1]], current_activated]
            activation_layers.append(next_layer)

            # for the next iteration after adding bias unit
            current_activation = next_layer

        # removing final layer bias unit
        activation_layers[-1] = activation_layers[-1][1:, :]

        # error associated with the output
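        # (for a sigmoid output with cross-entropy cost this simplifies
        # to delta(L) = a(L) - y)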
        delta_L = activation_layers[-1] - yi

        # these deltas are required every iteration
        delta_layers = [delta_L]

        # setting delta for current iteration
        previous_delta = delta_L

        # L = number of layers, used for theta indexing below
        L = len(activation_layers) - 1

        # accumulating the (unaveraged, unnegated) cross-entropy cost
        cost += np.sum(np.multiply(yi, np.log(activation_layers[-1])) +
                       np.multiply(1 - yi, np.log(1 - activation_layers[-1])))

        # performing backpropagation
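        # BP recurrence, walking from layer L-1 down to layer 2 (the input
        # layer has no delta):
        #   delta(l) = theta(l)[:, 1:].T * delta(l+1) .* a(l) .* (1 - a(l))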
        for idx, l in enumerate(activation_layers[-2:1:-1]):

            # calculating the current layer delta (theta bias column excluded)
            current_delta = thetas[L - (idx + 1)][:, 1:].T.dot(previous_delta)
            active_factor = np.multiply(l[1:, :], 1 - l[1:, :])
            current_delta = np.multiply(current_delta, active_factor)
            # print("D%d" % (L - (idx + 1)), current_delta)

            # appending the delta layer to the list
            delta_layers.append(current_delta)

            # continuing for next iteration
            previous_delta = current_delta

        # trimming the activation list for the partial-derivative products:
        # dropping the index-0 placeholder and the final activation layer,
        # then reversing so the order is a(L-1), a(L-2), ..., a(1),
        # matching delta_layers = delta(L), delta(L-1), ..., delta(2)
        activation_layers = activation_layers[1:-1][::-1]

        # partial derivatives for the current training example:
        # D(l) = delta(l+1) * a(l).T
        current_Dij = [delta_next * np.matrix(activation).T
                       for delta_next, activation in zip(delta_layers, activation_layers)]

        # accumulating the partial derivatives over the training examples;
        # the first example initializes Dij
        if is_empty:
            Dij = current_Dij
            is_empty = False
        else:
            Dij = [a + b for a, b in zip(Dij, current_Dij)]

    # averaging the partials and restoring layer order D(1) ... D(L-1)
    Dij = [(1 / m) * x for x in Dij][::-1]
    cost = (-1 / m) * cost

    if lambd:
        # regularizing the cost:
        # summing theta .^ 2 over all weights, bias column excluded
        regular_cost = (lambd / (2 * m)) * sum(
            np.sum(np.multiply(t[:, 1:], t[:, 1:])) for t in thetas[1:])
        cost += regular_cost

        # regularizing the partial derivatives, leaving the bias column alone
        regular_factor = lambd / m
        regular_matrix = [
            np.hstack((np.zeros((t.shape[0], 1)), regular_factor * t[:, 1:]))
            for t in thetas[1:]
        ]
        Dij = [a + b for a, b in zip(Dij, regular_matrix)]
        # print("R", regular_matrix, "d", Dij, "t", thetas, sep = "\n")

    return_obj['cost'] = cost
    return_obj['Dij'] = Dij
    return helper.unpack(return_obj)
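
A rough training loop built from Examples 1 and 8 (a sketch only: it assumes helper.unpack returns its dict argument unchanged and that the data is a list of [inputs, outputs] pairs):

# Sketch only: assumes helper.unpack(d) returns d unchanged.
net = design_thetas([2, 4, 1])    # 2 inputs, 4 hidden units, 1 output
thetas = net['thetas']
data = [[[0, 0], [0]], [[0, 1], [1]],
        [[1, 0], [1]], [[1, 1], [0]]]  # XOR truth table

alpha = 0.5  # hypothetical learning rate
for _ in range(5000):
    grad = calc_gradient(thetas, data, lambd=0.01)
    # thetas[0] is the placeholder; grad['Dij'] holds D(1) ... D(L-1)
    thetas = [thetas[0]] + [t - alpha * d
                            for t, d in zip(thetas[1:], grad['Dij'])]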