Example 1
0
File: cls.py  Project: eqv/usercorn
    def map_segments(self):
        """Map the loader's segments into the emulator's address space.

        Aligns every segment, merges overlapping/touching ranges (mapping
        overlapping ranges separately would conflict), maps the merged
        ranges, writes each segment's data, then sets up the stack and
        stack pointer.
        """
        # Align each segment, then sort by start address so a single linear
        # pass can merge overlapping or adjacent ranges.  (The previous code
        # removed entries from `merged` while iterating over it, which can
        # skip elements and leave overlapping ranges unmerged.)
        aligned = sorted(align(addr, size, grow=True)
                         for addr, size, _ in self.loader.segments())
        merged = []
        for addr, size in aligned:
            if merged:
                last_addr, last_size = merged[-1]
                if addr <= last_addr + last_size:
                    # Overlaps or touches the previous range: extend it.
                    new_end = max(last_addr + last_size, addr + size)
                    merged[-1] = (last_addr, new_end - last_addr)
                    continue
            merged.append((addr, size))

        for addr, size in merged:
            self.uc.mem_map(addr, size)

        for addr, size, data in self.loader.segments():
            # FIXME: weird, if I don't touch the data before write it segfaults on ARM
            # Issue #15
            binascii.hexlify(data)
            self.uc.mem_write(addr, data)
        # TODO: ask loader for stack size/location
        self.stack = self.uc.mmap(STACK_SIZE, addr_hint=STACK_BASE)
        self.uc.reg_write(self.arch.sp, self.stack + STACK_SIZE - self.bsz)
Example 2
0
    # Alternative smaller dataset for quick runs:
    # test_path = 'data/mini_mnist'
    test_path = 'data/mnist.test'

    # Read the training split; read_dense_data returns (features, labels).
    # NOTE: Python 2 syntax — `print >> sys.stderr` writes to stderr.
    X_train, Y_train = read_dense_data(open(train_path))
    print >> sys.stderr, 'read training data done.'
    X_train = np.matrix(X_train)
    # Convert each label to int and shape the labels as a column vector.
    Y_train = [int(y) for y in Y_train]
    Y_train = np.matrix(Y_train).T
    print >> sys.stderr, 'create training matrix done.'

    # Same preprocessing for the test split.
    X_test, Y_test = read_dense_data(open(test_path))
    print >> sys.stderr, 'read test data done'
    X_test = np.matrix(X_test)
    Y_test = [int(y) for y in Y_test]
    Y_test = np.matrix(Y_test).T
    print >> sys.stderr, 'create test matrix done.'

    # align/normalize presumably reconcile feature dimensions and scale the
    # two matrices consistently — TODO confirm against their definitions.
    X_train, X_test = align(X_train, X_test)
    X_train, X_test = normalize(X_train, X_test)

    # Train on the training split and report accuracy on both splits.
    clf = NeuralNetwork()
    clf.train(X_train, Y_train)
    # clf.train(X_test, Y_test)
    acc_train = clf.test(X_train, Y_train)
    acc_test = clf.test(X_test, Y_test)

    print >> sys.stderr, 'Training accuracy for Neural Network : %lf%%' % (
        100.0 * acc_train)
    print >> sys.stderr, 'Test accuracy for Neural Network : %lf%%' % (
        100.0 * acc_test)
Example 3
0
    def _dump(self):
        """
        Returns all data in this U8 archive as bytes.

        Builds the U8 header, the node table (one node per entry in
        self.files plus the root node), the name string table, and the
        32-byte-aligned file data section, then concatenates them.
        """
        header = self.U8Header()
        rootnode = self.U8Node()

        # constants
        header.tag = b'U\xAA8-'
        header.rootnode_offset = 0x20
        header.zeroes = b'\x00' * 16
        rootnode.type = 0x0100  # type 0x0100 = directory node

        nodes = []
        strings = b'\x00'  # string table starts with an empty (root) name
        data = b''

        for item, value in self.files:
            node = self.U8Node()

            # Directory depth of the entry (number of '/' separators).
            # str.count() can never be negative, so no clamping is needed
            # (the old `if recursion < 0` branch was dead code).
            recursion = item.count('/')
            name = item.split('/')[-1]

            node.name_offset = len(strings)
            strings += name.encode('latin-1') + b'\x00'

            if value is None:  # directory
                node.type = 0x0100
                node.data_offset = recursion

                # 1-based index of this node, plus one per entry whose path
                # starts with this directory's path.
                node.size = len(nodes) + 1
                for path, _ in self.files:
                    if path[:len(item)] == item:  # find nodes in the folder
                        node.size += 1
            else:  # file
                node.type = 0x0000
                # Offset is relative to the data section for now; made
                # absolute below once header.data_offset is known.
                node.data_offset = len(data)
                data += value + (
                    b'\x00' * (align(len(value), 32) - len(value))
                )  # 32 seems to work best for fuzzyness? I'm still really not sure
                node.size = len(value)
            nodes.append(node)

        header.header_size = ((len(nodes) + 1) * len(rootnode)) + len(strings)
        header.data_offset = align(header.header_size + header.rootnode_offset,
                                   64)
        rootnode.size = len(nodes) + 1

        # Rebase file data offsets from section-relative to absolute.
        for node in nodes:
            if node.type == 0x0000:
                node.data_offset += header.data_offset

        fd = b''
        fd += header.pack()
        fd += rootnode.pack()
        for node in nodes:
            fd += node.pack()
        fd += strings
        # Zero padding between the node/string tables and the data section.
        fd += b'\x00' * (header.data_offset - header.rootnode_offset -
                         header.header_size)
        fd += data

        return fd
Example 4
0
    # Read the training split; read_dense_data returns (features, labels).
    # NOTE: Python 2 syntax — `print >> sys.stderr` writes to stderr.
    X_train, Y_train = read_dense_data(open(train_path)) 
    print >> sys.stderr, 'read training data done.'
    X_train = np.matrix(X_train)
    # Convert each label to int and shape the labels as a column vector.
    Y_train = [int(y) for y in Y_train]
    Y_train = np.matrix(Y_train).T
    print >> sys.stderr, 'create training matrix done.'

    # Same preprocessing for the test split.
    X_test, Y_test = read_dense_data(open(test_path))
    print >> sys.stderr, 'read test data done'
    X_test = np.matrix(X_test)
    Y_test = [int(y) for y in Y_test]    
    Y_test = np.matrix(Y_test).T
    print >> sys.stderr, 'create test matrix done.'

    # align presumably reconciles the two matrices' feature dimensions —
    # TODO confirm against its definition.
    X_train, X_test = align(X_train, X_test)

    # Disabled mean/std normalization over the combined data, kept for
    # reference as an inert string literal:
    '''
    X_all = np.row_stack([X_train, X_test])
    print X_all.shape
    mean = X_all.mean(0)
    std = X_all.std(0)
    del X_all
    
    X_train = 1.0 * (X_train - mean) / (std + 0.0001)
    X_test = 1.0 * (X_test - mean) / (std + 0.0001)
    '''

    # Train a softmax-regression classifier on the training split.
    clf = SoftmaxRegression()
    clf.train(X_train, Y_train) 
    # clf.train(X_test, Y_test)