Example #1
def kodo_init():
    # Create a list of decoder triples (simple, greedy, heuristic) per node
    global nodes
    global data_in
    global simple_data_out
    global greedy_data_out
    global heuristic_data_out
    global master_data_in

    nodes = []
    data_in = []
    simple_data_out = []
    greedy_data_out = []
    heuristic_data_out = []
    master_data_in = []

    # Seed the master encoder
    master_encoder.set_seed(SEED_VALUE)

    for _ in range(NUM_OF_NODES):
        seed = np.random.randint(SEED_VALUE)

        # init decoders
        simple_decoder = kodo.RLNCDecoder(field, symbols, symbol_size)
        simple_decoder.set_seed(seed)

        greedy_decoder = kodo.RLNCDecoder(field, symbols, symbol_size)
        greedy_decoder.set_seed(seed)

        heuristic_decoder = kodo.RLNCDecoder(field, symbols, symbol_size)
        heuristic_decoder.set_seed(seed)

        # decoder.set_log_callback(callback_function)
        nodes.append([simple_decoder, greedy_decoder, heuristic_decoder])
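
The snippet above assumes several module-level names (kodo, np, NUM_OF_NODES, SEED_VALUE, field, symbols, symbol_size and master_encoder) that are defined elsewhere in the file. A minimal sketch of that setup, with illustrative values, might look like this:

import os

import kodo
import numpy as np

# Assumed module-level configuration (illustrative values)
NUM_OF_NODES = 4
SEED_VALUE = 1000
field = kodo.field.binary8
symbols = 16
symbol_size = 1400

# Shared encoder that every node decodes from
master_encoder = kodo.RLNCEncoder(field, symbols, symbol_size)
master_data = bytearray(os.urandom(master_encoder.block_size()))
master_encoder.set_symbols_storage(master_data)
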
Example #2
 def __init__(self, name='S', coding=None, fieldsize=1, random=None):
     if random is None:
         np.random.seed(1)
     else:
         np.random.seed(random)
     symbol_size = 8
     self.coder = None
     # Map the field-size exponent to a kodo finite field
     # (mapping assumed; the original call here was garbled)
     self.field = kodo.field.binary if fieldsize == 1 else kodo.field.binary8
     if name == 'S':
         self.coder = kodo.RLNCEncoder(self.field, coding, symbol_size)
         self.data = bytearray(os.urandom(self.coder.block_size()))
         self.coder.set_symbols_storage(self.data)
     else:
         self.coder = kodo.RLNCDecoder(self.field, coding, symbol_size)
         self.data = bytearray(self.coder.block_size())
         self.coder.set_symbols_storage(self.data)
     self.name = name
     self.fieldsize = 2**fieldsize
     self.incbuffer = []
     self.coding = coding
     self.symbol_size = symbol_size
     self.batch = 0
     self.eotx = float('inf')
     self.deotx = float('inf')
     self.creditcounter = 0. if self.name != 'S' else float('inf')
     self.credit = 0.
     self.sent = False
     self.working = True
     self.priority = 0.
     self.quiet = False
     self.history = []
     self.sendhistory = []
     self.rank = 0 if self.name != 'S' else coding
     self.action = None
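
The constructor above belongs to a node class whose surrounding definition is not shown. A hypothetical usage, assuming the class is called Node and that coding is the generation size:

# Hypothetical instantiation; the class name 'Node' is an assumption
source = Node(name='S', coding=16, fieldsize=8)       # wraps an RLNCEncoder
destination = Node(name='D', coding=16, fieldsize=8)  # wraps an RLNCDecoder
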
Example #3
def receive_data(settings, role):
    """Receive data from the other node."""
    # Setup kodo decoder
    decoder = kodo.RLNCDecoder(
        field=kodo.field.binary,
        symbols=settings['symbols'],
        symbol_size=settings['symbol_size'])

    data_out = bytearray(decoder.block_size())
    decoder.set_symbols_storage(data_out)

    send_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    # Set up the receiving data socket
    data_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    data_socket.settimeout(settings['timeout'])
    data_socket.bind(('', settings['data_port']))

    if role == 'client':
        address = (settings['server_ip'], settings['server_control_port'])
        send_settings(settings)
    else:  # server
        address = (settings['client_ip'], settings['client_control_port'])
        send(send_socket, "settings OK, receiving", address)

    # Decode coded packets
    received = 0
    start = time.perf_counter()
    end = None
    while True:
        try:
            packet = data_socket.recv(settings['symbol_size'] + 100)

            if not decoder.is_complete():
                decoder.consume_payload(bytearray(packet))
                received += 1

            if decoder.is_complete():
                if end is None:
                    end = time.perf_counter()  # record the stop time only once
                send(send_socket, "Stop sending", address)

        except socket.timeout:
            break  # no more data arriving

    # in case we did not complete
    if end is None:
        end = time.perf_counter()

    data_socket.close()

    if not decoder.is_complete():
        print("Decoding failed")

    size = decoder.block_size() * (float(received) / settings['symbols'])
    microseconds = 1e6 * (end - start)
    print("Received {0} packets, {1} kB in {2:.0f} microseconds at "
          "{3:.2f} Mbit/s.".format(received, size / 1000, microseconds,
                                   decoder.block_size() * 8 / microseconds))
Example #4
 def newbatch(self):
     """Make destination awaiting new batch."""
     self.batch += 1
     self.rank = self.coding if self.name == 'S' else 0
     if self.name != 'S':
         self.coder = kodo.RLNCDecoder(self.field, self.coding,
                                       self.symbol_size)
         self.data = bytearray(self.coder.block_size())
         self.coder.set_symbols_storage(self.data)
     self.quiet = False
Example #5
def main():
    """Simple example showing how to encode and decode a block of memory."""
    # Choose the finite field, the number of symbols (i.e. generation size)
    # and the symbol size in bytes
    field = kodo.field.binary
    symbols = 8
    symbol_size = 160

    # Create an encoder and a decoder
    encoder = kodo.RLNCEncoder(field, symbols, symbol_size)
    decoder = kodo.RLNCDecoder(field, symbols, symbol_size)

    # Generate some random data to encode. We create a bytearray of the same
    # size as the encoder's block size and assign it to the encoder.
    # This bytearray must not go out of scope while the encoder exists!
    data_in = bytearray(os.urandom(encoder.block_size()))
    encoder.set_symbols_storage(data_in)

    # Define the data_out bytearray where the symbols should be decoded
    # This bytearray must not go out of scope while the decoder exists!
    data_out = bytearray(decoder.block_size())
    decoder.set_symbols_storage(data_out)

    packet_number = 0
    while not decoder.is_complete():
        # Generate an encoded packet
        packet = encoder.produce_payload()
        print("Packet {} encoded!".format(packet_number))

        # Pass that packet to the decoder
        decoder.consume_payload(packet)
        print("Packet {} decoded!".format(packet_number))
        packet_number += 1
        print("rank: {}/{}".format(decoder.rank(), decoder.symbols()))

    print("Coding finished")

    # The decoder is complete, the decoded symbols are now available in
    # the data_out buffer: check if it matches the data_in buffer
    print("Checking results...")
    if data_out == data_in:
        print("Data decoded correctly")
    else:
        print("Unable to decode please file a bug report :)")
        sys.exit(1)
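
Each of these main() examples is written as a standalone script; the module header and entry point, which the excerpts omit, would look roughly like this (the exact imports vary slightly from example to example):

import os
import random
import sys

import kodo


if __name__ == '__main__':
    main()
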
Example #6
 def rcvpacket(self):
     """Add received Packet to buffer. Do this at end of time slot."""
     while len(self.incbuffer):
         batch, coding, preveotx, prevdeotx, special = self.incbuffer.pop()
          if self.name == 'S':  # Can't get new information if you're the source
             continue
         elif batch < self.batch:
             continue
          elif batch > self.batch or not self.coder.rank():
              # Start over if you have been working on a deprecated batch
             self.batch = batch
             self.sent = False
             self.coder = kodo.RLNCDecoder(self.field, self.coding,
                                           self.symbol_size)
             self.data = bytearray(self.coder.block_size())
             self.coder.set_symbols_storage(self.data)
             self.coder.consume_payload(coding)
             self.rank = self.coder.rank()
             self.creditcounter = 0.
             if self.quiet:
                 self.quiet = False
             if special and not self.credit:
                 self.creditcounter += 1
          else:  # Just add new information if it's new
             if not self.coder.is_complete():
                 self.coder.consume_payload(coding)
                 newrank = self.coder.rank()
             else:
                 newrank = self.rank  # Full coder does not get new information
             if self.rank <= newrank:
                 self.rank = newrank
                 if special and not self.credit:
                     self.creditcounter += 1
         if preveotx > self.eotx or prevdeotx > self.deotx or (
                 self.priority != 0. and self.priority > preveotx):
             self.creditcounter += self.credit
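
rcvpacket() pops five-element tuples (batch, coding, preveotx, prevdeotx, special) from self.incbuffer; the code that fills the buffer is not shown. A hypothetical sketch of the sending side, using only attributes visible in the snippets above:

# Hypothetical: a neighbouring node queues its coded payload together with
# its batch number, (d)eotx metrics and a 'special' flag
payload = neighbour.coder.produce_payload()
node.incbuffer.append((neighbour.batch, payload,
                       neighbour.eotx, neighbour.deotx, False))
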
Example #7
def main():
    """
    Encode on the fly example.

    This example shows how to use a storage aware encoder which will
    allow you to encode from a block before all symbols have been
    specified. This can be useful in cases where the symbols that
    should be encoded are produced on-the-fly.
    """
    # Choose the finite field, the number of symbols (i.e. generation size)
    # and the symbol size in bytes
    field = kodo.field.binary
    symbols = 10
    symbol_size = 160

    # Create an encoder and a decoder
    encoder = kodo.RLNCEncoder(field, symbols, symbol_size)
    decoder = kodo.RLNCDecoder(field, symbols, symbol_size)

    # Generate some random data to encode. We create a bytearray of the same
    # size as the encoder's block size
    data_in = bytearray(os.urandom(encoder.block_size()))

    # Define the data_out bytearray where the symbols should be decoded
    # This bytearray must not go out of scope while the decoder exists!
    data_out = bytearray(decoder.block_size())
    decoder.set_symbols_storage(data_out)

    # Let's split the data into symbols and feed the encoder one symbol at a
    # time
    symbol_storage = [
        data_in[i:i + symbol_size] for i in range(0, len(data_in), symbol_size)
    ]

    while not decoder.is_complete():

        # Randomly choose to insert a symbol
        if random.choice([True, False]) and encoder.rank() < symbols:
            # For an encoder the rank specifies the number of symbols
            # it has available for encoding
            rank = encoder.rank()
            encoder.set_symbol_storage(symbol_storage[rank], rank)
            print("Symbol {} added to the encoder".format(rank))

        # Encode a packet into the payload buffer
        packet = encoder.produce_payload()
        print("Packet encoded")

        # Send the packet to the decoder. Here, just for fun, we
        # simulate that we are losing 50% of the packets.
        if random.choice([True, False]):
            print("Packet dropped on channel")
            continue

        # Packet got through - pass that packet to the decoder
        decoder.consume_payload(packet)
        print("Decoder received packet")
        print("Encoder rank = {}".format(encoder.rank()))
        print("Decoder rank = {}".format(decoder.rank()))
        decoded_symbol_indices = []
        for i in range(decoder.symbols()):
            if decoder.is_symbol_decoded(i):
                decoded_symbol_indices.append(str(i))

        print("Decoder decoded = {} ({}) symbols".format(
            decoder.symbols_decoded(),
            " ".join(decoded_symbol_indices)))
        print("Decoder partially decoded = {}".format(
            decoder.symbols_partially_decoded()))

    print("Coding finished")

    # The decoder is complete, the decoded symbols are now available in
    # the data_out buffer: check if it matches the data_in buffer
    print("Checking results...")
    if data_out == data_in:
        print("Data decoded correctly")
    else:
        print("Unexpected failure to decode please file a bug report :)")
        sys.exit(1)
Example #8
def main():
    """
    Pure recode example using the Payload API.

    This example is very similar to encode_recode_decode_simple.py.
    The only difference is that this example uses a pure recoder instead of
    a decoder acting as a recoder. By "pure", we mean that the recoder will
    not decode the incoming data, it will only re-encode it.
    This example shows how to use an encoder, recoder, and decoder to
    simulate a simple relay network as shown below. For simplicity,
    we have error free links, i.e. no data packets are lost when being
    sent from encoder to recoder to decoder:

            +-----------+      +-----------+      +------------+
            |  encoder  |+---->| recoder   |+---->|  decoder   |
            +-----------+      +-----------+      +------------+

    The pure recoder does not need to decode all symbols, and its "capacity"
    for storing symbols can be limited. These stored coded symbols are
    linear combinations of previously received symbols. When a new symbol is
    received, the pure recoder will combine it with its existing symbols
    using random coefficients.
    The pure recoder can produce a random linear combination of the stored
    coded symbols, which can be processed by a regular decoder.
    """
    # Choose the finite field, the number of symbols (i.e. generation size)
    # and the symbol size in bytes
    field = kodo.field.binary8
    symbols = 5
    symbol_size = 160

    encoder = kodo.RLNCEncoder(field, symbols, symbol_size)

    # Set the pure recoder "capacity" to be less than "symbols"
    recoder = kodo.RLNCPureRecoder(field, symbols, symbol_size, 3)

    print("Recoder properties:\n"
          "  Symbols: {}\n"
          "  Recoder symbols: {}".format(recoder.symbols(),
                                         recoder.recoder_symbols()))

    decoder = kodo.RLNCDecoder(field, symbols, symbol_size)

    # Generate some random data to encode. We create a bytearray of the same
    # size as the encoder's block size and assign it to the encoder.
    # This bytearray must not go out of scope while the encoder exists!
    data_in = bytearray(os.urandom(encoder.block_size()))
    encoder.set_symbols_storage(data_in)

    # Define the data_out bytearray where the symbols should be decoded
    # This bytearray must not go out of scope while the decoder exists!
    data_out = bytearray(decoder.block_size())
    decoder.set_symbols_storage(data_out)

    while not decoder.is_complete():

        # Encode a packet into the payload buffer
        packet = encoder.produce_payload()
        print("Encoded packet generated and passed to the recoder")

        # Pass that packet to the recoder
        recoder.consume_payload(packet)

        # Now produce a new recoded packet from the recoder's stored symbols
        recoded_packet = recoder.produce_payload()

        print("Recoded packet generated and passed to the decoder")

        # Pass the recoded packet to the decoder
        decoder.consume_payload(recoded_packet)

        print("Decoder rank: {}/{}\n".format(decoder.rank(), symbols))

    # Check if we properly decoded the data
    if data_out == data_in:
        print("Data decoded correctly")
    else:
        print("Unexpected failure to decode please file a bug report :)")
        sys.exit(1)
Example #9
def main():
    """An example for using the trace functionality."""
    # Choose the finite field, the number of symbols (i.e. generation size)
    # and the symbol size in bytes
    field = kodo.field.binary8
    symbols = 5
    symbol_size = 16

    # Create an encoder and a decoder
    encoder = kodo.RLNCEncoder(field, symbols, symbol_size)
    decoder = kodo.RLNCDecoder(field, symbols, symbol_size)

    # Generate some random data to encode. We create a bytearray of the same
    # size as the encoder's block size and assign it to the encoder.
    # This bytearray must not go out of scope while the encoder exists!
    data_in = bytearray(os.urandom(encoder.block_size()))
    encoder.set_symbols_storage(data_in)

    # Define the data_out bytearray where the symbols should be decoded
    # This bytearray must not go out of scope while the decoder exists!
    data_out = bytearray(decoder.block_size())
    decoder.set_symbols_storage(data_out)

    # Setup tracing

    # Enable the stdout trace function of the encoder
    encoder.set_log_stdout()
    encoder.set_zone_prefix("encoder")

    # Define a custom trace function for the decoder which filters the
    # trace message based on their zones
    def callback_function(zone, message):
        if zone in [
                "decoder_state", "symbol_coefficients_before_consume_symbol"
        ]:
            print("{}:".format(zone))
            print(message)

    decoder.set_log_callback(callback_function)

    while not decoder.is_complete():

        # Encode a packet into the payload buffer
        packet = encoder.produce_payload()

        # Here we "simulate" a packet loss of approximately 50%
        # by dropping half of the encoded packets.
        # When running this example you will notice that the initial
        # symbols are received systematically (i.e. uncoded). After
        # sending all symbols once uncoded, the encoder will switch
        # to full coding, in which case you will see the full encoding
        # vectors being sent and received.
        if random.choice([True, False]):
            print("Packet dropped.\n")
            continue

        # Pass that packet to the decoder
        decoder.consume_payload(packet)

    # The decoder is complete, the decoded symbols are now available in
    # the data_out buffer: check if it matches the data_in buffer
    print("Checking results...")
    if data_out == data_in:
        print("Data decoded correctly")
    else:
        print("Unexpected failure to decode please file a bug report :)")
        sys.exit(1)
Example #10
def main():
    """
    Encode-decode using coefficients example.

    This example shows how to use the Symbol API with direct coefficient
    access. Using this approach we have full control over where
    coefficients are stored, however we also have to manage things such as
    systematic symbols ourselves.
    """
    # Choose the finite field, the number of symbols (i.e. generation size)
    # and the symbol size in bytes
    field = kodo.field.binary8
    symbols = 5
    symbol_size = 160

    # Create an encoder and a decoder
    encoder = kodo.RLNCEncoder(field, symbols, symbol_size)
    decoder = kodo.RLNCDecoder(field, symbols, symbol_size)

    # Generate some random data to encode. We create a bytearray of the same
    # size as the encoder's block size and assign it to the encoder.
    # This bytearray must not go out of scope while the encoder exists!
    data_in = bytearray(os.urandom(encoder.block_size()))
    encoder.set_symbols_storage(data_in)

    # Define the data_out bytearray where the symbols should be decoded
    # This bytearray must not go out of scope while the decoder exists!
    data_out = bytearray(decoder.block_size())
    decoder.set_symbols_storage(data_out)

    # Define a custom trace function for the decoder which filters the
    # trace message based on their zones
    def callback_function(zone, message):
        if zone in [
                "decoder_state", "symbol_coefficients_before_consume_symbol"
        ]:
            print("{}:".format(zone))
            print(message)

    # We want to follow the decoding process step-by-step
    decoder.set_log_callback(callback_function)

    # In the first phase, we will transfer some systematic symbols from
    # the encoder to the decoder.
    # Randomly select 2 symbols from the 5 original symbols
    for index in sorted(random.sample(range(symbols), 2)):
        # Get the original symbol from the encoder
        symbol = encoder.produce_systematic_symbol(index)
        # Insert the symbol to the decoder using the raw symbol data,
        # no additional headers or coefficients are needed for this
        print("Adding Systematic Symbol {}:\n".format(index))
        decoder.consume_systematic_symbol(symbol, index)

    # In the second phase, we will generate coded symbols to fill in the gaps
    # and complete the decoding process
    packet_number = 0
    while not decoder.is_complete():
        # Generate some random coefficients for encoding
        coefficients = encoder.generate()
        # We can print the individual coefficients here, because we use the
        # binary8 field where each byte corresponds to a single coefficient.
        # For other fields, we would need fifi-python to do this!
        print("Coding coefficients:")
        print(" ".join(str(c) for c in coefficients))
        # Write a coded symbol to the symbol buffer
        symbol = encoder.produce_symbol(coefficients)

        print("Coded Symbol {} encoded!\n".format(packet_number))

        # Pass that symbol and the corresponding coefficients to the decoder
        print("Processing Coded Symbol {}:\n".format(packet_number))
        decoder.consume_symbol(symbol, coefficients)

        packet_number += 1
        print("Decoder rank: {}/{}\n".format(decoder.rank(), symbols))

    print("Coding finished")

    # The decoder is complete, the decoded symbols are now available in
    # the data_out buffer: check if it matches the data_in buffer
    print("Checking results...")
    if data_out == data_in:
        print("Data decoded correctly")
    else:
        print("Unable to decode, please file a bug report :)")
        sys.exit(1)
Example #11
def main():
    """
    Encode recode decode example.

    In Network Coding applications, one of the key features is the
    ability of intermediate nodes in the network to recode packets
    as they traverse them. In Kodo it is possible to recode packets
    in decoders which provide the produce_payload() function.

    This example shows how to use one encoder and two decoders to
    simulate a simple relay network as shown below (for simplicity
    we have error free links, i.e. no data packets are lost when being
    sent from encoder to decoder1 and decoder1 to decoder2):

            +-----------+     +-----------+     +-----------+
            |  encoder  |+--->| decoder1  |+--->|  decoder2 |
            +-----------+     | (recoder) |     +-----------+
                              +-----------+
    In a practical application recoding can be used in several different
    ways and one must consider several different factors, such as
    reducing linear dependency by coordinating several recoding nodes
    in the network.
    Suggestions for dealing with such issues can be found in current
    research literature (e.g. MORE: A Network Coding Approach to
    Opportunistic Routing).
    """
    # Choose the finite field, the number of symbols (i.e. generation size)
    # and the symbol size in bytes
    field = kodo.field.binary
    symbols = 42
    symbol_size = 160

    # Create an encoder and two decoders
    encoder = kodo.RLNCEncoder(field, symbols, symbol_size)
    decoder1 = kodo.RLNCDecoder(field, symbols, symbol_size)
    decoder2 = kodo.RLNCDecoder(field, symbols, symbol_size)

    # Generate some random data to encode. We create a bytearray of the same
    # size as the encoder's block size and assign it to the encoder.
    # This bytearray must not go out of scope while the encoder exists!
    data_in = bytearray(os.urandom(encoder.block_size()))
    encoder.set_symbols_storage(data_in)

    # Define the data_out bytearrays where the symbols should be decoded
    # These bytearrays must not go out of scope while the decoders exist!
    data_out1 = bytearray(decoder1.block_size())
    data_out2 = bytearray(decoder2.block_size())
    decoder1.set_symbols_storage(data_out1)
    decoder2.set_symbols_storage(data_out2)

    while not decoder2.is_complete():

        # Encode a packet into the payload buffer
        packet = encoder.produce_payload()

        # Pass that packet to decoder1
        decoder1.consume_payload(packet)

        # Now produce a new recoded packet from the current
        # decoding buffer, and place it into the payload buffer
        packet = decoder1.produce_payload()

        # Pass the recoded packet to decoder2
        decoder2.consume_payload(packet)

    # Both decoder1 and decoder2 should now be complete,
    # check if the output buffers match the data_in buffer
    if data_out1 == data_in and data_out2 == data_in:
        print("Data decoded correctly")
    else:
        print("Unexpected failure to decode please file a bug report :)")
        sys.exit(1)
Example #12
def main():

    # Setup canvas and viewer
    size = 512
    canvas = kodo_helpers.CanvasScreenEngine(size * 2, size)

    encoder_viewer = kodo_helpers.EncodeStateViewer(size=size, canvas=canvas)

    decoder_viewer = kodo_helpers.DecodeStateViewer(size=size,
                                                    canvas=canvas,
                                                    canvas_position=(size, 0))

    canvas.start()
    try:
        field = kodo.field.binary8
        symbols = 64
        symbol_size = 16

        encoder = kodo.RLNCEncoder(field, symbols, symbol_size)
        decoder = kodo.RLNCDecoder(field, symbols, symbol_size)

        data_in = bytearray(os.urandom(encoder.block_size()))
        encoder.set_symbols_storage(data_in)

        data_out = bytearray(decoder.block_size())
        decoder.set_symbols_storage(data_out)

        def decoder_callback(zone, msg):
            decoder_viewer.log_callback(zone, msg)

        decoder.set_log_callback(decoder_callback)

        def encoder_callback(zone, msg):
            encoder_viewer.log_callback(zone, msg)

        encoder_viewer.set_symbols(encoder.symbols())
        encoder.set_log_callback(encoder_callback)

        while not decoder.is_complete():
            # Encode a packet into the payload buffer
            packet = encoder.produce_payload()

            # Here we "simulate" a packet loss of approximately 50%
            # by dropping half of the encoded packets.
            # When running this example you will notice that the initial
            # symbols are received systematically (i.e. uncoded). After
            # sending all symbols once uncoded, the encoder will switch
            # to full coding, in which case you will see the full encoding
            # vectors being sent and received.
            if random.choice([True, False]):
                continue

            # Pass that packet to the decoder
            decoder.consume_payload(packet)

        time.sleep(1)
    finally:
        # Whatever happens, make sure we stop the viewer.
        canvas.stop()

    # Check we properly decoded the data
    if data_out == data_in:
        print("Data decoded correctly")
    else:
        print("Unexpected failure to decode please file a bug report :)")
        sys.exit(1)
Example #13
def main():
    """Example showing the result of enabling the symbol status updater."""
    # Choose the finite field, the number of symbols (i.e. generation size)
    # and the symbol size in bytes
    field = kodo.field.binary
    symbols = 50
    symbol_size = 160

    # Create an encoder
    encoder = kodo.RLNCEncoder(field, symbols, symbol_size)

    # Create two decoders, one which has the status updater turned on, and one
    # which has it off.
    decoder1 = kodo.RLNCDecoder(field, symbols, symbol_size)
    decoder2 = kodo.RLNCDecoder(field, symbols, symbol_size)

    decoder2.set_status_updater_on()

    print("decoder 1 status updater: {}".format(
        decoder1.is_status_updater_enabled()))
    print("decoder 2 status updater: {}".format(
        decoder2.is_status_updater_enabled()))

    # Generate some random data to encode. We create a bytearray of the same
    # size as the encoder's block size and assign it to the encoder.
    # This bytearray must not go out of scope while the encoder exists!
    data_in = bytearray(os.urandom(encoder.block_size()))
    encoder.set_symbols_storage(data_in)

    # Define the data_out bytearrays where the symbols should be decoded
    # These bytearrays must not go out of scope while the decoders exist!
    data_out1 = bytearray(decoder1.block_size())
    data_out2 = bytearray(decoder2.block_size())
    decoder1.set_symbols_storage(data_out1)
    decoder2.set_symbols_storage(data_out2)

    # Skip the systematic phase as the effect of the symbol status updater is
    # only visible when processing coded packets.
    encoder.set_systematic_off()

    print("Processing")
    while not decoder1.is_complete():
        # Generate an encoded packet
        payload = encoder.produce_payload()
        payload_copy = copy.copy(payload)

        # Pass that packet to the decoder
        decoder1.consume_payload(payload)
        decoder2.consume_payload(payload_copy)
        print("decoder 1: {}".format(decoder1.symbols_decoded()))
        print("decoder 2: {}".format(decoder2.symbols_decoded()))
        print("-----------------")

    print("Processing finished")

    # Check if both decoders properly decoded the original data
    print("Checking results")
    if data_out1 == data_in and data_out2 == data_in:
        print("Data decoded correctly")
    else:
        print("Unable to decode please file a bug report :)")
        sys.exit(1)
Example #14
def main():

    # Get directory of this file
    directory = os.path.dirname(os.path.realpath(__file__))

    # The name of the file to use for the test
    filename = 'lena.jpg'

    # Open the image, convert it to RGB, and get its height and width
    image = Image.open(os.path.join(directory, filename)).convert("RGB")
    image_width = image.size[0]
    image_height = image.size[1]

    # The canvas should be able to contain the encoding state, the image,
    # and the decoding state side by side. Note that each state view is as
    # wide as the image is high.
    canvas_width = image_height + image_width + image_height

    # Create the canvas
    canvas = kodo_helpers.CanvasScreenEngine(
        width=canvas_width,
        height=image_height)

    # Create the encoding state viewer
    encoding_state_viewer = kodo_helpers.EncodeStateViewer(
        size=image_height,
        canvas=canvas)

    # Create the image viewer
    image_viewer = kodo_helpers.ImageViewer(
        width=image_width,
        height=image_height,
        canvas=canvas,
        canvas_position=(image_width, 0))

    # Create the decoding state viewer
    decoding_state_viewer = kodo_helpers.DecodeStateViewer(
        size=image_height,
        canvas=canvas,
        canvas_position=(image_width * 2, 0))

    # Pick a symbol size (image_width * 3 will create a packet for each
    # horizontal line of the image, that is three bytes per pixel (RGB))
    symbol_size = image_width * 3

    # Based on the size of the image and the symbol size, calculate the number
    # of symbols needed for containing the image in a single generation.
    symbols = int(math.ceil(image_width * image_height * 3.0 / symbol_size))

    field = kodo.field.binary8

    encoder = kodo.RLNCEncoder(field, symbols, symbol_size)
    decoder = kodo.RLNCDecoder(field, symbols, symbol_size)

    # Connect the tracing callback to the decode state viewer
    def encoding_callback(zone, msg):
        encoding_state_viewer.log_callback(zone, msg)
    encoding_state_viewer.set_symbols(encoder.symbols())
    encoder.set_log_callback(encoding_callback)

    def decoding_callback(zone, msg):
        decoding_state_viewer.log_callback(zone, msg)

    decoder.set_log_callback(decoding_callback)

    # Create a bytearray from the image to use in the encoding (only pick the
    # data we have room for).
    data_in = bytearray(image.tobytes()[-encoder.block_size():])

    # Set the converted image data
    encoder.set_symbols_storage(data_in)

    # Define the data_out bytearray where the symbols should be decoded
    # This bytearray must not go out of scope while the decoder exists!
    data_out = bytearray(decoder.block_size())
    decoder.set_symbols_storage(data_out)

    # Start the canvas and run the following code in a try/finally block;
    # this prevents the program from locking up, as the finally clause will
    # close down the canvas.
    canvas.start()
    try:
        while not decoder.is_complete():
            packet = encoder.produce_payload()

            # Drop some packets
            if random.choice([True, False]):
                decoder.consume_payload(packet)

            # The data_out buffer is continuously updated
            image_viewer.set_image(data_out)

        # Let the user see the complete photo before closing the application
        for i in range(100):
            image_viewer.set_image(data_out)
    finally:
        canvas.stop()

    # Check we properly decoded the data
    if data_out[:len(data_in)] == data_in:
        print("Data decoded correctly")
    else:
        print("Unexpected failure to decode please file a bug report :)")
        sys.exit(1)
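
The image example depends on a handful of imports that the excerpt omits; a sketch of the assumed module header, which can be read off the calls in the code above:

import math
import os
import random
import sys

import kodo
import kodo_helpers

from PIL import Image
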
Example #15
def main():
    """
    Switch systematic off example.

    This example shows how to enable or disable systematic coding for
    coding stacks that support it.
    Systematic coding is used to reduce the amount of work done by an
    encoder and a decoder. This is achieved by initially sending all
    symbols that have not previously been sent in uncoded form. Kodo
    allows this feature to be optionally turned on or off.
    """
    # Choose the finite field, the number of symbols (i.e. generation size)
    # and the symbol size in bytes
    field = kodo.field.binary
    symbols = 10
    symbol_size = 160

    # Create an encoder and a decoder
    encoder = kodo.RLNCEncoder(field, symbols, symbol_size)
    decoder = kodo.RLNCDecoder(field, symbols, symbol_size)

    # Generate some random data to encode. We create a bytearray of the same
    # size as the encoder's block size and assign it to the encoder.
    # This bytearray must not go out of scope while the encoder exists!
    data_in = bytearray(os.urandom(encoder.block_size()))
    encoder.set_symbols_storage(data_in)

    # Define the data_out bytearray where the symbols should be decoded
    # This bytearray must not go out of scope while the decoder exists!
    data_out = bytearray(decoder.block_size())
    decoder.set_symbols_storage(data_out)

    print("Starting encoding / decoding...")

    while not decoder.is_complete():

        # If the chosen codec stack supports systematic coding
        if 'is_systematic_on' in dir(encoder):

            # Toggle systematic mode with 50% probability
            if random.choice([True, False]):

                if encoder.is_systematic_on():
                    print("Turning systematic OFF")
                    encoder.set_systematic_off()
                else:
                    print("Turning systematic ON")
                    encoder.set_systematic_on()

        # Encode a packet into the payload buffer
        packet = encoder.produce_payload()

        if random.choice([True, False]):
            print("Packet dropped.")
            continue

        # Pass that packet to the decoder
        decoder.consume_payload(packet)

        print("Rank of decoder {}".format(decoder.rank()))

        # Symbols that were received in the systematic phase correspond
        # to the original source symbols and are therefore marked as
        # decoded
        print("Symbols decoded {}".format(decoder.symbols_decoded()))

    # The decoder is complete, the decoded symbols are now available in
    # the data_out buffer: check if it matches the data_in buffer
    print("Checking results...")
    if data_out == data_in:
        print("Data decoded correctly")
    else:
        print("Unexpected failure to decode please file a bug report :)")
        sys.exit(1)
Example #16
def main():
    """
    Pure recode example using the Symbol API.

    This example is very similar to pure_recode_payload_api.py.
    The difference is that this example uses the low-level Symbol API of the
    pure recoder instead of the high-level Payload API.
    Similarly, the pure recoder will not decode the incoming data, it will
    only re-encode it and we use the same relay network as shown below.
    For simplicity, we have error free links, i.e. no data packets are lost
    when being sent from encoder to recoder to decoder:

            +-----------+      +-----------+      +------------+
            |  encoder  |+---->| recoder   |+---->|  decoder   |
            +-----------+      +-----------+      +------------+

    The pure recoder does not need to decode all symbols, and its "capacity"
    for storing symbols can be limited. These stored coded symbols are
    linear combinations of previously received symbols. When a new symbol is
    received, the pure recoder will combine it with its existing symbols
    using random coefficients.
    The pure recoder can produce a random linear combination of the stored
    coded symbols, which can be processed by a regular decoder.
    """
    # Choose the finite field, the number of symbols (i.e. generation size)
    # and the symbol size in bytes
    field = kodo.field.binary8
    symbols = 5
    symbol_size = 160

    encoder = kodo.RLNCEncoder(field, symbols, symbol_size)

    # Set the pure recoder "capacity" to be less than "symbols"
    recoder = kodo.RLNCPureRecoder(field, symbols, symbol_size, 3)

    print("Recoder properties:\n"
          "  Symbols: {}\n"
          "  Recoder symbols: {}".format(recoder.symbols(),
                                         recoder.recoder_symbols()))

    decoder = kodo.RLNCDecoder(field, symbols, symbol_size)

    # Generate some random data to encode. We create a bytearray of the same
    # size as the encoder's block size and assign it to the encoder.
    # This bytearray must not go out of scope while the encoder exists!
    data_in = bytearray(os.urandom(encoder.block_size()))
    encoder.set_symbols_storage(data_in)

    # Define the data_out bytearray where the symbols should be decoded
    # This bytearray must not go out of scope while the decoder exists!
    data_out = bytearray(decoder.block_size())
    decoder.set_symbols_storage(data_out)

    while not decoder.is_complete():
        # Generate some random coefficients for encoding
        coefficients = encoder.generate()
        # We can print the individual coefficients here, because we use the
        # binary8 field where each byte corresponds to a single coefficient.
        # For other fields, we would need fifi-python to do this!
        print("Encoding coefficients:")
        print(" ".join(str(c) for c in coefficients))
        # Write a coded symbol to the symbol buffer
        symbol = encoder.produce_symbol(coefficients)

        print("Encoded symbol generated and passed to the recoder")
        # Pass that symbol and the corresponding coefficients to the recoder
        recoder.consume_symbol(symbol, coefficients)

        # Generate some random coefficients for recoding
        recoding_coefficients = recoder.recoder_generate()

        print("Recoding coefficients:")
        print(" ".join(str(c) for c in recoding_coefficients))

        # Produce an encoded symbol based on the recoding coefficients
        # Note that the resulting recoded_symbol_coefficients will not be
        # the same as the recoding_coefficients
        recoded_symbol, recoded_symbol_coefficients = \
            recoder.recoder_produce_symbol(recoding_coefficients)

        print("Recoded symbol coefficients:")
        print(" ".join(str(c) for c in recoded_symbol_coefficients))

        print("Recoded symbol generated and passed to the decoder")

        # Pass that symbol and the corresponding coefficients to the decoder
        decoder.consume_symbol(recoded_symbol, recoded_symbol_coefficients)

        print("Decoder rank: {}/{}\n".format(decoder.rank(), symbols))

    # Check if we properly decoded the data
    if data_out == data_in:
        print("Data decoded correctly")
    else:
        print("Unexpected failure to decode please file a bug report :)")
        sys.exit(1)
Example #17
def main():

    # Define the "symbol id" size (storing the PRNG seed), which in this
    # case is 2 bytes.
    id_size = 2

    # Choose the finite field, the number of symbols (i.e. generation size)
    # and the symbol size in bytes
    field = kodo.field.binary8
    symbols = 42
    symbol_size = 160

    # Let's calculate the header overhead we are adding due to the
    # "symbol id". Notice that we say header overhead: we only
    # concentrate on the overhead caused by the added header, in this
    # case the "symbol id". There are other ways to define overhead,
    # e.g. excess generated redundancy or similar; we will not look at
    # those here.
    #
    # We are adding 2 bytes per encoded symbol, so this is the total
    # per-symbol overhead.
    #
    # If we generate 100% redundancy, we would produce in total 84
    # symbols. Those 84 symbols would each have 2 bytes of overhead so
    # in total 168 bytes.
    #
    # If we wanted to store the 84 encoded symbols in memory or on disk
    # they would take up: 84*(160+2) = 13608 bytes (out of which 168
    # bytes would be overhead from the "symbol id"). One minor note here
    # is that in a communication scenario we would typically store and
    # send a single encoded symbol at a time (which would need 160+2 bytes).
    # In both cases the input data also needs to be in memory.
    #
    # From a receiver's perspective only approx. 42 symbols would need to
    # be received in order to be able to decode the original data. So a
    # receiver would need to download 42*(160+2) = 6804 bytes to decode
    # the 42*160 = 6720 bytes of original data. This overhead in that case
    # is 42*2 = 84 bytes.
    #
    # Note that we said approx. 42 symbols is needed to decode. This
    # number depends on the exact type of code used and its parameters,
    # e.g. the size of the finite field. If less than 42 symbols are received,
    # only partial decoding is possible, which means that parts of the
    # file may be decoded, but not all.
    #
    # With RLNC we can generate as much redundancy as we want, in this
    # case we are limited by the 2-byte "symbol id". If instead we used
    # a 4-byte "symbol id", we could have generated 2^32 encoded symbols.
    # In a network, this may make sense, since it allows us to produce a
    # virtually unlimited number of encoded packets.
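
    # Illustrative sanity check of the arithmetic quoted above (assumes the
    # comment's scenario of 100% redundancy, i.e. 84 coded symbols).
    assert (2 * symbols) * (symbol_size + id_size) == 13608  # stored bytes
    assert symbols * (symbol_size + id_size) == 6804         # downloaded bytes
    assert symbols * symbol_size == 6720                     # original data size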

    # Create an encoder using the specified parameters
    encoder = kodo.RLNCEncoder(field, symbols, symbol_size)

    # Generate some random data to encode. We create a bytearray of the same
    # size as the encoder's block size and assign it to the encoder.
    # This bytearray must not go out of scope while the encoder exists!
    data_in = bytearray(os.urandom(encoder.block_size()))

    # Assign the data buffer to the encoder so that we may start
    # to produce encoded symbols from it
    encoder.set_symbols_storage(data_in)

    # We create twice as many coded symbols as source symbols,
    # i.e. 100% redundancy.
    #
    # The amount of redundancy we generate depends on the amount of
    # erasures (packet loss) we expect to see. For example, with a loss
    # rate of 50%, i.e. losing half the packets sent, we need to send at
    # least twice as many packets, i.e. 100% redundancy. How to tune/adapt
    # these numbers is system dependent and depends on the desired
    # decoding probability.
    #
    # Specifically for the above example we add 100% redundancy, which
    # means from the 42 symbols we get 84 encoded symbols. If we have a
    # loss rate of 50%, it means that on average we get the 42 symbols
    # needed to decode. So half of the time we will decode because we get
    # 42 symbols or more and half of the time we will fail because we get
    # less than the 42 needed symbols. Depending on the chosen finite
    # field size we also get an effect from linear dependency, which means
    # we can expect to decode less than half of the time. We can calculate
    # the redundancy needed to ensure decoding 99% of the time using
    # probability theory.
    # In communication scenarios, feedback is often used to control the
    # redundancy level; how relaxed we can be depends on the cost of
    # asking for more redundancy (which is system dependent).
    # In systems without feedback, often the best we can do is to
    # simply blindly generate redundancy (for a given period of time).
    # In this case the rateless feature of RLNC is handy. Rateless
    # simply means that there is no upper limit to the amount of
    # redundancy we can generate.
    #
    # It is often the case that in storage scenarios a fixed redundancy
    # level is used, whereas in communication scenarios an adaptive
    # redundancy level is used (RLNC supports both modes). The adaptive
    # mode is demonstrated e.g. in examples/encode_decode_simple where
    # the encoder keeps producing symbols until the decoder is complete.
    #
    # One subtle thing to notice is that we are using produce_symbol
    # (symbol API) instead of produce_payload (payload API) which is
    # used in many other examples (such as encode_decode_simple).
    #
    # The payload API is a "higher level" API, which allows people to
    # get going quickly. It has its own internal header format, which
    # uses some bytes for signaling various state between an encoder and
    # decoder.
    #
    # The symbol API is "low-level" in the sense that we fully control what
    # is written. As such, a call to produce_symbol(...) will write only
    # the encoded symbol to the buffer passed. We have to write the
    # coefficients and the seed values ourselves as shown below.
    payloads = []
    for seed in range(symbols * 2):
        payload = bytearray()

        # Set the seed value to use
        encoder.set_seed(seed)

        # Write the seed value as two bytes in big endian
        two_bytes_seed = struct.pack('>H', seed)
        # Add the seed as the first part of the payload
        payload.extend(two_bytes_seed)

        # Generate an encoding vector using the current seed value
        coefficients = encoder.generate()

        # Write a symbol according to the generated coefficients
        symbol = encoder.produce_symbol(coefficients)

        # Add the symbol data as the second part of the payload
        payload.extend(symbol)

        # Store the payload
        payloads.append(payload)

    # Now let's create the decoder
    decoder = kodo.RLNCDecoder(field, symbols, symbol_size)

    # Define the data_out bytearray where the symbols should be decoded
    # This bytearray must not go out of scope while the decoder exists!
    data_out = bytearray(decoder.block_size())
    decoder.set_symbols_storage(data_out)

    for payload in payloads:
        # Extract the seed and symbol data from the payload
        seed = struct.unpack_from('>H', payload, 0)[0]
        symbol_data = payload[2:]

        # Set the seed to use
        decoder.set_seed(seed)

        # Generate the encoding vector using the current seed value
        coefficients = decoder.generate()

        # Consume the symbol data according to the generated coefficients
        decoder.consume_symbol(symbol_data=symbol_data,
                               coefficients=coefficients)

        if decoder.is_complete():
            break

    print("Coding finished")

    # The decoder is complete, the decoded symbols are now available in
    # the data_out buffer: check if it matches the data_in buffer
    print("Checking results...")
    if data_out == data_in:
        print("Data decoded correctly")
    else:
        print("Unable to decode please file a bug report :)")
        sys.exit(1)
Example #18
def main():
    """
    Multicast example, receiver part.

    An example where data is received, decoded, and finally written to a file.
    """
    parser = argparse.ArgumentParser(description=main.__doc__)
    parser.add_argument('--output-file',
                        type=str,
                        help='Path to the file which should be received.',
                        default='output_file')

    parser.add_argument('--ip',
                        type=str,
                        help='The IP address to use.',
                        default=MCAST_GRP)

    parser.add_argument('--port',
                        type=int,
                        help='The port to use.',
                        default=MCAST_PORT)

    parser.add_argument('--dry-run',
                        action='store_true',
                        help='Run without network use.')

    args = parser.parse_args()

    field = kodo.field.binary
    symbols = 64
    symbol_size = 1400

    decoder = kodo.RLNCDecoder(field, symbols, symbol_size)

    data_out = bytearray(decoder.block_size())
    decoder.set_symbols_storage(data_out)

    sock = socket.socket(family=socket.AF_INET,
                         type=socket.SOCK_DGRAM,
                         proto=socket.IPPROTO_UDP)

    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    sock.bind(('', args.port))
    mreq = struct.pack("4sl", socket.inet_aton(args.ip), socket.INADDR_ANY)

    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)

    if args.dry_run:
        sys.exit(0)

    print("Processing...")
    while not decoder.is_complete():
        time.sleep(0.2)
        packet = sock.recv(10240)

        decoder.consume_payload(bytearray(packet))
        print("Packet received!")
        print("Decoder rank: {}/{}".format(decoder.rank(), decoder.symbols()))

    # Write the decoded data to the output file
    with open(args.output_file, 'wb') as f:
        f.write(data_out)

    print("Processing finished.")