def handle_mod_neural(self, pdu):
     t0 = time.time()
     self.mod_packet_cnt += 1
     tag_dict = pmt.to_python(pmt.car(pdu))
     vec = pmt.to_python(pmt.cdr(pdu))
     exploration = 'exploit' if self.run_mode == 'freeze' else 'explore'
     symbs = self.mod.modulate(
         util_data.bits_to_integers(vec, self.bits_per_symbol), exploration)
     self.past_actions = symbs
     ###DEBUG###
     self.actions_hist[self.mod_packet_cnt] = symbs
     ###DEBUG###
     # Spy and header fields
     if not self.use_shared:
         self.gen_preamble()
     classic, _ = self.assemble_packet_neural(vec[self.preamble.size:],
                                              (1 << 16) - 1,
                                              valid=False)
     classic_si = util_data.bits_to_integers(classic, self.bits_per_symbol)
     symbs = np.concatenate([self.spy_mod.modulate(classic_si), symbs])
     self.message_port_pub(
         self.port_id_mod_out,
         pmt.cons(pmt.PMT_NIL, pmt.to_pmt(symbs.astype(np.complex64))))
     t1 = time.time()
     self.logger.debug("neural mod {} handled {} bits in {} seconds".format(
         self.uuid_str, vec.size, t1 - t0))
 def gen_preamble(self):
     self.preamble = np.random.randint(0, 2, self.npreamble)
     self.preamble_si = util_data.bits_to_integers(self.preamble,
                                                   self.bits_per_symbol)
     np.save("preamble_si_{}".format(self.mod_packet_cnt), self.preamble_si)
     ###DEBUG###
     self.preamble_hist[self.mod_update_cnt] = self.preamble
     ###DEBUG###
Example #3
    def test_001_t(self):
        # set up fg
        pkt_tag = gr.tag_t()
        pkt_tag.key = pmt.intern("pkt")
        mod_types = ['BPSK', 'QPSK', '8PSK', 'QAM16', 'QAM64']
        for mod_type in mod_types:
            print("Testing {}...".format(mod_type))
            modulator = modulator_classic(mod_type)
            src_data = get_random_bits(100 * modulator.bits_per_symbol)
            pkt_tag.value = pmt.to_pmt(len(src_data))
            expected_data = modulator.modulate(
                bits_to_integers(data_b=src_data,
                                 bits_per_symbol=modulator.bits_per_symbol))
            src = blocks.vector_source_b(src_data, False, 1, [pkt_tag])
            tag2pdu = blocks.tagged_stream_to_pdu(blocks.byte_t, "pkt")
            pdu2tag = blocks.pdu_to_tagged_stream(blocks.complex_t, "pkt")
            snk = blocks.vector_sink_c()
            self.tb.connect(src, tag2pdu)
            self.tb.msg_connect(tag2pdu, "pdus", modulator, "bits")
            self.tb.msg_connect(modulator, "symbols", pdu2tag, "pdus")
            self.tb.connect(pdu2tag, snk)

            self.tb.start()
            while modulator.packet_cnt < 1:
                time.sleep(0.1)
            self.tb.stop()
            self.tb.wait()
            # check data
            result_data = snk.data()
            self.assertComplexTuplesAlmostEqual(expected_data, result_data, 6)
Example #4
 def handle_feedback(self, pdu):
     t0 = time.time()
     self.train_cnt += 1
     tag_dict = pmt.to_python(pmt.car(pdu))
     vec = pmt.to_python(pmt.cdr(pdu))
     # Split packet into functional parts
     new_echo = vec[:self.preamble.size]
     my_echo = vec[self.preamble.size:2 * self.preamble.size]
     # rest_of_packet = vec[2 * self.preamble.size:]
     # Do not perform updates while frozen
     if self.run_mode == "freeze":
         # Save every round trip BER while frozen to measure average performance
         ber = sum(numpy.abs(self.preamble - my_echo)) * 1.0 / self.preamble.size
         with open("ber_echo_{}.csv".format(self.uuid_str), "a") as f:
             f.write("{},{}\n".format(self.train_cnt, ber))
     else:  # run_mode == train
         labels_si_g = bits_to_integers(my_echo, self.bits_per_symbol)
         # Only use preamble from feedback, at least for now
         reward, _, _, loss = self.update(preamble_si=self.preamble_si,  # labels
                                          actions=self.past_actions[0:self.preamble_si.shape[0]],  # actions
                                          labels_si_g=labels_si_g[0:self.preamble_si.shape[0]])  # demodulator guesses
         self.version_dirty = False
         if self.train_cnt % self.log_constellation_interval == 0:
             print("Saving mod constellation @{}".format(self.train_cnt))
             numpy.save("neural_mod_constellation_{:05d}_{}".format(self.train_cnt,
                        time.strftime('%Y%m%d_%H%M%S')),
                        get_constellation(self))
             ber = sum(numpy.abs(self.preamble - my_echo)) * 1.0 / self.preamble.size
             with open("ber_echo_{}.csv".format(self.uuid_str), "a") as f:
                 f.write("{},{}\n".format(self.train_cnt, ber))
     # Send the echo with a new preamble to continue the training cycle
     echo_packet = numpy.concatenate([self.preamble, new_echo])
     echo_packet_si = bits_to_integers(echo_packet, self.bits_per_symbol)
     exploration = 'exploit' if self.run_mode == 'freeze' else 'explore' 
     echo = self.modulate(echo_packet_si, exploration)
     # Condition the symbols to reduce IQ imbalance issues
     echo = self.center_iq(echo)
     self.past_actions = echo
     self.message_port_pub(self.port_id_out,
                           pmt.cons(pmt.PMT_NIL,
                                    pmt.to_pmt(echo.astype(numpy.complex64))))
     self.version_dirty = True
     # Logging
     t1 = time.time()
     self.logger.debug("neural mod {} updated with {} bits in {} seconds".format(
                       self.uuid_str, vec.size, t1 - t0))
Example #5
 def handle_packet(self, pdu):
     t0 = time.time()
     self.packet_cnt += 1
     tag_dict = pmt.car(pdu)
     vec = pmt.to_python(pmt.cdr(pdu))
     data_si = util_data.bits_to_integers(vec, self.bits_per_symbol)
     symbs = self.modulate(data_si).astype(numpy.complex64)
     self.message_port_pub(self.port_id_out,
                           pmt.cons(pmt.PMT_NIL,
                                    pmt.to_pmt(symbs)))
     t1 = time.time()
     self.logger.debug("classic mod {} handled {} bits in {} seconds".format(
                       self.uuid_str, vec.size, t1 - t0))
Example #6
 def handle_packet(self, pdu):
     t0 = time.time()
     if self.version_dirty:
         pass  # Some packets may be dropped, need to keep going
     self.packet_cnt += 1
     tag_dict = pmt.to_python(pmt.car(pdu))
     vec = pmt.to_python(pmt.cdr(pdu))
     exploration = 'exploit' if self.run_mode == 'freeze' else 'explore'
     symbs = self.modulate(bits_to_integers(vec, self.bits_per_symbol), exploration)
     # Condition the symbols to reduce IQ imbalance issues
     symbs = self.center_iq(symbs)
     # self.logger.info("symbs[{}] {}: {}".format(type(symbs[0]), symbs.shape, symbs))
     self.past_actions = symbs
     self.message_port_pub(self.port_id_out,
                           pmt.cons(pmt.PMT_NIL,
                                    pmt.to_pmt(symbs.astype(numpy.complex64))))
     self.version_dirty = True
     t1 = time.time()
     self.logger.debug("neural mod {} handled {} bits in {} seconds".format(
                       self.uuid_str, vec.size, t1 - t0))
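The center_iq call above is described only by its comment (conditioning the symbols to reduce IQ imbalance). Below is a minimal sketch of such a step, under the assumption that it simply removes the packet's DC offset; this is illustrative, not the project's actual center_iq.

import numpy as np

def center_iq(symbols):
    # Subtract the complex mean so the packet is centered at the IQ origin.
    symbols = np.asarray(symbols, dtype=np.complex64)
    return symbols - symbols.mean()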
Example #7
 def handle_packet(self, pdu):
     t0 = time.time()
     if self.version_dirty:
         pass  # Some packets may be dropped, need to keep going
     self.packet_cnt += 1
     tag_dict = pmt.to_python(pmt.car(pdu))
     vec = pmt.to_python(pmt.cdr(pdu))
     exploration = 'exploit' if self.run_mode == 'freeze' else 'explore' 
     symbs = self.modulate(bits_to_integers(vec, self.bits_per_symbol), exploration)
     self.past_actions = symbs
     # Spy and header fields
     classic, _ = self.assemble_packet(vec[self.preamble.size:], valid=False)
     classic_si = bits_to_integers(classic, self.bits_per_symbol)
     symbs = np.concatenate([self.spy_mod.modulate(classic_si), symbs])
     # self.logger.info("symbs[{}] {}: {}".format(type(symbs[0]), symbs.shape, symbs))
     self.message_port_pub(self.port_id_out,
                           pmt.cons(pmt.PMT_NIL,
                                    pmt.to_pmt(symbs.astype(np.complex64))))
     self.version_dirty = True
     t1 = time.time()
     self.logger.debug("neural mod {} handled {} bits in {} seconds".format(
                       self.uuid_str, vec.size, t1 - t0))
Example #8
 def handle_mod_feedback_classic(self, vec):
     t0 = time.time()
     self.mod_packet_cnt += 1
     self.mod_update_cnt += 1
     spy, hdr, new_echo, my_echo = self.split_packet_bits(vec)
     spy_ber = sum(spy != self.spy_master) * 1.0 / self.spy_length
     if hdr is not None:
         idxpre = hdr[0]
         idxecho = hdr[1]
         valid = hdr[2]
         if idxecho != self.mod_update_cnt - 1:
             print("Update cnt {} echo idx {}".format(
                 self.mod_update_cnt, idxecho))
     else:
         valid = False
         idxpre = (1 << 16) - 1
         idxecho = (1 << 16) - 1
     if ((self.mod_update_cnt % self.log_interval == 0
          or self.run_mode == 'freeze') and valid
             and spy_ber < self.spy_threshold):
         try:
             preamble = self.get_preamble_hist(idxecho, pop=True)
             print("{} classic offset {}".format(
                 self.uuid_str, self.mod_update_cnt - idxecho))
             ber = sum(my_echo != preamble) * 1.0 / self.preamble.size
             with open("ber_echo_{}.csv".format(self.uuid_str), "a") as f:
                 f.write("{},{}\n".format(self.mod_update_cnt, ber))
         except KeyError as e:
             self.logger.info(
                 "DEBUG::Unable to calculate BER with stored index {}".
                 format(idxecho))
     if not self.use_shared:
         self.gen_preamble()
     bits = self.assemble_packet(new_echo,
                                 idxpre,
                                 valid=spy_ber < self.spy_threshold)
     data_si = util_data.bits_to_integers(bits, self.bits_per_symbol)
     symbs = self.mod.modulate(data_si).astype(np.complex64)
     if self.mod_update_cnt - idxecho == 1 or idxecho == 65535:
         self.message_port_pub(self.port_id_mod_out,
                               pmt.cons(pmt.PMT_NIL, pmt.to_pmt(symbs)))
     else:
         print("Not sending new pkt because {} or {}".format(
             self.mod_update_cnt - idxecho, idxecho != 65535))
     t1 = time.time()
     self.logger.debug(
         "classic mod {} handled {} bits in {} seconds".format(
             self.uuid_str, bits.size, t1 - t0))
Example #9
    def __init__(self,
                 bits_per_symbol,
                 preamble=None,
                 log_ber_interval=10,
                 spy_length=64,
                 spy_threshold=0.1):
        ModulatorClassic.__init__(self, bits_per_symbol, max_amplitude=0.09)
        gr.basic_block.__init__(self,
                                name="modulator_classic",
                                in_sig=None,
                                out_sig=None)
        # Echo protocol variables
        assert preamble is not None, "Preamble must be provided"
        if not isinstance(preamble, np.ndarray):
            preamble = np.array(preamble)
        self.preamble = preamble
        self.preamble_si = util_data.bits_to_integers(self.preamble,
                                                      self.bits_per_symbol)
        self.log_ber_interval = log_ber_interval

        # Message port setup and variables
        self.port_id_in = pmt.intern("bits")
        self.port_id_update = pmt.intern("update")
        self.port_id_out = pmt.intern("symbols")
        self.message_port_register_in(self.port_id_in)
        self.message_port_register_in(self.port_id_update)
        self.message_port_register_out(self.port_id_out)
        self.set_msg_handler(self.port_id_in, self.handle_packet)
        self.set_msg_handler(self.port_id_update, self.handle_update)
        self.packet_cnt = 0
        self.ber_cnt = 0

        # Packet header and spy variables
        self.spy_length = spy_length
        assert self.spy_length % self.bits_per_symbol == 0
        self.spy_threshold = spy_threshold
        self.reedsolomon = reedsolo.RSCodec(4)
        self.rs_length = 4 * 2 * 8  # 4 bytes data, 4 bytes parity, 8 bits per byte

        # Logging stuff
        self.uuid = uuid.uuid4()
        self.uuid_str = str(self.uuid)[-6:]
        self.logger = gr.logger("log_debug")
        self.logger.set_level("DEBUG")
        self.logger.info("classic mod {}: {} bits per symbol".format(
            self.uuid_str, self.bits_per_symbol))
        with open("ber_echo_{}.csv".format(self.uuid_str), "w") as f:
            f.write("train_iter,BER\n")
Example #10
 def handle_mod_classic(self, pdu):
     t0 = time.time()
     self.mod_packet_cnt += 1
     tag_dict = pmt.car(pdu)
     vec = pmt.to_python(pmt.cdr(pdu))
     if not self.use_shared:
         self.gen_preamble()
     bits = self.assemble_packet(vec[self.preamble.size:], (1 << 16) - 1,
                                 valid=False)
     data_si = util_data.bits_to_integers(bits, self.bits_per_symbol)
     symbs = self.mod.modulate(data_si).astype(np.complex64)
     self.message_port_pub(self.port_id_mod_out,
                           pmt.cons(pmt.PMT_NIL, pmt.to_pmt(symbs)))
     t1 = time.time()
     self.logger.debug(
         "classic mod {} handled {} bits in {} seconds".format(
             self.uuid_str, bits.size, t1 - t0))
Example #11
 def handle_update(self, pdu):
     t0 = time.time()
     self.packet_cnt += 1
     self.ber_cnt += 1
     tag_dict = pmt.car(pdu)
     vec = pmt.to_python(pmt.cdr(pdu))
     spy, hdr, new_echo, my_echo = self.split_packet(vec)
     spy_ber = sum(
         spy != self.preamble[:self.spy_length]) * 1.0 / self.spy_length
     if hdr is not None:
         valid = hdr[0]
         pktidx = hdr[1]
     else:
         valid = False
     if (self.ber_cnt % self.log_ber_interval == 0 and valid
             and spy_ber < self.spy_threshold):
         ###DEBUG###
         np.save("clmod_preamble_{}".format(pktidx), new_echo)
         np.save("clmod_echo_{}".format(pktidx), my_echo)
         #np.save("clmod_preamble_{}".format(self.ber_cnt * 2 + 1), new_echo)
         #np.save("clmod_echo_{}".format(self.ber_cnt * 2 + 1), my_echo)
         ###DEBUG###
         ber = sum(my_echo != self.preamble) * 1.0 / self.preamble.size
         with open("ber_echo_{}.csv".format(self.uuid_str), "a") as f:
             f.write("{},{}\n".format(self.ber_cnt, ber))
     bits = self.assemble_packet(new_echo,
                                 valid=spy_ber < self.spy_threshold)
     data_si = util_data.bits_to_integers(bits, self.bits_per_symbol)
     symbs = self.modulate(data_si).astype(np.complex64)
     ###DEBUG###
     if (self.ber_cnt % self.log_ber_interval == 0 and valid
             and spy_ber < self.spy_threshold):
         np.save("clmod_symbs_{}".format(self.ber_cnt * 2 + 1),
                 symbs[-2 * self.preamble_si.size:])
     ###DEBUG###
     self.message_port_pub(self.port_id_out,
                           pmt.cons(pmt.PMT_NIL, pmt.to_pmt(symbs)))
     t1 = time.time()
     self.logger.debug(
         "classic mod {} handled {} bits in {} seconds".format(
             self.uuid_str, bits.size, t1 - t0))
Example #12
 def demod_train_preamble(self, data_c, idxecho):
     try:
         preamble_si = util_data.bits_to_integers(
             self.get_preamble_hist(idxecho), self.bits_per_symbol)
         self.demod.update(inputs=preamble_si,
                           actions=[],
                           data_for_rewards=data_c,
                           mode='echo')
         if self.demod_packet_cnt % self.log_interval == 0:
             print("Saving demod constellation @{}".format(
                 self.demod_packet_cnt))
             data_vis = gen_demod_grid(points_per_dim=100,
                                       min_val=-2.5,
                                       max_val=2.5)['data']
             labels_si_g = self.demod.demodulate(data_c=data_vis,
                                                 mode='exploit')
             np.savez("neural_demod_constellation_{:05d}_{}".format(
                 self.demod_packet_cnt, time.strftime('%Y%m%d_%H%M%S')),
                      iq=data_vis,
                      labels=labels_si_g)
     except KeyError as e:
         self.logger.info(
             "Unable to train demodulator with stored index {}".format(
                 idxecho))
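gen_demod_grid appears above only through its call signature and its 'data' field. A minimal sketch of such a helper, assuming it returns complex IQ grid points covering the constellation plane (an inference, not the project's implementation):

import numpy as np

def gen_demod_grid(points_per_dim, min_val, max_val):
    # Dense grid of complex points used to visualize demodulator decision regions.
    axis = np.linspace(min_val, max_val, points_per_dim)
    re, im = np.meshgrid(axis, axis)
    return {'data': (re + 1j * im).ravel().astype(np.complex64)}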
Example #13
 def __init__(
     self,
     seed=0,
     hidden_layers=(64, ),
     bits_per_symbol=2,
     preamble=None,
     log_constellation_interval=10,
     init_weights="",
     activation_fn_hidden='tanh',
     # kernel_initializer_hidden=normc_initializer(1.0),
     # bias_initializer_hidden=tf.glorot_uniform_initializer(),
     activation_fn_output=None,
     # kernel_initializer_output=normc_initializer(1.0),
     # bias_initializer_output=tf.glorot_uniform_initializer(),
     optimizer=torch.optim.Adam,
     # initial_eps=1e-1,
     # max_eps=2e-1,
     # min_eps=1e-4,
     lambda_prob=1e-10,
     loss_type='l2',
     # explore_prob=0.5,
     # strong_supervision_prob=0.,
     stepsize_mu=1e-3,
     # stepsize_eps=1e-5,
     stepsize_cross_entropy=1e-3,
     cross_entropy_weight=1.0,
 ):
     gr.basic_block.__init__(self,
                             name="demodulator_neural",
                             in_sig=None,
                             out_sig=None)
     DemodulatorNeural.__init__(
         self,
         seed=seed,
         hidden_layers=hidden_layers,
         bits_per_symbol=bits_per_symbol,
         activation_fn_hidden=activation_fn_hidden,
         # kernel_initializer_hidden=kernel_initializer_hidden,
         # bias_initializer_hidden=bias_initializer_hidden,
         activation_fn_output=activation_fn_output,
         # kernel_initializer_output=kernel_initializer_output,
         # bias_initializer_output=bias_initializer_output,
         optimizer=optimizer,
         # initial_eps=initial_eps,
         # max_eps=max_eps,
         # min_eps=min_eps,
         lambda_prob=lambda_prob,
         loss_type=loss_type,
         # explore_prob=explore_prob,
         # strong_supervision_prob=strong_supervision_prob,
         stepsize_mu=stepsize_mu,
         # stepsize_eps=stepsize_eps,
         stepsize_cross_entropy=stepsize_cross_entropy,
         cross_entropy_weight=cross_entropy_weight)
     if preamble is None:
         raise Exception("You must provide a preamble")
     if not isinstance(preamble, numpy.ndarray):
         preamble = numpy.array(preamble)
     assert len(
         preamble.shape
     ) == 1, "Preamble must be a vector, not a matrix with a dimension of size 1"
     self.preamble = preamble
     self.preamble_si = bits_to_integers(numpy.array(self.preamble),
                                         self.bits_per_symbol)
     self.run_mode = "train"  # Be careful not to clobber the parent class' mode here!
     if len(init_weights) > 0:
         self.model.load_state_dict(torch.load(init_weights))
         self.run_mode = "freeze"
     # Message ports
     self.port_id_in = pmt.intern("symbols")
     self.port_id_out = pmt.intern("bits")
     self.port_id_ctrl = pmt.intern("control")
     self.message_port_register_in(self.port_id_in)
     self.message_port_register_in(self.port_id_ctrl)
     self.message_port_register_out(self.port_id_out)
     self.set_msg_handler(self.port_id_in, self.handle_packet)
     self.set_msg_handler(self.port_id_ctrl, self.handle_control)
     # Counters
     self.packet_cnt = 0
     self.log_constellation_interval = log_constellation_interval
     self.uuid = uuid.uuid4()
     self.uuid_str = str(self.uuid)[-6:]
     self.logger = gr.logger("log_debug")
     self.logger.set_level("DEBUG")
     self.logger.info("neural demod {}: {} bits per symbol".format(
         self.uuid_str, self.bits_per_symbol))
     if len(init_weights) > 0:
         self.logger.info(
             "neural demod {}: initialized weights from {}".format(
                 self.uuid_str, init_weights))
     with open("ber_{}.csv".format(self.uuid_str), "w") as f:
         f.write("iter,BER\n")
Example #14
    def __init__(self, seed=189, hidden_layers=[64], bits_per_symbol=2,
                 preamble=None, log_constellation_interval=10,
                 spy_length=64, spy_threshold=0.1,
                 init_weights="",
                 lambda_p=0.0,
                 max_std=8e-1,
                 min_std=1e-1,
                 initial_std=2.0e-1,
                 restrict_energy=3,
                 activation_fn_hidden='tanh',
                 # kernel_initializer_hidden={'normc_std': 1.0},
                 # bias_initializer_hidden=None,
                 activation_fn_output=None,
                 # kernel_initializer_output=normc_initializer(1.0),
                 # bias_initializer_output=tf.glorot_uniform_initializer(),
                 optimizer='sgd',
                 lambda_prob=1e-10,
                 stepsize_mu=1e-1,
                 stepsize_sigma=1e-3
                 ):
        gr.basic_block.__init__(self,
                                name="modulator_neural",
                                in_sig=None,
                                out_sig=None)
        ModulatorNeural.__init__(self,
                                 seed=seed, hidden_layers=hidden_layers,
                                 bits_per_symbol=bits_per_symbol,
                                 lambda_p=lambda_p,
                                 max_std=max_std,
                                 min_std=min_std,
                                 initial_std=initial_std,
                                 restrict_energy=restrict_energy,
                                 activation_fn_hidden=activation_fn_hidden,
                                 # kernel_initializer_hidden=kernel_initializer_hidden,
                                 # bias_initializer_hidden=bias_initializer_hidden,
                                 activation_fn_output=activation_fn_output,
                                 # kernel_initializer_output=kernel_initializer_output,
                                 # bias_initializer_output=bias_initializer_output,
                                 optimizer=optimizer,
                                 lambda_prob=lambda_prob,
                                 stepsize_mu=stepsize_mu,
                                 stepsize_sigma=stepsize_sigma)
        ###DEBUG###
        self.seed = seed
        #self.model.load_state_dict(torch.load("pretrained.mdl"))
        torch.save(self.model.state_dict(), "initialization.mdl")
        torch.save(self.model.state_dict(), "model_0.mdl")
        np.save("const0", self.get_constellation())
        ###DEBUG###

        # Echo protocol variables
        if preamble is None:
            raise Exception("Preamble must be provided")
        if not isinstance(preamble, np.ndarray):
            preamble = np.array(preamble)
        assert len(preamble.shape) == 1, "Preamble must be a vector, not a matrix with a dimension of size 1"
        self.preamble = preamble
        self.preamble_si = bits_to_integers(self.preamble, self.bits_per_symbol)
        self.preamble_len = len(self.preamble)
        np.save("preamble_si", self.preamble_si)
        self.past_actions = None

        self.run_mode = "train"
        if len(init_weights) > 0:
            self.model.load_state_dict(torch.load(init_weights))
            self.run_mode = "freeze"
        
        # Message ports
        self.port_id_in = pmt.intern("bits")
        self.port_id_out = pmt.intern("symbols")
        self.port_id_feedback = pmt.intern("feedback")
        self.port_id_ctrl = pmt.intern("control")
        self.message_port_register_in(self.port_id_in)
        self.message_port_register_in(self.port_id_feedback)
        self.message_port_register_in(self.port_id_ctrl)
        self.message_port_register_out(self.port_id_out)
        self.set_msg_handler(self.port_id_in, self.handle_packet)
        self.set_msg_handler(self.port_id_feedback, self.handle_feedback)
        self.set_msg_handler(self.port_id_ctrl, self.handle_control)
        
        # Meta variables
        self.packet_cnt = 0
        self.train_cnt = 0
        self.version = 0
        self.version_dirty = False

        # Spy and packet info
        self.spy_length = spy_length
        self.spy_threshold = spy_threshold
        assert self.spy_length % self.bits_per_symbol == 0
        self.spy_mod = ModulatorClassic(self.bits_per_symbol)
        # | 1byte valid flag | 2byte sequence number |
        self.reedsolomon = reedsolo.RSCodec(4)
        self.rs_length = 4 * 2 * 8  # 4 bytes data, 4 bytes parity, 8 bits per byte

        # Logging
        self.log_constellation_interval = log_constellation_interval
        self.uuid = uuid.uuid4()
        self.uuid_str = str(self.uuid)[-6:]
        self.logger = gr.logger("log_debug")
        self.logger.set_level("DEBUG")
        self.logger.info("neural mod {}: {} bits per symbol".format(self.uuid_str, self.bits_per_symbol))
        if len(init_weights) > 0:
            self.logger.info("neural mod {}: initialized weights from {}".format(self.uuid_str, init_weights))
        with open("ber_echo_{}.csv".format(self.uuid_str), "w") as f:
            f.write("train_iter,BER\n")
        ###DEBUG###
        #self.neur_noise = np.load("neur_noise.npy", allow_pickle=True)
        #self.class_noise = np.load("class_noise.npy", allow_pickle=True)
        self.demod = DemodulatorClassic(self.bits_per_symbol)
        self.seeds = np.load("python-inputs/seeds.npy")
Example #15
    def handle_feedback(self, pdu):
        t0 = time.time()
        self.train_cnt += 1
        self.packet_cnt += 1
        tag_dict = pmt.to_python(pmt.car(pdu))
        vec = pmt.to_python(pmt.cdr(pdu))
        # Split packet into functional parts
        spy, hdr, new_echo, my_echo = self.split_packet(vec)
        spy_ber = sum(spy != self.preamble[:self.spy_length]) * 1.0 / self.spy_length
        if hdr is not None:
            valid = hdr[0]
            pktidx = hdr[1]
        else:
            valid = False
        # Do not perform updates while frozen
        if self.run_mode == "freeze" and spy_ber < self.spy_threshold and valid:
            # Save every round trip BER while frozen to measure average performance
            ber = sum(np.abs(self.preamble - my_echo)) * 1.0 / self.preamble.size
            with open("ber_echo_{}.csv".format(self.uuid_str), "a") as f:
                f.write("{},{}\n".format(self.train_cnt, ber))
        elif spy_ber < self.spy_threshold and valid:  # run_mode == "train"
            labels_si_g = bits_to_integers(my_echo, self.bits_per_symbol)
            labels_all = bits_to_integers(np.concatenate([new_echo, my_echo]), 
                                          self.bits_per_symbol)
            p_actions = self.past_actions[0:self.preamble_si.shape[0]]  # actions
            
            ###DEBUG###
            np.save("labels_si_g_{}".format(pktidx), labels_all)
            np.save("past_actions_{}".format(pktidx), self.past_actions)

            #labels_perfect = np.load("outputs.sgd.0/labels_si_g_{}.npy".format(self.train_cnt))

            labels_perfect = self.demod.demodulate(p_actions + (np.random.randn(p_actions.size) + 1j * np.random.randn(p_actions.size)) * 0.2)
            #errors = sum(np.abs(labels_perfect - labels_si_g))
            #print("labels diff", errors)
            #if errors < 300:
            #    labels_perfect = labels_si_g

            #nidx = (self.train_cnt-1)  % self.class_noise.shape[0]# np.random.choice(self.neur_noise.shape[0])
            #print(nidx, self.class_noise.shape[0])
            #np.save("saved_clrx_{}".format(self.train_cnt), p_actions + self.class_noise[nidx])
            #cllabels = self.demod.demodulate(p_actions + self.class_noise[nidx])
            #if np.random.random() < 0.5:
            #    cllabels = np.random.randint(0, 4, cllabels.size)
            #np.save("saved_nerx_{}".format(self.train_cnt), ModulatorClassic(2).modulate(cllabels) + self.neur_noise[nidx])
            #labels_perfect = self.demod.demodulate(ModulatorClassic(2).modulate(cllabels) + 
            #        self.neur_noise[nidx])
            #np.save("labels_si_g_{}".format(self.train_cnt), labels_perfect)

            ###DEBUG###

            ###DEBUG###
            # Load a static dataset and train on it instead
            #labels_si_g = np.load("python-inputs/labels_si_g_{}.input.npy".format(self.train_cnt - 1))
            #p_actions = np.load("python-inputs/past_actions_{}.input.npy".format(self.train_cnt - 1))
            #torch.manual_seed(self.seeds[self.train_cnt])
            #np.random.seed(self.seeds[self.train_cnt])
            #random.seed(self.seeds[self.train_cnt])
            ###DEBUG###

            # Only use preamble from feedback, at least for now
            torch.save(self.model.state_dict(), "tmp.mdl")
            reward, std0, std1, loss = self.update(preamble_si=self.preamble_si,  # labels
                                             actions=p_actions,
                                             labels_si_g=labels_si_g)  # demodulator guesses
                                             #labels_si_g=labels_perfect)  # demodulator guesses

            ###DEBUG###
            np.save("reward_{}".format(pktidx), reward)
            np.save("std0_{}".format(pktidx), std0)
            np.save("std1_{}".format(pktidx), std1)
            np.save("loss_{}".format(pktidx), loss)
            torch.save(self.model.state_dict(), "model_{}.mdl".format(pktidx))
            ###DEBUG###
            self.version_dirty = False
            if self.train_cnt % self.log_constellation_interval == 0:
                print("Saving mod constellation @{}".format(pktidx))
                np.save("neural_mod_constellation_{:05d}_{}".format(pktidx,
                           time.strftime('%Y%m%d_%H%M%S')),
                           self.get_constellation())
                ber = sum(self.preamble != my_echo) * 1.0 / self.preamble.size
                with open("ber_echo_{}.csv".format(self.uuid_str), "a") as f:
                    f.write("{},{}\n".format(pktidx, ber))
        # Send the echo with a new preamble to continue the training cycle
        classic, neural = self.assemble_packet(new_echo, valid=spy_ber < self.spy_threshold) 
        
        cl_si = bits_to_integers(classic, self.bits_per_symbol)
        cl_symb = self.spy_mod.modulate(cl_si)
       
        nn_si = bits_to_integers(neural, self.bits_per_symbol)
        exploration = 'exploit' if self.run_mode == 'freeze' else 'explore' 
        nn_symb = self.modulate(nn_si, exploration)
        self.past_actions = nn_symb
      
        packet = np.concatenate([cl_symb, nn_symb])
        self.message_port_pub(self.port_id_out,
                              pmt.cons(pmt.PMT_NIL,
                                       pmt.to_pmt(packet.astype(np.complex64))))
        self.version_dirty = True
        # Logging
        t1 = time.time()
        self.logger.debug("neural mod {} updated with {} bits in {} seconds".format(
                          self.uuid_str, vec.size, t1 - t0))
Example #16
    def handle_mod_feedback_neural(self, vec):
        t0 = time.time()
        self.mod_packet_cnt += 1
        self.mod_update_cnt += 1
        # Split packet into functional parts
        spy, hdr, new_echo, my_echo = self.split_packet_bits(vec)
        spy_ber = sum(spy != self.spy_master) * 1.0 / self.spy_length
        if hdr is not None:
            idxpre = hdr[0]
            idxecho = hdr[1]
            valid = hdr[2]
        else:
            valid = False
            idxpre = (1 << 16) - 1
            idxecho = (1 << 16) - 1
        # Do not perform updates while frozen
        if self.run_mode == "freeze" and spy_ber < self.spy_threshold and valid:
            try:
                # Save every round trip BER while frozen to measure average performance
                preamble = self.get_preamble_hist(idxecho, pop=True)
                print("{} neural offset {}".format(
                    self.uuid_str, self.mod_update_cnt - idxecho))
                ber = sum(
                    np.abs(preamble - my_echo)) * 1.0 / self.preamble.size
                with open("ber_echo_{}.csv".format(self.uuid_str), "a") as f:
                    f.write("{},{}\n".format(self.mod_update_cnt, ber))
            except KeyError as e:
                self.logger.info(
                    "Unable to calculate BER with stored index {}".format(
                        idxecho))
        elif spy_ber < self.spy_threshold and valid:  # run_mode == "train"
            try:
                labels_si_g = util_data.bits_to_integers(
                    my_echo, self.bits_per_symbol)
                labels_all = util_data.bits_to_integers(
                    np.concatenate([new_echo, my_echo]), self.bits_per_symbol)
                #p_actions = self.past_actions[0:self.preamble_si.shape[0]]  # actions
                ###DEBUG###
                p_actions = self.get_actions_hist(
                    idxecho, pop=True)[:self.preamble_si.size]
                print("{} neural offset {}".format(
                    self.uuid_str, self.mod_update_cnt - idxecho))
                preamble = self.get_preamble_hist(idxecho, pop=True)
                ###DEBUG###

                # Only use preamble from feedback, at least for now
                preamble_si = util_data.bits_to_integers(
                    preamble, self.bits_per_symbol)
                reward, _, _, loss = self.mod.update(
                    preamble_si=preamble_si,  # labels
                    actions=p_actions,
                    labels_si_g=labels_si_g)  # demodulator guesses
                self.version_dirty = False
                if self.mod_update_cnt % self.log_interval == 0:
                    print("Saving mod constellation @{}".format(
                        self.mod_update_cnt))
                    np.save(
                        "neural_mod_constellation_{:05d}_{}".format(
                            self.mod_update_cnt,
                            time.strftime('%Y%m%d_%H%M%S')),
                        self.mod.get_constellation())
                    ber = sum(preamble != my_echo) * 1.0 / preamble.size
                    with open("ber_echo_{}.csv".format(self.uuid_str),
                              "a") as f:
                        f.write("{},{}\n".format(self.mod_update_cnt, ber))
            except KeyError as e:
                self.logger.info(
                    "Unable to train modulator with stored index {}".format(
                        idxecho))
        # Send the echo with a new preamble to continue the training cycle
        if not self.use_shared:
            self.gen_preamble()
        classic, neural = self.assemble_packet_neural(
            new_echo, idxpre, valid=spy_ber < self.spy_threshold)

        cl_si = util_data.bits_to_integers(classic, self.bits_per_symbol)
        cl_symb = self.spy_mod.modulate(cl_si)

        nn_si = util_data.bits_to_integers(neural, self.bits_per_symbol)
        exploration = 'exploit' if self.run_mode == 'freeze' else 'explore'
        nn_symb = self.mod.modulate(nn_si, exploration)
        self.past_actions = nn_symb
        ###DEBUG###
        self.actions_hist[self.mod_update_cnt] = nn_symb
        ###DEBUG###

        packet = np.concatenate([cl_symb, nn_symb])
        if self.mod_update_cnt - idxecho == 1 or idxecho == 65535:
            self.message_port_pub(
                self.port_id_mod_out,
                pmt.cons(pmt.PMT_NIL, pmt.to_pmt(packet.astype(np.complex64))))
        else:
            print("Not sending new pkt because {} or {}".format(
                self.mod_update_cnt - idxecho, idxecho != 65535))
        # Logging
        t1 = time.time()
        self.logger.debug(
            "neural mod {} updated with {} bits in {} seconds".format(
                self.uuid_str, vec.size, t1 - t0))
Example #17
    def __init__(self, seed=189, hidden_layers=(64,), bits_per_symbol=2,
                 preamble=None, log_constellation_interval=10,
                 init_weights="",
                 lambda_p=0.0,
                 max_std=1e1,
                 min_std=1e-2,
                 initial_std=4e-2,
                 restrict_energy=3,
                 activation_fn_hidden='tanh',
                 # kernel_initializer_hidden={'normc_std': 1.0},
                 # bias_initializer_hidden=None,
                 activation_fn_output=None,
                 # kernel_initializer_output=normc_initializer(1.0),
                 # bias_initializer_output=tf.glorot_uniform_initializer(),
                 optimizer='adam',
                 lambda_prob=1e-9,
                 stepsize_mu=1e-2,
                 stepsize_sigma=5e-4
                 ):
        gr.basic_block.__init__(self,
                                name="modulator_neural",
                                in_sig=None,
                                out_sig=None)
        ModulatorNeural.__init__(self,
                                 seed=seed, hidden_layers=hidden_layers,
                                 bits_per_symbol=bits_per_symbol,
                                 lambda_p=lambda_p,
                                 max_std=max_std,
                                 min_std=min_std,
                                 initial_std=initial_std,
                                 restrict_energy=restrict_energy,
                                 activation_fn_hidden=activation_fn_hidden,
                                 # kernel_initializer_hidden=kernel_initializer_hidden,
                                 # bias_initializer_hidden=bias_initializer_hidden,
                                 activation_fn_output=activation_fn_output,
                                 # kernel_initializer_output=kernel_initializer_output,
                                 # bias_initializer_output=bias_initializer_output,
                                 optimizer=optimizer,
                                 lambda_prob=lambda_prob,
                                 stepsize_mu=stepsize_mu,
                                 stepsize_sigma=stepsize_sigma)
        if preamble is None:
            raise Exception("Preamble must be provided")
        if not isinstance(preamble, numpy.ndarray):
            preamble = numpy.array(preamble)
        assert len(preamble.shape) == 1, "Preamble must be a vector, not a matrix with a dimension of size 1"
        self.preamble = preamble
        self.preamble_si = bits_to_integers(self.preamble, self.bits_per_symbol)
        self.preamble_len = len(self.preamble)

        self.run_mode = "train"
        self.init_weights = init_weights
        if len(init_weights) > 0:
            self.model.load_state_dict(torch.load(init_weights))
            self.run_mode = "freeze"
        # Message ports
        self.port_id_in = pmt.intern("bits")
        self.port_id_out = pmt.intern("symbols")
        self.port_id_feedback = pmt.intern("feedback")
        self.port_id_ctrl = pmt.intern("control")
        self.message_port_register_in(self.port_id_in)
        self.message_port_register_in(self.port_id_feedback)
        self.message_port_register_in(self.port_id_ctrl)
        self.message_port_register_out(self.port_id_out)
        self.set_msg_handler(self.port_id_in, self.handle_packet)
        self.set_msg_handler(self.port_id_feedback, self.handle_feedback)
        self.set_msg_handler(self.port_id_ctrl, self.handle_control)
        # Meta variables
        self.packet_cnt = 0
        self.train_cnt = 0
        self.version = 0
        self.version_dirty = False
        self.past_actions = None

        self.log_constellation_interval = log_constellation_interval
        self.uuid = uuid.uuid4()
        self.uuid_str = str(self.uuid)[-6:]
        self.logger = gr.logger("log_debug")
        self.logger.set_level("DEBUG")
        self.logger.info("neural mod {}: {} bits per symbol".format(self.uuid_str, self.bits_per_symbol))
        if len(init_weights) > 0:
            self.logger.info("neural mod {}: initialized weights from {}".format(self.uuid_str, init_weights))
        with open("ber_echo_{}.csv".format(self.uuid_str), "w") as f:
            f.write("train_iter,BER\n")
Example #18
    def __init__(self,
                 npreamble,
                 shared_preamble,
                 bits_per_symb,
                 modtype,
                 demodtype,
                 mod_seed=128,
                 demod_seed=256,
                 mod_hidden_layers=[64],
                 demod_hidden_layers=[64],
                 mod_init_weights="",
                 demod_init_weights="",
                 log_interval=20,
                 spy_length=64,
                 spy_threshold=0.1,
                 max_amplitude=0.,
                 lambda_center=0.1,
                 _alias=""):
        assert modtype in echo_mod_demod.MODTYPES, "modtype must be one of {}".format(
            echo_mod_demod.MODTYPES)
        assert demodtype in echo_mod_demod.MODTYPES, "demodtype must be one of {}".format(
            echo_mod_demod.MODTYPES)
        gr.basic_block.__init__(self,
                                name="echo-{}-mod-{}-demod".format(
                                    modtype, demodtype),
                                in_sig=None,
                                out_sig=None)
        # Debug variables
        self.alias = _alias

        # Echo protocol variables
        self.bits_per_symbol = bits_per_symb
        self.npreamble = npreamble
        # None or empty array ==> private rolling preambles
        self.use_shared = (shared_preamble is not None
                           and len(shared_preamble) > 0)
        if self.use_shared and not isinstance(shared_preamble, np.ndarray):
            self.preamble = np.load(shared_preamble)[:npreamble]
        elif self.use_shared:
            self.preamble = shared_preamble
        else:
            self.preamble = np.random.randint(0, 2, self.npreamble)
        self.preamble_si = util_data.bits_to_integers(self.preamble,
                                                      bits_per_symb)
        np.save("preamble_si_0", self.preamble_si)
        ###DEBUG###
        self.preamble_hist = {0: self.preamble}
        self.actions_hist = {}
        ###DEBUG###

        self.modtype = modtype
        self.demodtype = demodtype
        self.run_mode = "train"
        if modtype == 'classic':
            self.mod = ModulatorClassic(self.bits_per_symbol,
                                        max_amplitude=max_amplitude)
        else:
            self.mod = ModulatorNeural(seed=mod_seed,
                                       hidden_layers=list(mod_hidden_layers),
                                       bits_per_symbol=bits_per_symb,
                                       lambda_p=0.0,
                                       max_std=100,
                                       min_std=1e-1,
                                       initial_std=2.0e-1,
                                       restrict_energy=1,
                                       activation_fn_hidden='tanh',
                                       activation_fn_output=None,
                                       optimizer='adam',
                                       lambda_prob=1e-10,
                                       stepsize_mu=1e-3,
                                       stepsize_sigma=1e-4,
                                       max_amplitude=max_amplitude,
                                       lambda_center=lambda_center)
            # manage model weights
            if len(mod_init_weights) > 0:
                self.mod.model.load_state_dict(torch.load(mod_init_weights))
                torch.save(self.mod.model.state_dict(),
                           "./mod-init-weights.mdl")
                self.run_mode = "freeze"
            # create spy modulator
            self.spy_mod = ModulatorClassic(self.bits_per_symbol,
                                            max_amplitude=max_amplitude)
            self.past_actions = None
        if demodtype == 'classic':
            self.demod = DemodulatorClassic(self.bits_per_symbol,
                                            max_amplitude=max_amplitude)
        else:
            self.demod = DemodulatorNeural(
                seed=demod_seed,
                hidden_layers=list(demod_hidden_layers),
                bits_per_symbol=bits_per_symb,
                activation_fn_hidden='tanh',
                activation_fn_output=None,
                optimizer='adam',
                loss_type='l2',
                stepsize_cross_entropy=1e-2,
                cross_entropy_weight=1.0)
            if len(demod_init_weights) > 0:
                self.demod.model.load_state_dict(
                    torch.load(demod_init_weights))
                torch.save(self.demod.model.state_dict(),
                           "./demod-init-weights.mdl")
                self.run_mode = "freeze"
            self.spy_demod = DemodulatorClassic(self.bits_per_symbol,
                                                max_amplitude=max_amplitude)

        # Message port setup and variables
        self.port_id_mod_in = pmt.intern("mod_in")
        self.port_id_mod_out = pmt.intern("mod_out")
        self.port_id_demod_in = pmt.intern("demod_in")
        self.port_id_demod_out = pmt.intern("demod_out")
        self.port_id_ctrl = pmt.intern("control")
        self.message_port_register_in(self.port_id_mod_in)
        self.message_port_register_in(self.port_id_demod_in)
        self.message_port_register_in(self.port_id_ctrl)
        self.message_port_register_out(self.port_id_mod_out)
        self.message_port_register_out(self.port_id_demod_out)
        self.set_msg_handler(self.port_id_mod_in, self.handle_mod)
        self.set_msg_handler(self.port_id_demod_in, self.handle_demod)
        self.set_msg_handler(self.port_id_ctrl, self.handle_control)
        self.mod_packet_cnt = 0
        self.demod_packet_cnt = 0
        self.mod_update_cnt = 0
        self.demod_update_cnt = 0

        # Packet header and spy variables
        self.reedsolomon = reedsolo.RSCodec(6)
        self.rs_length = 6 * 2 * 8  # 6 bytes data, 6 bytes parity, 8 bits per byte
        self.spy_length = spy_length
        assert self.spy_length % self.bits_per_symbol == 0
        self.spy_threshold = spy_threshold
        if self.use_shared:
            self.spy_master = self.preamble[:spy_length]
            assert self.spy_master.size >= spy_length, "shared preamble did not contain sufficient data to fill the spy field"
        else:
            try:
                self.spy_master = np.load("spy_master.npy")[:self.spy_length]
            except IOError as e:
                print(
                    "If using private preambles, you must provide a spy_master.npy file containing the spy field bits"
                )
                raise e
            assert self.spy_master.size >= spy_length, "spy_master.npy did not contain sufficient data to fill the spy field"

        # Logging stuff
        self.log_interval = log_interval
        self.uuid = uuid.uuid4()
        self.uuid_str = str(self.uuid)[-6:]
        self.logger = gr.logger("log_debug")
        self.logger.set_level("DEBUG")
        self.logger.info("mod-demod {}: {} bits per symbol".format(
            self.uuid_str, self.bits_per_symbol))
        with open("ber_{}.csv".format(self.uuid_str), "w") as f:
            f.write("train_iter,BER\n")
        with open("ber_echo_{}.csv".format(self.uuid_str), "w") as f:
            f.write("train_iter,BER\n")
        if len(mod_init_weights) > 0:
            self.logger.info(
                "neural mod {}: initialized weights from {}".format(
                    self.uuid_str, mod_init_weights))
        if len(demod_init_weights) > 0:
            self.logger.info(
                "neural mod {}: initialized weights from {}".format(
                    self.uuid_str, demod_init_weights))
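A minimal wiring sketch for the combined block above. The class name echo_mod_demod and its port names come from the constructor; the preamble length, the assumption that 'neural' is a member of echo_mod_demod.MODTYPES, and the message_debug stand-ins are illustrative only (in a real flowgraph mod_out feeds the radio and received PDUs arrive on mod_in/demod_in).

import numpy as np
from gnuradio import gr, blocks

tb = gr.top_block()
shared = np.random.randint(0, 2, 256)        # shared preamble bits (example length)
echo = echo_mod_demod(npreamble=shared.size,
                      shared_preamble=shared,
                      bits_per_symb=2,
                      modtype='neural',
                      demodtype='neural',
                      spy_length=64,
                      spy_threshold=0.1)

dbg = blocks.message_debug()                 # stand-in sink for outgoing PDUs
tb.msg_connect(echo, "mod_out", dbg, "store")
tb.msg_connect(echo, "demod_out", dbg, "store")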