Code Example #1
    def test_big_endian_two_tags_not_aligned(self):
        # data
        src_data = (0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1)
        offset = 2
        src_tag1 = gr.tag_utils.python_to_tag([
            offset,
            pmt.intern("BURST"),
            pmt.from_uint64(0),
            pmt.intern("test_simple_source")
        ])
        offset = 10
        src_tag2 = gr.tag_utils.python_to_tag([
            offset,
            pmt.intern("BURST"),
            pmt.from_uint64(0),
            pmt.intern("test_simple_source")
        ])
        expected_result = (0xab, 0xcd)

        # blocks
        src = blocks.vector_source_b(src_data, False, 1, [src_tag1, src_tag2])
        tbb = sandia_utils.tagged_bits_to_bytes("BURST", True, 0, 1)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, tbb)
        self.tb.connect(tbb, dst)

        # execute
        self.tb.run()
        result_data = dst.data()

        # assert
        #print("test big endian two tags not aligned got {}, expected {}".format(result_data, expected_result))
        self.assertEqual(expected_result, result_data)
Code Example #2
    def button_clicked(self):
        # Add metadata elements to PDU
        meta = pmt.make_dict()
        try:
            v_txt = str(self.textinputs[1].text().toUtf8())
            if len(v_txt) > 0:
                meta_dict = ast.literal_eval(v_txt)
                for key, val in meta_dict.items():
                    if type(val) == list or type(val) == tuple:
                        t = pmt.make_vector(len(val), pmt.from_uint64(0))
                        meta = pmt.dict_add(meta, pmt.intern(str(key)),
                                            pmt.to_tuple(t))
                    else:  # Store as uint64
                        if isinstance(val, float):
                            if val.is_integer():
                                meta = pmt.dict_add(meta, pmt.intern(str(key)),
                                                    pmt.from_uint64(int(val)))
                            else:
                                meta = pmt.dict_add(meta, pmt.intern(str(key)),
                                                    pmt.from_double(val))
                        else:
                            meta = pmt.dict_add(meta, pmt.intern(str(key)),
                                                pmt.from_uint64(int(val)))
        except ValueError as err:
            print('PDU Parser Error: ', err)
            pass

        # Generate payload data
        v_txt = str(self.textinputs[0].text())
        vec = self.data_types[str(self.input_types.currentText())](v_txt)

        # Publish message
        self.message_port_pub(
            pmt.intern("pdu_out"),
            pmt.cons(meta, pmt.to_pmt(numpy.array(vec, dtype=numpy.uint8))))
Code Example #3
    def test_001_all_header_fields(self):
        with open('/tmp/file.csv', 'w') as f:
            # write header
            f.write('field0(string), , field1(bool), field2(float),' +
                    'field3(long), field4(uint64), field5(double),' +
                    'field6(complex),field7,field8(time),field9(time_tuple)\n')

            # add some data
            f.write(
                'field0, empty, True, 1.0,1234567890,987654321, 2.5,1+2j,string,1.0,1.0,1,2,3,4,5\n'
            )

        # start reader/
        reader = csv_reader(fname='/tmp/file.csv',
                            has_header=True,
                            period=10,
                            start_delay=0,
                            repeat=False)

        # expected pdu
        metadata = pmt.dict_add(pmt.make_dict(), pmt.intern('field0'),
                                pmt.intern('field0'))
        metadata = pmt.dict_add(metadata, pmt.intern('field1'),
                                pmt.from_bool(True))
        metadata = pmt.dict_add(metadata, pmt.intern('field2'),
                                pmt.from_float(1.0))
        metadata = pmt.dict_add(metadata, pmt.intern('field3'),
                                pmt.from_long(1234567890))
        metadata = pmt.dict_add(metadata, pmt.intern('field4'),
                                pmt.from_uint64(987654321))
        metadata = pmt.dict_add(metadata, pmt.intern('field5'),
                                pmt.from_double(2.5))
        metadata = pmt.dict_add(metadata, pmt.intern('field6'),
                                pmt.from_complex(1.0 + 2j))
        metadata = pmt.dict_add(metadata, pmt.intern('field7'),
                                pmt.intern('string'))
        metadata = pmt.dict_add(
            metadata, pmt.intern('field8'),
            pmt.cons(pmt.from_uint64(1), pmt.from_double(0)))
        metadata = pmt.dict_add(
            metadata, pmt.intern('field9'),
            pmt.make_tuple(pmt.from_uint64(1), pmt.from_double(0)))

        data = pmt.init_u8vector(5, [1, 2, 3, 4, 5])
        expected = pmt.cons(metadata, data)

        # run
        self.tb.msg_connect((reader, 'out'), (self.debug, 'store'))
        self.tb.start()
        time.sleep(.5)
        self.tb.stop()
        self.tb.wait()

        got = self.debug.get_message(0)

        self.assertTrue(pmt.equal(expected, got))
Code Example #4
    def test_003_double_eob_rej_tt_update(self):
        self.tb = gr.top_block()
        start_time = 0.0
        sob_tag = gr.tag_utils.python_to_tag(
            (51, pmt.intern("SOB"), pmt.PMT_T, pmt.intern("src")))
        eob_tag = gr.tag_utils.python_to_tag(
            (51 + (8 * 11), pmt.intern("EOB"), pmt.PMT_T, pmt.intern("src")))
        time_tuple = pmt.make_tuple(pmt.from_uint64(4), pmt.from_double(0.125),
                                    pmt.from_uint64(10000000),
                                    pmt.from_double(4000000.0))
        time_tag = gr.tag_utils.python_to_tag(
            (360, pmt.intern("rx_time"), time_tuple, pmt.intern("src")))
        sob_tag2 = gr.tag_utils.python_to_tag(
            (400, pmt.intern("SOB"), pmt.PMT_T, pmt.intern("src")))
        eob_tag2e = gr.tag_utils.python_to_tag(
            (409, pmt.intern("EOB"), pmt.PMT_T, pmt.intern("src")))
        eob_tag2 = gr.tag_utils.python_to_tag(
            (416, pmt.intern("EOB"), pmt.PMT_T, pmt.intern("src")))
        vs = blocks.vector_source_s(
            range(500), False, 1,
            [sob_tag, eob_tag, time_tag, sob_tag2, eob_tag2e, eob_tag2])
        t2p = pdu_utils.tags_to_pdu_s(pmt.intern('SOB'), pmt.intern('EOB'),
                                      1024, 1000000, ([]), False, 0,
                                      start_time)
        t2p.set_eob_parameters(8, 0)
        dbg = blocks.message_debug()
        self.tb.connect(vs, t2p)
        self.tb.msg_connect((t2p, 'pdu_out'), (dbg, 'store'))
        expected_vec1 = pmt.init_s16vector((8 * 11), range(51, 51 + (8 * 11)))
        expected_vec2 = pmt.init_s16vector(16, list(range(400, 409)) + [0] * 7)
        expected_time1 = start_time + (51 / 1000000.0)
        expected_time2 = 4.125 + ((400 - 360) / 1000000.0)

        self.tb.run()

        self.assertEqual(dbg.num_messages(), 2)
        self.assertTrue(pmt.equal(pmt.cdr(dbg.get_message(0)), expected_vec1))
        self.assertTrue(pmt.equal(pmt.cdr(dbg.get_message(1)), expected_vec2))
        time_tuple1 = pmt.dict_ref(pmt.car(dbg.get_message(0)),
                                   pmt.intern("burst_time"), pmt.PMT_NIL)
        time_tuple2 = pmt.dict_ref(pmt.car(dbg.get_message(1)),
                                   pmt.intern("burst_time"), pmt.PMT_NIL)
        self.assertAlmostEqual(
            pmt.to_uint64(pmt.tuple_ref(time_tuple1, 0)) +
            pmt.to_double(pmt.tuple_ref(time_tuple1, 1)), expected_time1)
        self.assertAlmostEqual(
            pmt.to_uint64(pmt.tuple_ref(time_tuple2, 0)) +
            pmt.to_double(pmt.tuple_ref(time_tuple2, 1)), expected_time2)

        self.tb = None
Code Example #5
 def timemsg(self, abstime, fmt):
     if fmt == "sample":
         return pmt.from_uint64(int(abstime * self.rate))
     elif fmt in ["pair", "tuple"]:
         t_int = int(abstime)
         t_frac = abstime - t_int
         if t_frac > 1:
             t_int += 1
             t_frac -= 1.0
         if fmt == "pair":
             return pmt.cons(pmt.from_uint64(t_int),
                             pmt.from_double(t_frac))
         else:
             return pmt.make_tuple(pmt.from_uint64(t_int),
                                   pmt.from_double(t_frac))
Code Example #6
 def test_001_t (self):
     # set up fg
     self.tb.start()
     # Create N messages with an id field
     p = pmt.make_dict()
     num_msgs = 20
     for i in range(num_msgs):
       pc = pmt.dict_add(p,pmt.intern("id"), pmt.from_uint64(i))
       self.emitter.emit(pc)
     
     # Sleep for a little bit to let the messages finish propagating
     time.sleep(.05)
     self.tb.stop()
     self.tb.wait()
     msg_count = 0
     for i in range(self.num_paths):
       target = i
       msg_count += self.debug[i].num_messages()
       for m in range(self.debug[i].num_messages()):
         msg = self.debug[i].get_message(m)
         msg_id = pmt.to_uint64(pmt.dict_ref(msg, pmt.intern("id"), pmt.PMT_NIL))
         assert(msg_id == target and msg_id < num_msgs)
         target += self.num_paths
     
     assert(msg_count == num_msgs)
Code Example #7
    def _queue_tags(self, sample, tags):
        """Queue stream tags to be attached to data in the work function.

        In addition to the tags specified in the `tags` dictionary, this will
        add `rx_time` and `rx_rate` tags giving the sample time and rate.


        Parameters
        ----------

        sample : int
            Sample index for the sample to tag, given in the number of samples
            since the epoch (time_since_epoch*sample_rate).

        tags : dict
            Dictionary containing the tags to add with keys specifying the tag
            name. The value is cast as an appropriate pmt type, while the name
            will be turned into a pmt string in the work function.

        """
        # add to current queued tags for sample if applicable
        tag_dict = self._tag_queue.get(sample, {})
        if not tag_dict:
            # add time and rate tags
            time = sample/self._sample_rate
            tag_dict['rx_time'] = pmt.make_tuple(
                pmt.from_uint64(int(np.uint64(time))),
                pmt.from_double(float(time % 1)),
            )
            tag_dict['rx_rate'] = self._sample_rate_pmt
        for k, v in tags.items():
            tag_dict[k] = pmt.to_pmt(v)
        self._tag_queue[sample] = tag_dict
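The `rx_time` tuple built above follows the usual GNU Radio convention of (uint64 whole seconds, double fractional seconds). Below is a minimal round-trip sketch using only the stock `pmt` module; the variable names and the sample value are illustrative and are not part of the block above.

import pmt

t = 1234.567
rx_time = pmt.make_tuple(pmt.from_uint64(int(t)),      # whole seconds
                         pmt.from_double(t - int(t)))  # fractional seconds

# read the fields back out of the tuple
secs = pmt.to_uint64(pmt.tuple_ref(rx_time, 0))
frac = pmt.to_double(pmt.tuple_ref(rx_time, 1))
assert abs((secs + frac) - t) < 1e-9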
Code Example #8
    def __init__(self,
                 satellite_id,
                 stream_id,
                 key_path,
                 root_cert_path="",
                 api_url="api.stellarstation.com"):
        """
        :param satellite_id: Satellite ID to connect to
        :param stream_id: Stream ID to connect to. Can be an empty string.
        :param key_path: Path to JSON API Key
        :param root_cert_path: Path to root certificate for development server.
        Leave blank when connecting to the real API
        :param api_url: API URL to connect to.
        """
        gr.hier_block2.__init__(
            self,
            "iq_source",
            gr.io_signature(0, 0, 0),  # Input signature
            gr.io_signature(1, 1, gr.sizeof_gr_complex))  # Output signature

        # Define blocks and connect them
        api_source = stellarstation_swig.api_source(satellite_id, stream_id,
                                                    key_path, root_cert_path,
                                                    api_url)
        pdu_filter = blocks.pdu_filter(
            pmt.intern("FRAMING"),
            pmt.from_uint64(2))  # Parse only packets with IQ Framing
        pdu_to_stream = stellarstation_swig.pdu_to_stream(gr.sizeof_gr_complex)

        self.msg_connect((api_source, 'out'), (pdu_filter, 'pdus'))
        self.msg_connect((pdu_filter, 'pdus'), (pdu_to_stream, 'pdu'))
        self.connect((pdu_to_stream, 0), (self, 0))
Code Example #9
    def test_008_time_tuple(self):
        emitter = pdu_utils.message_emitter()
        writer = csv_writer('/tmp/file.csv',
                            True,
                            'time(time_tuple)',
                            'uint8',
                            precision=4)

        # generate time pair pdu
        time_tuple = pmt.make_tuple(pmt.from_uint64(1), pmt.from_double(0.0))
        metadata = pmt.dict_add(pmt.make_dict(), pmt.intern('time'),
                                time_tuple)
        expected = pmt.cons(metadata, pmt.PMT_NIL)

        # run
        tb = gr.top_block()
        tb.msg_connect((emitter, 'msg'), (writer, 'in'))
        tb.start()
        emitter.emit(expected)
        time.sleep(.5)
        tb.stop()
        tb.wait()

        # read in csv
        self.assertTrue(
            self.check_file('/tmp/file.csv',
                            expected,
                            data_type='uint8',
                            has_header=True))
Code Example #10
    def test_pad_right(self):
        # data
        src_data = (0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1)
        offset = 2
        src_tag = gr.tag_utils.python_to_tag([
            offset,
            pmt.intern("BURST"),
            pmt.from_uint64(0),
            pmt.intern("test_simple_source")
        ])
        expected_result = (0x40, 0xab, 0xcd)

        # blocks
        src = blocks.vector_source_b(src_data, False, 1, [src_tag])
        tbb = sandia_utils.tagged_bits_to_bytes("BURST", False,
                                                sandia_utils.PAD_RIGHT, 1)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, tbb)
        self.tb.connect(tbb, dst)

        # execute
        self.tb.run()
        result_data = dst.data()

        # assert
        #print("test pad right got {}, expected {}".format(result_data, expected_result))
        self.assertEqual(expected_result, result_data)
Code Example #11
    def test_one_tag_not_byte_aligned(self):
        # data
        src_data = (0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1)
        offset = 2
        src_tag = gr.tag_utils.python_to_tag([
            offset,
            pmt.intern("BURST"),
            pmt.from_uint64(0),
            pmt.intern("test_simple_source")
        ])
        expected_result = (0xab, 0xcd)

        # blocks
        src = blocks.vector_source_b(src_data, False, 1, [src_tag])
        tbb = sandia_utils.tagged_bits_to_bytes("BURST", False, 0, 1)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, tbb)
        self.tb.connect(tbb, dst)

        # execute
        # This test will run forever, so we need to stop it manually
        self.tb.start()
        time.sleep(.005)
        self.tb.stop()
        result_data = dst.data()

        # assert
        #print("test one tag not byte aligned got {}, expected {}".format(result_data, expected_result))
        self.assertEqual(expected_result, result_data)
Code Example #12
    def test_big_vector_one_tag_not_aligned(self):
        # data
        src_data = (0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0,
                    1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1)
        offset = 2
        src_tag = gr.tag_utils.python_to_tag([
            offset,
            pmt.intern("BURST"),
            pmt.from_uint64(0),
            pmt.intern("test_simple_source")
        ])
        expected_result = ()  # vector is too big, should never get an output

        # blocks
        src = blocks.vector_source_b(src_data, False, 1, [src_tag])
        v_len = 8
        tbb = sandia_utils.tagged_bits_to_bytes("BURST", False, 0, v_len)
        dst = blocks.vector_sink_b(v_len)
        self.tb.connect(src, tbb)
        self.tb.connect(tbb, dst)

        # execute
        self.tb.start()
        time.sleep(.005)
        self.tb.stop()
        result_data = dst.data()

        # assert
        #print("test big vector output two tags not aligned got {}, expected {}".format(result_data, expected_result))
        self.assertEqual(expected_result, result_data)
Code Example #13
    def test1_cleanup(self):
      '''
      All files should be deleted by the monitor when complete
      '''
      # open a dummy file
      fname = '/tmp/foo.txt'
      if os.path.exists(fname):
        os.remove(fname)
      Path(fname).touch()

      # PMT
      p = pmt.dict_add(pmt.make_dict(),pmt.intern('rx_freq'), pmt.from_double(915e6))
      p = pmt.dict_add(p,pmt.intern('rx_rate'),pmt.from_double(30.72e6))
      p = pmt.dict_add(p,pmt.intern('rx_time'),pmt.make_tuple(
        pmt.from_uint64(0),pmt.from_double(0)))
      p = pmt.dict_add(p,pmt.intern('fname'),pmt.intern(fname))

      # blocks
      emitter = pdu_utils.message_emitter(pmt.PMT_NIL)
      debug = blocks.message_debug()
      monitor = file_monitor(10,'/tmp')

      # connect
      self.tb.msg_connect((emitter,'msg'),(monitor,'pdu'))

      # set up fg - terrible hacky way of doing this until we get
      # pdu utility message emitter working
      self.tb.start()
      emitter.emit(p)
      time.sleep(.05)
      self.tb.stop()
      self.tb.wait()

      # check data
      self.assertTrue(not os.path.exists(fname))
Code Example #14
    def test_001_t (self):
        #  set up fg
        fft_len = 256
        cp_len = 32
        samp_rate = 32000
        data = np.random.choice([-1, 1], [100, fft_len])

        timefreq = np.fft.ifft(data, axis=0)

        #add cp
        timefreq = np.hstack((timefreq[:, -cp_len:], timefreq))

        # msg (only 4th and 5th tuples are needed)
        id1 = pmt.make_tuple(pmt.intern("Signal"), pmt.from_uint64(0))
        name = pmt.make_tuple(pmt.intern("OFDM"), pmt.from_float(1.0));
        id2 = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(0.0))
        id3 = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(0.0))
        id4 = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(256))
        id5 = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(32))
        msg = pmt.make_tuple(id1, name, id2, id3, id4, id5)

        tx = np.reshape(timefreq, (1, -1))

        # GR time!
        src = blocks.vector_source_c(tx[0].tolist(), True, 1, [])
        freq_offset = analog.sig_source_c(1, analog.GR_SIN_WAVE,
                                          50.0/samp_rate, 1.0, 0.0)
        mixer = blocks.multiply_cc()
        sync = inspector.ofdm_synchronizer_cc(4096)
        dst = blocks.vector_sink_c()
        dst2 = blocks.vector_sink_c()
        msg_src = blocks.message_strobe(msg, 0)

        # connect
        self.tb.connect(src, (mixer, 0))
        self.tb.connect(freq_offset, (mixer, 1))
        self.tb.connect(mixer, sync)
        self.tb.msg_connect((msg_src, 'strobe'), (sync, 'ofdm_in'))
        self.tb.connect(sync, dst)
        self.tb.connect(src, dst2)

        self.tb.start()
        time.sleep(0.1)
        self.tb.stop()
        self.tb.wait()

        # check data
        output = dst.data()
        expect = dst2.data()

        # block outputs 0j until it has enough OFDM symbols to perform estimations
        k = next(k for k in range(len(output)) if output[k] != 0j)

        # use 10,000 samples for comparison since block fails sometimes
        # for one work function
        output = output[k:k+10000]
        expect = expect[k:k+10000]

        self.assertComplexTuplesAlmostEqual2(expect, output, abs_eps = 0.001, rel_eps=10)
Code Example #15
 def makeTimeDict(self, timeval):
     pmtDict = pmt.make_dict()
     intt = int(timeval)
     fract = timeval - intt
     pmtDict = pmt.dict_add(
         pmtDict, pmt.intern("rx_time"),
         pmt.make_tuple(pmt.from_uint64(intt), pmt.from_double(fract)))
     return pmtDict
Code Example #16
File: burst_scheduler.py  Project: luwangg/gr-bitcoin
 def sched_pdu(self, pdu):
     # always schedule on a multiple of slot_length (because we can?)
     sched_time = int((self.nproduced_val + self.min_gap) /
                      self.slot_length) * self.slot_length
     pdu = pdu_arg_add(pdu, pmt.intern("event_time"),
                       pmt.from_uint64(sched_time))
     self.nproduced_val = self.nproduced_val + len(
         pmt.c32vector_elements(pmt.cdr(pdu)))
     self.message_port_pub(pmt.intern("scheduled_pdu"), pdu)
Code Example #17
 def _post_phase_inc_cmd(self, new_phase_inc, offset=None):
     """Post phase increment update command to the rotator block"""
     cmd = pmt.make_dict()
     cmd = pmt.dict_add(cmd, pmt.intern("inc"),
                        pmt.from_double(new_phase_inc))
     if (offset is not None):
         cmd = pmt.dict_add(cmd, pmt.intern("offset"),
                            pmt.from_uint64(offset))
     self.rotator_cc.insert_tail(pmt.to_pmt("cmd"), cmd)
Code Example #18
    def test_rx_time_pad_left(self):
        # data
        src_data = (0, ) * 8 * 100
        offsets = [16]
        for step in range(17, 24):
            offsets.append(offsets[-1] + step)
        print("offsets = {}".format(offsets))

        # generate tag list
        rx_time = pmt.make_tuple(pmt.from_uint64(1), pmt.from_double(.234))
        time_tag = gr.tag_utils.python_to_tag(
            [1, pmt.intern("rx_time"), rx_time,
             pmt.intern("time_stamper")])
        tags = [time_tag]
        for offset in offsets:
            tags.append(
                gr.tag_utils.python_to_tag([
                    offset,
                    pmt.intern("BURST"),
                    pmt.from_uint64(0),
                    pmt.intern("test_simple_source")
                ]))

        expected_result = (0x0, ) * 5

        # blocks
        src = blocks.vector_source_b(src_data, False, 1, tags)
        tbb = sandia_utils.tagged_bits_to_bytes("BURST", False, 2, 1)
        dst = blocks.vector_sink_b()
        tag_dbg = blocks.tag_debug(gr.sizeof_char * 1, '', "")
        tag_dbg.set_display(True)

        self.tb.connect(src, tbb)
        self.tb.connect(tbb, tag_dbg)
        self.tb.connect(tbb, dst)

        # execute
        self.tb.run()
        result_data = dst.data()

        # assert - should get both a BURST and burst_time tag at offsets = [2,5,8,11,14,17,20,23]
        print("test rx_time got {}, expected {}".format(
            result_data, expected_result))
Code Example #19
 def makeDict(self, **kwargs):
     pmtDict = pmt.make_dict()
     if "freq" in kwargs:
         pmtDict = pmt.dict_add(pmtDict, pmt.intern("rx_freq"), pmt.from_double(kwargs["freq"]))
     if "rate" in kwargs:
         pmtDict = pmt.dict_add(pmtDict, pmt.intern("rx_rate"), pmt.from_double(kwargs["rate"]))
     if "epoch_int" in kwargs and "epoch_frac" in kwargs:
         pmtDict = pmt.dict_add(pmtDict, pmt.intern("rx_time"),
                                pmt.make_tuple(pmt.from_uint64(kwargs["epoch_int"]), pmt.from_double(kwargs["epoch_frac"])))
     return pmtDict
Code Example #20
  def publish_result(self):
    meta = pmt.make_dict()
    meta = pmt.dict_add(meta, self.pmt_spectral_image, pmt.intern(f"spectral_image_{self.absolute_image_count}"))
    self.absolute_image_count += 1

    # publish event without the image so we know it's coming
    meta = pmt.dict_add(meta, self.pmt_type, self.pmt_detect)
    self.message_port_pub(self.pmt_pdu_out, pmt.cons(meta, pmt.init_u8vector(0,[])))

    # publish the event with the image
    meta = pmt.dict_add(meta, self.pmt_type, self.pmt_detect_image)
    nrows, ncols, b = self.build_jpeg(self.image_data, self.bursts)
    meta = pmt.dict_add(meta, self.pmt_x_length, pmt.from_uint64(nrows))
    meta = pmt.dict_add(meta, self.pmt_y_length, pmt.from_uint64(ncols))
    print(f"burst_tag_debug going to send a jpeg: #{self.absolute_image_count}")
    self.message_port_pub(self.pmt_pdu_out, pmt.cons(meta, pmt.init_u8vector(len(b),b)))
    
    # reset flags
    self.reset()
Code Example #21
File: burst_scheduler2.py  Project: icopavan/gr-burst
    def sched_pdu(self, pdu):
        sched_time = (self.nproduced_val + 10000); # pick a time in the future
        sched_time = sched_time - sched_time%5000; # round to nearest slot
        pdu = pdulib.pdu_arg_add(pdu, pmt.intern("event_time"), pmt.from_uint64(sched_time));
        pdu = pdulib.pdu_arg_add(pdu, pmt.intern("interp"), pmt.from_long(8));

        hop_offset = 10000;
        offset = (random.random()*self.fs-self.fs/2);
        offset = round(offset/hop_offset)*hop_offset; # quantize to nearest 1k offset
        pdu = pdulib.pdu_arg_add(pdu, pmt.intern("freq_offset"), pmt.from_double(offset));
        self.message_port_pub(pmt.intern("sched_pdu"), pdu);
Code Example #22
 def send_frame(self, timestamp, center_frequency, confidence):
     msg_meta = pmt.dict_add(pmt.make_dict(), pmt.intern('timestamp'),
                             pmt.from_uint64(timestamp))
     msg_meta = pmt.dict_add(msg_meta, pmt.intern('center_frequency'),
                             pmt.from_float(center_frequency))
     msg_meta = pmt.dict_add(msg_meta, pmt.intern('confidence'),
                             pmt.from_long(confidence))
     msg = pmt.cons(msg_meta, pmt.init_u8vector(2, range(2)))
     if timestamp > self.max_timestamp:
         self.max_timestamp = timestamp
     self.sorter.to_basic_block()._post(pmt.intern("pdus"), msg)
Code Example #23
 def generate(self):
     sleep(self.sleep_before)
     for i in xrange(0, 10):
         tx_time = pmt.make_tuple(
             pmt.from_uint64(int(self.base_time + i * self.burst_spacing)),
             pmt.from_double((self.base_time + i * self.burst_spacing) % 1))
         pdu_header = pmt.dict_add(pmt.make_dict(), pmt.intern('tx_time'),
                                   tx_time)
         pdu_header = pmt.PMT_NIL
         burst = pmt.cons(pdu_header, self.samples)
         self.message_port_pub(pmt.intern("bursts"), burst)
         sleep(self.sleep_between)
Code Example #24
 def setUp (self):
     self.tb = gr.top_block ()
     self.emitter = pdu_utils.message_emitter()
     self.emd = pdu_utils.extract_metadata(pmt.intern("test_key"), 1, 0)
     self.debug = blocks.message_debug()
     self.tb.msg_connect((self.emitter, 'msg'), (self.emd, 'dict'))
     self.tb.msg_connect((self.emd, 'msg'), (self.debug, 'store'))
     self.base_dict = pmt.make_dict()
     self.base_dict = pmt.dict_add(self.base_dict, pmt.intern("key1"), pmt.intern("value1"))
     self.base_dict = pmt.dict_add(self.base_dict, pmt.intern("key2"), pmt.intern("value2"))
     self.base_dict = pmt.dict_add(self.base_dict, pmt.intern("uint64_key"), pmt.from_uint64(1234567))
     self.base_dict = pmt.dict_add(self.base_dict, pmt.intern("double_key"), pmt.from_double(1.234567))
Code Example #25
 def work(self, input_items, output_items):
     number_of_consumed_items = len(output_items[0])
     for index in range(number_of_consumed_items):
         if (self.nitems_written(0) + index) % self._bytes_p_s == 0:
             for key in range(len(self.keys)):
                 self.add_item_tag(0,
                                   self.nitems_written(0) + index,
                                   self.keys[key], self.values[key])
             self.seqNum = (self.seqNum + 1) % BYTE
             self.values[self.snInd] = pmt.from_uint64(self.seqNum)
     output_items[0][:] = input_items[0]
     return number_of_consumed_items
Code Example #26
    def test_002_c_80 (self):
        self.source = blocks.vector_source_c(range(0,32*3), False, 1, [])
        self.ts_pdu = pdu_utils.take_skip_to_pdu_c(80, 32)
        self.debug = blocks.message_debug()
        self.tb.connect((self.source, 0), (self.ts_pdu, 0))
        self.tb.msg_connect((self.ts_pdu, 'pdu_out'), (self.debug, 'store'))

        dic = pmt.dict_add(pmt.make_dict(), pmt.intern("pdu_num"), pmt.from_uint64(0))
        vec = pmt.init_c32vector(80, range(0,80))
        expected = pmt.cons(dic,vec)
        self.tb.run ()
        actual = self.debug.get_message(0)
        self.assertEqualPDU(actual, expected)
Code Example #27
    def test_003_s_2_11_7 (self):
        self.source = blocks.vector_source_s(range(0,32*3), False, 1, [])
        self.ts_pdu = pdu_utils.take_skip_to_pdu_s(2, 11)
        self.debug = blocks.message_debug()
        self.tb.connect((self.source, 0), (self.ts_pdu, 0))
        self.tb.msg_connect((self.ts_pdu, 'pdu_out'), (self.debug, 'store'))

        dic = pmt.dict_add(pmt.make_dict(), pmt.intern("pdu_num"), pmt.from_uint64(7))
        vec = pmt.init_s16vector(2, list(range(91,93)))
        expected = pmt.cons(dic,vec)
        self.tb.run ()
        actual = self.debug.get_message(7)
        self.assertEqualPDU(actual, expected)
Code Example #28
    def test_004_b_512 (self):
        self.source = blocks.vector_source_b(list(range(0,256))*4, False, 1, [])
        self.ts_pdu = pdu_utils.take_skip_to_pdu_b(512,1)
        self.debug = blocks.message_debug()
        self.tb.connect((self.source, 0), (self.ts_pdu, 0))
        self.tb.msg_connect((self.ts_pdu, 'pdu_out'), (self.debug, 'store'))

        dic = pmt.dict_add(pmt.make_dict(), pmt.intern("pdu_num"), pmt.from_uint64(0))
        vec = pmt.init_u8vector(512, list(range(0,256))*2)
        expected = pmt.cons(dic,vec)
        self.tb.run ()
        actual = self.debug.get_message(0)
        self.assertEqualPDU(actual, expected)
Code Example #29
    def process_txtime_of_burst(self, msg):
        burst_with_header = pmt.to_python(pmt.cdr(msg))
        fn = burst_with_header[
            11] + burst_with_header[10] * 2**8 + burst_with_header[
                9] * 2**16 + burst_with_header[8] * 2**24
        ts_num = burst_with_header[3]
        if self.fn_ref is not None:
            fn_delta, txtime = fn_time_delta(self.fn_ref, self.time_ref, fn,
                                             self.time_hint, ts_num)
            txtime_corrected = txtime - self.delay_correction
            txtime_final = txtime_corrected - self.timing_advance

            txtime_secs = int(txtime_final)
            txtime_fracs = txtime_final - int(txtime_final)
            #print "txtime_secs",txtime_secs,"txtime_fracs",txtime_fracs
            tags_dict = pmt.dict_add(
                pmt.make_dict(), pmt.intern("tx_time"),
                pmt.make_tuple(pmt.from_uint64(txtime_secs),
                               pmt.from_double(txtime_fracs)))
            tags_dict = pmt.dict_add(tags_dict, pmt.intern("fn"),
                                     pmt.from_uint64(fn))
            new_msg = pmt.cons(tags_dict, pmt.cdr(msg))
            self.message_port_pub(pmt.intern("bursts"), new_msg)
Code Example #30
File: qa_receiver_cc.py  Project: jdemel/gr-gfdm
def create_frame(config, tag_key):
    symbols = get_random_qpsk(config.timeslots * config.active_subcarriers)
    d_block = modulate_mapped_gfdm_block(symbols,
                                         config.timeslots,
                                         config.subcarriers,
                                         config.active_subcarriers,
                                         2,
                                         0.2,
                                         dc_free=True)
    preamble = config.full_preambles[0]
    frame = add_cyclic_starfix(d_block, config.cp_len, config.cs_len)
    frame = np.concatenate((preamble, frame))

    tag = gr.tag_t()
    tag.key = pmt.string_to_symbol(tag_key)
    d = pmt.make_dict()
    d = pmt.dict_add(d, pmt.mp("xcorr_idx"), pmt.from_uint64(42))
    d = pmt.dict_add(d, pmt.mp("xcorr_offset"), pmt.from_uint64(4711))
    d = pmt.dict_add(d, pmt.mp("sc_rot"), pmt.from_complex(1.0 + 0.0j))
    # tag.offset = data.size + cp_len
    tag.srcid = pmt.string_to_symbol("qa")
    tag.value = d
    return frame, symbols, tag
Code Example #31
    def sched_pdu(self, pdu):
        sched_time = (self.nproduced_val + 10000)
        # pick a time in the future
        sched_time = sched_time - sched_time % 5000
        # round to nearest slot
        pdu = pdulib.pdu_arg_add(pdu, pmt.intern("event_time"),
                                 pmt.from_uint64(sched_time))
        pdu = pdulib.pdu_arg_add(pdu, pmt.intern("interp"), pmt.from_long(8))

        hop_offset = 10000
        offset = (random.random() * self.fs - self.fs / 2)
        offset = round(offset / hop_offset) * hop_offset
        # quantize to nearest 1k offset
        pdu = pdulib.pdu_arg_add(pdu, pmt.intern("freq_offset"),
                                 pmt.from_double(offset))
        self.message_port_pub(pmt.intern("sched_pdu"), pdu)
Code Example #32
    def test_004_uint64_scalea (self):
        self.emd.set_key(pmt.intern("uint64_key"))
        self.emd.set_scale(2)
        self.emd.set_offset(0)

        in_msg = self.base_dict
        expected_msg = pmt.cons(pmt.intern("uint64_key"), pmt.from_uint64(2469134))

        self.tb.start()
        time.sleep(.001)
        self.emitter.emit(in_msg)
        time.sleep(.01)
        self.tb.stop()
        self.tb.wait()

        self.assertTrue(pmt.equal(self.debug.get_message(0), expected_msg))
Code Example #33
    def sched_pdu(self, pdu):
        sched_time = self.nproduced_val + 10000
        # pick a time in the future
        if sched_time < self.sched_barrier:
            sched_time = self.sched_barrier
            print "delaying packet to sched barrier"

        sched_time = sched_time - sched_time % 1000
        # round to nearest slot
        event_length = pmt.length(pmt.cdr(pdu))
        # event_length = pmt.to_long(pmt.dict_ref(pmt.car(pdu), pmt.intern("event_length"), pmt.PMT_NIL));
        self.sched_barrier = sched_time + event_length + 1000
        print "SCHED_EVENT: time=%d, len=%d  " % (sched_time, event_length)

        pdu = pdulib.pdu_arg_add(pdu, pmt.intern("event_time"), pmt.from_uint64(sched_time))
        self.message_port_pub(pmt.intern("sched_pdu"), pdu)
Code Example #34
File: mkheader.py  Project: garverp/recipes
def update_timestamp(hdr,seg_size):
    if pmt.dict_has_key(hdr, pmt.string_to_symbol("rx_time")):
        r = pmt.dict_ref(hdr, pmt.string_to_symbol("rx_time"), pmt.PMT_NIL)
        secs = pmt.tuple_ref(r, 0)
        fracs = pmt.tuple_ref(r, 1)
        secs = float(pmt.to_uint64(secs))
        fracs = pmt.to_double(fracs)
        t = secs + fracs
    else:
        sys.stderr.write("Could not find key 'rx_time': \
                invalid or corrupt data file.\n")
        sys.exit(1)
    new_hdr = pmt.dict_delete(hdr, pmt.intern("rx_time"))
    if pmt.dict_has_key(hdr, pmt.intern("rx_rate")):
        r = pmt.dict_ref(hdr, pmt.intern("rx_rate"), pmt.PMT_NIL)
        rate = pmt.to_double(r)
        new_t = t + float(seg_size)/rate
        new_secs = long(new_t)
        new_fracs = new_t - new_secs
        time_val = pmt.make_tuple(pmt.from_uint64(new_secs),
                             pmt.from_double(new_fracs))
        new_hdr = pmt.dict_add(new_hdr, pmt.intern("rx_time"), time_val)
        return new_hdr
Code Example #35
File: burst_scheduler.py  Project: osh/gr-bitcoin
 def sched_pdu(self, pdu):
     # always schedule on a multiple of slot_length (because we can?)
     sched_time = int((self.nproduced_val + self.min_gap)/self.slot_length)*self.slot_length;
     pdu = pdu_arg_add(pdu, pmt.intern("event_time"), pmt.from_uint64(sched_time));
     self.nproduced_val = self.nproduced_val + len(pmt.c32vector_elements(pmt.cdr(pdu)));
     self.message_port_pub(pmt.intern("scheduled_pdu"), pdu);
Code Example #36
File: mkheader.py  Project: garverp/recipes
def make_header(options, filename):
    extras_present = False
    if options.freq is not None:
        extras_present = True
    # Open the file and make the header
    hdr_filename = filename + '.hdr'
    hdr_file = open(hdr_filename, 'wb')
    header = pmt.make_dict()
    # Fill in header vals
    # TODO - Read this from blocks.METADATA_VERSION
    ver_val = pmt.from_long(long(0))
    rate_val = pmt.from_double(options.sample_rate)
    time_val = pmt.make_tuple(pmt.from_uint64(options.time_sec),
                             pmt.from_double(options.time_fsec))
    ft_to_sz = parse_file_metadata.ftype_to_size
    # Map shortname to properties
    enum_type = SNAME_TO_ENUM[options.format]
    type_props = SNAME_DEFS[enum_type]
    size_val = pmt.from_long(type_props[0])
    cplx_val = pmt.from_bool(type_props[1])
    type_val = pmt.from_long(type_props[2])
    fmt = type_props[2]
    file_samp_len = long(options.length)
    seg_size = long(options.seg_size)
    bytes_val = pmt.from_uint64(long(seg_size*ft_to_sz[fmt]))
    # Set header vals
    header = pmt.dict_add(header, pmt.intern("version"), ver_val)
    header = pmt.dict_add(header, pmt.intern("size"), size_val)
    header = pmt.dict_add(header, pmt.intern("type"), type_val)
    header = pmt.dict_add(header, pmt.intern("cplx"), cplx_val)
    header = pmt.dict_add(header, pmt.intern("rx_time"), time_val)
    header = pmt.dict_add(header, pmt.intern("rx_rate"), rate_val)
    header = pmt.dict_add(header, pmt.intern("bytes"), bytes_val)

    if extras_present:
        freq_key = pmt.intern("rx_freq")
        freq_val = pmt.from_double(options.freq)
        extras = pmt.make_dict()
        extras = pmt.dict_add(extras, freq_key, freq_val)
        extras_str = pmt.serialize_str(extras)
        start_val = pmt.from_uint64(blocks.METADATA_HEADER_SIZE
                + len(extras_str))
    else:
        start_val = pmt.from_uint64(blocks.METADATA_HEADER_SIZE)
    header = pmt.dict_add(header, pmt.intern("strt"), start_val)
    num_segments = file_samp_len/seg_size
    if options.verbose:
        print "Wrote %d headers to: %s (Version %d)" % (num_segments+1,
                hdr_filename,pmt.to_long(ver_val))
    for x in range(0,num_segments,1):
        # Serialize and write out file
        if extras_present:
            header_str = pmt.serialize_str(header) + extras_str
        else:
            header_str = pmt.serialize_str(header)
        hdr_file.write(header_str)
        # Update header based on sample rate and segment size
        header = update_timestamp(header,seg_size)
    
    # Last header is special b/c file size is probably not mult. of seg_size
    header = pmt.dict_delete(header,pmt.intern("bytes"))
    bytes_remaining = ft_to_sz[fmt]*(file_samp_len - num_segments*long(seg_size))
    bytes_val = pmt.from_uint64(bytes_remaining)
    header = pmt.dict_add(header,pmt.intern("bytes"),bytes_val)
    # Serialize and write out file
    if extras_present:
        header_str = pmt.serialize_str(header) + extras_str
    else:
        header_str = pmt.serialize_str(header)
    hdr_file.write(header_str)
    hdr_file.close()
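Each header written above is just a serialized PMT dictionary, so it can be parsed back with the matching deserializer. Below is a minimal round-trip sketch using only the stock pmt module; the "bytes" value of 4096 is an arbitrary placeholder, not taken from the code above.

import pmt

hdr = pmt.dict_add(pmt.make_dict(), pmt.intern("bytes"), pmt.from_uint64(4096))
hdr_str = pmt.serialize_str(hdr)        # same call make_header() uses before writing
hdr_back = pmt.deserialize_str(hdr_str)  # what a reader recovers from the .hdr file
nbytes = pmt.to_uint64(pmt.dict_ref(hdr_back, pmt.intern("bytes"), pmt.PMT_NIL))
assert nbytes == 4096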
Code Example #37
File: fileman.py  Project: garverp/recipes
def propagate_headers(options, args):
    infile = args[0]
    outfile = args[1]
    infile_hdr = infile + ".hdr"
    outfile_hdr = outfile + ".hdr"
    sample_cnt_end = 0
    sample_offset = long(options.start)
    # Open input header
    try:
        handle_in = open(infile_hdr, "rb")
    except IOError:
        sys.stderr.write("Unable to open input file header\n")
        sys.exit(1)
    # Open output header
    try:
        handle_out = open(outfile_hdr, "wb")
    except IOError:
        sys.stderr.write("Unable to open output file header\n")
        sys.exit(1)

    # Read first header separately to get file type
    hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
    info_in = parse_file_metadata.parse_header(hdr_in, False)
    sample_cnt_end += info_in["nitems"]
    # Parse file type - ensure support for it
    shortname_intype = find_shortname(info_in["cplx"], info_in["type"], info_in["size"])
    if shortname_intype == SNAME_TO_ENUM["unknown"]:
        sys.stderr.write("Unsupported data type\n")
        sys.exit(1)
    if options.output_type == "unknown":
        shortname_outtype = shortname_intype
    else:
        shortname_outtype = SNAME_TO_ENUM[options.output_type]

    # Calc sample_len from file size if not specified
    if options.nsamples is not None:
        sample_len = long(options.nsamples)
    else:
        sample_len = os.path.getsize(infile) / SNAME_DEFS[shortname_intype][0]
    final_index = sample_offset + sample_len

    # Search input headers until we find the correct one
    while sample_cnt_end <= sample_offset:
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in, False)
        sample_cnt_end += info_in["nitems"]
    time_in = info_in["rx_time"]
    # Starting sample of current segment
    sample_cnt_start = sample_cnt_end - info_in["nitems"]
    # Interpolate new timestamp
    delta = sample_offset - sample_cnt_start
    new_ts = time_in + delta / info_in["rx_rate"]
    # Calc new segment size (samples)
    if sample_cnt_end > final_index:
        first_seg_len = final_index - sample_offset
    else:
        first_seg_len = sample_cnt_end - sample_offset

    # Write the first output header
    hdr_out = hdr_in
    new_secs = long(new_ts)
    new_fracs = new_ts - new_secs
    time_val = pmt.make_tuple(pmt.from_uint64(new_secs), pmt.from_double(new_fracs))
    size_val = pmt.from_long(SNAME_DEFS[shortname_outtype][0])
    bytes_val = pmt.from_uint64(first_seg_len * SNAME_DEFS[shortname_outtype][0])
    type_val = pmt.from_long(SNAME_DEFS[shortname_outtype][2])
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("rx_time"), time_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("bytes"), bytes_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("type"), type_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("size"), size_val)
    hdr_out_str = pmt.serialize_str(hdr_out) + pmt.serialize_str(hdr_extra_in)
    handle_out.write(hdr_out_str)

    # Continue reading headers, modifying, and writing
    last_seg_len = info_in["nitems"]
    print "sample_cnt_end=%d,final_index=%d" % (sample_cnt_end, final_index)
    # Iterate through remaining headers
    while sample_cnt_end < final_index:
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in, False)
        nitems = info_in["nitems"]
        sample_cnt_start = sample_cnt_end
        sample_cnt_end += nitems
        hdr_out = hdr_in
        # For last header, adjust segment length accordingly
        if sample_cnt_end > final_index:
            last_seg_len = final_index - sample_cnt_start
        else:
            last_seg_len = nitems
        size_val = pmt.from_long(SNAME_DEFS[shortname_outtype][0])
        bytes_val = pmt.from_uint64(last_seg_len * SNAME_DEFS[shortname_outtype][0])
        type_val = pmt.from_long(SNAME_DEFS[shortname_outtype][2])
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("bytes"), bytes_val)
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("type"), type_val)
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("size"), size_val)
        hdr_out_str = pmt.serialize_str(hdr_out) + pmt.serialize_str(hdr_extra_in)
        handle_out.write(hdr_out_str)

    if options.verbose:
        print "Input File:" + infile
        print "Input Header:" + infile_hdr
        print "Input Type:" + ENUM_TO_SNAME[shortname_intype]
        print "Output File:" + outfile
        print "Output File Length (Samples):%d" % (final_index - sample_offset)
        print "Output Header:" + outfile_hdr
        print "File subsection: [%d,%d]" % (sample_offset, final_index)
        print "Output Type:" + ENUM_TO_SNAME[shortname_outtype]
        print "First Segment Length: %e samples" % first_seg_len
        print "Last Segment Length: %e samples" % last_seg_len
        print "delta=%f,new ts=%f" % (delta, new_ts)

    # Clean up
    handle_in.close()
    handle_out.close()

    # Return header info
    return {
        "infile": infile,
        "intype": shortname_intype,
        "outfile": outfile,
        "outtype": shortname_outtype,
        "sample_offset": sample_offset,
        "sample_len": sample_len,
    }
Code Example #38
File: qpsk_fh.py  Project: CIG-SDR/CIG
 def __init__(
         self,
         n_bursts, n_channels,
         freq_delta, base_freq,
         burst_length, base_time, hop_time,
         post_tuning=True,
         tx_gain=0,
         verbose=False
     ):
     gr.hier_block2.__init__(self,
         "FrequencyHopperSrc",
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
     )
     n_samples_total = n_bursts * burst_length
     self.hop_sequence = numpy.arange(base_freq, base_freq + n_channels * freq_delta, freq_delta)
   # self.hop_sequence = 2440000000, 2450000000, 2435000000, 2430000000, 2445000000, 2420000000, 2425000000  # Specify the hopping pattern here, repeat from beginning
     numpy.random.shuffle(self.hop_sequence)  # this randomly shuffles frequencies in the specified range
     self.hop_sequence = [self.hop_sequence[x % n_channels] for x in xrange(n_bursts)]
   # self.hop_sequence = [self.hop_sequence[x % 7]for x in xrange(n_bursts)]
     if verbose:
         print "Hop Frequencies  | Hop Pattern"
         print "=================|================================"
         for f in self.hop_sequence:
             print "{:6.3f} MHz      |  ".format(f/1e6),
             if n_channels < 50:
                 print " " * int((f - base_freq) / freq_delta) + "#"
             else:
                 print "\n"
         print "=================|================================"
     # There's no real point in setting the gain via tag for this application,
     # but this is an example to show you how to do it.
     gain_tag = gr.tag_t()
     gain_tag.offset = 0
     gain_tag.key = pmt.string_to_symbol('tx_command')
     gain_tag.value = pmt.cons(
             pmt.intern("gain"),
             # These are both valid:
             #pmt.from_double(tx_gain)
             pmt.cons(pmt.to_pmt(0), pmt.to_pmt(tx_gain))
     )
     tag_list = [gain_tag,]
     for i in xrange(n_bursts):
         tune_tag = gr.tag_t()
         tune_tag.offset = i * burst_length
         if i > 0 and post_tuning:
             tune_tag.offset -= 1 # Move it to last sample of previous burst
         tune_tag.key = pmt.string_to_symbol('tx_freq')
         tune_tag.value = pmt.to_pmt(self.hop_sequence[i])
         tag_list.append(tune_tag)
         length_tag = gr.tag_t()
         length_tag.offset = i * burst_length
         length_tag.key = pmt.string_to_symbol('packet_len')
         length_tag.value = pmt.from_long(burst_length)
         tag_list.append(length_tag)
         time_tag = gr.tag_t()
         time_tag.offset = i * burst_length
         time_tag.key = pmt.string_to_symbol('tx_time')
         time_tag.value = pmt.make_tuple(
                 pmt.from_uint64(int(base_time + i * hop_time)),
                 pmt.from_double((base_time + i * hop_time) % 1),
         )
         tag_list.append(time_tag)
     tag_source = blocks.vector_source_c((1.0,) * n_samples_total, repeat=False, tags=tag_list)
     mult = blocks.multiply_cc()
     self.connect(self, mult, self)
     self.connect(tag_source, (mult, 1))
Code Example #39
File: freq_hopping.py  Project: dl1ksv/gnuradio
 def __init__(
         self,
         n_bursts, n_channels,
         freq_delta, base_freq, dsp_tuning,
         burst_length, base_time, hop_time,
         post_tuning=False,
         tx_gain=0,
         verbose=False
     ):
     gr.hier_block2.__init__(
         self, "FrequencyHopperSrc",
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
     )
     n_samples_total = n_bursts * burst_length
     lowest_frequency = base_freq - numpy.floor(n_channels/2) * freq_delta
     self.hop_sequence = [lowest_frequency + n * freq_delta for n in range(n_channels)]
     numpy.random.shuffle(self.hop_sequence)
     # Repeat that:
     self.hop_sequence = [self.hop_sequence[x % n_channels] for x in range(n_bursts)]
     if verbose:
         print("Hop Frequencies  | Hop Pattern")
         print("=================|================================")
         for f in self.hop_sequence:
             print("{:6.3f} MHz      |  ".format(f/1e6), end='')
             if n_channels < 50:
                 print(" " * int((f - base_freq) / freq_delta) + "#")
             else:
                 print("\n")
         print("=================|================================")
     # There's no real point in setting the gain via tag for this application,
     # but this is an example to show you how to do it.
     gain_tag = gr.tag_t()
     gain_tag.offset = 0
     gain_tag.key = pmt.string_to_symbol('tx_command')
     gain_tag.value = pmt.to_pmt({'gain': tx_gain})
     tag_list = [gain_tag,]
     for i in range(len(self.hop_sequence)):
         time = pmt.cons(
             pmt.from_uint64(int(base_time + i * hop_time+0.01)),
             pmt.from_double((base_time + i * hop_time+0.01) % 1),
         )
         tune_tag = gr.tag_t()
         tune_tag.offset = i * burst_length
         # TODO dsp_tuning should also be able to do post_tuning
         if i > 0 and post_tuning and not dsp_tuning:
             tune_tag.offset -= 1 # Move it to last sample of previous burst
         if dsp_tuning:
             tune_tag.key = pmt.string_to_symbol('tx_command')
             tune_tag.value = pmt.to_pmt({'lo_freq': base_freq, 'dsp_freq': base_freq - self.hop_sequence[i]})
             tune_tag.value = pmt.dict_add(tune_tag.value, pmt.intern("time"),time)
         else:
             tune_tag.key = pmt.string_to_symbol('tx_command')
             tune_tag.value = pmt.to_pmt({'freq': self.hop_sequence[i]})
             tune_tag.value = pmt.dict_add(tune_tag.value, pmt.intern('time'), time)
         tag_list.append(tune_tag)
         length_tag = gr.tag_t()
         length_tag.offset = i * burst_length
         length_tag.key = pmt.string_to_symbol('packet_len')
         length_tag.value = pmt.from_long(burst_length)
         tag_list.append(length_tag)
         time_tag = gr.tag_t()
         time_tag.offset = i * burst_length
         time_tag.key = pmt.string_to_symbol('tx_time')
         time_tag.value = pmt.make_tuple(
                 pmt.car(time),
                 pmt.cdr(time)
         )
         tag_list.append(time_tag)
     tag_source = blocks.vector_source_c((1.0,) * n_samples_total, repeat=False, tags=tag_list)
     mult = blocks.multiply_cc()
     self.connect(self, mult, self)
     self.connect(tag_source, (mult, 1))
Code Example #40
    def __init__(
            self,
            n_bursts, n_channels,
            freq_delta, base_freq, dsp_tuning,
            burst_length, base_time, hop_time,
            seed, rate,
            post_tuning=False,
            tx_gain=0,
            verbose=False
        ):
        gr.hier_block2.__init__(self,
            "Hopping",
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
        )
        n_samples_total = n_bursts * burst_length
        lowest_frequency = base_freq - numpy.floor(n_channels/2) * freq_delta
        self.hop_sequence = [lowest_frequency + n * freq_delta for n in xrange(n_channels)]
        random.seed(seed)
        lam = random.random()
        random.shuffle(self.hop_sequence, lambda: lam)
        # Repeat that:
        self.hop_sequence = [self.hop_sequence[x % n_channels] for x in xrange(n_bursts)]
        if verbose:
            print "Hop Frequencies  | Hop Pattern"
            print "=================|================================"
            for f in self.hop_sequence:
                print "{:6.3f} MHz      |  ".format(f/1e6),
                if n_channels < 50:
                    print " " * int((f - base_freq) / freq_delta) + "#"
                else:
                    print "\n"
            print "=================|================================"
        # There's no real point in setting the gain via tag for this application,
        # but this is an example to show you how to do it.
        gain_tag = gr.tag_t()
        gain_tag.offset = 0
        gain_tag.key = pmt.string_to_symbol('tx_command')
        gain_tag.value = pmt.to_pmt({'gain': tx_gain})
        tag_list = [gain_tag,]
        for i in xrange(len(self.hop_sequence)):
            tune_tag = gr.tag_t()
            tune_tag.offset = i * burst_length
            if i > 0 and post_tuning and not dsp_tuning: # TODO dsp_tuning should also be able to do post_tuning
                tune_tag.offset -= 1 # Move it to last sample of previous burst
            if dsp_tuning:
                tune_tag.key = pmt.string_to_symbol('tx_command')
                tune_tag.value = pmt.to_pmt({'rf_freq_policy': int(ord('N')), 'lo_freq': base_freq, 'dsp_freq_policy': int(ord('M')),'dsp_freq': base_freq - self.hop_sequence[i] })
            else:
                tune_tag.key = pmt.string_to_symbol('tx_freq')
                tune_tag.value = pmt.to_pmt(self.hop_sequence[i])
            tag_list.append(tune_tag)
            length_tag = gr.tag_t()
            length_tag.offset = i * burst_length
            length_tag.key = pmt.string_to_symbol('packet_len')
            length_tag.value = pmt.from_long(burst_length)
            tag_list.append(length_tag)	
            time_tag = gr.tag_t()
            time_tag.offset = i * burst_length
            time_tag.key = pmt.string_to_symbol("tx_time")
            time_tag.value = pmt.make_tuple(
                    pmt.from_uint64(int(base_time + i * hop_time)),
                    pmt.from_double((base_time + i * hop_time) % 1),
            )
            tag_list.append(time_tag)
        #############################################
        # Old Version
        #############################################
        tag_source = blocks.vector_source_c((1.0,) * n_samples_total, repeat= True, tags=tag_list)
        mult = blocks.multiply_cc()
        self.connect(self, mult, self)
        self.connect(tag_source, (mult, 1))