def work(self, input_items, output_items):
        """Classify one feature vector per call and publish the result.

        Builds a feature vector from this signal's bandwidth entry plus the
        first item of each input stream, runs the k-NN classifier, and emits
        a ((\"signal\", index), (label, 0)) pmt tuple on the
        ``classification`` message port.
        """
        stream_a = input_items[0]
        stream_b = input_items[1]

        # Only classify when input is present and a bandwidth entry exists
        # for this signal index.
        if len(stream_a) >= 1 and len(self.bw_list) > self.sig_no:
            # Feature vector: bandwidth followed by both input items.
            features = [self.bw_list[self.sig_no]]
            features.extend(stream_a[0])
            features.extend(stream_b[0])

            # predict() expects a 2-D array-like: one row per sample.
            prediction = self.knn.predict([features])

            # Publish ((\"signal\", <index>), (<label>, 0)).
            pmt_signal = pmt.to_pmt(("signal", self.sig_no))
            pmt_class = pmt.to_pmt((prediction[0], 0))
            self.message_port_pub(pmt.intern("classification"),
                                  pmt.make_tuple(pmt_signal, pmt_class))

        # Consume everything on both inputs regardless of classification.
        self.consume(0, len(stream_a))
        self.consume(1, len(stream_b))
        return len(input_items[0])
예제 #2
0
    def test1_cleanup(self):
      '''
      All files should be deleted by the monitor when complete
      '''
      # create a fresh dummy file for the monitor to clean up
      fname = '/tmp/foo.txt'
      if os.path.exists(fname):
        os.remove(fname)
      Path(fname).touch()

      # metadata PDU describing the capture file
      p = pmt.make_dict()
      p = pmt.dict_add(p, pmt.intern('rx_freq'), pmt.from_double(915e6))
      p = pmt.dict_add(p, pmt.intern('rx_rate'), pmt.from_double(30.72e6))
      p = pmt.dict_add(p, pmt.intern('rx_time'), pmt.make_tuple(
        pmt.from_uint64(0), pmt.from_double(0)))
      p = pmt.dict_add(p, pmt.intern('fname'), pmt.intern(fname))

      # blocks under test
      emitter = pdu_utils.message_emitter(pmt.PMT_NIL)
      debug = blocks.message_debug()
      monitor = file_monitor(10, '/tmp')

      # wire the emitter into the monitor
      self.tb.msg_connect((emitter, 'msg'), (monitor, 'pdu'))

      # set up fg - terrible hacky way of doing this until we get
      # pdu utility message emitter working
      self.tb.start()
      emitter.emit(p)
      time.sleep(.05)
      self.tb.stop()
      self.tb.wait()

      # the monitor should have removed the file
      self.assertFalse(os.path.exists(fname))
예제 #3
0
    def test_008_time_tuple(self):
        """A PDU whose 'time' field is a (uint64, double) pmt tuple should
        round-trip through the csv writer."""
        emitter = pdu_utils.message_emitter()
        writer = csv_writer('/tmp/file.csv',
                            True,
                            'time(time_tuple)',
                            'uint8',
                            precision=4)

        # PDU carrying a time-tuple metadata entry and an empty payload
        t_tuple = pmt.make_tuple(pmt.from_uint64(1), pmt.from_double(0.0))
        meta = pmt.dict_add(pmt.make_dict(), pmt.intern('time'), t_tuple)
        expected = pmt.cons(meta, pmt.PMT_NIL)

        # run the flowgraph long enough for the write to land on disk
        tb = gr.top_block()
        tb.msg_connect((emitter, 'msg'), (writer, 'in'))
        tb.start()
        emitter.emit(expected)
        time.sleep(.5)
        tb.stop()
        tb.wait()

        # the csv on disk should parse back to the emitted PDU
        self.assertTrue(
            self.check_file('/tmp/file.csv',
                            expected,
                            data_type='uint8',
                            has_header=True))
예제 #4
0
    def test_003_double_eob_rej_tt_update (self):
        """Two bursts through tags_to_pdu: the first EOB is honored, a
        misaligned second EOB appears to be rejected (per the test name),
        and an rx_time tag between bursts updates the burst timestamps."""
        self.tb = gr.top_block ()
        start_time = 0.0
        # burst 1: SOB at sample 51, EOB 8*11 samples later
        sob_tag = gr.tag_utils.python_to_tag((51, pmt.intern("SOB"), pmt.PMT_T, pmt.intern("src")))
        eob_tag = gr.tag_utils.python_to_tag((51+(8*11), pmt.intern("EOB"), pmt.PMT_T, pmt.intern("src")))
        # rx_time update at sample 360: 4 s + 0.125 s (extra tuple fields
        # presumably carry rate info — not asserted below)
        time_tuple = pmt.make_tuple(pmt.from_uint64(4), pmt.from_double(0.125), pmt.from_uint64(10000000), pmt.from_double(4000000.0))
        time_tag = gr.tag_utils.python_to_tag((360, pmt.intern("rx_time"), time_tuple, pmt.intern("src")))
        # burst 2: SOB at 400; EOB at 409, then another EOB at 416
        sob_tag2 = gr.tag_utils.python_to_tag((400, pmt.intern("SOB"), pmt.PMT_T, pmt.intern("src")))
        eob_tag2e = gr.tag_utils.python_to_tag((409, pmt.intern("EOB"), pmt.PMT_T, pmt.intern("src")))
        eob_tag2 = gr.tag_utils.python_to_tag((416, pmt.intern("EOB"), pmt.PMT_T, pmt.intern("src")))
        vs = blocks.vector_source_s(range(500), False, 1, [sob_tag, eob_tag, time_tag, sob_tag2, eob_tag2e, eob_tag2])
        t2p = pdu_utils.tags_to_pdu_s(pmt.intern('SOB'), pmt.intern('EOB'), 1024, 1000000, ([]), False, 0, start_time)
        t2p.set_eob_parameters(8, 0)
        dbg = blocks.message_debug()
        self.tb.connect(vs, t2p)
        self.tb.msg_connect((t2p, 'pdu_out'), (dbg, 'store'))
        # expected payloads: burst 2 is zero-padded out to 16 samples
        expected_vec1 = pmt.init_s16vector((8*11), range(51,51+(8*11)))
        expected_vec2 = pmt.init_s16vector(16, list(range(400,409)) + [0]*7)
        expected_time1 = start_time + (51 / 1000000.0)
        # burst 2 time is measured from the rx_time update at sample 360
        expected_time2 = 4.125 + ((400-360) / 1000000.0)

        self.tb.run ()

        self.assertEqual(dbg.num_messages(), 2)
        self.assertTrue(pmt.equal(pmt.cdr(dbg.get_message(0)), expected_vec1))
        self.assertTrue(pmt.equal(pmt.cdr(dbg.get_message(1)), expected_vec2))
        # burst_time metadata is a (uint64 secs, double frac) tuple
        time_tuple1 = pmt.dict_ref(pmt.car(dbg.get_message(0)), pmt.intern("burst_time"), pmt.PMT_NIL)
        time_tuple2 = pmt.dict_ref(pmt.car(dbg.get_message(1)), pmt.intern("burst_time"), pmt.PMT_NIL)
        self.assertAlmostEqual(pmt.to_uint64(pmt.tuple_ref(time_tuple1,0)) + pmt.to_double(pmt.tuple_ref(time_tuple1,1)), expected_time1)
        self.assertAlmostEqual(pmt.to_uint64(pmt.tuple_ref(time_tuple2,0)) + pmt.to_double(pmt.tuple_ref(time_tuple2,1)), expected_time2)

        self.tb = None
예제 #5
0
    def _queue_tags(self, sample, tags):
        """Queue stream tags to be attached to data in the work function.

        In addition to the tags specified in the `tags` dictionary, this will
        add `rx_time` and `rx_rate` tags giving the sample time and rate.


        Parameters
        ----------

        sample : int
            Sample index for the sample to tag, given in the number of samples
            since the epoch (time_since_epoch*sample_rate).

        tags : dict
            Dictionary containing the tags to add with keys specifying the tag
            name. The value is cast as an appropriate pmt type, while the name
            will be turned into a pmt string in the work function.

        """
        # add to current queued tags for sample if applicable
        tag_dict = self._tag_queue.get(sample, {})
        if not tag_dict:
            # add time and rate tags
            time = sample/self._sample_rate
            tag_dict['rx_time'] = pmt.make_tuple(
                pmt.from_uint64(int(np.uint64(time))),
                pmt.from_double(float(time % 1)),
            )
            tag_dict['rx_rate'] = self._sample_rate_pmt
        for k, v in tags.items():
            # Guard the conversion: a single unconvertible value should not
            # abort queueing of the remaining tags (matches the hardened
            # variant of this method used elsewhere).
            try:
                tag_dict[k] = pmt.to_pmt(v)
            except ValueError:
                print(
                    "Can't add tag for '{0}' because its value of {1} failed"
                    " to convert to a pmt value.".format(k, v)
                )
        self._tag_queue[sample] = tag_dict
예제 #6
0
def phy_tag_create(nitems=0,
                   rate=0,
                   flag=0,
                   rx_time=0,
                   payload_sample_index=0):
    """Return a 5-element pmt tuple of longs describing a PHY tag."""
    fields = (nitems, rate, flag, rx_time, payload_sample_index)
    return pmt.make_tuple(*[pmt.from_long(field) for field in fields])
 def makeTimeDict(self, timeval):
     """Return a pmt dict mapping 'rx_time' to a (secs, frac) pmt tuple."""
     secs = int(timeval)
     frac = timeval - secs
     rx_time = pmt.make_tuple(pmt.from_uint64(secs), pmt.from_double(frac))
     return pmt.dict_add(pmt.make_dict(), pmt.intern("rx_time"), rx_time)
    def test_001_t (self):
        """OFDM synchronizer output should track the (frequency-shifted)
        input once the block has locked.

        Fix: the original used the Python-2-only ``generator.next()``
        method, which raises AttributeError on Python 3; ``next(gen)``
        works on both.
        """
        #  set up fg
        fft_len = 256
        cp_len = 32
        samp_rate = 32000
        data = np.random.choice([-1, 1], [100, fft_len])

        # NOTE(review): ifft is taken over axis 0 (across the 100 symbols);
        # confirm this matches the synchronizer's expected symbol layout.
        timefreq = np.fft.ifft(data, axis=0)

        # prepend the cyclic prefix to each symbol
        timefreq = np.hstack((timefreq[:, -cp_len:], timefreq))

        # msg (only 4th and 5th tuples are needed)
        id1 = pmt.make_tuple(pmt.intern("Signal"), pmt.from_uint64(0))
        name = pmt.make_tuple(pmt.intern("OFDM"), pmt.from_float(1.0))
        id2 = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(0.0))
        id3 = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(0.0))
        id4 = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(256))
        id5 = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(32))
        msg = pmt.make_tuple(id1, name, id2, id3, id4, id5)

        tx = np.reshape(timefreq, (1, -1))

        # GR time!
        src = blocks.vector_source_c(tx[0].tolist(), True, 1, [])
        freq_offset = analog.sig_source_c(1, analog.GR_SIN_WAVE,
                                          50.0/samp_rate, 1.0, 0.0)
        mixer = blocks.multiply_cc()
        sync = inspector.ofdm_synchronizer_cc(4096)
        dst = blocks.vector_sink_c()
        dst2 = blocks.vector_sink_c()
        msg_src = blocks.message_strobe(msg, 0)

        # connect
        self.tb.connect(src, (mixer, 0))
        self.tb.connect(freq_offset, (mixer, 1))
        self.tb.connect(mixer, sync)
        self.tb.msg_connect((msg_src, 'strobe'), (sync, 'ofdm_in'))
        self.tb.connect(sync, dst)
        self.tb.connect(src, dst2)

        self.tb.start()
        time.sleep(0.1)
        self.tb.stop()
        self.tb.wait()

        # check data
        output = dst.data()
        expect = dst2.data()

        # block outputs 0j until it has enough OFDM symbols to perform
        # estimations; find the first non-zero output sample
        k = next(k for k in range(len(output)) if output[k] != 0j)

        # use 10,000 samples for comparison since block fails sometimes
        # for one work function
        output = output[k:k+10000]
        expect = expect[k:k+10000]

        self.assertComplexTuplesAlmostEqual2(expect, output, abs_eps = 0.001, rel_eps=10)
예제 #9
0
    def test_001_all_header_fields(self):
        """Every supported header field type should parse into the PDU."""
        with open('/tmp/file.csv', 'w') as f:
            # write header
            f.write('field0(string), , field1(bool), field2(float),' +
                    'field3(long), field4(uint64), field5(double),' +
                    'field6(complex),field7,field8(time),field9(time_tuple)\n')

            # add some data
            f.write(
                'field0, empty, True, 1.0,1234567890,987654321, 2.5,1+2j,string,1.0,1.0,1,2,3,4,5\n'
            )

        # start reader/
        reader = csv_reader(fname='/tmp/file.csv',
                            has_header=True,
                            period=10,
                            start_delay=0,
                            repeat=False)

        # expected metadata, built in the same key order as the csv columns
        field_values = [
            ('field0', pmt.intern('field0')),
            ('field1', pmt.from_bool(True)),
            ('field2', pmt.from_float(1.0)),
            ('field3', pmt.from_long(1234567890)),
            ('field4', pmt.from_uint64(987654321)),
            ('field5', pmt.from_double(2.5)),
            ('field6', pmt.from_complex(1.0 + 2j)),
            ('field7', pmt.intern('string')),
            ('field8', pmt.cons(pmt.from_uint64(1), pmt.from_double(0))),
            ('field9', pmt.make_tuple(pmt.from_uint64(1),
                                      pmt.from_double(0))),
        ]
        metadata = pmt.make_dict()
        for key, value in field_values:
            metadata = pmt.dict_add(metadata, pmt.intern(key), value)

        data = pmt.init_u8vector(5, [1, 2, 3, 4, 5])
        expected = pmt.cons(metadata, data)

        # run
        self.tb.msg_connect((reader, 'out'), (self.debug, 'store'))
        self.tb.start()
        time.sleep(.5)
        self.tb.stop()
        self.tb.wait()

        got = self.debug.get_message(0)

        self.assertTrue(pmt.equal(expected, got))
예제 #10
0
 def makeDict(self, **kwargs):
     """Build a pmt dict from optional freq / rate / epoch keyword args.

     'freq' -> rx_freq (double), 'rate' -> rx_rate (double), and the pair
     'epoch_int'/'epoch_frac' -> rx_time as a (uint64, double) tuple.
     """
     d = pmt.make_dict()
     if "freq" in kwargs:
         d = pmt.dict_add(d, pmt.intern("rx_freq"),
                          pmt.from_double(kwargs["freq"]))
     if "rate" in kwargs:
         d = pmt.dict_add(d, pmt.intern("rx_rate"),
                          pmt.from_double(kwargs["rate"]))
     if "epoch_int" in kwargs and "epoch_frac" in kwargs:
         rx_time = pmt.make_tuple(pmt.from_uint64(kwargs["epoch_int"]),
                                  pmt.from_double(kwargs["epoch_frac"]))
         d = pmt.dict_add(d, pmt.intern("rx_time"), rx_time)
     return d
예제 #11
0
    def work(self, input_items, output_items):
        """Classify input vectors with a TensorFlow model and publish results.

        For each input item, builds a tensor (splitting complex data into
        real/imag planes, or normalising and optionally reshaping float
        data), runs the TF session, and publishes a
        ((\"signal\", num), (class, score)) pmt tuple on the
        ``classification`` message port.

        Cleanup: removed the unused ``input_i`` local and the dead
        ``pmtv = pmt.make_dict()`` initialisation (immediately overwritten
        in the loop); ``np.argmax`` is now computed once per output.
        """
        tensordata = []
        shapev = np.array(input_items[0]).shape

        for item in range(shapev[0]):

            inp = np.array(input_items[0][item])

            if self.dtype == np.complex64:

                # complex data must be split into real
                # and imaginary floats for the ANN
                tensordata.append(np.array([[inp.real, inp.imag]]))

            elif self.dtype == np.float32:

                # NOTE(review): an all-zero vector aborts the whole call,
                # discarding any items already collected — confirm intended.
                if np.mean(inp) == 0.0:
                    return len(input_items[0])

                ## Normalise data
                inp = (inp - np.mean(inp)) / np.std(inp)

                ## Reshape data as specified
                if self.reshape != ():
                    floats = np.reshape(inp, self.reshape)
                else:
                    floats = inp

                tensordata.append(np.array([floats]))

        ne = []
        for v in tensordata:
            try:
                outp = self.sess.run(self.out, feed_dict={self.inp: [v]})[0]
                ne.append(outp)
            except tf.errors.InvalidArgumentError:
                # NOTE(review): quit() kills the interpreter; raising would
                # be friendlier to the enclosing flowgraph.
                print("Invalid size of input vector to TensorFlow model")
                quit()

        for outp in ne:
            best = np.argmax(outp)
            pmtv = pmt.make_tuple(
                pmt.to_pmt(("signal", self.signum)),
                pmt.to_pmt((self.classes[best], outp[best].item())))

            self.message_port_pub(pmt.intern("classification"), pmtv)

        return len(input_items[0])
예제 #12
0
 def generate(self):
     """Emit 10 bursts on the 'bursts' port, one every sleep_between secs.

     Each burst carries a tx_time header — (uint64 whole seconds,
     double fractional seconds) of base_time + i * burst_spacing.

     Fix: a leftover ``pdu_header = pmt.PMT_NIL`` line overwrote the
     header dict just built, so the tx_time never reached the consumer;
     that line is removed.
     """
     sleep(self.sleep_before)
     for i in xrange(0, 10):
         t = self.base_time + i * self.burst_spacing
         tx_time = pmt.make_tuple(pmt.from_uint64(int(t)),
                                  pmt.from_double(t % 1))
         pdu_header = pmt.dict_add(pmt.make_dict(), pmt.intern('tx_time'),
                                   tx_time)
         burst = pmt.cons(pdu_header, self.samples)
         self.message_port_pub(pmt.intern("bursts"), burst)
         sleep(self.sleep_between)
예제 #13
0
    def work(self, input_items, output_items):
        """Classify input vectors with a TensorFlow model and publish results.

        For each input item, builds a tensor (splitting complex data into
        real/imag planes, or normalising and optionally reshaping float
        data), runs the TF session, and publishes a
        ((\"signal\", num), (class, score)) pmt tuple on the
        ``classification`` message port.

        Cleanup: removed the unused ``input_i`` local and the dead
        ``pmtv = pmt.make_dict()`` initialisation (immediately overwritten
        in the loop); ``np.argmax`` is now computed once per output.
        """
        tensordata = []
        shapev = np.array(input_items[0]).shape

        for item in range(shapev[0]):

            inp = np.array(input_items[0][item])

            if self.dtype == np.complex64:

                # complex data must be split into real
                # and imaginary floats for the ANN
                tensordata.append(np.array([[inp.real, inp.imag]]))

            elif self.dtype == np.float32:

                # NOTE(review): an all-zero vector aborts the whole call,
                # discarding any items already collected — confirm intended.
                if np.mean(inp) == 0.0:
                    return len(input_items[0])

                ## Normalise data
                inp = (inp - np.mean(inp)) / np.std(inp)

                ## Reshape data as specified
                if self.reshape != ():
                    floats = np.reshape(inp, self.reshape)
                else:
                    floats = inp

                tensordata.append(np.array([floats]))

        ne = []
        for v in tensordata:
            try:
                outp = self.sess.run(self.out, feed_dict={self.inp: [v]})[0]
                ne.append(outp)
            except tf.errors.InvalidArgumentError:
                # NOTE(review): quit() kills the interpreter; raising would
                # be friendlier to the enclosing flowgraph.
                print("Invalid size of input vector to TensorFlow model")
                quit()

        for outp in ne:
            best = np.argmax(outp)
            pmtv = pmt.make_tuple(
                pmt.to_pmt(("signal", self.signum)),
                pmt.to_pmt((self.classes[best], outp[best].item())))

            self.message_port_pub(pmt.intern("classification"), pmtv)

        return len(input_items[0])
 def timemsg(self, abstime, fmt):
     """Convert an absolute time (seconds) to a pmt time message.

     fmt == \"sample\": uint64 sample count (abstime * self.rate);
     fmt == \"pair\":   pmt cons of (uint64 secs, double frac);
     fmt == \"tuple\":  pmt tuple of (uint64 secs, double frac).

     Fix: an unrecognised fmt used to fall through and silently return
     None; it now raises ValueError.
     """
     if fmt == "sample":
         return pmt.from_uint64(int(abstime * self.rate))
     elif fmt in ["pair", "tuple"]:
         t_int = int(abstime)
         t_frac = abstime - t_int
         # keep the fractional part below 1.0
         if t_frac > 1:
             t_int += 1
             t_frac -= 1.0
         if fmt == "pair":
             return pmt.cons(pmt.from_uint64(t_int),
                             pmt.from_double(t_frac))
         else:
             return pmt.make_tuple(pmt.from_uint64(t_int),
                                   pmt.from_double(t_frac))
     else:
         raise ValueError("timemsg: unknown format '{}'".format(fmt))
예제 #15
0
    def test_003_double_msg(self):
        """Two timed PDUs (plus a malformed one, which must be ignored)
        should produce tx_sob / tx_time / tx_eob tags at the right offsets.

        Fix: the Python-2 print statement in the tag dump loop is a syntax
        error on Python 3; converted to the print() function.
        """
        in_data1 = [0, 0, 0, 0, 0, 0, 0, 0]
        in_data2 = [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1]
        in_data = in_data1 + in_data1 + in_data2
        tag_time = pmt.make_tuple(pmt.from_uint64(11),
                                  pmt.from_double(0.123456))
        in_dict = pmt.dict_add(pmt.make_dict(), pmt.intern("tx_time"),
                               tag_time)
        in_pdu1 = pmt.cons(in_dict, pmt.init_c32vector(len(in_data1),
                                                       in_data1))
        in_pdu2 = pmt.cons(pmt.make_dict(),
                           pmt.init_c32vector(len(in_data2), in_data2))
        # expected tags for the second pdu1 + pdu2 pair
        e_tag_0 = gr.tag_utils.python_to_tag(
            (len(in_data1), pmt.intern("tx_sob"), pmt.PMT_T, pmt.PMT_NIL))
        e_tag_1 = gr.tag_utils.python_to_tag(
            (len(in_data1), pmt.intern("tx_time"), tag_time, pmt.PMT_NIL))
        e_tag_2 = gr.tag_utils.python_to_tag(
            (len(in_data) - 1, pmt.intern("tx_eob"), pmt.PMT_T, pmt.PMT_NIL))

        self.tb.start()
        time.sleep(.001)
        self.emitter.emit(pmt.intern("MALFORMED PDU"))
        time.sleep(.001)
        self.emitter.emit(in_pdu1)
        time.sleep(.005)
        self.emitter.emit(in_pdu1)
        self.emitter.emit(in_pdu2)
        time.sleep(.01)
        self.tb.stop()
        self.tb.wait()

        tags = self.vs.tags()
        for tag in tags:
            print(tag.offset, tag.key, tag.value)
        self.assertEqual(len(tags), 6)
        self.assertEqual(tags[3].offset, e_tag_0.offset)
        self.assertTrue(pmt.equal(tags[3].key, e_tag_0.key))
        self.assertTrue(pmt.equal(tags[3].value, e_tag_0.value))
        self.assertEqual(tags[4].offset, e_tag_1.offset)
        self.assertTrue(pmt.equal(tags[4].key, e_tag_1.key))
        self.assertTrue(pmt.equal(tags[4].value, e_tag_1.value))
        self.assertEqual(tags[5].offset, e_tag_2.offset)
        self.assertTrue(pmt.equal(tags[5].key, e_tag_2.key))
        self.assertTrue(pmt.equal(tags[5].value, e_tag_2.value))
        self.assertTrue((in_data == numpy.real(self.vs.data())).all())
예제 #16
0
    def test_rx_time_pad_left(self):
        """Run tagged_bits_to_bytes over rx_time + BURST tags and print the
        result.

        NOTE(review): this test prints result_data against expected_result
        but never asserts on it — consider adding an assertEqual once the
        expected bytes are confirmed.
        """
        # all-zero input, 800 samples
        src_data = (0, ) * 8 * 100
        offsets = [16]
        for step in range(17, 24):
            offsets.append(offsets[-1] + step)
        print("offsets = {}".format(offsets))

        # rx_time tag at offset 1, then a BURST tag at every offset
        rx_time = pmt.make_tuple(pmt.from_uint64(1), pmt.from_double(.234))
        tags = [gr.tag_utils.python_to_tag(
            [1, pmt.intern("rx_time"), rx_time,
             pmt.intern("time_stamper")])]
        for offset in offsets:
            tags.append(
                gr.tag_utils.python_to_tag([
                    offset,
                    pmt.intern("BURST"),
                    pmt.from_uint64(0),
                    pmt.intern("test_simple_source")
                ]))

        expected_result = (0x0, ) * 5

        # blocks under test
        src = blocks.vector_source_b(src_data, False, 1, tags)
        tbb = sandia_utils.tagged_bits_to_bytes("BURST", False, 2, 1)
        dst = blocks.vector_sink_b()
        tag_dbg = blocks.tag_debug(gr.sizeof_char * 1, '', "")
        tag_dbg.set_display(True)

        self.tb.connect(src, tbb)
        self.tb.connect(tbb, tag_dbg)
        self.tb.connect(tbb, dst)

        # execute
        self.tb.run()
        result_data = dst.data()

        # assert - should get both a BURST and burst_time tag at offsets = [2,5,8,11,14,17,20,23]
        print("test rx_time got {}, expected {}".format(
            result_data, expected_result))
예제 #17
0
    def test_001_t(self):
        """The OFDM estimator should map peak indices to range/velocity
        values on the configured axes.

        Fix: the Python-2 print statement ``print val_range`` is a syntax
        error on Python 3; converted to the print() function.
        """
        # set up fg
        Range = (2, 4, 22, 23)
        velocity = (3, 12, 19, 19)
        power = (10, 10, 10, 1)  # last value is thrown away with merging peaks
        pmt_time = pmt.list2(
            pmt.string_to_symbol('rx_time'),
            pmt.make_tuple(pmt.from_long(-1), pmt.from_double(0)))
        pmt_axisx = pmt.list2(pmt.string_to_symbol('axis_x'),
                              pmt.init_f32vector(len(Range), Range))
        pmt_axisy = pmt.list2(pmt.string_to_symbol('axis_y'),
                              pmt.init_f32vector(len(velocity), velocity))
        pmt_power = pmt.list2(pmt.string_to_symbol('power'),
                              pmt.init_f32vector(len(power), power))
        pmt_in = pmt.list4(pmt_time, pmt_axisx, pmt_axisy, pmt_power)

        src = blocks.message_strobe(pmt_in, 300)
        est = radar.estimator_ofdm('range', 30, (0, 40, -40, 10), 'velocity',
                                   20, (-5, 5))
        snk = blocks.message_debug()
        self.tb.msg_connect(src, "strobe", est, "Msg in")
        self.tb.msg_connect(est, "Msg out", snk, "store")
        self.tb.msg_connect(est, "Msg out", snk, "print")

        self.tb.start()
        sleep(0.5)
        self.tb.stop()
        self.tb.wait()

        # get ref data
        ref_range = (0 + 40 * 2 / 15.0, 0 + 40 * 4 / 15.0,
                     -40 + 50 * (22 - 15) / 15.0)
        ref_velocity = (-5 + 10 * 3 / 20.0, -5 + 10 * 12 / 20.0,
                        -5 + 10 * 19 / 20.0)

        # check data
        msg = snk.get_message(0)
        val_range = pmt.f32vector_elements(pmt.nth(1, pmt.nth(1, msg)))
        val_velocity = pmt.f32vector_elements(pmt.nth(1, pmt.nth(2, msg)))
        print(val_range)
        self.assertFloatTuplesAlmostEqual(val_velocity, ref_velocity, 4)
        self.assertFloatTuplesAlmostEqual(val_range, ref_range, 4)
예제 #18
0
    def _queue_tags(self, sample, tags):
        """Queue stream tags to be attached to data in the work function.

        Besides the entries of *tags*, an `rx_time` tag (whole seconds,
        fractional seconds) and an `rx_rate` tag are added the first time a
        given sample index is seen.

        Parameters
        ----------
        sample : int
            Sample index to tag, counted in samples since the epoch
            (time_since_epoch * sample_rate).

        tags : dict
            Tag names mapped to values; each value is converted to a pmt
            type here, the name is interned in the work function.
        """
        # merge with any tags already queued for this sample
        queued = self._tag_queue.get(sample, {})
        if not queued:
            # first tags for this sample: attach the time and rate tags
            time = sample / self._sample_rate
            queued["rx_time"] = pmt.make_tuple(
                pmt.from_uint64(int(np.uint64(time))), pmt.from_double(float(time % 1))
            )
            queued["rx_rate"] = self._sample_rate_pmt
        for name, value in tags.items():
            try:
                converted = pmt.to_pmt(value)
            except ValueError:
                # report the failure but keep the remaining tags
                traceback.print_exc()
                errstr = (
                    "Can't add tag for '{0}' because its value of {1} failed"
                    " to convert to a pmt value."
                )
                print(errstr.format(name, value))
            else:
                queued[name] = converted
        self._tag_queue[sample] = queued
예제 #19
0
    def test1_copy_file(self):
        """The archiver should copy the file to its pattern-derived name."""
        # fresh input file; clear any stale output from a previous run
        fname = '/tmp/foo.txt'
        fname_out = '/tmp/915_30_000000.txt'
        if os.path.exists(fname):
            os.remove(fname)
        Path(fname).touch()
        if os.path.exists(fname_out):
            os.remove(fname_out)

        # metadata PDU describing the capture
        p = pmt.make_dict()
        p = pmt.dict_add(p, pmt.intern('rx_freq'), pmt.from_double(915e6))
        p = pmt.dict_add(p, pmt.intern('rx_rate'), pmt.from_double(30.72e6))
        p = pmt.dict_add(p, pmt.intern('rx_time'),
                         pmt.make_tuple(pmt.from_uint64(0),
                                        pmt.from_double(0)))
        p = pmt.dict_add(p, pmt.intern('fname'), pmt.intern(fname))

        # blocks under test
        emitter = pdu_utils.message_emitter(pmt.PMT_NIL)
        archiver = file_archiver('/tmp', '%fcM_%fsM_%H%M%S.txt', True)
        self.tb.msg_connect((emitter, 'msg'), (archiver, 'pdu'))

        # run briefly and push the PDU through
        self.tb.start()
        emitter.emit(p)
        time.sleep(.1)
        self.tb.stop()
        self.tb.wait()

        # the archived copy must exist
        os.remove(fname)
        self.assertTrue(os.path.exists(fname_out))

        # cleanup
        os.remove(fname_out)
예제 #20
0
    def test_002_timed(self):
        """A tx_time PDU should yield tx_sob, tx_time and tx_eob tags
        (a malformed PDU posted first must be ignored)."""
        in_data = [
            0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1,
            0, 1
        ]
        tag_time = pmt.make_tuple(pmt.from_uint64(11),
                                  pmt.from_double(0.123456))
        in_dict = pmt.dict_add(pmt.make_dict(), pmt.intern("tx_time"),
                               tag_time)
        in_pdu = pmt.cons(in_dict, pmt.init_c32vector(len(in_data), in_data))

        # expected: sob + time at offset 0, eob on the last sample
        expected = [
            gr.tag_utils.python_to_tag(
                (0, pmt.intern("tx_sob"), pmt.PMT_T, pmt.PMT_NIL)),
            gr.tag_utils.python_to_tag(
                (0, pmt.intern("tx_time"), tag_time, pmt.PMT_NIL)),
            gr.tag_utils.python_to_tag(
                (len(in_data) - 1, pmt.intern("tx_eob"), pmt.PMT_T,
                 pmt.PMT_NIL)),
        ]

        self.tb.start()
        self.p2s.to_basic_block()._post(pmt.intern("pdus"),
                                        pmt.intern("MALFORMED PDU"))
        self.p2s.to_basic_block()._post(pmt.intern("pdus"), in_pdu)
        self.waitFor(lambda: len(self.vs.tags()) == 3,
                     timeout=1.0,
                     poll_interval=0.01)
        self.tb.stop()
        self.tb.wait()

        tags = self.vs.tags()
        self.assertEqual(len(tags), 3)
        for got, exp in zip(tags, expected):
            self.assertEqual(got.offset, exp.offset)
            self.assertTrue(pmt.equal(got.key, exp.key))
            self.assertTrue(pmt.equal(got.value, exp.value))
        self.assertTrue((in_data == np.real(self.vs.data())).all())
예제 #21
0
    def test_002_timed(self):
        """A tx_time PDU should yield tx_sob, tx_time and tx_eob tags
        (a malformed PDU emitted first must be ignored)."""
        in_data = [
            0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1,
            0, 1
        ]
        tag_time = pmt.make_tuple(pmt.from_uint64(11),
                                  pmt.from_double(0.123456))
        in_dict = pmt.dict_add(pmt.make_dict(), pmt.intern("tx_time"),
                               tag_time)
        in_pdu = pmt.cons(in_dict, pmt.init_c32vector(len(in_data), in_data))

        # expected: sob + time at offset 0, eob on the last sample
        expected = [
            gr.tag_utils.python_to_tag(
                (0, pmt.intern("tx_sob"), pmt.PMT_T, pmt.PMT_NIL)),
            gr.tag_utils.python_to_tag(
                (0, pmt.intern("tx_time"), tag_time, pmt.PMT_NIL)),
            gr.tag_utils.python_to_tag(
                (len(in_data) - 1, pmt.intern("tx_eob"), pmt.PMT_T,
                 pmt.PMT_NIL)),
        ]

        self.tb.start()
        time.sleep(.001)
        self.emitter.emit(pmt.intern("MALFORMED PDU"))
        time.sleep(.001)
        self.emitter.emit(in_pdu)
        time.sleep(.01)
        self.tb.stop()
        self.tb.wait()

        tags = self.vs.tags()
        self.assertEqual(len(tags), 3)
        for got, exp in zip(tags, expected):
            self.assertEqual(got.offset, exp.offset)
            self.assertTrue(pmt.equal(got.key, exp.key))
            self.assertTrue(pmt.equal(got.value, exp.value))
        self.assertTrue((in_data == numpy.real(self.vs.data())).all())
예제 #22
0
    def process_txtime_of_burst(self, msg):
        """Attach a tx_time header (corrected for delay and timing advance)
        to a burst message and republish it on the 'bursts' port.

        Does nothing until a frame-number reference (self.fn_ref) has been
        established.
        """
        burst_with_header = pmt.to_python(pmt.cdr(msg))
        # frame number is stored big-endian in header bytes 8..11
        fn = (burst_with_header[8] * 2**24 + burst_with_header[9] * 2**16 +
              burst_with_header[10] * 2**8 + burst_with_header[11])
        ts_num = burst_with_header[3]
        if self.fn_ref is None:
            return
        fn_delta, txtime = fn_time_delta(self.fn_ref, self.time_ref, fn,
                                         self.time_hint, ts_num)
        # apply delay correction, then timing advance (same order as before)
        txtime_final = txtime - self.delay_correction - self.timing_advance

        secs = int(txtime_final)
        fracs = txtime_final - int(txtime_final)
        header = pmt.dict_add(
            pmt.make_dict(), pmt.intern("tx_time"),
            pmt.make_tuple(pmt.from_uint64(secs), pmt.from_double(fracs)))
        header = pmt.dict_add(header, pmt.intern("fn"), pmt.from_uint64(fn))
        self.message_port_pub(pmt.intern("bursts"),
                              pmt.cons(header, pmt.cdr(msg)))
예제 #23
0
File: mkheader.py  Project: mjp5578/recipes
def update_timestamp(hdr, seg_size):
    """Advance the 'rx_time' entry of a file-header pmt dict by seg_size
    samples.

    Reads the (secs, frac) rx_time tuple and the rx_rate from *hdr* and
    returns a new header dict with rx_time advanced by seg_size / rx_rate
    seconds.  Exits the process if rx_time is missing.

    Fix: ``long()`` is Python-2 only and raises NameError on Python 3;
    replaced with ``int()`` (equivalent here).

    NOTE(review): if 'rx_rate' is absent the function falls through and
    returns None — confirm callers handle that.
    """
    if pmt.dict_has_key(hdr, pmt.string_to_symbol("rx_time")):
        r = pmt.dict_ref(hdr, pmt.string_to_symbol("rx_time"), pmt.PMT_NIL)
        secs = pmt.tuple_ref(r, 0)
        fracs = pmt.tuple_ref(r, 1)
        secs = float(pmt.to_uint64(secs))
        fracs = pmt.to_double(fracs)
        t = secs + fracs
    else:
        sys.stderr.write("Could not find key 'time': \
                invalid or corrupt data file.\n")
        sys.exit(1)
    new_hdr = pmt.dict_delete(hdr, pmt.intern("rx_time"))
    if pmt.dict_has_key(hdr, pmt.intern("rx_rate")):
        r = pmt.dict_ref(hdr, pmt.intern("rx_rate"), pmt.PMT_NIL)
        rate = pmt.to_double(r)
        new_t = t + float(seg_size) / rate
        new_secs = int(new_t)
        new_fracs = new_t - new_secs
        time_val = pmt.make_tuple(pmt.from_uint64(new_secs),
                                  pmt.from_double(new_fracs))
        new_hdr = pmt.dict_add(new_hdr, pmt.intern("rx_time"), time_val)
        return new_hdr
예제 #24
0
File: mkheader.py  Project: garverp/recipes
def update_timestamp(hdr, seg_size):
    """Advance the 'rx_time' entry of a metadata header by one segment.

    Reads the (secs, fracs) 'rx_time' tuple and the 'rx_rate' value from
    *hdr*, adds seg_size / rate seconds, and returns a new header dict
    with the updated 'rx_time'.  Exits the process when 'rx_time' is
    missing (corrupt file); returns *hdr* unchanged when 'rx_rate' is
    missing, since the timestamp cannot be advanced without a rate.
    Python 2 code (uses ``long``).
    """
    if pmt.dict_has_key(hdr, pmt.string_to_symbol("rx_time")):
        r = pmt.dict_ref(hdr, pmt.string_to_symbol("rx_time"), pmt.PMT_NIL)
        secs = pmt.tuple_ref(r, 0)
        fracs = pmt.tuple_ref(r, 1)
        secs = float(pmt.to_uint64(secs))
        fracs = pmt.to_double(fracs)
        t = secs + fracs
    else:
        # BUG FIX: message previously said key 'time'; the key actually
        # looked up above is 'rx_time'.
        sys.stderr.write("Could not find key 'rx_time': \
                invalid or corrupt data file.\n")
        sys.exit(1)
    new_hdr = pmt.dict_delete(hdr, pmt.intern("rx_time"))
    if pmt.dict_has_key(hdr, pmt.intern("rx_rate")):
        r = pmt.dict_ref(hdr, pmt.intern("rx_rate"), pmt.PMT_NIL)
        rate = pmt.to_double(r)
        new_t = t + float(seg_size) / rate
        new_secs = long(new_t)
        new_fracs = new_t - new_secs
        time_val = pmt.make_tuple(pmt.from_uint64(new_secs),
                                  pmt.from_double(new_fracs))
        new_hdr = pmt.dict_add(new_hdr, pmt.intern("rx_time"), time_val)
        return new_hdr
    # BUG FIX: previously fell through and implicitly returned None when
    # 'rx_rate' was absent; return the original header (with rx_time still
    # intact) so callers keep a valid dict.
    return hdr
예제 #25
0
    def test_tuple(self):
        """command_source should emit one PMT tuple matching the protobuf list."""
        source = starcoder.command_source()
        sink = blocks.message_debug()
        self.tb.msg_connect((source, 'out'), (sink, 'store'))

        # Build a protobuf list message of TUPLE type: (symbol, double).
        proto = starcoder_pb2.BlockMessage()
        elem = proto.list_value.value.add()
        elem.symbol_value = "testtransmission"
        elem = proto.list_value.value.add()
        elem.double_value = 23.2
        proto.list_value.type = starcoder_pb2.List.TUPLE

        expected = pmt.make_tuple(pmt.intern("testtransmission"),
                                  pmt.from_double(23.2))

        self.tb.start()
        source.push(proto.SerializeToString())
        time.sleep(0.1)
        self.tb.stop()
        self.tb.wait()

        self.assertEqual(sink.num_messages(), 1)
        received = sink.get_message(0)
        self.assertTrue(pmt.is_tuple(received))
        self.assertTrue(pmt.equal(received, expected))
예제 #26
0
파일: qpsk_fh.py 프로젝트: CIG-SDR/CIG
 def __init__(
         self,
         n_bursts, n_channels,
         freq_delta, base_freq,
         burst_length, base_time, hop_time,
         post_tuning=True,
         tx_gain=0,
         verbose=False
     ):
     """Hier block that stamps a burst stream with hopping tx_freq/tx_time tags.

     Builds a randomly shuffled sequence of n_channels frequencies
     starting at base_freq (spaced freq_delta apart) and repeats it for
     n_bursts bursts.  Every burst of burst_length samples gets
     'tx_freq', 'packet_len' and 'tx_time' stream tags; one 'tx_command'
     gain tag is attached at offset 0.  Python 2 code (print
     statements, xrange).
     """
     gr.hier_block2.__init__(self,
         "FrequencyHopperSrc",
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
     )
     n_samples_total = n_bursts * burst_length
     self.hop_sequence = numpy.arange(base_freq, base_freq + n_channels * freq_delta, freq_delta)
   # self.hop_sequence = 2440000000, 2450000000, 2435000000, 2430000000, 2445000000, 2420000000, 2425000000  #Specify the hopping pattern here, repeat from begining
     numpy.random.shuffle(self.hop_sequence)  #this randomly shuffels frequencies in the specified range
     # Repeat the shuffled channel list for as many bursts as requested.
     self.hop_sequence = [self.hop_sequence[x % n_channels] for x in xrange(n_bursts)]
   # self.hop_sequence = [self.hop_sequence[x % 7]for x in xrange(n_bursts)]
     if verbose:
         print "Hop Frequencies  | Hop Pattern"
         print "=================|================================"
         for f in self.hop_sequence:
             print "{:6.3f} MHz      |  ".format(f/1e6),
             if n_channels < 50:
                 print " " * int((f - base_freq) / freq_delta) + "#"
             else:
                 print "\n"
         print "=================|================================"
     # There's no real point in setting the gain via tag for this application,
     # but this is an example to show you how to do it.
     gain_tag = gr.tag_t()
     gain_tag.offset = 0
     gain_tag.key = pmt.string_to_symbol('tx_command')
     gain_tag.value = pmt.cons(
             pmt.intern("gain"),
             # These are both valid:
             #pmt.from_double(tx_gain)
             pmt.cons(pmt.to_pmt(0), pmt.to_pmt(tx_gain))
     )
     tag_list = [gain_tag,]
     for i in xrange(n_bursts):
         tune_tag = gr.tag_t()
         tune_tag.offset = i * burst_length
         if i > 0 and post_tuning:
             tune_tag.offset -= 1 # Move it to last sample of previous burst
         tune_tag.key = pmt.string_to_symbol('tx_freq')
         tune_tag.value = pmt.to_pmt(self.hop_sequence[i])
         tag_list.append(tune_tag)
         length_tag = gr.tag_t()
         length_tag.offset = i * burst_length
         length_tag.key = pmt.string_to_symbol('packet_len')
         length_tag.value = pmt.from_long(burst_length)
         tag_list.append(length_tag)
         time_tag = gr.tag_t()
         time_tag.offset = i * burst_length
         time_tag.key = pmt.string_to_symbol('tx_time')
         # tx_time is a (uint64 secs, double fracs) tuple.
         time_tag.value = pmt.make_tuple(
                 pmt.from_uint64(int(base_time + i * hop_time)),
                 pmt.from_double((base_time + i * hop_time) % 1),
         )
         tag_list.append(time_tag)
     # All-ones vector source carries the tags; multiplying by it leaves
     # the input samples unchanged while the tags ride on the output.
     tag_source = blocks.vector_source_c((1.0,) * n_samples_total, repeat=False, tags=tag_list)
     mult = blocks.multiply_cc()
     self.connect(self, mult, self)
     self.connect(tag_source, (mult, 1))
예제 #27
0
파일: mkheader.py 프로젝트: garverp/recipes
def make_header(options, filename):
    """Create <filename>.hdr containing one metadata header per data segment.

    Each header is a serialized PMT dict with version/size/type/cplx/
    rx_time/rx_rate/bytes/strt entries; rx_time is advanced between
    segments via update_timestamp().  When options.freq is set, an
    'extras' dict holding 'rx_freq' is appended to every header and the
    'strt' offset accounts for it.  A final header covers the remainder
    when the file length is not a multiple of seg_size.  Python 2 code
    (long, print statement).
    """
    extras_present = False
    if options.freq is not None:
        extras_present = True
    # Open the file and make the header
    hdr_filename = filename + '.hdr'
    hdr_file = open(hdr_filename, 'wb')
    header = pmt.make_dict()
    # Fill in header vals
    # TODO - Read this from blocks.METADATA_VERSION
    ver_val = pmt.from_long(long(0))
    rate_val = pmt.from_double(options.sample_rate)
    time_val = pmt.make_tuple(pmt.from_uint64(options.time_sec),
                             pmt.from_double(options.time_fsec))
    ft_to_sz = parse_file_metadata.ftype_to_size
    # Map shortname to properties
    enum_type = SNAME_TO_ENUM[options.format]
    type_props = SNAME_DEFS[enum_type]
    size_val = pmt.from_long(type_props[0])
    cplx_val = pmt.from_bool(type_props[1])
    type_val = pmt.from_long(type_props[2])
    fmt = type_props[2]
    file_samp_len = long(options.length)
    seg_size = long(options.seg_size)
    bytes_val = pmt.from_uint64(long(seg_size*ft_to_sz[fmt]))
    # Set header vals
    header = pmt.dict_add(header, pmt.intern("version"), ver_val)
    header = pmt.dict_add(header, pmt.intern("size"), size_val)
    header = pmt.dict_add(header, pmt.intern("type"), type_val)
    header = pmt.dict_add(header, pmt.intern("cplx"), cplx_val)
    header = pmt.dict_add(header, pmt.intern("rx_time"), time_val)
    header = pmt.dict_add(header, pmt.intern("rx_rate"), rate_val)
    header = pmt.dict_add(header, pmt.intern("bytes"), bytes_val)

    if extras_present:
        freq_key = pmt.intern("rx_freq")
        freq_val = pmt.from_double(options.freq)
        extras = pmt.make_dict()
        extras = pmt.dict_add(extras, freq_key, freq_val)
        extras_str = pmt.serialize_str(extras)
        # 'strt' points past the fixed header plus the serialized extras.
        start_val = pmt.from_uint64(blocks.METADATA_HEADER_SIZE
                + len(extras_str))
    else:
        start_val = pmt.from_uint64(blocks.METADATA_HEADER_SIZE)
    header = pmt.dict_add(header, pmt.intern("strt"), start_val)
    num_segments = file_samp_len/seg_size
    if options.verbose:
        print "Wrote %d headers to: %s (Version %d)" % (num_segments+1,
                hdr_filename,pmt.to_long(ver_val))
    for x in range(0,num_segments,1):
        # Serialize and write out file
        if extras_present:
            header_str = pmt.serialize_str(header) + extras_str
        else:
            header_str = pmt.serialize_str(header)
        hdr_file.write(header_str)
        # Update header based on sample rate and segment size
        header = update_timestamp(header,seg_size)

    # Last header is special b/c file size is probably not mult. of seg_size
    header = pmt.dict_delete(header,pmt.intern("bytes"))
    bytes_remaining = ft_to_sz[fmt]*(file_samp_len - num_segments*long(seg_size))
    bytes_val = pmt.from_uint64(bytes_remaining)
    header = pmt.dict_add(header,pmt.intern("bytes"),bytes_val)
    # Serialize and write out file
    if extras_present:
        header_str = pmt.serialize_str(header) + extras_str
    else:
        header_str = pmt.serialize_str(header)
    hdr_file.write(header_str)
    hdr_file.close()
예제 #28
0
 def __init__(self,
              n_bursts,
              n_channels,
              freq_delta,
              base_freq,
              burst_length,
              base_time,
              hop_time,
              post_tuning=True,
              tx_gain=0,
              verbose=False):
     """Hier block that tags a burst stream with a fixed 7-frequency hop list.

     Variant of FrequencyHopperSrc that uses a hard-coded tuple of seven
     2.4 GHz frequencies (the computed/shuffled sequence is commented
     out) and attaches 'tx_freq', 'packet_len' and 'tx_time' tags to
     every burst of burst_length samples.  Python 2 code (print
     statements, xrange).
     """
     gr.hier_block2.__init__(
         self,
         "FrequencyHopperSrc",
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
     )
     n_samples_total = n_bursts * burst_length
     #  self.hop_sequence = numpy.arange(base_freq, base_freq + n_channels * freq_delta, freq_delta)
     self.hop_sequence = 2440000000, 2450000000, 2435000000, 2430000000, 2445000000, 2420000000, 2425000000
     #  numpy.random.shuffle(self.hop_sequence)  #this randomly shuffels frequencies in the specified range
     #  self.hop_sequence = [self.hop_sequence[x % n_channels] for x in xrange(n_bursts)]
     # The modulo 7 matches the length of the hard-coded list above.
     self.hop_sequence = [
         self.hop_sequence[x % 7] for x in xrange(n_bursts)
     ]
     if verbose:
         print "Hop Frequencies  | Hop Pattern"
         print "=================|================================"
         for f in self.hop_sequence:
             print "{:6.3f} MHz      |  ".format(f / 1e6),
             if n_channels < 50:
                 print " " * int((f - base_freq) / freq_delta) + "#"
             else:
                 print "\n"
         print "=================|================================"
     # There's no real point in setting the gain via tag for this application,
     # but this is an example to show you how to do it.
     gain_tag = gr.tag_t()
     gain_tag.offset = 0
     gain_tag.key = pmt.string_to_symbol('tx_command')
     gain_tag.value = pmt.cons(
         pmt.intern("gain"),
         # These are both valid:
         #pmt.from_double(tx_gain)
         pmt.cons(pmt.to_pmt(0), pmt.to_pmt(tx_gain)))
     tag_list = [
         gain_tag,
     ]
     for i in xrange(n_bursts):
         tune_tag = gr.tag_t()
         tune_tag.offset = i * burst_length
         if i > 0 and post_tuning:
             tune_tag.offset -= 1  # Move it to last sample of previous burst
         tune_tag.key = pmt.string_to_symbol('tx_freq')
         tune_tag.value = pmt.to_pmt(self.hop_sequence[i])
         tag_list.append(tune_tag)
         length_tag = gr.tag_t()
         length_tag.offset = i * burst_length
         length_tag.key = pmt.string_to_symbol('packet_len')
         length_tag.value = pmt.from_long(burst_length)
         tag_list.append(length_tag)
         time_tag = gr.tag_t()
         time_tag.offset = i * burst_length
         time_tag.key = pmt.string_to_symbol('tx_time')
         # tx_time is a (uint64 secs, double fracs) tuple.
         time_tag.value = pmt.make_tuple(
             pmt.from_uint64(int(base_time + i * hop_time)),
             pmt.from_double((base_time + i * hop_time) % 1),
         )
         tag_list.append(time_tag)
     # All-ones vector source carries the tags; multiplying by it leaves
     # the input samples unchanged while the tags ride on the output.
     tag_source = blocks.vector_source_c((1.0, ) * n_samples_total,
                                         repeat=False,
                                         tags=tag_list)
     mult = blocks.multiply_cc()
     self.connect(self, mult, self)
     self.connect(tag_source, (mult, 1))
예제 #29
0
    def __init__(
            self,
            n_bursts, n_channels,
            freq_delta, base_freq, dsp_tuning,
            burst_length, base_time, hop_time,
            seed,rate,
	    post_tuning=False, 
            tx_gain=0,
            verbose=False
        ):
        """Hier block that tags bursts with a seeded, repeatable hop pattern.

        The channel list is centred on base_freq, shuffled with a
        deterministic RNG seeded by *seed*, and repeated for n_bursts.
        Tuning is emitted either as 'tx_command' dicts (when dsp_tuning)
        or as plain 'tx_freq' tags, plus 'packet_len' and 'tx_time' tags
        per burst.  Python 2 code (print statements, xrange); the
        original mixes tabs and spaces around the seeding lines.
        """
        gr.hier_block2.__init__(self,
            "Hopping",
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
        )
        n_samples_total = n_bursts * burst_length
        lowest_frequency = base_freq - numpy.floor(n_channels/2) * freq_delta
        self.hop_sequence = [lowest_frequency + n * freq_delta for n in xrange(n_channels)]
	random.seed(seed)	
	lam = random.random()        
	random.shuffle(self.hop_sequence, lambda: lam)
        # Repeat that:
        self.hop_sequence = [self.hop_sequence[x % n_channels] for x in xrange(n_bursts)]
        if verbose:
            print "Hop Frequencies  | Hop Pattern"
            print "=================|================================"
            for f in self.hop_sequence:
                print "{:6.3f} MHz      |  ".format(f/1e6),
                if n_channels < 50:
                    print " " * int((f - base_freq) / freq_delta) + "#"
                else:
                    print "\n"
            print "=================|================================"
        # There's no real point in setting the gain via tag for this application,
        # but this is an example to show you how to do it.
        gain_tag = gr.tag_t()
        gain_tag.offset = 0
        gain_tag.key = pmt.string_to_symbol('tx_command')
        gain_tag.value = pmt.to_pmt({'gain': tx_gain})
        tag_list = [gain_tag,]
        for i in xrange(len(self.hop_sequence)):
            tune_tag = gr.tag_t()
            tune_tag.offset = i * burst_length
            if i > 0 and post_tuning and not dsp_tuning: # TODO dsp_tuning should also be able to do post_tuning
                tune_tag.offset -= 1 # Move it to last sample of previous burst
            if dsp_tuning:
                tune_tag.key = pmt.string_to_symbol('tx_command')
                tune_tag.value = pmt.to_pmt({'rf_freq_policy': int(ord('N')), 'lo_freq': base_freq, 'dsp_freq_policy': int(ord('M')),'dsp_freq': base_freq - self.hop_sequence[i] })
            else:
                tune_tag.key = pmt.string_to_symbol('tx_freq')
                tune_tag.value = pmt.to_pmt(self.hop_sequence[i])
            tag_list.append(tune_tag)
            length_tag = gr.tag_t()
            length_tag.offset = i * burst_length
            length_tag.key = pmt.string_to_symbol('packet_len')
            length_tag.value = pmt.from_long(burst_length)
            tag_list.append(length_tag)	
            time_tag = gr.tag_t()
            time_tag.offset = i * burst_length
            time_tag.key = pmt.string_to_symbol("tx_time")
            # tx_time is a (uint64 secs, double fracs) tuple.
            time_tag.value = pmt.make_tuple(
                    pmt.from_uint64(int(base_time + i * hop_time)),
                    pmt.from_double((base_time + i * hop_time) % 1),
            )
            tag_list.append(time_tag)
        #############################################
        # Old Version
        #############################################
        # NOTE(review): repeat=True replays the tagged vector forever,
        # unlike the repeat=False variants of this block — confirm intended.
        tag_source = blocks.vector_source_c((1.0,) * n_samples_total, repeat= True, tags=tag_list)
        mult = blocks.multiply_cc()
        self.connect(self, mult, self)
        self.connect(tag_source, (mult, 1))
예제 #30
0
파일: mkheader.py 프로젝트: mjp5578/recipes
def make_header(options, filename):
    extras_present = False
    if options.freq is not None:
        extras_present = True
    # Open the file and make the header
    hdr_filename = filename + '.hdr'
    hdr_file = open(hdr_filename, 'wb')
    header = pmt.make_dict()
    # Fill in header vals
    # TODO - Read this from blocks.METADATA_VERSION
    ver_val = pmt.from_long(long(0))
    rate_val = pmt.from_double(options.sample_rate)
    time_val = pmt.make_tuple(pmt.from_uint64(options.time_sec),
                              pmt.from_double(options.time_fsec))
    ft_to_sz = parse_file_metadata.ftype_to_size
    # Map shortname to properties
    enum_type = SNAME_TO_ENUM[options.format]
    type_props = SNAME_DEFS[enum_type]
    size_val = pmt.from_long(type_props[0])
    cplx_val = pmt.from_bool(type_props[1])
    type_val = pmt.from_long(type_props[2])
    fmt = type_props[2]
    file_samp_len = long(options.length)
    seg_size = long(options.seg_size)
    bytes_val = pmt.from_uint64(long(seg_size * ft_to_sz[fmt]))
    # Set header vals
    header = pmt.dict_add(header, pmt.intern("version"), ver_val)
    header = pmt.dict_add(header, pmt.intern("size"), size_val)
    header = pmt.dict_add(header, pmt.intern("type"), type_val)
    header = pmt.dict_add(header, pmt.intern("cplx"), cplx_val)
    header = pmt.dict_add(header, pmt.intern("rx_time"), time_val)
    header = pmt.dict_add(header, pmt.intern("rx_rate"), rate_val)
    header = pmt.dict_add(header, pmt.intern("bytes"), bytes_val)

    if extras_present:
        freq_key = pmt.intern("rx_freq")
        freq_val = pmt.from_double(options.freq)
        extras = pmt.make_dict()
        extras = pmt.dict_add(extras, freq_key, freq_val)
        extras_str = pmt.serialize_str(extras)
        start_val = pmt.from_uint64(blocks.METADATA_HEADER_SIZE +
                                    len(extras_str))
    else:
        start_val = pmt.from_uint64(blocks.METADATA_HEADER_SIZE)
    header = pmt.dict_add(header, pmt.intern("strt"), start_val)
    num_segments = file_samp_len / seg_size
    if options.verbose:
        print "Wrote %d headers to: %s (Version %d)" % (
            num_segments + 1, hdr_filename, pmt.to_long(ver_val))
    for x in range(0, num_segments, 1):
        # Serialize and write out file
        if extras_present:
            header_str = pmt.serialize_str(header) + extras_str
        else:
            header_str = pmt.serialize_str(header)
        hdr_file.write(header_str)
        # Update header based on sample rate and segment size
        header = update_timestamp(header, seg_size)

    # Last header is special b/c file size is probably not mult. of seg_size
    header = pmt.dict_delete(header, pmt.intern("bytes"))
    bytes_remaining = ft_to_sz[fmt] * (file_samp_len -
                                       num_segments * long(seg_size))
    bytes_val = pmt.from_uint64(bytes_remaining)
    header = pmt.dict_add(header, pmt.intern("bytes"), bytes_val)
    # Serialize and write out file
    if extras_present:
        header_str = pmt.serialize_str(header) + extras_str
    else:
        header_str = pmt.serialize_str(header)
    hdr_file.write(header_str)
    hdr_file.close()
예제 #31
0
def trigger_tag_create(state=True, sample_index=0):
    """Build a (bool state, long sample_index) PMT tuple for a trigger tag."""
    state_pmt = pmt.from_bool(state)
    index_pmt = pmt.from_long(sample_index)
    return pmt.make_tuple(state_pmt, index_pmt)
	def test_001_t (self):
		# create input data
		steps = 200
		vec_time = np.linspace(0,20,steps);
		vec_velocity = np.linspace(5,5,steps)
		vec_range = np.linspace(100,1,steps)
		vec_velocity_real = [0]*steps
		vec_range_real = [0]*steps
		for k in range(steps):
			vec_velocity_real[k] = vec_velocity[k]
			vec_range_real[k] = vec_range[k]
		
		# random data on trajectory
		mu = 0
		sigma_vel = 0.5
		sigma_rge = 7
		for k in range(len(vec_velocity)):
			vec_velocity[k] = vec_velocity[k] + random.gauss(mu,sigma_vel)
			vec_range[k] = vec_range[k] + random.gauss(mu,sigma_rge)
		
		# set up pmts with zero points
		target_pmts = [0]*len(vec_velocity)
		#zero_points = (5,12,17)
		zero_points = ()
		for k in range(len(vec_velocity)):
			pmt_time = pmt.list2(pmt.string_to_symbol("rx_time"),pmt.make_tuple(pmt.from_long(int(vec_time[k])),pmt.from_double(vec_time[k]-int(vec_time[k]))))
			if k in zero_points:
				vec = [0]
				pmt_velocity = pmt.list2(pmt.string_to_symbol("velocity"),pmt.init_f32vector(1,vec))
				pmt_range = pmt.list2(pmt.string_to_symbol("range"),pmt.init_f32vector(1,vec))
			else:
				pmt_velocity = pmt.list2(pmt.string_to_symbol("velocity"),pmt.init_f32vector(1,(vec_velocity[k],)))
				pmt_range = pmt.list2(pmt.string_to_symbol("range"),pmt.init_f32vector(1,(vec_range[k],)))
			target_pmts[k] = pmt.list3(pmt_time,pmt_velocity,pmt_range)
		
		# set up fg
		test_duration = 1000 # ms, do not change!
		
		num_particle = 300
		std_range_meas = sigma_rge
		std_velocity_meas = sigma_vel
		std_accel_sys = 0.1
		threshold_track = 0.001
		threshold_lost = 4
		tracking_filter = "particle"
		#tracking_filter = "kalman"
		
		# connect multiple strobes for different msgs
		src = [0]*len(target_pmts)
		for k in range(len(target_pmts)):
			src[k] = blocks.message_strobe(target_pmts[k], test_duration-400+400/len(target_pmts)*k)
		tracking = radar.tracking_singletarget(num_particle, std_range_meas, std_velocity_meas, std_accel_sys, threshold_track, threshold_lost, tracking_filter)
		snk = blocks.message_debug()
		
		for k in range(len(target_pmts)):
			self.tb.msg_connect(src[k],"strobe",tracking,"Msg in")
		self.tb.msg_connect(tracking,"Msg out",snk,"store")
		
		self.tb.start()
		sleep(test_duration/1000.0)
		self.tb.stop()
		self.tb.wait
		()
		# check data
#		show_data = False # Toggle visibility of single messages # broken
		msg_num = snk.num_messages()
		vec_out_range = []
		vec_out_velocity = []
		for k in range(msg_num):
			msg_part = snk.get_message(k)
			tme = pmt.nth(0,msg_part) # not used
			vel = pmt.nth(1,msg_part)
			rgn = pmt.nth(2,msg_part)
			vec_out_range.append(pmt.f32vector_elements(pmt.nth(1,rgn))[0])
			vec_out_velocity.append(pmt.f32vector_elements(pmt.nth(1,vel))[0])
#			if show_data:
#				print "msg:", k
#				print pmt.symbol_to_string(pmt.nth(0,vel)), pmt.f32vector_elements(pmt.nth(1,vel))[0]
#				print pmt.symbol_to_string(pmt.nth(0,rgn)), pmt.f32vector_elements(pmt.nth(1,rgn))[0]
#				print 
#		print "RANGE:"
#		print vec_out_range
#		print "VELOCITY:"
#		print vec_out_velocity
		
		# make plots
		show_plots = False # Toggle visibility of plots
		if show_plots:
			time = range(len(vec_range))
			time_out = range(len(vec_out_range))
			plt.figure(1)
			
			plt.subplot(211)
			marker = '-o'
			p1 = plt.plot(time,vec_velocity_real,marker,time,vec_velocity,marker,time_out,vec_out_velocity,marker)
			plt.legend(p1,["IN velocity real", "IN velocity", "OUT velocity"])
			plt.title("VELOCITY")
			plt.xlabel('time')
			
			plt.subplot(212)
			marker = '-o'
			p1 = plt.plot(time,vec_range_real,marker,time,vec_range,marker,time_out,vec_out_range,marker)
			plt.legend(p1,["IN range real","IN range","OUT range"])
			plt.title("RANGE")
			plt.xlabel('time')
			
			plt.show()
예제 #33
0
파일: fileman.py 프로젝트: garverp/recipes
def propagate_headers(options, args):
    """Propagate metadata headers from an input file to a trimmed output file.

    Reads <infile>.hdr, locates the segment containing options.start,
    interpolates a new starting rx_time from the sample offset, rewrites
    size/type/bytes for the requested output type, and writes the
    adjusted headers to <outfile>.hdr.  Returns a dict describing the
    subsection (files, types, offset, length).  Exits the process on
    unreadable headers or unsupported data types.  Python 2 code
    (long, print statements).
    """
    infile = args[0]
    outfile = args[1]
    infile_hdr = infile + ".hdr"
    outfile_hdr = outfile + ".hdr"
    sample_cnt_end = 0
    sample_offset = long(options.start)
    # Open input header
    try:
        handle_in = open(infile_hdr, "rb")
    except IOError:
        sys.stderr.write("Unable to open input file header\n")
        sys.exit(1)
    # Open output header
    try:
        handle_out = open(outfile_hdr, "wb")
    except IOError:
        sys.stderr.write("Unable to open output file header\n")
        sys.exit(1)

    # Read first header separately to get file type
    hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
    info_in = parse_file_metadata.parse_header(hdr_in, False)
    sample_cnt_end += info_in["nitems"]
    # Parse file type - ensure support for it
    shortname_intype = find_shortname(info_in["cplx"], info_in["type"], info_in["size"])
    if shortname_intype == SNAME_TO_ENUM["unknown"]:
        sys.stderr.write("Unsupported data type\n")
        sys.exit(1)
    if options.output_type == "unknown":
        shortname_outtype = shortname_intype
    else:
        shortname_outtype = SNAME_TO_ENUM[options.output_type]

    # Calc sample_len from file size if not specified
    if options.nsamples is not None:
        sample_len = long(options.nsamples)
    else:
        sample_len = os.path.getsize(infile) / SNAME_DEFS[shortname_intype][0]
    final_index = sample_offset + sample_len

    # Search input headers until we find the correct one
    while sample_cnt_end <= sample_offset:
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in, False)
        sample_cnt_end += info_in["nitems"]
    time_in = info_in["rx_time"]
    # Starting sample of current segment
    sample_cnt_start = sample_cnt_end - info_in["nitems"]
    # Interpolate new timestamp
    delta = sample_offset - sample_cnt_start
    # NOTE(review): under Python 2 this is integer division if rx_rate is
    # an int — rx_rate is presumably a float from parse_header; confirm.
    new_ts = time_in + delta / info_in["rx_rate"]
    # Calc new segment size (samples)
    if sample_cnt_end > final_index:
        first_seg_len = final_index - sample_offset
    else:
        first_seg_len = sample_cnt_end - sample_offset

    # Write the first output header
    hdr_out = hdr_in
    new_secs = long(new_ts)
    new_fracs = new_ts - new_secs
    time_val = pmt.make_tuple(pmt.from_uint64(new_secs), pmt.from_double(new_fracs))
    size_val = pmt.from_long(SNAME_DEFS[shortname_outtype][0])
    bytes_val = pmt.from_uint64(first_seg_len * SNAME_DEFS[shortname_outtype][0])
    type_val = pmt.from_long(SNAME_DEFS[shortname_outtype][2])
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("rx_time"), time_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("bytes"), bytes_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("type"), type_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("size"), size_val)
    hdr_out_str = pmt.serialize_str(hdr_out) + pmt.serialize_str(hdr_extra_in)
    handle_out.write(hdr_out_str)

    # Continue reading headers, modifying, and writing
    last_seg_len = info_in["nitems"]
    print "sample_cnt_end=%d,final_index=%d" % (sample_cnt_end, final_index)
    # Iterate through remaining headers
    while sample_cnt_end < final_index:
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in, False)
        nitems = info_in["nitems"]
        sample_cnt_start = sample_cnt_end
        sample_cnt_end += nitems
        hdr_out = hdr_in
        # For last header, adjust segment length accordingly
        if sample_cnt_end > final_index:
            last_seg_len = final_index - sample_cnt_start
        else:
            last_seg_len = nitems
        size_val = pmt.from_long(SNAME_DEFS[shortname_outtype][0])
        bytes_val = pmt.from_uint64(last_seg_len * SNAME_DEFS[shortname_outtype][0])
        type_val = pmt.from_long(SNAME_DEFS[shortname_outtype][2])
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("bytes"), bytes_val)
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("type"), type_val)
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("size"), size_val)
        hdr_out_str = pmt.serialize_str(hdr_out) + pmt.serialize_str(hdr_extra_in)
        handle_out.write(hdr_out_str)

    if options.verbose:
        print "Input File:" + infile
        print "Input Header:" + infile_hdr
        print "Input Type:" + ENUM_TO_SNAME[shortname_intype]
        print "Output File:" + outfile
        print "Output File Length (Samples):%d" % (final_index - sample_offset)
        print "Output Header:" + outfile_hdr
        print "File subsection: [%d,%d]" % (sample_offset, final_index)
        print "Output Type:" + ENUM_TO_SNAME[shortname_outtype]
        print "First Segment Length: %e samples" % first_seg_len
        print "Last Segment Length: %e samples" % last_seg_len
        print "delta=%f,new ts=%f" % (delta, new_ts)

    # Clean up
    handle_in.close()
    handle_out.close()

    # Return header info
    return {
        "infile": infile,
        "intype": shortname_intype,
        "outfile": outfile,
        "outtype": shortname_outtype,
        "sample_offset": sample_offset,
        "sample_len": sample_len,
    }
예제 #34
0
 def __init__(
         self,
         n_bursts, n_channels,
         freq_delta, base_freq, dsp_tuning,
         burst_length, base_time, hop_time,
         post_tuning=False,
         tx_gain=0,
         verbose=False
     ):
     """Hier block that tags bursts with a random hop pattern (Python 3 variant).

     Channels are centred on base_freq, shuffled once with
     numpy.random.shuffle, and repeated for n_bursts.  Tuning commands
     are emitted as 'tx_command' dicts carrying a 'time' entry, plus
     'packet_len' and 'tx_time' stream tags per burst.
     """
     gr.hier_block2.__init__(
         self, "FrequencyHopperSrc",
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
     )
     n_samples_total = n_bursts * burst_length
     lowest_frequency = base_freq - numpy.floor(n_channels/2) * freq_delta
     self.hop_sequence = [lowest_frequency + n * freq_delta for n in range(n_channels)]
     numpy.random.shuffle(self.hop_sequence)
     # Repeat that:
     self.hop_sequence = [self.hop_sequence[x % n_channels] for x in range(n_bursts)]
     if verbose:
         print("Hop Frequencies  | Hop Pattern")
         print("=================|================================")
         for f in self.hop_sequence:
             print("{:6.3f} MHz      |  ".format(f/1e6), end='')
             if n_channels < 50:
                 print(" " * int((f - base_freq) / freq_delta) + "#")
             else:
                 print("\n")
         print("=================|================================")
     # There's no real point in setting the gain via tag for this application,
     # but this is an example to show you how to do it.
     gain_tag = gr.tag_t()
     gain_tag.offset = 0
     gain_tag.key = pmt.string_to_symbol('tx_command')
     gain_tag.value = pmt.to_pmt({'gain': tx_gain})
     tag_list = [gain_tag,]
     for i in range(len(self.hop_sequence)):
         # Command time as a (uint64 secs, double fracs) pair; note the
         # 0.01 s offset added to base_time + i*hop_time.
         time = pmt.cons(
             pmt.from_uint64(int(base_time + i * hop_time+0.01)),
             pmt.from_double((base_time + i * hop_time+0.01) % 1),
         )
         tune_tag = gr.tag_t()
         tune_tag.offset = i * burst_length
         # TODO dsp_tuning should also be able to do post_tuning
         if i > 0 and post_tuning and not dsp_tuning:
             tune_tag.offset -= 1 # Move it to last sample of previous burst
         if dsp_tuning:
             tune_tag.key = pmt.string_to_symbol('tx_command')
             tune_tag.value = pmt.to_pmt({'lo_freq': base_freq, 'dsp_freq': base_freq - self.hop_sequence[i]})
             tune_tag.value = pmt.dict_add(tune_tag.value, pmt.intern("time"),time)
         else:
             tune_tag.key = pmt.string_to_symbol('tx_command')
             tune_tag.value = pmt.to_pmt({'freq': self.hop_sequence[i]})
             tune_tag.value = pmt.dict_add(tune_tag.value, pmt.intern('time'), time)
         tag_list.append(tune_tag)
         length_tag = gr.tag_t()
         length_tag.offset = i * burst_length
         length_tag.key = pmt.string_to_symbol('packet_len')
         length_tag.value = pmt.from_long(burst_length)
         tag_list.append(length_tag)
         time_tag = gr.tag_t()
         time_tag.offset = i * burst_length
         time_tag.key = pmt.string_to_symbol('tx_time')
         # Reuse the same (secs, fracs) pair as the command time above.
         time_tag.value = pmt.make_tuple(
                 pmt.car(time),
                 pmt.cdr(time)
         )
         tag_list.append(time_tag)
     # All-ones vector source carries the tags; multiplying by it leaves
     # the input samples unchanged while the tags ride on the output.
     tag_source = blocks.vector_source_c((1.0,) * n_samples_total, repeat=False, tags=tag_list)
     mult = blocks.multiply_cc()
     self.connect(self, mult, self)
     self.connect(tag_source, (mult, 1))
예제 #35
0
    def test_001_t(self):
        """After settling, the synchronizer output should match the clean input."""
        # flowgraph parameters
        fft_len = 256
        cp_len = 32
        samp_rate = 32000
        bits = np.random.choice([-1, 1], [100, fft_len])

        # assumes ifft over axis 0 is what the synchronizer expects
        # (original code; possibly axis=-1 was intended) — kept as-is
        symbols = np.fft.ifft(bits, axis=0)

        # prepend the cyclic prefix to every row
        symbols = np.hstack((symbols[:, -cp_len:], symbols))

        # annotation message; only the 4th (FFT len) and 5th (CP len)
        # entries are read by the synchronizer
        sig_id = pmt.make_tuple(pmt.intern("Signal"), pmt.from_uint64(0))
        sig_name = pmt.make_tuple(pmt.intern("OFDM"), pmt.from_float(1.0))
        dummy1 = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(0.0))
        dummy2 = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(0.0))
        fft_entry = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(256))
        cp_entry = pmt.make_tuple(pmt.intern("xxx"), pmt.from_float(32))
        msg = pmt.make_tuple(sig_id, sig_name, dummy1, dummy2, fft_entry,
                             cp_entry)

        samples = np.reshape(symbols, (1, -1))

        # GR time!
        src = blocks.vector_source_c(samples[0].tolist(), True, 1, [])
        freq_offset = analog.sig_source_c(1, analog.GR_SIN_WAVE,
                                          50.0 / samp_rate, 1.0, 0.0)
        mixer = blocks.multiply_cc()
        sync = inspector.ofdm_synchronizer_cc(4096)
        dst = blocks.vector_sink_c()
        dst2 = blocks.vector_sink_c()
        msg_src = blocks.message_strobe(msg, 0)

        # wire up: (src * offset) -> sync -> dst, plus a clean reference path
        self.tb.connect(src, (mixer, 0))
        self.tb.connect(freq_offset, (mixer, 1))
        self.tb.connect(mixer, sync)
        self.tb.msg_connect((msg_src, 'strobe'), (sync, 'ofdm_in'))
        self.tb.connect(sync, dst)
        self.tb.connect(src, dst2)

        self.tb.start()
        time.sleep(0.1)
        self.tb.stop()
        self.tb.wait()

        output = dst.data()
        expect = dst2.data()

        # block outputs 0j until it has enough OFDM symbols to perform estimations
        settled = next((k for k in range(len(output)) if output[k] != 0j))

        # use 10,000 samples for comparison since block fails sometimes
        # for one work function
        output = output[settled:settled + 10000]
        expect = expect[settled:settled + 10000]

        self.assertComplexTuplesAlmostEqual2(expect, output,
                                             abs_eps=0.001, rel_eps=10)
예제 #36
0
def channel_states_tag_create(phase_offset, freq_offset, channel_states):
    """Pack channel-estimation results into a PMT tuple tag value.

    The tuple layout is (phase offset, frequency offset, complex
    channel-state vector), with the offsets stored as doubles and the
    channel states as a c32 vector.
    """
    phase_pmt = pmt.from_double(phase_offset)
    freq_pmt = pmt.from_double(freq_offset)
    states_pmt = pmt.init_c32vector(len(channel_states), channel_states)
    return pmt.make_tuple(phase_pmt, freq_pmt, states_pmt)
예제 #37
0
    def work(self, input_items, output_items):
        """Produce complex samples parsed from a text file, re-attaching tags.

        Each line of ``self.f`` is expected to be one of:
          ``stream <real> <imag>``                     -> one complex output item
          ``tag <offset> <source> <key> <value...>``   -> a stream tag
        A tag value is either a single integer, or a (long, float) tuple
        printed as ``{<long> <float>}`` across two whitespace-separated fields.

        Fixes vs. the original:
          * ``line.rstrip()`` result was discarded (no-op); the newline is now
            stripped once before splitting, and the magic ``[:-1]``/``[:-2]``
            slices are replaced by explicit strips.
          * ``numpy.complex`` (removed in NumPy 1.24) and py2 ``long`` replaced
            by the builtins ``complex``/``int``.
          * On EOF the original spun forever (``readline()`` keeps returning
            ``''``); we now stop and report only the items actually produced.
          * Dead ``out[i] = i`` store removed (always overwritten before ``i``
            advanced).
        """
        out = output_items[0]

        produced = 0
        while produced < len(out):
            line = self.f.readline()
            if not line:
                # EOF: stop instead of looping forever.
                break
            fields = line.rstrip('\n').split(" ")

            if fields[0] == "stream":
                # "stream <real> <imag>"
                real = float(fields[1])
                imag = float(fields[2])
                out[produced] = numpy.complex64(complex(real, imag))
                produced += 1

            elif fields[0] == "tag":
                # "tag <offset> <source> <key> <value...>"
                offset = fields[1]
                source = fields[2]
                key = fields[3]
                value1 = fields[4]

                if value1[0] == '{':
                    # Tuple value printed as "{<long> <float>}" — the long
                    # carries a leading '{', the float a trailing '}'.
                    first = int(value1[1:])
                    second = float(fields[5].rstrip('}'))
                    _value = pmt.make_tuple(
                            pmt.from_long(first),
                            pmt.from_float(second)
                            )
                else:
                    _value = pmt.from_long(int(value1))

                # Re-attach the tag to the output stream at its recorded offset.
                self.add_item_tag(0,                 # output
                        int(offset),                 # offset
                        pmt.string_to_symbol(key),   # key
                        _value,                      # value
                        pmt.string_to_symbol(source) # source
                        )

        # Report only what was actually produced (== len(out) except at EOF).
        return produced
예제 #38
0
def propagate_headers(options, args):
    """Copy a GNU Radio file-metadata header chain for a file subsection.

    Reads ``<infile>.hdr``, locates the header segment containing
    ``options.start``, interpolates a new ``rx_time`` for the subsection
    ``[start, start + nsamples)``, rewrites the per-segment ``size``/``type``/
    ``bytes`` fields for the requested output sample type, and writes the
    adjusted headers to ``<outfile>.hdr``.

    Fixes vs. the original: Python-2-only ``print`` statements and ``long``
    replaced with ``print()``/``int`` (the original was a SyntaxError on
    Python 3), the mixed tab/space indentation (a py3 TabError) normalized,
    and the file-size -> sample-count division made explicitly integral.

    Args:
        options: parsed command-line options; uses ``.start``, ``.nsamples``,
            ``.output_type`` and ``.verbose``.
        args: positional arguments ``[infile, outfile]``.

    Returns:
        dict summarizing the conversion (file names, in/out types,
        sample offset and length).
    """
    infile = args[0]
    outfile = args[1]
    infile_hdr = infile + '.hdr'
    outfile_hdr = outfile + '.hdr'
    sample_cnt_end = 0
    sample_offset = int(options.start)
    # Open input header
    try:
        handle_in = open(infile_hdr, "rb")
    except IOError:
        sys.stderr.write("Unable to open input file header\n")
        sys.exit(1)
    # Open output header
    try:
        handle_out = open(outfile_hdr, "wb")
    except IOError:
        sys.stderr.write("Unable to open output file header\n")
        sys.exit(1)

    # Read first header separately to get file type
    hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
    info_in = parse_file_metadata.parse_header(hdr_in, False)
    sample_cnt_end += info_in["nitems"]
    # Parse file type - ensure support for it
    shortname_intype = find_shortname(info_in['cplx'], info_in['type'],
                                      info_in['size'])
    if shortname_intype == SNAME_TO_ENUM["unknown"]:
        sys.stderr.write("Unsupported data type\n")
        sys.exit(1)
    if options.output_type == 'unknown':
        shortname_outtype = shortname_intype
    else:
        shortname_outtype = SNAME_TO_ENUM[options.output_type]

    # Calc sample_len from file size if not specified
    if options.nsamples is not None:
        sample_len = int(options.nsamples)
        final_index = sample_offset + sample_len
    else:
        # Whole items only: integer-divide file size by bytes-per-item.
        sample_len = os.path.getsize(infile) // SNAME_DEFS[shortname_intype][0]
        final_index = sample_len

    # Search input headers until we find the one containing sample_offset
    while sample_cnt_end <= sample_offset:
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in, False)
        sample_cnt_end += info_in["nitems"]
    time_in = info_in["rx_time"]
    # Starting sample of current segment
    sample_cnt_start = sample_cnt_end - info_in["nitems"]
    # Interpolate new timestamp (rx_rate is expected to be a float rate in Sa/s)
    delta = sample_offset - sample_cnt_start
    new_ts = time_in + delta / info_in["rx_rate"]
    # Calc new segment size (samples)
    if sample_cnt_end > final_index:
        first_seg_len = final_index - sample_offset
    else:
        first_seg_len = sample_cnt_end - sample_offset

    # Write the first output header with the interpolated timestamp
    hdr_out = hdr_in
    new_secs = int(new_ts)
    new_fracs = new_ts - new_secs
    time_val = pmt.make_tuple(pmt.from_uint64(new_secs),
                              pmt.from_double(new_fracs))
    size_val = pmt.from_long(SNAME_DEFS[shortname_outtype][0])
    bytes_val = pmt.from_uint64(first_seg_len * SNAME_DEFS[shortname_outtype][0])
    type_val = pmt.from_long(SNAME_DEFS[shortname_outtype][2])
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("rx_time"), time_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("bytes"), bytes_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("type"), type_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("size"), size_val)
    hdr_out_str = pmt.serialize_str(hdr_out) + pmt.serialize_str(hdr_extra_in)
    handle_out.write(hdr_out_str)

    # Continue reading headers, modifying, and writing
    last_seg_len = info_in['nitems']
    print("sample_cnt_end=%d,final_index=%d" % (sample_cnt_end, final_index))
    # Iterate through remaining headers
    while sample_cnt_end < final_index:
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in, False)
        nitems = info_in["nitems"]
        sample_cnt_start = sample_cnt_end
        sample_cnt_end += nitems
        hdr_out = hdr_in
        # For last header, adjust segment length accordingly
        if sample_cnt_end > final_index:
            last_seg_len = final_index - sample_cnt_start
        else:
            last_seg_len = nitems
        size_val = pmt.from_long(SNAME_DEFS[shortname_outtype][0])
        bytes_val = pmt.from_uint64(last_seg_len * SNAME_DEFS[shortname_outtype][0])
        type_val = pmt.from_long(SNAME_DEFS[shortname_outtype][2])
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("bytes"), bytes_val)
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("type"), type_val)
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("size"), size_val)
        hdr_out_str = pmt.serialize_str(hdr_out) + pmt.serialize_str(hdr_extra_in)
        handle_out.write(hdr_out_str)

    if options.verbose:
        print('Input File:' + infile)
        print('Input Header:' + infile_hdr)
        print('Input Type:' + ENUM_TO_SNAME[shortname_intype])
        print('Output File:' + outfile)
        print('Output File Length (Samples):%d' % (final_index - sample_offset))
        print('Output Header:' + outfile_hdr)
        print('File subsection: [%d,%d]' % (sample_offset, final_index))
        print('Output Type:' + ENUM_TO_SNAME[shortname_outtype])
        print('First Segment Length: %e samples' % first_seg_len)
        print('Last Segment Length: %e samples' % last_seg_len)
        print('delta=%f,new ts=%f' % (delta, new_ts))

    # Clean up
    handle_in.close()
    handle_out.close()

    # Return header info
    return {'infile': infile, 'intype': shortname_intype, 'outfile': outfile,
            'outtype': shortname_outtype, 'sample_offset': sample_offset,
            'sample_len': sample_len}
예제 #39
0
 def __init__(self,
              n_bursts,
              n_channels,
              freq_delta,
              base_freq,
              dsp_tuning,
              burst_length,
              base_time,
              hop_time,
              post_tuning=False,
              tx_gain=0,
              verbose=False):
     """Build a hier block that stamps a constant burst stream with hop tags.

     A random hop sequence of n_channels frequencies centred around
     base_freq (spaced freq_delta apart) is generated once, repeated to
     cover n_bursts bursts, and each burst is tagged with a 'tx_command'
     tune tag, a 'packet_len' tag and a 'tx_time' tag. The tagged
     all-ones stream is multiplied onto the input so the tags propagate
     downstream with the signal.

     Args:
         n_bursts: total number of bursts to tag (hop sequence repeats).
         n_channels: number of distinct hop frequencies.
         freq_delta: channel spacing in Hz.
         base_freq: centre frequency in Hz.
         dsp_tuning: if True, keep the LO at base_freq and hop via the
             per-burst 'dsp_freq' field instead of retuning RF.
         burst_length: samples per burst.
         base_time: scheduled time (seconds) of the first burst.
         hop_time: time between consecutive bursts (seconds).
         post_tuning: if True, place each tune tag on the last sample of
             the previous burst rather than the first of its own.
         tx_gain: gain value sent once at offset 0 via a 'tx_command' tag.
         verbose: if True, print the hop frequencies and pattern.
     """
     gr.hier_block2.__init__(
         self,
         "FrequencyHopperSrc",
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
         gr.io_signature(1, 1, gr.sizeof_gr_complex),
     )
     n_samples_total = n_bursts * burst_length
     # Channel 0 sits lowest; channels are laid out around base_freq.
     lowest_frequency = base_freq - numpy.floor(n_channels / 2) * freq_delta
     self.hop_sequence = [
         lowest_frequency + n * freq_delta for n in range(n_channels)
     ]
     # Randomize the hop order once; the shuffled order then repeats.
     numpy.random.shuffle(self.hop_sequence)
     # Repeat that:
     self.hop_sequence = [
         self.hop_sequence[x % n_channels] for x in range(n_bursts)
     ]
     if verbose:
         print("Hop Frequencies  | Hop Pattern")
         print("=================|================================")
         for f in self.hop_sequence:
             print("{:6.3f} MHz      |  ".format(f / 1e6), end='')
             if n_channels < 50:
                 print(" " * int((f - base_freq) / freq_delta) + "#")
             else:
                 print("\n")
         print("=================|================================")
     # There's no real point in setting the gain via tag for this application,
     # but this is an example to show you how to do it.
     gain_tag = gr.tag_t()
     gain_tag.offset = 0
     gain_tag.key = pmt.string_to_symbol('tx_command')
     gain_tag.value = pmt.to_pmt({'gain': tx_gain})
     tag_list = [
         gain_tag,
     ]
     for i in range(len(self.hop_sequence)):
         # (uint64 whole seconds, double fractional seconds) pair.
         # NOTE(review): the 0.01 s added to each burst time looks like a
         # settling/latency guard — confirm against the transmit chain.
         time = pmt.cons(
             pmt.from_uint64(int(base_time + i * hop_time + 0.01)),
             pmt.from_double((base_time + i * hop_time + 0.01) % 1),
         )
         tune_tag = gr.tag_t()
         tune_tag.offset = i * burst_length
         # TODO dsp_tuning should also be able to do post_tuning
         if i > 0 and post_tuning and not dsp_tuning:
             tune_tag.offset -= 1  # Move it to last sample of previous burst
         if dsp_tuning:
             # Digital tuning: LO stays at base_freq, hop via dsp_freq.
             tune_tag.key = pmt.string_to_symbol('tx_command')
             tune_tag.value = pmt.to_pmt({
                 'lo_freq':
                 base_freq,
                 'dsp_freq':
                 base_freq - self.hop_sequence[i]
             })
             tune_tag.value = pmt.dict_add(tune_tag.value,
                                           pmt.intern("time"), time)
         else:
             # RF tuning: retune directly to the hop frequency.
             tune_tag.key = pmt.string_to_symbol('tx_command')
             tune_tag.value = pmt.to_pmt({'freq': self.hop_sequence[i]})
             tune_tag.value = pmt.dict_add(tune_tag.value,
                                           pmt.intern('time'), time)
         tag_list.append(tune_tag)
         # Frame the burst for downstream tagged-stream blocks.
         length_tag = gr.tag_t()
         length_tag.offset = i * burst_length
         length_tag.key = pmt.string_to_symbol('packet_len')
         length_tag.value = pmt.from_long(burst_length)
         tag_list.append(length_tag)
         # tx_time uses a tuple, while the tune command's time is a pair;
         # reuse the same (secs, fracs) values for both.
         time_tag = gr.tag_t()
         time_tag.offset = i * burst_length
         time_tag.key = pmt.string_to_symbol('tx_time')
         time_tag.value = pmt.make_tuple(pmt.car(time), pmt.cdr(time))
         tag_list.append(time_tag)
     # Constant-ones source that carries the tags; multiplying it onto the
     # input leaves the signal unchanged while the tags propagate.
     tag_source = blocks.vector_source_c((1.0, ) * n_samples_total,
                                         repeat=False,
                                         tags=tag_list)
     mult = blocks.multiply_cc()
     self.connect(self, mult, self)
     self.connect(tag_source, (mult, 1))
예제 #40
0
def make_time_pair(t):
    """Split a time t (seconds) into a PMT tuple of (whole, fractional) parts.

    The whole part is truncated toward zero, so the fractional part always
    equals t minus the truncated whole seconds.
    """
    whole_secs = int(np.trunc(t))
    frac_secs = t - whole_secs
    return pmt.make_tuple(pmt.to_pmt(whole_secs), pmt.to_pmt(frac_secs))