コード例 #1
0
ファイル: fileman.py プロジェクト: starsway/gr-analysis
def time_to_sample(options, args):
    """Convert a requested start time (seconds) into a sample offset.

    Walks the detached .hdr metadata chunks until the chunk containing
    options.start is found, then rewrites options.start (in samples)
    and options.nsamples (in samples) in place.  Exits if the requested
    time precedes the first timestamp in the file.
    """
    # Open the detached header file that accompanies the data file.
    infile = args[0]
    infile_hdr = infile + '.hdr'
    handle_in = open(infile_hdr, "rb")
    hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
    info_in = parse_file_metadata.parse_header(hdr_in, False)
    time_in = info_in["rx_time"]
    sample_start = 0

    # Timestamps always occur on the first sample.  If the start time is
    # less than the first timestamp we cannot time-align, so exit.
    if options.start < time_in:
        # Parenthesized print: valid in both Python 2 and Python 3.
        print('Requested timestamp: %f (s) < first Timestamp: %f (s),quitting'
              % (options.start, time_in))
        sys.exit(-1)

    # Find the data chunk the requested time falls in.  (The original
    # mixed tabs and spaces here, a TabError under Python 3.)
    while options.start > time_in:
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in, False)
        time_in2 = info_in["rx_time"]
        if options.start < time_in2:
            # Requested time lies inside the previous chunk: interpolate.
            sample_start = sample_start + (options.start - time_in) * info_in["rx_rate"]
            break
        time_in = time_in2
        sample_start = sample_start + info_in["nitems"]
    # Set start point and sample step to samples.
    options.start = sample_start
    options.nsamples = options.nsamples * info_in["rx_rate"]
コード例 #2
0
    def create_package(self):
        """Assemble the next package file and hand it to an S3 upload task.

        Python 2 code (print statements).  When the current timestamp
        lines up with the first header's rx_time, copies up to
        self.package_amount (header, extra, data) chunks from the raw
        file into a new .raw package and starts an S3_uploadtask daemon
        thread to upload it.
        """
        # Only build a package when the rounded timestamps coincide.
        if (int(math.floor(self.new_timestamp)) -
                int(math.ceil(self.info['rx_time'])) == 0):
            self.package_name = self.package_prefix + "_{0}.raw".format(
                self.new_timestamp)
            self.package_fullpath = self.package_path + self.package_name
            self.package = open(self.package_fullpath, "wb")
            for i in range(0, self.package_amount):
                # read out next header bytes
                self.hdr_start = self.handle.tell()
                self.header_str = self.handle.read(
                    parse_file_metadata.HEADER_LENGTH)
                if (len(self.header_str) == 0):
                    # End of file: no more chunks to package.
                    break
                # Convert from string to PMT (should be a dictionary)
                try:
                    print "\033[1;0H"
                    self.header = pmt.deserialize_str(self.header_str)
                except Exception as e:
                    # NOTE(review): on failure self.header keeps its old
                    # value yet parsing continues below — confirm intended.
                    print ERROR, e
                else:
                    self.package.write(self.header_str)
                    print SUCCESS, "Write base header."
                # Get base header info
                self.info = parse_file_metadata.parse_header(self.header, True)
                # Get extra header length
                if (self.info["extra_len"] > 0):
                    self.extra_str = self.handle.read(self.info["extra_len"])
                    if (len(self.extra_str) == 0):
                        break
                # Read extra header
                # NOTE(review): if extra_len == 0 this reuses the previous
                # chunk's self.extra_str — confirm intended.
                try:
                    self.extra = pmt.deserialize_str(self.extra_str)
                except Exception as e:
                    print ERROR, e
                else:
                    self.package.write(self.extra_str)
                    # NOTE(review): message says "base header" but this
                    # branch writes the extra header and the payload.
                    print SUCCESS, "Write base header."
                    print "\nExtra Header:"
                    extra_info = parse_file_metadata.parse_extra_dict(
                        self.extra, self.info, True)
                    self.data_str = self.handle.read(self.info['nbytes'])
                    self.package.write(self.data_str)

                # Move the read pointer past header + extras + payload.
                self.nread += parse_file_metadata.HEADER_LENGTH + self.info[
                    "extra_len"] + self.info['nbytes']
                self.handle.seek(self.nread, 0)
                print "\n\n"
            self.package.close()
            print "\033[21;0H%15s %15s %15s" % (
                "|   Timestamp|", "|Upload_Bytes|", "|   All_Bytes|")
            # Upload the finished package on a daemon thread.
            self.uploadtask = S3_uploadtask(self.config, self.uploadtaskID,
                                            self.new_timestamp,
                                            self.package_name,
                                            self.package_fullpath)
            self.uploadtask.setDaemon(True)
            self.uploadtask.start()
            self.uploadtaskID += 1
            self.old_timestamp = self.new_timestamp
コード例 #3
0
def read_meta_info(file):
    """Parse the GNU Radio metadata header of *file* into an info dict.

    The returned dict is extended with 'st_size' (file size in bytes)
    and 'band' (435 or 436, deduced from the filename).
    """
    with open(file, 'rb') as f:
        raw_header = f.read(parse_file_metadata.HEADER_LENGTH)
        info = parse_file_metadata.parse_header(
            pmt.deserialize_str(raw_header), False)
    # NOTE(review): assumes *file* is a pathlib.Path (uses .stat()).
    info['st_size'] = file.stat().st_size
    info['band'] = 435 if '435' in str(file) else 436
    return info
コード例 #4
0
def read_gnuradio_header_element(file_handle):
    """
    Read a header element/header structure from the current position of
    a GNU Radio header file. The header file contains multiple header
    elements, one for each issued stream tag.

    Parameters
    ----------
    file_handle:
        File handle for the header file, as obtained using open().
    Returns
    -------
    info: dict
        Header structure, or None when the file is exhausted.
    header_length: int
        Length of the header element in bytes (0 at end of file).
    """
    header_str = file_handle.read(parse_file_metadata.HEADER_LENGTH)
    if len(header_str) == 0:
        # End of file: nothing left to parse.
        return None, 0

    header = pmt.deserialize_str(header_str)
    info = parse_file_metadata.parse_header(header, False)

    # Parse the optional extra header.  parse_extra_dict merges the
    # extra fields into `info`, so the (previously unused) return value
    # does not need to be kept.
    if info["extra_len"] > 0:
        extra_str = file_handle.read(info["extra_len"])
        extra = pmt.deserialize_str(extra_str)
        parse_file_metadata.parse_extra_dict(extra, info, False)

    return info, parse_file_metadata.HEADER_LENGTH + info["extra_len"]
コード例 #5
0
    def parseHeaders(self):
        """Read and parse one (header, extra-header) pair from self.inFile.

        Returns the merged info dict produced by
        parse_file_metadata.parse_extra_dict.
        """
        # read out header bytes into a string
        header_str = self.inFile.read(parse_file_metadata.HEADER_LENGTH)

        # Convert from created string to PMT dict
        try:
            header = pmt.deserialize_str(header_str)
        except RuntimeError:
            logging.info(
                f"[Process: {self.radioNum}] Could not deserialize header\n")
            # NOTE(review): execution continues after stop(); `header`
            # would be unbound below — confirm stop() ends the process.
            self.stop()

        # Convert from PMT dict to Python dict
        info = parse_file_metadata.parse_header(header)

        if (info["extra_len"] > 0):
            extra_str = self.inFile.read(info["extra_len"])

        # Extra header info
        # NOTE(review): when extra_len == 0, `extra_str` is unbound here.
        try:
            extra = pmt.deserialize_str(extra_str)
        except RuntimeError:
            logging.info(
                f"[Process: {self.radioNum}] Could not deserialize extra headers\n"
            )
            self.stop()

        info = parse_file_metadata.parse_extra_dict(extra, info)

        return info
コード例 #6
0
ファイル: fileman.py プロジェクト: garverp/recipes
def read_single_header(handle):
    """Read one metadata header (and its extras) from an open .hdr file.

    Leaves *handle* positioned just past the header pair and returns
    the tuple (header_pmt, extras_pmt, handle).  Exits the process on
    an empty or undecodable header.
    """
    pos = handle.tell()
    header_bytes = handle.read(parse_file_metadata.HEADER_LENGTH)
    if not header_bytes:
        sys.stderr.write("Empty Header, quitting.\n")
        sys.exit(1)
    # The serialized header should decode to a PMT dictionary.
    try:
        header = pmt.deserialize_str(header_bytes)
    except RuntimeError:
        sys.stderr.write(
            "Could not deserialize header: invalid or \
                corrupt data file.\n"
        )
        sys.exit(1)
    info = parse_file_metadata.parse_header(header, False)
    extras_bytes = handle.read(info["extra_len"])
    try:
        extras_hdr = pmt.deserialize_str(extras_bytes)
    except RuntimeError:
        sys.stderr.write("Could not deserialize extras\n")
        sys.exit(1)
    # Seek explicitly to the first byte after header + extras.
    handle.seek(pos + parse_file_metadata.HEADER_LENGTH + info["extra_len"], 0)
    return header, extras_hdr, handle
コード例 #7
0
	def find_package(self):
		"""Skip forward through the raw file until the chunk whose
		rx_time matches the current timestamp (Python 2, tab-indented).

		Advances self.nread / self.handle one chunk per iteration and
		leaves self.info describing the last header read.
		"""
		# Loop while the wall-clock timestamp is still ahead of the
		# current chunk's rx_time.
		while ((int(math.floor(self.new_timestamp)) - int(math.ceil(self.info['rx_time']))) > 0):
			# read out next header bytes
			self.hdr_start = self.handle.tell()
			self.header_str = self.handle.read(parse_file_metadata.HEADER_LENGTH)
			if(len(self.header_str) == 0):
				# End of file reached before the timestamp was found.
				break
			# Convert from string to PMT (should be a dictionary)
			try:
				print "\033[1;0H"
				self.header = pmt.deserialize_str(self.header_str)
			except Exception as e:
				print ERROR,e
			else:
				print SUCCESS,"Found base header."
			# Get base header info
			self.info = parse_file_metadata.parse_header(self.header, True)
			# Get extra header length
			if(self.info["extra_len"] > 0):
				self.extra_str = self.handle.read(self.info["extra_len"])
				if(len(self.extra_str) == 0):
					break
			# Read extra header
			# NOTE(review): if extra_len == 0 this reuses the previous
			# chunk's self.extra_str — confirm intended.
			try:
				self.extra = pmt.deserialize_str(self.extra_str)
			except Exception as e:
				print ERROR,e
			else:
				print SUCCESS,"Found extra header."
				print "\nExtra Header:"
				self.extra_info = parse_file_metadata.parse_extra_dict(self.extra, self.info, True)
			# Move the read pointer past header + extras + payload.
			self.nread += parse_file_metadata.HEADER_LENGTH + self.info["extra_len"] + self.info['nbytes']
			self.handle.seek(self.nread, 0)
			print "\n\n"
コード例 #8
0
def main():
    """Poll a time API and package/upload chunks of a raw metadata file.

    Python 2 script loop.  Relies on module-level globals: rawfile_path,
    timeapi_url, package_amount, package_prefix, package_path and
    S3_uploadtask — presumably defined at file scope; verify.
    """
    #init
    nread = 0
    old_timestamp = 0
    new_timestamp = 0
    uploadtaskID = 0

    os.system("clear")
    # Read the first header once to learn the fixed chunk geometry.
    handle = open(rawfile_path, "rb")
    handle.seek(nread, 0)
    hdr_start = handle.tell()

    #read the base header
    header_str = handle.read(parse_file_metadata.HEADER_LENGTH)
    header = pmt.deserialize_str(header_str)
    init_info = parse_file_metadata.parse_header(header, True)
    handle.close()

    # Every package is assumed to have the same on-disk length:
    # header + extra header + payload of the first chunk.
    package_length = parse_file_metadata.HEADER_LENGTH + init_info[
        "extra_len"] + init_info['nbytes']

    while (True):
        # Fetch the current time from the remote time API.
        timeapi = urllib2.urlopen(timeapi_url)
        new_timestamp = int(timeapi.read())
        if (old_timestamp != new_timestamp):

            if (int(math.floor(new_timestamp)) >= int(
                    math.ceil(init_info['rx_time']))):

                # Number of whole packages between file start and "now".
                delta_time = int(math.floor(new_timestamp)) - int(
                    math.ceil(init_info['rx_time']))
                if (((delta_time + package_amount) * package_length) <=
                        os.path.getsize(rawfile_path)):

                    # Slice package_amount chunks out of the raw file.
                    nread = delta_time * package_length
                    handle = open(rawfile_path, "rb")
                    handle.seek(nread, 0)
                    hdr_start = handle.tell()
                    package_str = handle.read(package_amount * package_length)
                    handle.close()

                    #create package file
                    package_name = package_prefix + "_{0}.raw".format(
                        new_timestamp)
                    package_fullpath = package_path + package_name
                    package = open(package_fullpath, "wb")
                    package.write(package_str)
                    package.close()

                    print "\033[13;0H%15s %15s %15s" % (
                        "|   Timestamp|", "|Upload_Bytes|", "|   All_Bytes|")

                    # Upload asynchronously on a daemon thread.
                    uploadtask = S3_uploadtask(uploadtaskID, new_timestamp,
                                               package_name, package_fullpath)
                    uploadtask.setDaemon(True)
                    uploadtask.start()
                    uploadtaskID += 1
                    old_timestamp = new_timestamp
        time.sleep(5)
コード例 #9
0
def main(filename):
    """Continuously split a growing GNU Radio metadata file into packages.

    Reads one (header, extra, data) chunk at a time from *filename*,
    writes it out as ./Data/package/Package_<rx_time>.raw, then waits
    until enough new data has accumulated before reading the next one.
    """
    #init
    nheaders = 0
    nread = 0

    while (True):

        # Open the file and seek to the next unread chunk.
        handle = open(filename, "rb")
        handle.seek(nread, 0)
        hdr_start = handle.tell()

        #read the base header
        header_str = handle.read(parse_file_metadata.HEADER_LENGTH)
        header = pmt.deserialize_str(header_str)
        # Parenthesized print works in Python 2 and 3 for a single arg.
        print("Package {0} Header".format(nheaders))
        info = parse_file_metadata.parse_header(header, True)

        # Read the extra header, if present.  Default to an empty byte
        # string so the package write below cannot hit an unbound name
        # when a chunk carries no extra header.
        extra_str = b""
        if (info["extra_len"] > 0):
            extra_str = handle.read(info["extra_len"])
            if (len(extra_str) == 0):
                break
            extra = pmt.deserialize_str(extra_str)
            extra_info = parse_file_metadata.parse_extra_dict(
                extra, info, True)

        #read the data
        data_str = handle.read(info['nbytes'])

        # Advance the read offset for the next package.
        if (info['nbytes'] != 0):
            nheaders += 1
            nread += parse_file_metadata.HEADER_LENGTH + info[
                "extra_len"] + info['nbytes']
        handle.close()

        # Create the package file: header + extra + data.
        package_name = "Package_{0}.raw".format(int(math.ceil(
            info['rx_time'])))
        package_path = "./Data/package/" + package_name
        package = open(package_path, "wb")
        package.write(header_str)
        package.write(extra_str)
        package.write(data_str)
        package.close()

        # Wait until the source file has grown enough to hold the next
        # full chunk.  BUG FIX: the original referenced an undefined
        # name `data_length`; use the payload length from the header.
        while (True):
            size = os.path.getsize(filename)
            if (parse_file_metadata.HEADER_LENGTH + info["extra_len"] +
                    info['nbytes'] < size - nread):
                break
            time.sleep(0.5)
        os.system("clear")
コード例 #10
0
def read_meta_info(file):
    """Parse a GNU Radio metadata header and annotate it with file info.

    Returns the parsed header dict, extended with 'st_size' (size in
    bytes) and 'band' (435 or 436, deduced from the filename).  Raises
    ValueError when the filename matches neither band's patterns.
    """
    with open(file, 'rb') as f:
        header = pmt.deserialize_str(f.read(parse_file_metadata.HEADER_LENGTH))
        info = parse_file_metadata.parse_header(header, False)
    info['st_size'] = file.stat().st_size
    # Deduce the band from well-known filename substrings.
    bandsname = {435: ['_435_', '_435.4MHz_', '_435MHz'],
                 436: ['_436_', '_436.4MHz_', '_436MHz']}
    name = str(file)
    matched = [band for band, patterns in bandsname.items()
               if any(pattern in name for pattern in patterns)]
    if not matched:
        raise ValueError(f'Could not deduce band from filename: {name}')
    info['band'] = matched[0]
    return info
コード例 #11
0
    def __init__(self):
        """Build a flowgraph that strips metadata from meta.raw into
        data.raw, and dump rate/freq/time to header.txt (Python 2).
        """
        gr.top_block.__init__(self, "Meta To Data")
        ##################################################
        # Variables
        ##################################################
        meta_file = "./Data/meta.raw"
        data_file = "./Data/data.raw"
        header_file = "./Data/header.txt"

        ##################################################
        # Blocks
        ##################################################
        self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_gr_complex * 1,
                                                   data_file, False)
        self.blocks_file_sink_0.set_unbuffered(False)
        self.blocks_file_meta_source_0 = blocks.file_meta_source(
            meta_file, True, False, '')

        ##################################################
        # Connections
        ##################################################
        self.connect((self.blocks_file_meta_source_0, 0),
                     (self.blocks_file_sink_0, 0))

        ##################################################
        # Get header info
        ##################################################
        print "##################################################"
        print "#                 Header Info                    #"
        print "##################################################"

        # Parse the first header of the metadata file directly.
        handle = open(meta_file, "rb")
        hdr_start = handle.tell()
        header_str = handle.read(parse_file_metadata.HEADER_LENGTH)
        header = pmt.deserialize_str(header_str)
        info = parse_file_metadata.parse_header(header, True)
        if (info["extra_len"] > 0):
            extra_str = handle.read(info["extra_len"])
            extra = pmt.deserialize_str(extra_str)
        # NOTE(review): if extra_len == 0, `extra` is unbound here and
        # this call raises NameError — confirm headers always carry extras.
        extra_info = parse_file_metadata.parse_extra_dict(extra, info, True)
        handle.close()

        # Write rate/freq/time to a small text header file.
        # NOTE(review): `header` is rebound from the PMT header to this
        # file object; opened "wb" but written with str (Python 2 only).
        header = open(header_file, "wb")
        rx_rate = extra_info['rx_rate']
        rx_freq = pmt.to_python(extra_info['rx_freq'])
        rx_time = extra_info['rx_time']
        header.write("%.00f\n" % rx_rate)
        header.write("%.00f\n" % rx_freq)
        header.write("%.22f\n" % rx_time)
        header.close()

        print
コード例 #12
0
ファイル: fileman.py プロジェクト: garverp/recipes
def time_to_sample(options, args):
    """Translate options.start (seconds) into a sample offset, in place.

    Scans the detached .hdr chunks until the chunk containing the
    requested time is found, then rewrites options.start and
    options.nsamples in units of samples.
    """
    # Open the detached header file that accompanies the data file.
    hdr_path = args[0] + ".hdr"
    hdr_handle = open(hdr_path, "rb")
    hdr, hdr_extra, hdr_handle = read_single_header(hdr_handle)
    info = parse_file_metadata.parse_header(hdr, False)
    chunk_time = info["rx_time"]
    sample_start = 0
    # Walk chunk by chunk until we pass the requested time.
    while options.start > chunk_time:
        hdr, hdr_extra, hdr_handle = read_single_header(hdr_handle)
        info = parse_file_metadata.parse_header(hdr, False)
        next_time = info["rx_time"]
        if options.start < next_time:
            # Requested time lies inside the previous chunk: interpolate.
            sample_start += (options.start - chunk_time) * info["rx_rate"]
            break
        chunk_time = next_time
        sample_start += info["nitems"]
    # Rewrite start point and sample count in samples.
    options.start = sample_start
    options.nsamples = options.nsamples * info["rx_rate"]
コード例 #13
0
	def get_length(self):
		"""Parse the first base header of the raw file to derive the
		fixed per-package byte length (Python 2, tab-indented).

		Sets self.init_info, self.package_length and self.info, and
		leaves the file handle rewound to offset 0.
		"""
		self.handle = open(self.rawfile_path, "rb")
		self.handle.seek(self.nread, 0)
		self.hdr_start = self.handle.tell()
		#read the base header
		self.header_str = self.handle.read(parse_file_metadata.HEADER_LENGTH)
		print "\033[1;0H"
		self.header = pmt.deserialize_str(self.header_str)
		self.init_info = parse_file_metadata.parse_header(self.header, True)
		self.handle.seek(0, 0)
		# Package length = base header + extra header + payload bytes.
		self.package_length = parse_file_metadata.HEADER_LENGTH + self.init_info["extra_len"] + self.init_info['nbytes']
		self.info = self.init_info
コード例 #14
0
ファイル: fileman.py プロジェクト: goldcat/gr-analysis
def time_to_sample(options, args):
    """Convert options.start from seconds to a sample offset in place.

    Also scales options.nsamples from seconds to samples using the
    rate of the chunk that contains the start time.
    """
    # Open the detached header that accompanies the data file.
    header_path = args[0] + '.hdr'
    fh = open(header_path, "rb")
    hdr, hdr_extras, fh = read_single_header(fh)
    meta = parse_file_metadata.parse_header(hdr, False)
    chunk_start = meta["rx_time"]
    offset = 0
    # Advance chunk by chunk until the requested time is reached.
    while options.start > chunk_start:
        hdr, hdr_extras, fh = read_single_header(fh)
        meta = parse_file_metadata.parse_header(hdr, False)
        upcoming = meta["rx_time"]
        if options.start < upcoming:
            # Start time falls before this chunk's timestamp:
            # interpolate within the previous chunk.
            offset += (options.start - chunk_start) * meta["rx_rate"]
            break
        chunk_start = upcoming
        offset += meta["nitems"]
    # Express start point and sample count in samples.
    options.start = offset
    options.nsamples = options.nsamples * meta["rx_rate"]
コード例 #15
0
ファイル: p.py プロジェクト: SpinStabilized/stem_station
def parse_gnuradio_header(header_file, verbose=False):
    """Parse every header element of a GNU Radio .hdr file.

    Returns a list of per-chunk info dicts.  Each dict gains:
      - "rx_time": chunk start as a datetime.timedelta relative to the
        first chunk (derived from the previous chunk's nitems/rx_rate,
        not from the absolute timestamps stored in the file);
      - "index": absolute starting sample index of the chunk.

    Fix: removed the unused local `rx_time` the original initialized
    and never read.
    """
    headers = []
    index = 0
    with open(header_file, "rb") as handle:
        file_length = os.path.getsize(header_file)
        while True:
            # Stop when less than a full header remains.
            if file_length - handle.tell() < parse_file_metadata.HEADER_LENGTH:
                break

            header_str = handle.read(parse_file_metadata.HEADER_LENGTH)

            try:
                header = pmt.deserialize_str(header_str)
            except RuntimeError:
                break

            info = parse_file_metadata.parse_header(header, verbose)

            if info["nbytes"] == 0:
                break

            # Optional extra header; parse_extra_dict merges it into `info`.
            if info["extra_len"] > 0:
                extra_str = handle.read(info["extra_len"])
                if len(extra_str) == 0:
                    break

                try:
                    extra = pmt.deserialize_str(extra_str)
                except RuntimeError:
                    break

                parse_file_metadata.parse_extra_dict(extra, info, verbose)

            if len(headers) > 0:
                # Derive this chunk's start time from the previous chunk.
                last_rx_time = headers[-1]["rx_time"]
                samples_delta = headers[-1]["nitems"] / headers[-1]["rx_rate"]
                samples_delta = datetime.timedelta(seconds=samples_delta)
                info["rx_time"] = last_rx_time + samples_delta

                info["index"] = index
                index = index + info["nitems"]
            else:
                # First chunk anchors the relative timeline at zero.
                info["rx_time"] = datetime.timedelta(seconds=0.0)
                info["index"] = 0
                index = info["nitems"]

            headers.append(info)

    return headers
コード例 #16
0
ファイル: p.py プロジェクト: SpinStabilized/stem_station
def parse_gnuradio_header(header_file, verbose=False):
    """Walk a GNU Radio .hdr file and return a list of chunk info dicts.

    Each returned dict is annotated with a relative 'rx_time'
    (timedelta since the first chunk, computed from the previous
    chunk's nitems/rx_rate) and an absolute starting sample 'index'.
    """
    headers = []
    index = 0
    rx_time = datetime.timedelta(seconds=0)
    total = os.path.getsize(header_file)
    with open(header_file, 'rb') as fh:
        # Keep reading while a full base header remains in the file.
        while total - fh.tell() >= parse_file_metadata.HEADER_LENGTH:
            raw = fh.read(parse_file_metadata.HEADER_LENGTH)
            try:
                hdr_pmt = pmt.deserialize_str(raw)
            except RuntimeError:
                break

            info = parse_file_metadata.parse_header(hdr_pmt, verbose)
            if info['nbytes'] == 0:
                break

            # Optional extra header: merged into `info` by parse_extra_dict.
            extra_len = info['extra_len']
            if extra_len > 0:
                raw_extra = fh.read(extra_len)
                if not raw_extra:
                    break
                try:
                    extra_pmt = pmt.deserialize_str(raw_extra)
                except RuntimeError:
                    break
                parse_file_metadata.parse_extra_dict(extra_pmt, info, verbose)

            if headers:
                # Chain this chunk's start time off the previous one.
                prev = headers[-1]
                step = datetime.timedelta(
                    seconds=prev['nitems'] / prev['rx_rate'])
                info['rx_time'] = prev['rx_time'] + step
                info['index'] = index
                index += info['nitems']
            else:
                # First chunk anchors the relative timeline.
                info['rx_time'] = datetime.timedelta(seconds=0.0)
                info['index'] = 0
                index = info['nitems']

            headers.append(info)

    return headers
コード例 #17
0
ファイル: data.py プロジェクト: acrerd/pulsar_telescope
    def parse_metadata(self, filepath):
        """Parse <filepath>.hdr and load its extra tags into a tag table.

        Returns ({'total power': first_header_dict}, overs) where
        `overs` records points at which rx_time jumped to a new
        acquisition segment.  Python 2 code (xrange, str sentinel).
        """
        head_file = filepath+".hdr"
        hlen = parse_file_metadata.HEADER_LENGTH

        headers = []
        extra_tags = []

        overs = []

        # No detached header file: return an empty tag table.
        if not os.path.isfile(head_file):
            return {'total power':{}}, []

        with open(head_file,'rb') as fd:
            # NOTE(review): the '' sentinel matches only in Python 2;
            # a binary read returns b'' in Python 3, so this would loop.
            for h_str in iter(partial(fd.read, hlen), ''):
                h_pmt = pmt.deserialize_str(h_str)
                h_parsed=parse_file_metadata.parse_header(h_pmt,False)
                headers.append(h_parsed)
                if(h_parsed["extra_len"] > 0):

                    extra_str = fd.read(h_parsed["extra_len"])
                    if(len(extra_str) == 0):
                        break
                    extra = pmt.deserialize_str(extra_str)
                    e_parsed = parse_file_metadata.parse_extra_dict(extra, h_parsed, False)
                    extra_tags.append(e_parsed)

        # Load the extra data into the tagging system for the LightCurve.
        tags = pd.DataFrame({'total power':[{} for _ in xrange(self.data_len)]})
        nums_done = 0
        segment_start_time = headers[0]['rx_time']
        segments = 1
        for i  in xrange(len(extra_tags)):
            j = int(nums_done + extra_tags[i]['nitems'])
            if not extra_tags[i]['rx_time'] == segment_start_time:
                # Timestamp discontinuity: a new acquisition segment.
                should = segment_start_time + j/extra_tags[i]['rx_rate']

                miss_sec = extra_tags[i]['rx_time']-should

                overs.append({'new_seg':j, 'new_time':extra_tags[i]['rx_time']})

                segment_start_time = extra_tags[i]['rx_time']
                segments += 1
            j = int(nums_done + extra_tags[i]['nitems'])
            tags['total power'][j] = extra_tags[i]
            nums_done += extra_tags[i]['nitems']
        new = self.import_tags(extra_tags, 'total power')

        return {'total power': headers[0]}, overs
コード例 #18
0
def main(filename, detached=False):
    """Walk a GNU Radio metadata file, printing every header (Python 2).

    Parameters:
        filename: path of the metadata file to inspect.
        detached: when True the file holds only headers (no payload),
                  so the seek does not skip over data bytes.
    """
    handle = open(filename, "rb")

    nheaders = 0
    nread = 0
    while (True):
        # read out next header bytes
        hdr_start = handle.tell()
        header_str = handle.read(parse_file_metadata.HEADER_LENGTH)
        if (len(header_str) == 0):
            # End of file.
            break

        # Convert from string to PMT (should be a dictionary)
        try:
            header = pmt.deserialize_str(header_str)
        except RuntimeError:
            sys.stderr.write(
                "Could not deserialize header: invalid or corrupt data file.\n"
            )
            sys.exit(1)

        print "HEADER {0}".format(nheaders)
        info = parse_file_metadata.parse_header(header, True)
        print "%.22f" % (info["rx_time"])

        if (info["extra_len"] > 0):
            extra_str = handle.read(info["extra_len"])
            if (len(extra_str) == 0):
                break

            try:
                extra = pmt.deserialize_str(extra_str)
            except RuntimeError:
                sys.stderr.write(
                    "Could not deserialize extras: invalid or corrupt data file.\n"
                )
                sys.exit(1)

            print "\nExtra Header:"
            extra_info = parse_file_metadata.parse_extra_dict(
                extra, info, True)

        # Advance past the header (+ extras), and past the payload too
        # unless the file is detached (headers only).
        nheaders += 1
        nread += parse_file_metadata.HEADER_LENGTH + info["extra_len"]
        if (not detached):
            nread += info['nbytes']
        handle.seek(nread, 0)
        print "\n\n"
コード例 #19
0
ファイル: fileman.py プロジェクト: goldcat/gr-analysis
def truncate_file(options, args):
    """Truncate or chunk a metadata file pair according to *options*.

    When options.repeat_end is set, the input is chopped into
    options.nsamples-sized pieces named <base>_NNN.<ext>; otherwise a
    single truncated copy is produced.  Optionally deletes the original
    file pair afterwards.
    """
    # Resolve input paths up front so the deleteOriginal branch below
    # works even when the repeat_end branch is not taken (the original
    # only bound `infile` inside that branch, causing a NameError).
    infile = args[0]
    infile_hdr = infile + '.hdr'
    # Check if input is in time format
    if options.timeMode:
        time_to_sample(options, args)
    # Check if need to chunk whole file
    if options.repeat_end:
        handle_in = open(infile_hdr, "rb")
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in, False)
        shortname_intype = find_shortname(info_in['cplx'], info_in['type'],
                                          info_in['size'])
        # Total length of the input in samples.
        file_length = os.path.getsize(infile) / SNAME_DEFS[shortname_intype][0]
        num_files = math.ceil(file_length / options.nsamples)
        num_nums = len(str(num_files)) - 3
        stop_point = options.start + options.nsamples
        count = 0
        leading_zeros_count = 10
        fileName = args[1].split('.')
        while stop_point < file_length:
            # Shrink the zero padding each time the counter gains a digit.
            if count >= leading_zeros_count:
                leading_zeros_count = leading_zeros_count * 10
                num_nums = num_nums - 1
            leading_zeros = '0' * num_nums
            args[1] = fileName[0] + '_' + leading_zeros + str(
                count) + '.' + fileName[1]
            the_config = propagate_headers(options, args)
            tb = buildblock(the_config)
            tb.run()
            count = count + 1
            options.start = stop_point + 1
            stop_point = stop_point + options.nsamples
        # Final (possibly shorter) piece.
        options.nsamples = file_length - options.start
        args[1] = fileName[0] + '_' + str(count) + '.' + fileName[1]
    # Propagate and update header. Return flowgraph config.
    the_config = propagate_headers(options, args)
    # Build the flowgraph
    tb = buildblock(the_config)
    # Execute
    tb.run()
    # Delete original file if option is selected
    if options.deleteOriginal:
        os.remove(infile)
        os.remove(infile_hdr)
コード例 #20
0
ファイル: fileman.py プロジェクト: garverp/gr-analysis
def truncate_file(options, args):
    """Truncate or chunk a metadata file pair according to *options*.

    Fixes: normalized the original's mixed tab/space indentation
    (a TabError under Python 3) and bound `infile`/`infile_hdr` up
    front so the deleteOriginal branch cannot hit a NameError when
    repeat_end is not set.
    """
    infile = args[0]
    infile_hdr = infile + '.hdr'
    #Check if input is in time format
    if options.timeMode:
        time_to_sample(options, args)
    #Check if need to chunk whole file
    if options.repeat_end:
        handle_in = open(infile_hdr, "rb")
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in, False)
        shortname_intype = find_shortname(info_in['cplx'], info_in['type'],
                                          info_in['size'])
        # Total length of the input in samples.
        file_length = os.path.getsize(infile) / SNAME_DEFS[shortname_intype][0]
        num_files = math.ceil(file_length / options.nsamples)
        num_nums = len(str(num_files)) - 3
        stop_point = options.start + options.nsamples
        count = 0
        leading_zeros_count = 10
        fileName = args[1].split('.')
        while stop_point < file_length:
            # Shrink the zero padding when the counter gains a digit.
            if count >= leading_zeros_count:
                leading_zeros_count = leading_zeros_count * 10
                num_nums = num_nums - 1
            leading_zeros = ''
            for i in range(num_nums):
                leading_zeros = leading_zeros + '0'
            args[1] = (fileName[0] + '_' + leading_zeros + str(count) +
                       '.' + fileName[1])
            the_config = propagate_headers(options, args)
            tb = buildblock(the_config)
            tb.run()
            count = count + 1
            options.start = stop_point + 1
            stop_point = stop_point + options.nsamples
        # Final (possibly shorter) piece.
        options.nsamples = file_length - options.start
        args[1] = fileName[0] + '_' + str(count) + '.' + fileName[1]
    #Propagate and update header. Return flowgraph config.
    the_config = propagate_headers(options, args)
    #Build the flowgraph
    tb = buildblock(the_config)
    #Execute
    tb.run()
    #Delete original file if option is selected
    if options.deleteOriginal:
        os.remove(infile)
        os.remove(infile_hdr)
コード例 #21
0
ファイル: fileman.py プロジェクト: starsway/gr-analysis
def read_single_header(handle):
    """Consume one header + extras pair from *handle* and return them.

    The handle is repositioned to the first byte after the pair.  On a
    truncated or corrupt header the process exits with status 1.
    """
    offset = handle.tell()
    raw_header = handle.read(parse_file_metadata.HEADER_LENGTH)
    if len(raw_header) == 0:
        sys.stderr.write("Empty Header, quitting.\n")
        sys.exit(1)
    # Deserialize the base header (expected to be a PMT dictionary).
    try:
        header = pmt.deserialize_str(raw_header)
    except RuntimeError:
        sys.stderr.write("Could not deserialize header: invalid or \
                corrupt data file.\n")
        sys.exit(1)
    info = parse_file_metadata.parse_header(header, False)
    raw_extras = handle.read(info["extra_len"])
    try:
        extras_hdr = pmt.deserialize_str(raw_extras)
    except RuntimeError:
        sys.stderr.write("Could not deserialize extras\n")
        sys.exit(1)
    # Reposition just past the header pair before returning.
    offset += parse_file_metadata.HEADER_LENGTH + info["extra_len"]
    handle.seek(offset, 0)
    return header, extras_hdr, handle
fileToWrite = open("signal.txt", 'w')
fileToRead = open("meta_signal.bin", "rb")

posInFile = 0
while(True):
    header_str = fileToRead.read(parse_file_metadata.HEADER_LENGTH)
    if(len(header_str) == 0):
        break
    try:
        header = pmt.deserialize_str(header_str)
    except RuntimeError:
        sys.stderr.write("Could not deserialize header: invalid or corrupt data file.\n")
        sys.exit(1)

    info = parse_file_metadata.parse_header(header, False)
    
    if(info["extra_len"] > 0):
        extra_str = fileToRead.read(info["extra_len"])
        if(len(extra_str) == 0):
            break
        
        try:
            extra = pmt.deserialize_str(extra_str)
        except RuntimeError:
            sys.stderr.write("Could not deserialize extras: invalid or corrupt data file.\n")
            sys.exit(1)

    posInFile += parse_file_metadata.HEADER_LENGTH + info["extra_len"]
    fileToWrite.write(time.asctime(time.localtime(floatTime+info["rx_time"])) + '\n')
    for x in range(0, info["nitems"]):
コード例 #23
0
ファイル: fileman.py プロジェクト: starsway/gr-analysis
def propagate_headers(options,args):
    infile = args[0]
    outfile = args[1]
    infile_hdr = infile + '.hdr'
    outfile_hdr = outfile + '.hdr'
    sample_cnt_end = 0
    sample_offset = long(options.start)
        # Open input header
    try:
        handle_in = open(infile_hdr, "rb")
    except IOError:
        sys.stderr.write("Unable to open input file header\n")
        sys.exit(1)
    # Open output header
    try:
        handle_out = open(outfile_hdr, "wb")
    except IOError:
        sys.stderr.write("Unable to open output file header\n")
        sys.exit(1)

    # Read first header separately to get file type
    hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
    info_in = parse_file_metadata.parse_header(hdr_in,False)
    sample_cnt_end += info_in["nitems"]
    # Parse file type - ensure support for it
    shortname_intype = find_shortname(info_in['cplx'], info_in['type'],
                info_in['size'])
    if shortname_intype == SNAME_TO_ENUM["unknown"]:
        sys.stderr.write("Unsupported data type\n")
        sys.exit(1)
    if options.output_type == 'unknown':
        shortname_outtype = shortname_intype
    else:
        shortname_outtype = SNAME_TO_ENUM[options.output_type]

    # Calc sample_len from file size if not specified
    if options.nsamples is not None:
        sample_len = long(options.nsamples)
	final_index = sample_offset + sample_len
    else:
        sample_len = os.path.getsize(infile)/SNAME_DEFS[shortname_intype][0]
	final_index = sample_len

    # Search input headers until we find the correct one
    while sample_cnt_end <= sample_offset:
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in,False)
        sample_cnt_end += info_in["nitems"]
    time_in = info_in["rx_time"]
    # Starting sample of current segment
    sample_cnt_start = sample_cnt_end - info_in["nitems"]
    # Interpolate new timestamp
    delta = sample_offset - sample_cnt_start
    new_ts = time_in + delta/info_in["rx_rate"]
    # Calc new segment size (samples)
    if sample_cnt_end > final_index:
        first_seg_len = final_index - sample_offset
    else:
        first_seg_len = sample_cnt_end - sample_offset
    
    # Write the first output header
    hdr_out = hdr_in
    new_secs = long(new_ts)
    new_fracs = new_ts - new_secs
    time_val = pmt.make_tuple(pmt.from_uint64(new_secs),
            pmt.from_double(new_fracs))
    size_val = pmt.from_long(SNAME_DEFS[shortname_outtype][0])
    bytes_val = pmt.from_uint64(first_seg_len*SNAME_DEFS[shortname_outtype][0])
    type_val = pmt.from_long(SNAME_DEFS[shortname_outtype][2])
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("rx_time"), time_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("bytes"), bytes_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("type"), type_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("size"), size_val)
    hdr_out_str = pmt.serialize_str(hdr_out) + pmt.serialize_str(hdr_extra_in)    
    handle_out.write(hdr_out_str)

    # Continue reading headers, modifying, and writing 
    last_seg_len = info_in['nitems']
    print "sample_cnt_end=%d,final_index=%d" % (sample_cnt_end,final_index)
    # Iterate through remaining headers
    while sample_cnt_end < final_index:
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in,False)
        nitems = info_in["nitems"]
        sample_cnt_start = sample_cnt_end
        sample_cnt_end += nitems
        hdr_out = hdr_in
        # For last header, adjust segment length accordingly
        if sample_cnt_end > final_index:
            last_seg_len = final_index - sample_cnt_start
        else:
            last_seg_len = nitems
        size_val = pmt.from_long(SNAME_DEFS[shortname_outtype][0])
        bytes_val = pmt.from_uint64(last_seg_len*SNAME_DEFS[shortname_outtype][0])
        type_val = pmt.from_long(SNAME_DEFS[shortname_outtype][2])
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("bytes"), bytes_val)
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("type"), type_val)
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("size"), size_val)
        hdr_out_str = pmt.serialize_str(hdr_out) + pmt.serialize_str(hdr_extra_in)
        handle_out.write(hdr_out_str)
        
    if options.verbose:
        print 'Input File:' + infile
        print 'Input Header:' + infile_hdr
        print 'Input Type:' + ENUM_TO_SNAME[shortname_intype]
        print 'Output File:' + outfile
        print 'Output File Length (Samples):%d' % (final_index-sample_offset)
        print 'Output Header:' + outfile_hdr
        print 'File subsection: [%d,%d]' % (sample_offset,final_index)
        print 'Output Type:' + ENUM_TO_SNAME[shortname_outtype]
        print 'First Segment Length: %e samples' % first_seg_len
        print 'Last Segment Length: %e samples' % last_seg_len
        print 'delta=%f,new ts=%f' % (delta,new_ts)

    # Clean up
    handle_in.close()
    handle_out.close()

    # Return header info
    return {'infile':infile,'intype':shortname_intype,'outfile':outfile,
            'outtype':shortname_outtype,'sample_offset':sample_offset,
            'sample_len':sample_len}
コード例 #24
0
ファイル: fileman.py プロジェクト: garverp/recipes
def propagate_headers(options, args):
    """Write adjusted metadata headers for a subsection of a data file.

    Reads ``args[0] + '.hdr'``, locates the segment containing sample
    ``options.start``, interpolates a new ``rx_time`` for that segment,
    recomputes the per-segment ``bytes``/``type``/``size`` fields for the
    requested output type, and writes the resulting headers to
    ``args[1] + '.hdr'``.  Returns a dict describing the input/output
    files, types, sample offset, and sample length.  Exits the process on
    I/O errors or an unsupported input data type.
    """
    infile = args[0]
    outfile = args[1]
    infile_hdr = infile + ".hdr"
    outfile_hdr = outfile + ".hdr"
    sample_cnt_end = 0
    sample_offset = long(options.start)
    # Open input header
    try:
        handle_in = open(infile_hdr, "rb")
    except IOError:
        sys.stderr.write("Unable to open input file header\n")
        sys.exit(1)
    # Open output header
    try:
        handle_out = open(outfile_hdr, "wb")
    except IOError:
        sys.stderr.write("Unable to open output file header\n")
        sys.exit(1)

    # Read first header separately to get file type
    hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
    info_in = parse_file_metadata.parse_header(hdr_in, False)
    sample_cnt_end += info_in["nitems"]
    # Parse file type - ensure support for it
    shortname_intype = find_shortname(info_in["cplx"], info_in["type"], info_in["size"])
    if shortname_intype == SNAME_TO_ENUM["unknown"]:
        sys.stderr.write("Unsupported data type\n")
        sys.exit(1)
    if options.output_type == "unknown":
        shortname_outtype = shortname_intype
    else:
        shortname_outtype = SNAME_TO_ENUM[options.output_type]

    # Calc sample_len from file size if not specified
    if options.nsamples is not None:
        sample_len = long(options.nsamples)
    else:
        # SNAME_DEFS[...][0] is presumably the sample size in bytes --
        # TODO confirm against SNAME_DEFS' definition.
        sample_len = os.path.getsize(infile) / SNAME_DEFS[shortname_intype][0]
    final_index = sample_offset + sample_len

    # Search input headers until we find the correct one
    # (sample_cnt_end tracks the running end index of the segment just read)
    while sample_cnt_end <= sample_offset:
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in, False)
        sample_cnt_end += info_in["nitems"]
    time_in = info_in["rx_time"]
    # Starting sample of current segment
    sample_cnt_start = sample_cnt_end - info_in["nitems"]
    # Interpolate new timestamp
    delta = sample_offset - sample_cnt_start
    new_ts = time_in + delta / info_in["rx_rate"]
    # Calc new segment size (samples); clamped if the requested range ends
    # inside this first segment
    if sample_cnt_end > final_index:
        first_seg_len = final_index - sample_offset
    else:
        first_seg_len = sample_cnt_end - sample_offset

    # Write the first output header
    hdr_out = hdr_in
    # rx_time is stored as a (uint64 seconds, double fractional) PMT tuple
    new_secs = long(new_ts)
    new_fracs = new_ts - new_secs
    time_val = pmt.make_tuple(pmt.from_uint64(new_secs), pmt.from_double(new_fracs))
    size_val = pmt.from_long(SNAME_DEFS[shortname_outtype][0])
    bytes_val = pmt.from_uint64(first_seg_len * SNAME_DEFS[shortname_outtype][0])
    type_val = pmt.from_long(SNAME_DEFS[shortname_outtype][2])
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("rx_time"), time_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("bytes"), bytes_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("type"), type_val)
    hdr_out = pmt.dict_add(hdr_out, pmt.intern("size"), size_val)
    hdr_out_str = pmt.serialize_str(hdr_out) + pmt.serialize_str(hdr_extra_in)
    handle_out.write(hdr_out_str)

    # Continue reading headers, modifying, and writing
    last_seg_len = info_in["nitems"]
    print "sample_cnt_end=%d,final_index=%d" % (sample_cnt_end, final_index)
    # Iterate through remaining headers
    while sample_cnt_end < final_index:
        hdr_in, hdr_extra_in, handle_in = read_single_header(handle_in)
        info_in = parse_file_metadata.parse_header(hdr_in, False)
        nitems = info_in["nitems"]
        sample_cnt_start = sample_cnt_end
        sample_cnt_end += nitems
        hdr_out = hdr_in
        # For last header, adjust segment length accordingly
        if sample_cnt_end > final_index:
            last_seg_len = final_index - sample_cnt_start
        else:
            last_seg_len = nitems
        size_val = pmt.from_long(SNAME_DEFS[shortname_outtype][0])
        bytes_val = pmt.from_uint64(last_seg_len * SNAME_DEFS[shortname_outtype][0])
        type_val = pmt.from_long(SNAME_DEFS[shortname_outtype][2])
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("bytes"), bytes_val)
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("type"), type_val)
        hdr_out = pmt.dict_add(hdr_out, pmt.intern("size"), size_val)
        hdr_out_str = pmt.serialize_str(hdr_out) + pmt.serialize_str(hdr_extra_in)
        handle_out.write(hdr_out_str)

    if options.verbose:
        print "Input File:" + infile
        print "Input Header:" + infile_hdr
        print "Input Type:" + ENUM_TO_SNAME[shortname_intype]
        print "Output File:" + outfile
        print "Output File Length (Samples):%d" % (final_index - sample_offset)
        print "Output Header:" + outfile_hdr
        print "File subsection: [%d,%d]" % (sample_offset, final_index)
        print "Output Type:" + ENUM_TO_SNAME[shortname_outtype]
        print "First Segment Length: %e samples" % first_seg_len
        print "Last Segment Length: %e samples" % last_seg_len
        print "delta=%f,new ts=%f" % (delta, new_ts)

    # Clean up
    handle_in.close()
    handle_out.close()

    # Return header info
    return {
        "infile": infile,
        "intype": shortname_intype,
        "outfile": outfile,
        "outtype": shortname_outtype,
        "sample_offset": sample_offset,
        "sample_len": sample_len,
    }
コード例 #25
0
def gr2fits(filename, merge=True, verbose=False):
    """Convert a GNU Radio metadata file to a FITS file.

    Each metadata segment becomes one image HDU holding (real, imag)
    sample columns; with ``merge=True`` all segments are concatenated into
    a single primary HDU.  The result is written to ``filename + '.fits'``
    (replacing any existing file) and the HDUList is returned.

    Raises IOError if the file cannot be opened or a header cannot be
    deserialized, and ValueError for an unsupported sample format.
    """
    try:
        handle = open(filename, 'rb')
    except (IOError, OSError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed and reported as a missing file.
        raise IOError('File %s does not exist' % filename)

    nheaders = 0
    nread = 0

    fitsout = fits.HDUList()

    while (True):
        """
        /opt/local/bin/gr_read_file_metadata
        note that there can be > 1 metadata blocks
        I think they can come every 1e6 items
        """
        # read out next header bytes
        hdr_start = handle.tell()
        header_str = handle.read(parse_file_metadata.HEADER_LENGTH)
        if (len(header_str) == 0):
            break

        # Convert from string to PMT (should be a dictionary)
        try:
            header = pmt.deserialize_str(header_str)
        except RuntimeError:
            raise IOError(
                "Could not deserialize header: invalid or corrupt data file.\n"
            )

        if verbose:
            print("HEADER {0}".format(nheaders))
        info = parse_file_metadata.parse_header(header, verbose)
        # Default to the base header info; previously a segment without an
        # extras block left `extra_info` unbound (NameError) or stale from
        # the previous iteration.
        extra_info = info
        if (info["extra_len"] > 0):
            extra_str = handle.read(info["extra_len"])
            if (len(extra_str) == 0):
                break

            try:
                extra = pmt.deserialize_str(extra_str)
            except RuntimeError:
                sys.stderr.write(
                    "Could not deserialize extras: invalid or corrupt data file.\n"
                )
                break

            if verbose:
                print("\nExtra Header:")
            extra_info = parse_file_metadata.parse_extra_dict(
                extra, info, verbose)

        nheaders += 1
        nread += parse_file_metadata.HEADER_LENGTH + info["extra_len"]
        handle.seek(nread, 0)
        h = extra_info
        if h['size'] == 8 and h['cplx']:
            dtype = np.complex64
        else:
            # Previously `dtype` was left unbound here, producing an
            # obscure NameError on the fromfile() call below.
            raise ValueError('Unsupported sample format: size=%s cplx=%s'
                             % (h['size'], h['cplx']))

        # np.fromfile replaces scipy.fromfile (alias removed from SciPy).
        d = np.fromfile(handle, dtype=dtype, count=h['nitems'])

        nread += info['nbytes']
        handle.seek(nread, 0)

        # One image HDU per segment: columns are (real, imag).
        fitsout.append(fits.ImageHDU(data=np.c_[d.real, d.imag]))
        fitsout[-1].header['NITEMS'] = (h['nitems'],
                                        'Number of complex samples')
        fitsout[-1].header['RATE'] = (h['rx_rate'], '[Hz] sample rate')
        fitsout[-1].header['RX_FREQ'] = (pmt.to_float(h['rx_freq']) / 1e6,
                                         '[MHz] Radio frequency')
        fitsout[-1].header['RX_TIME'] = (h['rx_time'],
                                         '[s] Time of start of block')

    if merge:
        totallen = 0
        for i in xrange(0, len(fitsout)):
            totallen += fitsout[i].header['NAXIS2']
        # NOTE(review): index 1 assumes at least two HDUs were appended; a
        # file with a single metadata block raises IndexError here --
        # confirm whether index 0 was intended.
        d = np.zeros((totallen, 2), dtype=fitsout[1].data.dtype)
        nmax = 0
        for i in xrange(0, len(fitsout)):
            d[nmax:nmax + fitsout[i].header['NAXIS2']] = fitsout[i].data
            nmax += fitsout[i].header['NAXIS2']
        newfitsout = fits.HDUList()
        newfitsout.append(fits.PrimaryHDU(data=d))
        newfitsout[0].header = fitsout[1].header
        newfitsout[0].header['NITEMS'] = totallen
        newfitsout[0].header['EXPTIME'] = (d.shape[0] /
                                           newfitsout[0].header['RATE'],
                                           '[s] Duration of file')
        fitsout = newfitsout

    fitsout.verify('silentfix')
    if os.path.exists(filename + '.fits'):
        os.remove(filename + '.fits')
    fitsout.writeto(filename + '.fits')
    print('Wrote %s.fits' % filename)
    return fitsout
fileToWrite = open("signal.txt", 'w')
fileToRead = open("meta_signal.bin", "rb")

posInFile = 0
while (True):
    header_str = fileToRead.read(parse_file_metadata.HEADER_LENGTH)
    if (len(header_str) == 0):
        break
    try:
        header = pmt.deserialize_str(header_str)
    except RuntimeError:
        sys.stderr.write(
            "Could not deserialize header: invalid or corrupt data file.\n")
        sys.exit(1)

    info = parse_file_metadata.parse_header(header, False)

    if (info["extra_len"] > 0):
        extra_str = fileToRead.read(info["extra_len"])
        if (len(extra_str) == 0):
            break

        try:
            extra = pmt.deserialize_str(extra_str)
        except RuntimeError:
            sys.stderr.write(
                "Could not deserialize extras: invalid or corrupt data file.\n"
            )
            sys.exit(1)

    posInFile += parse_file_metadata.HEADER_LENGTH + info["extra_len"]