def get_fil_dict(input_file):
    """Read the SIGPROC header of *input_file* and return the updated
    header/statistics dictionary produced by ``parseheader.updateHeader``.
    """
    header = parseheader.parseSigprocHeader(input_file)
    return parseheader.updateHeader(header)
Code example #2
                    #info["processing_id"] = trapum.create_processing(pipeline_id,submit_time,"enqueued")

                    # Publish to Rabbit
                    #log.debug("Ready to publish to RabbitMQ")
                    #pika_process.publish_info(opts,info)
                    sys.exit(0)

                files_per_beam = sorted(
                    glob.glob(new_path + '/' + beam_name + '/*.fil'))

                if len(files_per_beam) == 1:
                    log.info(
                        "No need to merge since one file recorded per beam")
                    sys.exit(0)

                file_info1 = parseheader.parseSigprocHeader(files_per_beam[0])
                file_info = parseheader.updateHeader(file_info1)

                if float(opts.length) / float(file_info['tobs']) < 2.0:
                    log.info(
                        "No need to merge since required length is smaller than two files"
                    )
                    continue
                    #sys.exit(0)
                elif float(
                        opts.length) > len(files_per_beam) * file_info['tobs']:
                    #log.info("Length asked for exceeds total observation length!!")
                    continue
                    #sys.exit(0)
                else:
                    no_of_files_per_merge = int(