def test_particle_queueio(tmpdir, batch_size, n_reads=10):
    """Exercise a larcv QueueProcessor that batch-fills particles.

    Writes a small particle file into *tmpdir*, configures a
    QueueProcessor with a BatchFillerParticle process, then performs
    ``n_reads`` batched reads, polling until each asynchronous read
    completes.
    """
    unique_name = "queueio_{}".format(uuid.uuid4())

    # Create an input file populated with random particle events.
    input_path = str(tmpdir + "/test_queueio_particles_{}.h5".format(unique_name))
    create_particle_file(input_path, rand_num_events=25)

    # Build the processor configuration on top of the defaults.
    cfg = larcv.QueueProcessor.default_config()
    cfg["InputFiles"].append(input_path)
    cfg["Verbosity"] = 0
    cfg["ProcessDriver"]["ProcessName"].append(f"test_{unique_name}")
    cfg["ProcessDriver"]["ProcessType"].append("BatchFillerParticle")

    filler_params = {
        f"test_{unique_name}": {
            "ParticleProducer": "test",
        }
    }
    if cfg["ProcessDriver"]["ProcessList"] is None:
        cfg["ProcessDriver"]["ProcessList"] = filler_params
    else:
        # NOTE(review): assumes ProcessList supports append when non-None;
        # confirm against the config object's actual type.
        cfg["ProcessDriver"]["ProcessList"].append(filler_params)

    queue_proc = larcv.QueueProcessor()
    queue_proc.configure(cfg)

    # Entry indices for all reads, wrapping around the number of entries.
    indexes = numpy.arange(batch_size * n_reads * 2) % queue_proc.get_n_entries()

    # Prime the queue with the first batch and wait for the read to finish.
    queue_proc.set_next_batch(indexes[0:batch_size])
    queue_proc.prepare_next()
    while queue_proc.is_reading():
        print("Sleeping")
        time.sleep(0.1)
    queue_proc.pop_current_data()

    # Perform the remaining reads, waiting on each asynchronous fetch.
    for read_idx in range(n_reads):
        next_batch = indexes[read_idx * batch_size:(read_idx + 1) * batch_size]
        queue_proc.set_next_batch(next_batch)
        queue_proc.prepare_next()
        while queue_proc.is_reading():
            print("Sleeping")
            time.sleep(0.1)
# ---- Esempio n. 2 (example separator; stray extraction artifact) ----
def test_sparsetensor2d_queueio(tmpdir, batch_size, n_projections, n_reads=10):
    """Exercise a larcv QueueProcessor that batch-fills sparse 2D tensors.

    Writes a sparse-tensor-2D file with *n_projections* projections into
    *tmpdir*, configures a QueueProcessor with a BatchFillerSparseTensor2D
    process, then performs ``n_reads`` batched reads, polling until each
    asynchronous read completes.
    """
    unique_name = "queueio_{}".format(uuid.uuid4())

    # Create an input file populated with random sparse-tensor-2D events.
    input_path = str(
        tmpdir + "/test_queueio_sparsetensor2d_{}.h5".format(unique_name))
    create_sparsetensor2d_file(input_path,
                               rand_num_events=25,
                               n_projections=n_projections)

    # Build the processor configuration on top of the defaults.
    cfg = larcv.QueueProcessor.default_config()
    cfg["InputFiles"].append(input_path)
    cfg["Verbosity"] = 0
    cfg["ProcessDriver"]["ProcessName"].append(f"test_{unique_name}")
    cfg["ProcessDriver"]["ProcessType"].append("BatchFillerSparseTensor2D")

    filler_params = {
        f"test_{unique_name}": {
            "TensorProducer": "test",
            "MaxVoxels": 100,
            "UnfilledVoxelValue": -999,
            "Channels": list(range(n_projections)),
        }
    }
    if cfg["ProcessDriver"]["ProcessList"] is None:
        cfg["ProcessDriver"]["ProcessList"] = filler_params
    else:
        # NOTE(review): assumes ProcessList supports append when non-None;
        # confirm against the config object's actual type.
        cfg["ProcessDriver"]["ProcessList"].append(filler_params)

    queue_proc = larcv.QueueProcessor()
    queue_proc.configure(cfg)

    # Entry indices for all reads, wrapping around the number of entries.
    indexes = numpy.arange(batch_size * n_reads * 2) % queue_proc.get_n_entries()

    # Prime the queue with the first batch and wait for the read to finish.
    queue_proc.set_next_batch(indexes[0:batch_size])
    queue_proc.prepare_next()
    while queue_proc.is_reading():
        print("Sleeping")
        time.sleep(0.1)
    queue_proc.pop_current_data()

    # Perform the remaining reads, waiting on each asynchronous fetch.
    for read_idx in range(n_reads):
        next_batch = indexes[read_idx * batch_size:(read_idx + 1) * batch_size]
        queue_proc.set_next_batch(next_batch)
        queue_proc.prepare_next()
        while queue_proc.is_reading():
            print("Sleeping")
            time.sleep(0.1)
# ---- Esempio n. 3 (example separator; stray extraction artifact) ----
    def configure(self, cfg, color=0):
        """Configure this filler from *cfg* and register it by name.

        Expected *cfg* keys: ``'filler_name'`` (required, must be unique
        among live instances), ``'filler_cfg'`` (path handed to the
        QueueProcessor), and optionally ``'verbosity'`` and ``'make_copy'``.

        Raises ValueError when ``'filler_name'`` is empty; returns early
        (without configuring) when the name is owned by another instance.
        """
        # Re-configuring an already-named instance starts from a clean slate.
        if self._name:
            self.reset()

        filler_name = cfg['filler_name']
        if not filler_name:
            sys.stderr.write('filler_name is empty!\n')
            raise ValueError

        # Refuse to take over a name that a *different* live instance owns.
        if self.__class__.exist(filler_name) and not self.__class__.instance_by_name(filler_name) == self:
            sys.stderr.write('filler_name %s already running!' % filler_name)
            return
        self._name = filler_name

        # Path to the QueueProcessor configuration file.
        self._cfg_file = cfg['filler_cfg']

        if 'verbosity' in cfg:
            self._verbose = bool(cfg['verbosity'])

        # Build and configure the underlying thread processor.
        self._proc = larcv.QueueProcessor(self._name)
        self._proc.configure(self._cfg_file, color)

        # Map each batch-filler process name to a python-side data buffer.
        self._storage = {}
        want_copy = 'make_copy' in cfg and cfg['make_copy']
        for pid, dtype in zip(self._proc.batch_fillers(), self._proc.batch_types()):
            storage_key = self._proc.storage_name(pid)
            buffer = batch_pydata(larcv.BatchDataTypeName(dtype))
            if want_copy:
                buffer._make_copy = True
            self._storage[storage_key] = buffer

        # All succeeded: register *this* instance under its name.
        self.__class__._instance_m[self._name] = self
# ---- Esempio n. 4 (example separator; stray extraction artifact) ----
def test_tensor3d_queueio(tmpdir, make_copy, batch_size, from_dense, n_reads=2):
    """Exercise a larcv QueueProcessor that batch-fills 3D tensors.

    Writes either a dense or a sparse tensor3d file into *tmpdir*
    (selected by *from_dense*), configures a QueueProcessor with a
    BatchFillerTensor3D process, then performs ``n_reads`` batched reads,
    polling until each asynchronous read completes.
    """
    unique_name = "queueio_{}".format(uuid.uuid4())

    # Create an input file populated with random tensor3d events.
    input_path = str(tmpdir + "/test_queueio_tensor3d_{}.h5".format(unique_name))
    if from_dense:
        create_dense_tensor3d_file(input_path, rand_num_events=25)
        tensor_type = "dense"
    else:
        create_tensor3d_file(input_path, rand_num_events=25)
        tensor_type = "sparse"

    # Build the processor configuration on top of the defaults.
    cfg = larcv.QueueProcessor.default_config()
    cfg["InputFiles"].append(input_path)
    cfg["Verbosity"] = 0
    cfg["ProcessDriver"]["ProcessName"].append(f"test_{unique_name}")
    cfg["ProcessDriver"]["ProcessType"].append("BatchFillerTensor3D")

    filler_params = {
        f"test_{unique_name}": {
            "TensorProducer": "test",
            "TensorType": tensor_type,
            "Channels": [0,],
            "EmptyVoxelValue": -999.,
            "Augment": True,
        }
    }
    if cfg["ProcessDriver"]["ProcessList"] is None:
        cfg["ProcessDriver"]["ProcessList"] = filler_params
    else:
        # NOTE(review): assumes ProcessList supports append when non-None;
        # confirm against the config object's actual type.
        cfg["ProcessDriver"]["ProcessList"].append(filler_params)

    queue_proc = larcv.QueueProcessor()
    queue_proc.configure(cfg)

    # Entry indices for all reads, wrapping around the number of entries.
    indexes = numpy.arange(batch_size * n_reads * 2) % queue_proc.get_n_entries()

    # Prime the queue with the first batch and wait for the read to finish.
    queue_proc.set_next_batch(indexes[0:batch_size])
    print(queue_proc.is_reading())
    queue_proc.prepare_next()
    time.sleep(2)
    while queue_proc.is_reading():
        print("Sleeping")
        time.sleep(0.5)
    queue_proc.pop_current_data()

    # Perform the remaining reads, waiting on each asynchronous fetch.
    for read_idx in range(n_reads):
        next_batch = indexes[read_idx * batch_size:(read_idx + 1) * batch_size]
        queue_proc.set_next_batch(next_batch)
        queue_proc.prepare_next()
        while queue_proc.is_reading():
            print("Sleeping")
            time.sleep(0.1)