Code example #1
File: data.py  Project: tesatory/fastnet
  def get_next_batch(self, batch_size):
    # Lazily start the background reader and stage the first chunk on the GPU.
    if self._reader is None:
      self._start_read()

    if self._gpu_batch is None:
      self._fill_reserved_data()

    height, width = self._gpu_batch.data.shape
    gpu_data = self._gpu_batch.data
    gpu_labels = self._gpu_batch.labels

    if self.index + batch_size >= width:
      # Tail of the staged chunk: hand out whatever columns remain, then
      # reset the cursor and stage the next chunk.
      width = width - self.index
      labels = gpu_labels[self.index:self.index + batch_size]
      data = gpuarray.zeros((height, width), dtype=np.float32)
      gpu_partial_copy_to(gpu_data, data, 0, height, self.index, self.index + width)
      self.index = 0
      self._fill_reserved_data()
    else:
      # Copy a full batch_size-wide column slice out of the staged chunk.
      labels = gpu_labels[self.index:self.index + batch_size]
      data = gpuarray.zeros((height, batch_size), dtype=np.float32)
      gpu_partial_copy_to(gpu_data, data, 0, height, self.index, self.index + batch_size)
      self.index += batch_size
    return BatchData(data, labels, self._gpu_batch.epoch)
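The two branches differ only in how many columns they copy. `gpu_partial_copy_to` is a fastnet CUDA helper; judging from these call sites, it copies the rectangle `src[row_from:row_to, col_from:col_to]` into a destination of exactly that shape. A minimal NumPy stand-in for that assumed contract, replaying the tail-of-chunk branch on the CPU:

import numpy as np

def partial_copy_to(src, dst, row_from, row_to, col_from, col_to):
    # CPU stand-in for fastnet's gpu_partial_copy_to (assumed semantics):
    # copy src[row_from:row_to, col_from:col_to] into dst, which must
    # already have shape (row_to - row_from, col_to - col_from).
    dst[...] = src[row_from:row_to, col_from:col_to]

# Replay the tail branch: index is 7, so only 3 of 10 columns remain.
height, width, index = 4, 10, 7
src = np.arange(height * width, dtype=np.float32).reshape(height, width)
remaining = width - index
dst = np.zeros((height, remaining), dtype=np.float32)
partial_copy_to(src, dst, 0, height, index, index + remaining)
assert (dst == src[:, index:]).all()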
Code example #2
  def get_next_batch(self, batch_size):
    if self._reader is None:
      self._start_read()

    if self._gpu_batch is None:
      self._fill_reserved_data()

    height, width = self._gpu_batch.data.shape
    gpu_data = self._gpu_batch.data
    gpu_labels = self._gpu_batch.labels

    if self.index + batch_size >= width:
      # Tail of the staged chunk: return the remaining columns and refill.
      width = width - self.index
      labels = gpu_labels[self.index:self.index + batch_size]
      data = gpuarray.zeros((height, width), dtype=np.float32)
      gpu_partial_copy_to(gpu_data, data, 0, height, self.index, self.index + width)
      self.index = 0
      self._fill_reserved_data()
    else:
      labels = gpu_labels[self.index:self.index + batch_size]
      data = gpuarray.zeros((height, batch_size), dtype=np.float32)
      gpu_partial_copy_to(gpu_data, data, 0, height, self.index, self.index + batch_size)
      self.index += batch_size
    # This variant also reports which batch of the chunk was served.
    return BatchData(data, labels, self._gpu_batch.epoch, self._gpu_batch.batch_index)
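The only difference from example #1 is the return value: this variant also threads through `batch_index`. `BatchData` itself is a small fastnet container; a namedtuple sketch of the shape implied by the two return statements (the real class may carry more fields):

from collections import namedtuple

# Assumed layout, reconstructed from the return statements above.
BatchData = namedtuple('BatchData', ['data', 'labels', 'epoch', 'batch_index'])

batch = BatchData(data=None, labels=None, epoch=1, batch_index=3)
print(batch.epoch, batch.batch_index)   # -> 1 3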
Code example #3
def get_next_batch(self, batch_size):
    if self._reader is None:
        self._start_read()

    if self._gpu_batch is None:
        self._fill_reserved_data()

    if not self.multiview:
        # Single-view path: data and labels are already staged on the GPU.
        height, width = self._gpu_batch.data.shape
        gpu_data = self._gpu_batch.data
        gpu_labels = self._gpu_batch.labels
        epoch = self._gpu_batch.epoch

        if self.index + batch_size >= width:
            # Tail of the chunk: serve the remaining columns, then refill.
            width = width - self.index
            labels = gpu_labels[self.index:self.index + batch_size]
            data = gpuarray.zeros((height, width), dtype=np.float32)
            gpu_partial_copy_to(gpu_data, data, 0, height, self.index,
                                self.index + width)
            self.index = 0
            self._fill_reserved_data()
        else:
            labels = gpu_labels[self.index:self.index + batch_size]
            data = gpuarray.zeros((height, batch_size), dtype=np.float32)
            gpu_partial_copy_to(gpu_data, data, 0, height, self.index,
                                self.index + batch_size)
            self.index += batch_size
    else:
        # Multiview provider: each sample appears num_view times (typically
        # 10 crops/flips), so data and labels stay on the CPU until the
        # views have been gathered into a single batch.
        height, width = self._cpu_batch.data.shape
        cpu_data = self._cpu_batch.data
        cpu_labels = self._cpu_batch.labels
        epoch = self._cpu_batch.epoch

        # Columns are laid out as num_view contiguous groups of width
        # samples each (integer division under Python 2).
        width /= self.num_view

        if self.index + batch_size >= width:
            batch_size = width - self.index

        # Gather the same sample range out of every view group.
        labels = cpu_labels[self.index:self.index + batch_size]
        data = np.zeros((height, batch_size * self.num_view),
                        dtype=np.float32)
        for i in range(self.num_view):
            data[:, i * batch_size:(i + 1) * batch_size] = \
                cpu_data[:, self.index + width * i:
                         self.index + width * i + batch_size]

        # Force C-contiguous layout before uploading to the GPU.
        data = copy_to_gpu(np.require(data, requirements='C'))
        labels = copy_to_gpu(np.require(labels, requirements='C'))

        self.index = (self.index + batch_size) / width

    return BatchData(data, labels, epoch)
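In the multiview branch the chunk holds `num_view` contiguous groups of `width` columns, one group per view, and the loop pulls the same `batch_size` sample range out of every group. A NumPy replay of that gather, with small made-up dimensions:

import numpy as np

height, num_view, per_view = 2, 10, 6   # 6 samples, 10 views of each
index, batch_size = 2, 3
cpu_data = np.arange(height * per_view * num_view,
                     dtype=np.float32).reshape(height, per_view * num_view)

# Same gather as the provider's loop: columns [index, index + batch_size)
# of every view group, packed view by view into the output batch.
data = np.zeros((height, batch_size * num_view), dtype=np.float32)
for i in range(num_view):
    data[:, i * batch_size:(i + 1) * batch_size] = \
        cpu_data[:, index + per_view * i:index + per_view * i + batch_size]

# View i of sample j ends up at column i * batch_size + j.
assert (data[:, batch_size] == cpu_data[:, per_view + index]).all()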
Code example #4
File: layer.py  Project: alemagnani/fastnet
  def logreg_cost_multiview(self, label, output, num_view):
    # Each sample contributes num_view columns; unit is the number of
    # distinct samples in the batch (integer division under Python 2).
    unit = self.batch_size / num_view
    if self.cost.shape[0] != unit:
      self.cost = gpuarray.zeros((unit, 1), dtype=np.float32)
    # Arg-max over classes for every column, then count how many samples
    # are predicted correctly once their views are reduced together.
    maxid = gpuarray.zeros((self.batch_size, 1), dtype=np.float32)
    find_col_max_id(maxid, output)
    self.batchCorrect = same_reduce_multiview(label, maxid, num_view)
    # The cost itself is computed from the first `unit` columns of the
    # output only.
    tmp = gpuarray.zeros((output.shape[0], unit), dtype=np.float32)
    gpu_partial_copy_to(output, tmp, 0, output.shape[0], 0, unit)
    logreg_cost_col_reduce(tmp, label, self.cost)
Code example #5
File: layer.py  Project: sportsbitenews/fastnet
  def logreg_cost_multiview(self, label, output, num_view):
    unit = self.batch_size / num_view
    if self.cost.shape[0] != unit:
      self.cost = gpuarray.zeros((unit, 1), dtype=np.float32)
    maxid = gpuarray.zeros((self.batch_size, 1), dtype=np.float32)
    find_col_max_id(maxid, output)
    self.batchCorrect = same_reduce_multiview(label, maxid, num_view)
    tmp = gpuarray.zeros((output.shape[0], unit), dtype=np.float32)
    gpu_partial_copy_to(output, tmp, 0, output.shape[0], 0, unit)
    logreg_cost_col_reduce(tmp, label, self.cost)
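`find_col_max_id`, `same_reduce_multiview`, and `logreg_cost_col_reduce` are fastnet CUDA kernels, so their exact semantics are not visible here. Assuming `logreg_cost_col_reduce` computes the standard multinomial logistic-regression cost, i.e. the negative log of the predicted probability at the true label for each column, a NumPy sketch of that assumption looks like this:

import numpy as np

def logreg_cost_col(output, label):
    # Assumed semantics: output is (num_classes, num_samples) of
    # predicted probabilities, label holds one class id per column;
    # return -log p[true class] for every column.
    cols = np.arange(output.shape[1])
    return -np.log(output[label.astype(int), cols])

probs = np.array([[0.7, 0.1],
                  [0.2, 0.8],
                  [0.1, 0.1]], dtype=np.float32)
labels = np.array([0, 1])
print(logreg_cost_col(probs, labels))   # approx [0.357, 0.223]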
Code example #6
File: data.py  Project: rjpower/fastnet
    def get_next_batch(self, batch_size):
      if self._reader is None:
        self._start_read()

      if self._gpu_batch is None:
        self._fill_reserved_data()

      if not self.multiview:
        height, width = self._gpu_batch.data.shape
        gpu_data = self._gpu_batch.data
        gpu_labels = self._gpu_batch.labels
        epoch = self._gpu_batch.epoch

        if self.index + batch_size >= width:
          width = width - self.index
          labels = gpu_labels[self.index:self.index + batch_size]

          data = gpuarray.zeros((height, width), dtype=np.float32)
          gpu_partial_copy_to(gpu_data, data, 0, height, self.index, self.index + width)
          self.index = 0
          self._fill_reserved_data()
        else:
          labels = gpu_labels[self.index:self.index + batch_size]
          data = gpuarray.zeros((height, batch_size), dtype=np.float32)
          gpu_partial_copy_to(gpu_data, data, 0, height, self.index, self.index + batch_size)
          self.index += batch_size
      else:
        # multiview provider
        # number of views should be 10
        # when using multiview, do not pre-move data and labels to gpu
        height, width = self._cpu_batch.data.shape
        cpu_data = self._cpu_batch.data
        cpu_labels = self._cpu_batch.labels
        epoch = self._cpu_batch.epoch

        width /= self.num_view

        if self.index + batch_size >= width:
          batch_size = width - self.index

        labels = cpu_labels[self.index:self.index + batch_size]
        data = np.zeros((height, batch_size * self.num_view), dtype=np.float32)
        for i in range(self.num_view):
          data[:, i * batch_size:(i + 1) * batch_size] = cpu_data[:, self.index + width * i:self.index + width * i + batch_size]

        data = copy_to_gpu(np.require(data, requirements='C'))
        labels = copy_to_gpu(np.require(labels, requirements='C'))

        self.index = (self.index + batch_size) / width

      return BatchData(data, labels, epoch)
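One detail worth calling out in the multiview path: `np.require(..., requirements='C')` copies only when its input is not already C-contiguous, and column slices of NumPy arrays are non-contiguous views, so the call guarantees the GPU upload sees densely packed memory. A quick demonstration:

import numpy as np

a = np.arange(12, dtype=np.float32).reshape(3, 4)
col_slice = a[:, 1:3]                     # a view, not C-contiguous
print(col_slice.flags['C_CONTIGUOUS'])    # False

c = np.require(col_slice, requirements='C')
print(c.flags['C_CONTIGUOUS'])            # True, and c is a fresh copy

print(np.require(a, requirements='C') is a)   # True: no copy if already C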