Example #1
    def __next__(self):
        # When the loader has run out of data, report the pipeline timings
        # and signal the end of the epoch.
        if (b.isEmpty(self.loader._handle)):
            timing_info = b.getTimingInfo(self.loader._handle)
            print("Load     time ::", timing_info.load_time)
            print("Decode   time ::", timing_info.decode_time)
            print("Process  time ::", timing_info.process_time)
            print("Transfer time ::", timing_info.transfer_time)
            raise StopIteration

        # A non-zero return code means the pipeline could not produce another batch.
        if self.loader.run() != 0:
            raise StopIteration

        # Copy the processed batch into the output buffer in the requested layout.
        if (types.NCHW == self.tensor_format):
            self.loader.copyToTensorNCHW(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))
        else:
            self.loader.copyToTensorNHWC(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))

        # Fetch the batch labels and expose them as a LongTensor.
        self.loader.getImageLabels(self.labels)
        self.labels_tensor = torch.from_numpy(self.labels).type(
            torch.LongTensor)

        if self.tensor_dtype == types.FLOAT:
            return torch.from_numpy(self.out), self.labels_tensor
        elif self.tensor_dtype == types.TensorDataType.FLOAT16:
            return torch.from_numpy(self.out.astype(
                np.float16)), self.labels_tensor
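
This `__next__` raises `StopIteration` when the loader is exhausted, so the surrounding object can be consumed with a plain `for` loop. Below is a minimal, self-contained sketch of the same iterator protocol using a stand-in loader; the `DummyLoader` class and its random data are made up for illustration and are not part of the API above.

import numpy as np
import torch

class DummyLoader:
    """Stand-in that mimics the iterator protocol used by the loader above."""

    def __init__(self, num_batches, batch_size):
        self.num_batches = num_batches
        self.batch_size = batch_size
        self.i = 0

    def __iter__(self):
        self.i = 0
        return self

    def __next__(self):
        if self.i >= self.num_batches:
            raise StopIteration  # ends the for-loop below, like the real loader
        self.i += 1
        images = torch.from_numpy(
            np.random.rand(self.batch_size, 3, 224, 224).astype(np.float32))
        labels = torch.zeros(self.batch_size, dtype=torch.long)
        return images, labels

for images, labels in DummyLoader(num_batches=2, batch_size=4):
    print(images.shape, labels.shape)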
Example #2
    def __next__(self):
        if (b.isEmpty(self.loader._handle)):
            timing_info = b.getTimingInfo(self.loader._handle)
            print("Load     time ::", timing_info.load_time)
            print("Decode   time ::", timing_info.decode_time)
            print("Process  time ::", timing_info.process_time)
            print("Transfer time ::", timing_info.transfer_time)
            raise StopIteration

        if self.loader.run() != 0:
            raise StopIteration

        if (types.NCHW == self.tensor_format):
            self.loader.copyToTensorNCHW(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))
        else:
            self.loader.copyToTensorNHWC(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))

        self.loader.getImageLabels(self.labels)
        # Note: tf.reset_default_graph() is a TensorFlow 1.x graph-mode API
        # (tf.compat.v1.reset_default_graph in TensorFlow 2.x).
        tf.reset_default_graph()
        self.labels_tensor = tf.convert_to_tensor(self.labels, np.int32)

        if self.tensor_dtype == types.FLOAT:
            return tf.convert_to_tensor(self.out, np.float32), self.labels_tensor
        elif self.tensor_dtype == types.FLOAT16:
            return tf.convert_to_tensor(self.out, np.float16), self.labels_tensor
Example #3
    def __next__(self):
        if (b.isEmpty(self.loader._handle)):
            timing_info = b.getTimingInfo(self.loader._handle)
            print("Load     time ::", timing_info.load_time)
            print("Decode   time ::", timing_info.decode_time)
            print("Process  time ::", timing_info.process_time)
            print("Transfer time ::", timing_info.transfer_time)
            raise StopIteration

        if self.loader.run() != 0:
            raise StopIteration

        if (types.NCHW == self.tensor_format):
            self.loader.copyToTensorNCHW(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))
        else:
            self.loader.copyToTensorNHWC(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))

        if ((self.loader._name == "Caffe2ReaderDetection")
                or (self.loader._name == "CaffeReaderDetection")):
            self.lis = []  # Empty list for bboxes
            self.lis_lab = []  # Empty list of labels

            # Per-image count of labels/bboxes in the batch
            self.bboxes_label_count = np.zeros(self.bs, dtype="int32")
            self.count_batch = self.loader.GetBoundingBoxCount(
                self.bboxes_label_count)
            # 1D labels array in a batch
            self.labels = np.zeros(self.count_batch, dtype="int32")
            self.loader.GetBBLabels(self.labels)
            # 1D bboxes array in a batch
            self.bboxes = np.zeros((self.count_batch * 4), dtype="float32")
            self.loader.GetBBCords(self.bboxes)
            # Image sizes of the batch
            self.img_size = np.zeros((self.bs * 2), dtype="int32")
            self.loader.GetImgSizes(self.img_size)

            count = 0
            sum_count = 0
            for i in range(self.bs):
                count = self.bboxes_label_count[i]

                self.label_2d_numpy = (self.labels[sum_count:sum_count +
                                                   count])
                self.label_2d_numpy = np.reshape(self.label_2d_numpy,
                                                 (-1, 1)).tolist()
                self.bb_2d_numpy = (self.bboxes[sum_count *
                                                4:(sum_count + count) * 4])
                self.bb_2d_numpy = np.reshape(self.bb_2d_numpy,
                                              (-1, 4)).tolist()

                self.lis_lab.append(self.label_2d_numpy)
                self.lis.append(self.bb_2d_numpy)

                sum_count = sum_count + count

            # Pad the per-image bbox/label lists to a common (rows, cols) shape
            # so they can be stacked into dense tensors.
            self.target = self.lis
            self.target1 = self.lis_lab
            max_cols = max(
                [len(row) for batch in self.target for row in batch])
            max_rows = max([len(batch) for batch in self.target])
            self.bb_padded = [
                batch + [[0] * (max_cols)] * (max_rows - len(batch))
                for batch in self.target
            ]
            self.bb_padded = torch.FloatTensor([
                row + [0] * (max_cols - len(row)) for batch in self.bb_padded
                for row in batch
            ])
            self.bb_padded = self.bb_padded.view(-1, max_rows, max_cols)

            max_cols1 = max(
                [len(row) for batch in self.target1 for row in batch])
            max_rows1 = max([len(batch) for batch in self.target1])
            self.labels_padded = [
                batch + [[0] * (max_cols1)] * (max_rows1 - len(batch))
                for batch in self.target1
            ]
            self.labels_padded = torch.LongTensor([
                row + [0] * (max_cols1 - len(row))
                for batch in self.labels_padded for row in batch
            ])
            self.labels_padded = self.labels_padded.view(
                -1, max_rows1, max_cols1)

            if self.tensor_dtype == types.FLOAT:
                return torch.from_numpy(
                    self.out), self.bb_padded, self.labels_padded
            elif self.tensor_dtype == types.FLOAT16:
                return torch.from_numpy(self.out.astype(
                    np.float16)), self.bb_padded, self.labels_padded

        else:
            if (self.loader._oneHotEncoding):
                # One-hot encoded labels, reshaped below to
                # (-1, batch_size, num_classes)
                self.loader.GetOneHotEncodedLabels(self.labels)
                self.labels_tensor = torch.from_numpy(self.labels).type(
                    torch.LongTensor)
                self.labels_tensor = self.labels_tensor.view(
                    -1, self.bs, self.loader._numOfClasses)
            else:
                self.loader.getImageLabels(self.labels)
                self.labels_tensor = torch.from_numpy(self.labels).type(
                    torch.LongTensor)

            if self.tensor_dtype == types.FLOAT:
                return torch.from_numpy(self.out), self.labels_tensor
            elif self.tensor_dtype == types.FLOAT16:
                return torch.from_numpy(self.out.astype(
                    np.float16)), self.labels_tensor
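
The detection branch above has to turn per-image box lists of different lengths into one dense tensor, which it does by padding every image up to the largest box count in the batch. Below is a minimal, self-contained sketch of that pad-to-max step on toy data; the variable names and values are illustrative and not part of the loader API.

import torch

# Toy batch: 3 images with 2, 1 and 3 boxes, each box given as 4 coordinates.
boxes_per_image = [
    [[0.1, 0.2, 0.3, 0.4], [0.5, 0.5, 0.2, 0.2]],
    [[0.0, 0.0, 1.0, 1.0]],
    [[0.2, 0.2, 0.1, 0.1], [0.3, 0.3, 0.1, 0.1], [0.6, 0.6, 0.2, 0.2]],
]

max_rows = max(len(img) for img in boxes_per_image)  # largest box count (3)
max_cols = max(len(row) for img in boxes_per_image for row in img)  # 4

# Pad each image's list with all-zero rows up to max_rows, then stack.
padded = [img + [[0] * max_cols] * (max_rows - len(img))
          for img in boxes_per_image]
bb_padded = torch.FloatTensor(padded)
print(bb_padded.shape)  # torch.Size([3, 3, 4])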
Example #4
    def __next__(self):
        if (b.isEmpty(self.loader._handle)):
            timing_info = b.getTimingInfo(self.loader._handle)
            print("Load     time ::", timing_info.load_time)
            print("Decode   time ::", timing_info.decode_time)
            print("Process  time ::", timing_info.process_time)
            print("Transfer time ::", timing_info.transfer_time)
            raise StopIteration

        if self.loader.run() != 0:
            raise StopIteration

        if (types.NCHW == self.tensor_format):
            self.loader.copyToTensorNCHW(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))
        else:
            self.loader.copyToTensorNHWC(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))

        if (self.loader._name == "TFRecordReaderDetection"):
            self.bbox_list = []
            self.label_list = []
            self.num_bboxes_list = []
            # Per-image count of labels/bboxes in the batch
            self.bboxes_label_count = np.zeros(self.bs, dtype="int32")
            self.count_batch = self.loader.GetBoundingBoxCount(
                self.bboxes_label_count)
            self.num_bboxes_list = self.bboxes_label_count.tolist()
            # 1D labels array in a batch
            self.labels = np.zeros(self.count_batch, dtype="int32")
            self.loader.GetBBLabels(self.labels)
            # 1D bboxes array in a batch
            self.bboxes = np.zeros((self.count_batch * 4), dtype="float32")
            self.loader.GetBBCords(self.bboxes)
            # 1D array of image sizes in the batch
            self.img_size = np.zeros((self.bs * 2), dtype="int32")
            self.loader.GetImgSizes(self.img_size)
            count = 0  # number of bboxes in the current image
            sum_count = 0  # running total of bboxes processed so far
            for i in range(self.bs):
                count = self.bboxes_label_count[i]
                self.label_2d_numpy = (self.labels[sum_count:sum_count +
                                                   count])
                self.label_2d_numpy = np.reshape(self.label_2d_numpy,
                                                 (-1, 1)).tolist()
                self.bb_2d_numpy = (self.bboxes[sum_count *
                                                4:(sum_count + count) * 4])
                self.bb_2d_numpy = np.reshape(self.bb_2d_numpy,
                                              (-1, 4)).tolist()
                self.label_list.append(self.label_2d_numpy)
                self.bbox_list.append(self.bb_2d_numpy)
                sum_count = sum_count + count

            self.target = self.bbox_list
            self.target1 = self.label_list
            max_cols = max(
                [len(row) for batch in self.target for row in batch])
            # max_rows = max([len(batch) for batch in self.target])
            max_rows = 100  # pad every image to a fixed maximum of 100 boxes
            bb_padded = [
                batch + [[0] * (max_cols)] * (max_rows - len(batch))
                for batch in self.target
            ]
            bb_padded_1 = [
                row + [0] * (max_cols - len(row)) for batch in bb_padded
                for row in batch
            ]
            arr = np.asarray(bb_padded_1)
            self.res = np.reshape(arr, (-1, max_rows, max_cols))
            max_cols = max(
                [len(row) for batch in self.target1 for row in batch])
            # max_rows = max([len(batch) for batch in self.target1])
            max_rows = 100  # keep labels padded to the same fixed 100 rows
            lab_padded = [
                batch + [[0] * (max_cols)] * (max_rows - len(batch))
                for batch in self.target1
            ]
            lab_padded_1 = [
                row + [0] * (max_cols - len(row)) for batch in lab_padded
                for row in batch
            ]
            labarr = np.asarray(lab_padded_1)
            self.l = np.reshape(labarr, (-1, max_rows, max_cols))
            self.num_bboxes_arr = np.array(self.num_bboxes_list)

            if self.tensor_dtype == types.FLOAT:
                return self.out.astype(
                    np.float32), self.res, self.l, self.num_bboxes_arr
            elif self.tensor_dtype == types.FLOAT16:
                return self.out.astype(
                    np.float16), self.res, self.l, self.num_bboxes_arr
        elif (self.loader._name == "TFRecordReaderClassification"):
            self.labels = np.zeros((self.bs), dtype="int32")
            self.loader.getImageLabels(self.labels)

            if self.tensor_dtype == types.FLOAT:
                return self.out.astype(np.float32), self.labels
            elif self.tensor_dtype == types.TensorDataType.FLOAT16:
                return self.out.astype(np.float16), self.labels
Example #5
    def Timing_Info(self):
        return b.getTimingInfo(self._handle)
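
A small accessor like this makes it easy to inspect the pipeline timings between epochs. A sketch of how it might be called, assuming `pipe` is an instance of the class defining `Timing_Info`; the attribute names on the returned object follow the prints in the other examples.

# Hypothetical usage: `pipe` is assumed to be an instance exposing Timing_Info().
timing_info = pipe.Timing_Info()
print("Load     time ::", timing_info.load_time)
print("Decode   time ::", timing_info.decode_time)
print("Process  time ::", timing_info.process_time)
print("Transfer time ::", timing_info.transfer_time)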
Example #6
    def __next__(self):
        if (b.isEmpty(self.loader._handle)):
            timing_info = b.getTimingInfo(self.loader._handle)
            print("Load     time ::", timing_info.load_time)
            print("Decode   time ::", timing_info.decode_time)
            print("Process  time ::", timing_info.process_time)
            print("Transfer time ::", timing_info.transfer_time)
            raise StopIteration

        if self.loader.run() != 0:
            raise StopIteration

        if (types.NCHW == self.tensor_format):
            self.loader.copyToTensorNCHW(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))
        else:
            self.loader.copyToTensorNHWC(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))

        if ((self.loader._name == "Caffe2ReaderDetection")
                or (self.loader._name == "CaffeReaderDetection")):
            self.lis = []  # Empty list for bboxes
            self.lis_lab = []  # Empty list for labels
            for idx in range(self.bs):
                # Number of bboxes/labels for image `idx`
                bbox_count = self.loader.GetBoundingBoxCount(idx)
                self.labels = np.zeros(bbox_count, dtype="int32")
                self.bboxes = np.zeros(bbox_count * 4, dtype="float32")
                self.loader.GetBBLabels(self.labels, idx)
                self.loader.GetBBCords(self.bboxes, idx)

                self.bb_2d_numpy = np.reshape(self.bboxes, (-1, 4)).tolist()
                self.label_2d_numpy = np.reshape(self.labels, (-1, 1)).tolist()

                self.lis.append(self.bb_2d_numpy)
                self.lis_lab.append(self.label_2d_numpy)

            self.target = self.lis
            self.target1 = self.lis_lab
            max_cols = max(
                [len(row) for batch in self.target for row in batch])
            max_rows = max([len(batch) for batch in self.target])
            self.bb_padded = [
                batch + [[0] * (max_cols)] * (max_rows - len(batch))
                for batch in self.target
            ]
            self.bb_padded = torch.FloatTensor([
                row + [0] * (max_cols - len(row)) for batch in self.bb_padded
                for row in batch
            ])
            self.bb_padded = self.bb_padded.view(-1, max_rows, max_cols)

            max_cols1 = max(
                [len(row) for batch in self.target1 for row in batch])
            max_rows1 = max([len(batch) for batch in self.target1])
            self.labels_padded = [
                batch + [[0] * (max_cols1)] * (max_rows1 - len(batch))
                for batch in self.target1
            ]
            self.labels_padded = torch.LongTensor([
                row + [0] * (max_cols1 - len(row))
                for batch in self.labels_padded for row in batch
            ])
            self.labels_padded = self.labels_padded.view(
                -1, max_rows1, max_cols1)

            if self.tensor_dtype == types.FLOAT:
                return torch.from_numpy(
                    self.out), self.bb_padded, self.labels_padded
            elif self.tensor_dtype == types.FLOAT16:
                return torch.from_numpy(self.out.astype(
                    np.float16)), self.bb_padded, self.labels_padded

        else:
            self.loader.getImageLabels(self.labels)
            self.labels_tensor = torch.from_numpy(self.labels).type(
                torch.LongTensor)

            if self.tensor_dtype == types.FLOAT:
                return torch.from_numpy(self.out), self.labels_tensor
            elif self.tensor_dtype == types.FLOAT16:
                return torch.from_numpy(self.out.astype(
                    np.float16)), self.labels_tensor
Example #7
    def __next__(self):
        if (b.isEmpty(self.loader._handle)):
            timing_info = b.getTimingInfo(self.loader._handle)
            print("Load     time ::", timing_info.load_time)
            print("Decode   time ::", timing_info.decode_time)
            print("Process  time ::", timing_info.process_time)
            print("Transfer time ::", timing_info.transfer_time)
            raise StopIteration

        if self.loader.run() != 0:
            raise StopIteration

        if (types.NCHW == self.tensor_format):
            self.loader.copyToTensorNCHW(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))
        else:
            self.loader.copyToTensorNHWC(self.out, self.multiplier,
                                         self.offset, self.reverse_channels,
                                         int(self.tensor_dtype))

        if (self.loader._name == "TFRecordReaderDetection"):
            self.lis = []  # Empty list for bboxes
            self.lis_lab = []  # Empty list for labels

            for idx in range(self.bs):
                # Number of bboxes/labels for image `idx`
                bbox_count = self.loader.GetBoundingBoxCount(idx)
                self.labels = np.zeros(bbox_count, dtype="int32")
                self.bboxes = np.zeros(bbox_count * 4, dtype="float32")
                self.loader.GetBBLabels(self.labels, idx)
                self.loader.GetBBCords(self.bboxes, idx)

                self.bb_2d_numpy = np.reshape(self.bboxes, (-1, 4)).tolist()
                self.label_2d_numpy = np.reshape(self.labels, (-1, 1)).tolist()

                self.lis.append(self.bb_2d_numpy)
                self.lis_lab.append(self.label_2d_numpy[0])

            self.target = self.lis
            self.target1 = self.lis_lab

            # tf.reset_default_graph()

            max_cols = max(
                [len(row) for batch in self.target for row in batch])
            max_rows = max([len(batch) for batch in self.target])
            bb_padded = [
                batch + [[0] * (max_cols)] * (max_rows - len(batch))
                for batch in self.target
            ]
            bb_padded_1 = [
                row + [0] * (max_cols - len(row)) for batch in bb_padded
                for row in batch
            ]
            # t=tf.convert_to_tensor(bb_padded_1)
            # self.res=tf.reshape(t, [-1,max_rows, max_cols],name="bboxes")
            arr = np.asarray(bb_padded_1)
            self.res = np.reshape(arr, (-1, max_rows, max_cols))

            # self.l = tf.convert_to_tensor(self.target1)
            # self.labels_tensor = tf.reshape(self.l, [self.bs,-1],name="label")
            self.l = np.asarray(self.target1)
            self.l = np.reshape(self.l, (self.bs, -1))

            if self.tensor_dtype == types.FLOAT:
                # return tf.convert_to_tensor(self.out,np.float32), self.res,self.labels_tensor
                return self.out.astype(np.float32), self.res, self.l
            elif self.tensor_dtype == types.FLOAT16:
                # return tf.convert_to_tensor(self.out,np.float16), self.res,self.labels_tensor
                return self.out.astype(np.float16), self.res, self.l
        elif (self.loader._name == "TFRecordReaderClassification"):
            self.labels = np.zeros((self.bs), dtype="int32")

            self.loader.getImageLabels(self.labels)
            # tf.reset_default_graph()
            # self.labels_tensor = tf.convert_to_tensor(self.labels,np.int32)

            if self.tensor_dtype == types.FLOAT:
                # return tf.convert_to_tensor(self.out,np.float32), self.labels_tensor
                return self.out.astype(np.float32), self.labels
            elif self.tensor_dtype == types.TensorDataType.FLOAT16:
                # return tf.convert_to_tensor(self.out,np.float16), self.labels_tensor
                return self.out.astype(np.float16), self.labels