def _encode_item(self, item: datasets.PredictionItem, encode_y=False, threshold=0.5):
    # Encode either the ground truth (encode_y=True) or the prediction of one item
    # into a list of per-token tag records.
    if encode_y:
        v = item.y
    else:
        v = item.prediction
    i = item.item_id()
    tokens = self[i].x
    tags = self.decode(v, len(tokens))
    docId = None
    if not self.byDoc:
        docId = self.sentences[i].doc.num
    res = []
    if docId is not None:
        for idx in range(len(tags)):
            res.append({
                "doc_id": docId,
                "sentence_id": i,
                "token": tokens[idx],
                "tag": tags[idx]
            })
    else:
        for idx in range(len(tags)):
            res.append({
                "sentence_id": i,
                "token": tokens[idx],
                "tag": tags[idx]
            })
    return res
def __getitem__(self, item):
    id = self.ids[item]
    it = self.reader.item(id)
    # Randomly decide whether to produce a "broken" or intact text sample.
    broken = np.random.choice([0.0, 1.0])
    sample = sample_text(it, broken)
    tokens = utils.stoa(sample)
    vs = vectors.query(tokens)
    vslen = len(vs)
    if vslen > 100:
        # Truncate to a fixed sequence length of 100 tokens.
        vs = vs[:100]
    else:
        # Pad with zero vectors (300-dimensional embeddings) up to 100 tokens.
        padding = np.zeros((100 - vslen, 300))
        vs = np.concatenate((vs, padding), 0)
    return PredictionItem(id, vs, np.array([broken]))
def applyThreshold(dsItem: PredictionItem) -> PredictionItem:
    # `threshold` is a free variable here, expected to come from the enclosing scope.
    prediction = dsItem.prediction
    if isinstance(prediction, list):
        thresholded = [x > threshold for x in prediction]
    else:
        thresholded = prediction > threshold
    result = PredictionItem(dsItem.id, dsItem.x, dsItem.y, thresholded)
    return result
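# Since `threshold` is a free variable in applyThreshold above, it is presumably captured
# from an enclosing scope. A self-contained sketch of that closure pattern follows; the
# factory name make_apply_threshold and the example threshold of 0.5 are illustrative
# assumptions, and PredictionItem is assumed to be importable as in the snippets above.
def make_apply_threshold(threshold: float):
    def applyThreshold(dsItem: PredictionItem) -> PredictionItem:
        prediction = dsItem.prediction
        if isinstance(prediction, list):
            thresholded = [x > threshold for x in prediction]
        else:
            thresholded = prediction > threshold
        return PredictionItem(dsItem.id, dsItem.x, dsItem.y, thresholded)
    return applyThreshold

# apply_05 = make_apply_threshold(0.5)
# binary_item = apply_05(some_predicted_item)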
def __getitem__(self, item) -> datasets.PredictionItem:
    inputs = [i[item] for i in self.inputs]
    outputs = [i[item] for i in self.outputs]
    # Unwrap single-element lists so simple datasets expose plain arrays.
    if len(inputs) == 1:
        inputs = inputs[0]
    if len(outputs) == 1:
        outputs = outputs[0]
    return PredictionItem(item, inputs, outputs)
def augment_item(self, i):
    if self.augs is not None:
        # Run the configured imgaug pipeline on the image and its segmentation map.
        b = imgaug.Batch(
            images=[i.x],
            segmentation_maps=[imgaug.SegmentationMapOnImage(i.y, shape=i.y.shape)])
        for v in self.augs.augment_batches([b]):
            bsa: imgaug.Batch = v
            break
        xa = bsa.images_aug[0]
        xa = cv.resize(xa, (i.x.shape[1], i.x.shape[0]))
        ya = bsa.segmentation_maps_aug[0].arr
        ya = cv.resize(ya, (i.x.shape[1], i.x.shape[0]))
        r = self.next(xa, ya)
        return PredictionItem(i.id, r, ya > 0.5)
    else:
        r = self.next(i.x, i.y)
        return PredictionItem(i.id, r, i.y)
def __getitem__(self, item):
    child_item = self.child[item]
    new_size_in = self.get_new_size((child_item.x.shape[0], child_item.x.shape[1]))
    new_size_out = self.get_new_size((child_item.y.shape[0], child_item.y.shape[1]))
    rnd = 0.5
    if self.strategy == "random":
        rnd = random.random()
    return PredictionItem(
        child_item.id,
        self.get_new_image(new_size_in, child_item.x, rnd),
        self.get_new_image(new_size_out, child_item.y, rnd))
def __getitem__(self, item_):
    original_item = self.child[item_]
    input = original_item.x
    mask = self.rescale_mask_to_input(input, original_item.y)
    # Paste `self.times` drop items (and their masks) onto the input in place.
    for time in range(self.times):
        drop_item, drop_mask = self.get_drop_item()
        rescaled_drop_item, rescaled_drop_mask = self.rescale_drop_item(
            input, drop_item, drop_mask, self.drop_size)
        self.apply_drop_item(input, mask, rescaled_drop_item, rescaled_drop_mask,
                             original_item.id + "_" + str(time))
    # np.bool has been removed from NumPy; use the builtin bool dtype instead.
    return PredictionItem(original_item.id, input, mask.astype(bool))
def __getitem__(self, item):
    in_ext = self.exts[item]
    image = imageio.imread(os.path.join(self.path, self.ids[item] + "." + in_ext))
    # Build an all-zero, single-channel float mask with the image's spatial shape.
    out = np.zeros(image.shape)
    if len(out.shape) < 3:
        out = np.expand_dims(out, axis=2)
    out = out.astype(np.float32)
    out = np.sum(out, axis=2)
    out = np.expand_dims(out, axis=2)
    # out = out / np.max(out)
    return PredictionItem(self.ids[item], image, out)
def __getitem__(self, item) -> PredictionItem:
    imageId = self.imageIds[item]
    image = self.get_value(imageId)
    gt = self.get_mask(imageId, image.shape)
    labels = []
    masks = []
    bboxes = []
    for m in gt:
        mask = m[0]
        # Skip empty instance masks.
        if np.max(mask) == 0:
            continue
        label = m[1]
        labels.append(label)
        masks.append(mask > 0)
        bboxes.append(getBB(mask, True))
    # Labels are shifted by one (presumably keeping 0 for background).
    labelsArr = np.array(labels, dtype=np.int64) + 1
    bboxesArr = np.array(bboxes, dtype=np.float32)
    masksArr = np.array(masks, dtype=np.int16)
    y = (labelsArr, bboxesArr, masksArr)
    return PredictionItem(imageId, image, y)
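# `getBB` is not defined in these snippets. Below is a minimal, hypothetical sketch of
# such a helper, assuming it returns an (x_min, y_min, x_max, y_max) box around the
# non-zero pixels of a 2-D mask; the meaning of the boolean flag in getBB(mask, True)
# is not documented here, so this sketch simply accepts and ignores it.
import numpy as np

def get_bb_sketch(mask: np.ndarray, _flag: bool = True):
    ys, xs = np.nonzero(mask)
    if len(xs) == 0:
        # Empty mask: return a degenerate box.
        return (0.0, 0.0, 0.0, 0.0)
    return (float(xs.min()), float(ys.min()), float(xs.max()), float(ys.max()))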
def __getitem__(self, item):
    # Each source image is split into parts * parts tiles; map the flat index back to
    # the source image (pos) and the tile offset within it (off).
    pos = item // (self.parts * self.parts)
    off = item % (self.parts * self.parts)
    if pos == self.lastPos:
        # Reuse the cached image when consecutive tiles come from the same source.
        dm = self.lastImage
    else:
        dm = self.ds[pos]
        self.lastPos = pos
        self.lastImage = dm
    row = off // self.parts
    col = off % self.parts
    x, y = dm.x, dm.y
    x1, y1 = self.crop(row, col, x), self.crop(row, col, y)
    vs = PredictionItem(dm.id, x1, y1)
    if hasattr(dm, "prediction") and dm.prediction is not None:
        vs.prediction = self.crop(row, col, dm.prediction)
    vs.imageId = dm.id
    vs.row = row
    vs.col = col
    return vs
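# A hypothetical companion __len__ for the tiling wrapper above, not taken from the
# original source: with every source item split into parts * parts tiles, the wrapped
# dataset would expose len(self.ds) * self.parts * self.parts items, which is the
# indexing contract the __getitem__ above assumes.
def __len__(self):
    return len(self.ds) * self.parts * self.parts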
def __getitem__(self, item):
    ds_item = self.datasource[item]
    return PredictionItem(ds_item.id, ds_item.inputs[0], ds_item.outputs[0])
def __getitem__(self, item) -> PredictionItem:
    imageId = self.imageIds[item]
    image = self.get_value(imageId)
    prediction = self.get_target(item)
    return PredictionItem(self._id(item), image, prediction)
def __getitem__(self, item):
    image, mask = self.getImageAndMask()
    return PredictionItem(str(item), image, mask)
def applyThreshold(dsItem: PredictionItem) -> PredictionItem:
    thresholded = dsItem.prediction > threshold
    result = PredictionItem(dsItem.id, dsItem.x, dsItem.y, thresholded)
    return result
def __getitem__(self, item) -> PredictionItem:
    imageId = self.imageIds[item]
    image = self.get_value(imageId)
    prediction = self.get_mask(imageId, image.shape)
    return PredictionItem(imageId, image, prediction)
def __getitem__(self, item):
    child_item = self.child[item]
    return PredictionItem(child_item.id, self.get_new_image(child_item.x), child_item.y)
def __getitem__(self, item) -> datasets.PredictionItem:
    return PredictionItem(self.ids[item], self.feature[item], np.array([self._target[item]]))
def binarize_target(inp: PredictionItem):
    # Convert a soft target into a boolean mask in place.
    inp.y = inp.y > 0.5
    return inp
def __getitem__(self, item) -> PredictionItem:
    return PredictionItem(item, self.x[item], self.y[item])
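# PredictionItem is referenced throughout these snippets but not defined here. Below is a
# minimal sketch of such a container, assuming it holds an id, the input x, the ground
# truth y, and an optional prediction (the snippets both pass a prediction as a fourth
# constructor argument and assign item.prediction after construction). The class name
# PredictionItemSketch and any members beyond those used above are assumptions.
class PredictionItemSketch:
    def __init__(self, id, x, y, prediction=None):
        self.id = id
        self.x = x
        self.y = y
        self.prediction = prediction

    def item_id(self):
        # _encode_item above calls item.item_id(); assume it simply returns the id.
        return self.id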