def __init__(self,
                 run_name,
                 filename,
                 window,
                 stride,
                 arguments,
                 size=-1,
                 start=0,
                 test=False):

        # Create the two files using the preprocessing pipeline
        in_file, out_file = preprocess(run_name,
                                       filename,
                                       arguments.split(','),
                                       test=test)

        # Load the input
        waveform_in, _ = torchaudio.load(in_file)
        self.x = waveform_in[0]
        # Split it using the sliding window
        self.x = sliding_window(self.x, window, stride)
        # Only keep necessary samples
        self.x = self.x[start:start + size, None, :]

        # Load the output
        waveform_out, _ = torchaudio.load(out_file)
        self.y = waveform_out[0]
        # Split it using the sliding window
        self.y = sliding_window(self.y, window, stride)
        # Only keep necessary samples
        self.y = self.y[start:start + size, None, :]
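The sliding_window helper is imported from elsewhere in every example on this page. For the 1-D tensor usage above, where the result is sliced like a 2-D tensor of windows, a minimal sketch using torch.Tensor.unfold might look like the following; the name and signature are assumptions inferred from the call sites, not the project's actual code.

import torch

def sliding_window(signal, window, stride):
    # view a 1-D tensor as overlapping windows; shape (num_windows, window)
    return signal.unfold(0, window, stride)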
Example #2
 def test_sliding_window(self):
     l = range(4)
     # take the first three windows of size 2 with stride 1
     windows = [list(w) for w in islice(sliding_window(l, 2, 1), 3)]
     self.assertEqual(windows, [[0, 1], [1, 2], [2, 3]])
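Other snippets consume sliding_window lazily (note the islice above). A generator sketch over arbitrary iterables, again an assumption rather than the actual helper:

def sliding_window(iterable, size, stride=1):
    # yield successive tuples of `size` items, advancing by `stride`
    items = list(iterable)
    for i in range(0, len(items) - size + 1, stride):
        yield tuple(items[i:i + size])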
Example #3
def getLaneCurve(img):
    imgCopy = img.copy()

    h, w, c = img.shape
    points = utils.valTrackbars()

    imgThres = utils.thresholding(img)
    # imgWarp = utils.warpImg(img, points, w, h)
    imgWarpThres = utils.warpImg(imgThres, points, w, h)
    imgWarpPoints = utils.drawPoints(imgCopy, points)
    imgLane = img.copy()

    # imgCanny = utils.canny(imgWarp)
    basePoint, imgHist = utils.getHistogram(imgWarpThres, display=True)
    imgSliding, curves, lanes, ploty = utils.sliding_window(imgWarpThres,
                                                            draw_windows=True)

    curverad = utils.get_curve(imgLane, curves[0], curves[1])
    lane_curve = np.mean([curverad[0], curverad[1]])
    imgLane = utils.drawLanes(img,
                              curves[0],
                              curves[1],
                              frameWidth,
                              frameHeight,
                              src=points)
    # print(round((lane_curve-84.41)/7.5,2))

    imgStack = utils.stackImages(
        0.6,
        ([img, imgWarpThres, imgWarpPoints], [imgHist, imgSliding, imgLane]))
    cv2.imshow('Stack', imgStack)

    return cv2.resize(imgStack, (2 * frameHeight, frameHeight))
Example #4
def gen_hard_features(file,
                      clf,
                      downscale,
                      min_hw=(128, 64),
                      step_size=(10, 10),
                      orientations=9,
                      pixels_per_cell=(8, 8),
                      cells_per_block=(2, 2),
                      normalize='L2-Hys'):
    img = imread(file, as_gray=True)

    feat_list = []
    for im_scaled in pyramid_gaussian(img,
                                      downscale=downscale,
                                      multichannel=False):
        if im_scaled.shape[0] < min_hw[0] or im_scaled.shape[1] < min_hw[1]:
            break
        for (x, y, im_window) in sliding_window(im_scaled, min_hw, step_size):
            if im_window.shape[0] != min_hw[0] or im_window.shape[1] != min_hw[
                    1]:
                continue

            features = hog(im_window, orientations, pixels_per_cell,
                           cells_per_block, normalize)
            features = np.reshape(features, (1, -1))
            pred = clf.predict(features)
            # dist = clf.decision_function(features)

            if pred == 1:
                feat_list.append(features)

    return feat_list
Example #5
def print_score():
    df = get_pure_cases_df()
    X, y = sliding_window(df, 28, 14)
    assert X.shape[0] == y.shape[0]
    X = X.numpy()
    y = y.numpy()
    mae = []
    rmse = []
    mape = []
    for i in range(X.shape[0]):
        mae.append(
            mean_absolute_error(y[i], np.broadcast_to(X[i][-1],
                                                      (y.shape[1], 1))))
        rmse.append(
            mean_squared_error(y[i],
                               np.broadcast_to(X[i][-1], (y.shape[1], 1)),
                               squared=False))
        mape.append(
            mean_absolute_percentage_error(
                y[i], np.broadcast_to(X[i][-1], (y.shape[1], 1))))

    mae = np.array(mae)
    rmse = np.array(rmse)
    mape = np.array(mape)
    print(f"RMSE: {rmse.mean()}, MAE: {mae.mean()}, MAPE: {mape.mean()}")
Example #6
def find_clumps_faster(genome: str, k: int, L: int, t: int) -> List[str]:
    """Find pattens forming (L, t)-clump
    Computational complexity: O(L * len(genome))

    Clump (L, t) means that k-mers appear at least t times
    in a length-L window of a genome.

    >>> find_clumps_faster("CGGACTCGACAGATGTGAAGAACGACAATGTGAAGACTCGACACGACAGAGTGAAGAGAAGAGGAAACATTGTAA", 5, 50, 4)
    ['CGACA', 'GAAGA']
    """
    assert L <= len(genome)
    interval = genome[:L]
    kmer_counts = collections.Counter("".join(xs)
                                      for xs in sliding_window(interval, k))
    res = {w for w, cnt in kmer_counts.items() if cnt >= t}

    offset = 0
    while offset + L < len(genome):  # the last valid window starts at len(genome) - L
        w_out = genome[offset:offset + k]
        w_in = genome[offset + L - k + 1:offset + L + 1]
        kmer_counts[w_out] -= 1
        kmer_counts[w_in] += 1
        if kmer_counts[w_in] >= t:
            res.add(w_in)
        offset += 1

    return sorted(res)
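For contrast with the rolling update above, the naive O(L * len(genome)) baseline recounts every window from scratch. A hypothetical sketch, reusing the module's collections import:

def find_clumps_naive(genome: str, k: int, L: int, t: int) -> List[str]:
    # re-count the k-mers of every length-L window independently
    res = set()
    for i in range(len(genome) - L + 1):
        window = genome[i:i + L]
        counts = collections.Counter(window[j:j + k] for j in range(L - k + 1))
        res.update(w for w, cnt in counts.items() if cnt >= t)
    return sorted(res)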
Example #7
def main():

    # Create TIF reader
    reader = TIFReader(args.file, args.level)

    # Load image and create tissue mask
    print("Loading image...", end='\r')
    wsi = reader.load_image()
    print("Loading image...Done.")

    # Get the sliding window and padding params
    windows, padding = utils.sliding_window(
        wsi.shape, (args.window_shape, args.window_shape))

    # Pad the WSI
    wsi_padded = np.pad(wsi, ((0, padding['y']), (0, padding['x']), (0, 0)),
                        mode='constant',
                        constant_values=255)

    print("Segmenting tissue...", end='\r')
    tissue_mask = reader.segment_tissue(wsi_padded)
    print("Segmenting tissue...Done.")

    # Prediction
    tumor_map = predict_tumor_regions(wsi_padded, tissue_mask, windows)

    # Save
    np.save('figures/{}.npy'.format(args.file.split('.')[0]), tumor_map)
    cmapper = cm.get_cmap('plasma')
    colorized = Image.fromarray(
        np.uint8(cmapper(np.clip(tumor_map, 0, 1)) * 255))
    colorized.save('figures/{}.tif'.format(args.file.split('.')[0]))
Example #8
    def detect(self, image):
        clone = image.copy()

        image = rgb2gray(image)

        # list to store the detections
        detections = []
        # current scale of the image
        downscale_power = 0

        # downscale the image and iterate
        for im_scaled in pyramid(image,
                                 downscale=self.downscale,
                                 min_size=self.window_size):
            # if the width or height of the scaled image is less than
            # the width or height of the window, then end the iterations
            if im_scaled.shape[0] < self.window_size[1] or im_scaled.shape[
                    1] < self.window_size[0]:
                break
            for (x, y, im_window) in sliding_window(im_scaled,
                                                    self.window_step_size,
                                                    self.window_size):
                if im_window.shape[0] != self.window_size[
                        1] or im_window.shape[1] != self.window_size[0]:
                    continue

                # calculate the HOG features
                feature_vector = hog(im_window)
                X = np.array([feature_vector])
                prediction = self.clf.predict(X)
                if prediction == 1:
                    x1 = int(x * (self.downscale**downscale_power))
                    y1 = int(y * (self.downscale**downscale_power))
                    detections.append(
                        (x1, y1, x1 + int(self.window_size[0] *
                                          (self.downscale**downscale_power)),
                         y1 + int(self.window_size[1] *
                                  (self.downscale**downscale_power))))

            # Move to the next scale
            downscale_power += 1

        # Display the results before performing NMS
        clone_before_nms = clone.copy()
        for (x1, y1, x2, y2) in detections:
            # Draw the detections
            cv2.rectangle(clone_before_nms, (x1, y1), (x2, y2), (0, 255, 0),
                          thickness=2)

        # Perform Non Maxima Suppression
        detections = non_max_suppression(np.array(detections), self.threshold)

        clone_after_nms = clone
        # Display the results after performing NMS
        for (x1, y1, x2, y2) in detections:
            # Draw the detections
            cv2.rectangle(clone_after_nms, (x1, y1), (x2, y2), (0, 255, 0),
                          thickness=2)

        return clone_before_nms, clone_after_nms
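The pyramid helper used here is not shown. A minimal sketch consistent with the downscale/min_size call sites, assuming min_size is (width, height) like window_size:

import cv2

def pyramid(image, downscale=1.25, min_size=(64, 128)):
    # yield the image at progressively smaller scales
    yield image
    while True:
        h = int(image.shape[0] / downscale)
        w = int(image.shape[1] / downscale)
        if h < min_size[1] or w < min_size[0]:
            break
        image = cv2.resize(image, (w, h))
        yield image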
Example #9
def most_probable_kmer(text: str, k: int, profile: Profile) -> str:
    """Find the most likelihood kmer given probability distribution profile such that

    argmax_{kmer} P(kmer | profile)

    >>> text = "ACCTGTTTATTGCCTAAGTTCCGAACAAACCCAATATAGCCCGAGGGCCT"
    >>> profile = [[0.2, 0.2, 0.3, 0.2, 0.3], [0.4, 0.3, 0.1, 0.5, 0.1], [0.3, 0.3, 0.5, 0.2, 0.4], [0.1, 0.2, 0.1, 0.1, 0.2]]
    >>> most_probable_kmer(text, 5, profile)
    'CCGAG'
    """
    nuc2idx = dict(zip("ACGT", range(4)))
    assert len(profile) == 4
    assert len(profile[0]) == k

    max_sofar = float("-inf")
    ans = None
    for kmer in sliding_window(text, k):
        indices = [nuc2idx[c] for c in kmer]
        # probability of this kmer under the profile: product over positions
        probs = (ps[i] for i, ps in zip(indices, zip(*profile)))
        prob = functools.reduce(operator.mul, probs)
        if prob > max_sofar:
            max_sofar = prob
            ans = kmer

    if ans is None:
        raise ValueError("Something is wrong!")
    return "".join(ans)
Example #10
def find_faces(model, imgs, threshold=None, step=WINDOW_STEP):
    '''Slide a window and find predictions that are over a threshold (or all if threshold is None)'''
    if threshold is None:
        threshold = -np.inf

    rects_dict = {}
    scores_dict = {}
    for img_id, img in tqdm(imgs.items(), desc='Predicting faces'):
        rects = []
        windows = []
        for scale in SCALES:
            img_scaled = rescale(img, scale, multichannel=False)
            for rect, window in sliding_window(img_scaled, step=step):
                rect.scale(1 / scale)
                rects.append(rect)
                windows.append(window)

        rects = np.array(rects)
        features = []
        for window in tqdm(windows, desc='Computing features'):
            features.append(features_extract_fn(window))
        try:
            pred_proba = model.predict_proba(features)[:, 1]
        except AttributeError:
            pred_proba = model.decision_function(features)
           
        mask = pred_proba >= threshold

        rects_dict.update({img_id: rects[mask]})
        scores_dict.update({img_id: pred_proba[mask]})

    return remove_duplicate_rects(rects_dict, scores_dict)
Example #11
    def detect(self, img_name, image, stepSize=None, windowSize=None, scale=None, minSize=None):
        windowSize = windowSize if windowSize is not None else self.windowSize
        stepSize = stepSize if stepSize is not None else self.stepSize
        scale = scale if scale is not None else self.scale
        minSize = minSize if minSize is not None else self.minSize

        window_num = 0
        polygons_metal = list()
        polygons_thatch = list()
        rects_metal = list()
        rects_thatch = list()

        #loop through pyramid

        for level, resized in enumerate(utils.pyramid(image, scale=scale, minSize=minSize)):
            for (x, y, window) in utils.sliding_window(resized, stepSize=stepSize, windowSize=windowSize):
                
                #self.debug_scaling(image, img_name, resized, x, y, level):

                # if the window does not meet our desired window size, ignore it
                if window.shape[0] != windowSize[0] or window.shape[1] != windowSize[1]:
                    continue
                window_num += 1

                #save the correctly translated coordinates of this window
                polygon, rectangle = self.get_translated_coords(x, y, level, scale, windowSize)
                polygons_metal.append(polygon)
                rects_metal.append(rectangle)
                
                polygons_thatch.append(polygon)
                rects_thatch.append(rectangle)
        self.total_window_num += window_num
        rects = {'thatch': rects_thatch, 'metal': rects_metal}
        polygons = {'thatch': polygons_thatch, 'metal': polygons_metal}
        return polygons, rects
Example #12
def get_all_nonfaces(train_imgs, labels):
    '''Yield all nonface windows from a sliding window of size BOX_SIZE'''
    for id, img in tqdm(train_imgs.items(), desc='Extracting all nonfaces'):
        for scale in SCALES:
            img_scaled = rescale(img, scale, multichannel=False)
            true_rects = [r.scale(scale) for r in labels[id]]
            for rect, window in sliding_window(img_scaled):
                rect.scale(1 / scale)
                if not rect.overlap(*true_rects, threshold=0.4):
                    yield window
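Examples #10 and #12 lean on a Rect type with an in-place scale() that returns self and an IoU-style overlap(). The class below is a hypothetical sketch inferred from those call sites, not the project's implementation:

class Rect:
    def __init__(self, x1, y1, x2, y2):
        self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2

    def scale(self, factor):
        # scale in place and return self so comprehensions keep the object
        self.x1, self.y1 = self.x1 * factor, self.y1 * factor
        self.x2, self.y2 = self.x2 * factor, self.y2 * factor
        return self

    def area(self):
        return max(0, self.x2 - self.x1) * max(0, self.y2 - self.y1)

    def overlap(self, *others, threshold=0.4):
        # True if the IoU with any other rect reaches the threshold
        for o in others:
            ix1, iy1 = max(self.x1, o.x1), max(self.y1, o.y1)
            ix2, iy2 = min(self.x2, o.x2), min(self.y2, o.y2)
            inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
            union = self.area() + o.area() - inter
            if union > 0 and inter / union >= threshold:
                return True
        return False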
Example #13
def frequent_words(text: str, k: int) -> Patterns:
    """
    >>> frequent_words("ACTGACTCCCACCCC", 3)
    {'CCC': 3}

    """
    counter = Counter("".join(chunk) for chunk in sliding_window(text, k))
    max_count = max(counter.values())
    # use a distinct name to avoid shadowing the parameter k
    return {kmer: cnt for kmer, cnt in counter.items() if cnt == max_count}
Example #14
def approx_pattern_matching(pattern: str, genome: str, d: int) -> List[int]:
    """Find locations of k-mer (pattern') in the genome with
    hamming_distance(pattern, pattern') <= d

    >>> approx_pattern_matching("ATTCTGGA", "CGCCCGAATCCAGAACGCATTCCCATATTTCGGGACCACTGGCCTCCACGGTACGGACGTCAATCAAAT", 3)
    [6, 7, 26, 27]
    """
    intervals = sliding_window(genome, len(pattern))
    return [
        i for i, interval in enumerate(intervals)
        if hamming_distance(pattern, interval) <= d
    ]
Example #15
def checkImage(image_file):
    image = cv2.imread(image_file)
    smoke_count = 0
    window_count = 0
    for (x, y, window) in sliding_window(
            image, stepSize, (winW, winH)):
        window_count = window_count + 1
        prediction = classifier2.predict_proba(
            process_image(window, feature.getFeature))[0]
        if prediction[1] >= threshold:
            smoke_count = smoke_count + 1
    logger.info("%d smoke in %d windows of %s." % (
        smoke_count, window_count, os.path.basename(image_file)))
Example #16
def test(net, img, hyperparams):
    """
    Test a model on a specific image
    """
    net.eval()
    patch_size = hyperparams['patch_size']
    center_pixel = hyperparams['center_pixel']
    batch_size, device = hyperparams['batch_size'], hyperparams['device']
    n_classes = hyperparams['n_classes']

    kwargs = {
        'step': hyperparams['test_stride'],
        'window_size': (patch_size, patch_size)
    }
    probs = np.zeros(img.shape[:2] + (n_classes, ))

    iterations = count_sliding_window(img, **kwargs) // batch_size
    for batch in tqdm(grouper(batch_size, sliding_window(img, **kwargs)),
                      total=(iterations),
                      desc="Inference on the image"):
        with torch.no_grad():
            if patch_size == 1:
                data = [b[0][0, 0] for b in batch]
                data = np.copy(data)
                data = torch.from_numpy(data)
            else:
                data = [b[0] for b in batch]
                data = np.copy(data)
                data = data.transpose(0, 3, 1, 2)
                data = torch.from_numpy(data)
                # data = data.unsqueeze(1)  # enable for 3D convolutions

            indices = [b[1:] for b in batch]
            data = data.to(device)
            output = net(data)
            if isinstance(output, tuple):
                output = output[0]
            output = output.to('cpu')  # change 'cpu' to 'cuda' to keep outputs on the GPU

            if patch_size == 1 or center_pixel:
                output = output.numpy()
            else:
                output = np.transpose(output.numpy(), (0, 2, 3, 1))
            for (x, y, w, h), out in zip(indices, output):
                if center_pixel:
                    # probs[x, y] += out
                    probs[x + w // 2, y + h // 2] += out
                    # probs[x:x + w, y:y + h] += out
                else:
                    probs[x:x + w, y:y + h] += out
    return probs
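Examples #16-#18 consume three helpers: a sliding_window that yields (patch, x, y, w, h) tuples, count_sliding_window, and grouper. Minimal sketches consistent with how the batches are unpacked, hedged as assumptions about the actual utils module:

import itertools

def sliding_window(image, step=10, window_size=(20, 20)):
    # yield (patch, x, y, w, h) tuples over the image
    w, h = window_size
    for x in range(0, image.shape[0] - w + 1, step):
        for y in range(0, image.shape[1] - h + 1, step):
            yield image[x:x + w, y:y + h], x, y, w, h

def count_sliding_window(image, step=10, window_size=(20, 20)):
    # number of patches the generator above will yield
    w, h = window_size
    return ((image.shape[0] - w) // step + 1) * ((image.shape[1] - h) // step + 1)

def grouper(n, iterable):
    # collect an iterable into chunks of at most n items
    it = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(it, n))
        if not chunk:
            return
        yield chunk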
Example #17
def test(net, img, hyperparams):
    """
    Test a model on a specific image
    """
    net.eval()
    patch_size = hyperparams["patch_size"]
    center_pixel = hyperparams["center_pixel"]
    batch_size, device = hyperparams["batch_size"], hyperparams["device"]
    n_classes = hyperparams["n_classes"]

    kwargs = {
        "step": hyperparams["test_stride"],
        "window_size": (patch_size, patch_size),
    }
    probs = np.zeros(img.shape[:2] + (n_classes,))

    iterations = count_sliding_window(img, **kwargs) // batch_size
    for batch in tqdm(
        grouper(batch_size, sliding_window(img, **kwargs)),
        total=(iterations),
        desc="Inference on the image",
    ):
        with torch.no_grad():
            if patch_size == 1:
                data = [b[0][0, 0] for b in batch]
                data = np.copy(data)
                data = torch.from_numpy(data)
            else:
                data = [b[0] for b in batch]
                data = np.copy(data)
                data = data.transpose(0, 3, 1, 2)
                data = torch.from_numpy(data)
                data = data.unsqueeze(1)

            indices = [b[1:] for b in batch]
            data = data.to(device)
            output = net(data)
            if isinstance(output, tuple):
                output = output[0]
            output = output.to("cpu")

            if patch_size == 1 or center_pixel:
                output = output.numpy()
            else:
                output = np.transpose(output.numpy(), (0, 2, 3, 1))
            for (x, y, w, h), out in zip(indices, output):
                if center_pixel:
                    probs[x + w // 2, y + h // 2] += out
                else:
                    probs[x : x + w, y : y + h] += out
    return probs
Example #18
def test(net, img, args):
    """
    Test a model on a specific image
    """
    net.eval()
    patch_size = args.patch_size
    center_pixel = args.center_pixel
    batch_size, device = args.batch_size, torch.device(args.device)
    n_classes = args.n_classes

    kwargs = {
        'step': args.test_stride,
        'window_size': (patch_size, patch_size)
    }
    probs = np.zeros(img.shape[:2] + (n_classes, ))

    iterations = utils.count_sliding_window(img, **kwargs) // batch_size
    for batch in tqdm(utils.grouper(batch_size,
                                    utils.sliding_window(img, **kwargs)),
                      total=(iterations),
                      desc="Inference on the image"):
        with torch.no_grad():
            if patch_size == 1:
                data = [b[0][0, 0] for b in batch]
                data = np.copy(data)
                data = torch.from_numpy(data)
            else:
                data = [b[0] for b in batch]
                data = np.copy(data)
                data = data.transpose(0, 3, 1, 2)
                data = torch.from_numpy(data)
                data = data.unsqueeze(1)

            indices = [b[1:] for b in batch]
            data = data.to(device)
            output = net(data)
            if isinstance(output, tuple):
                output = output[0]
            output = output.to('cpu')

            if patch_size == 1 or center_pixel:
                output = output.numpy()
            else:
                output = np.transpose(output.numpy(), (0, 2, 3, 1))
            for (x, y, w, h), out in zip(indices, output):
                if center_pixel:
                    probs[x + w // 2, y + h // 2] += out
                else:
                    probs[x:x + w, y:y + h] += out
    return probs
Example #19
def run_pipe(config, data, model_file, output_file, output_format):

    print("pipe mode")
    model = keras.models.load_model(model_file)
    model.summary()

    data.load_indexed_features(model_file + '_ft.pickle')
    data.load_embeddings()
    reader = data.get_reader()
    writer = get_writer(output_file, output_format)

    labels_index_inverted = {}
    for key, value in data.labels_index.items():
        labels_index_inverted[value] = key

    token_features_generator = data.get_token_feature_generator()

    while True:
        document = reader.nextDocument()
        if document is None:
            break
        token_features_generator.generateFeatures(document)

        for sentence in document.getSentences():

            sentence_seq = data.process_sentence(sentence)
            sequences = []
            for w in sliding_window(sentence_seq, data.window_size,
                                    data.window_size):
                sequences.append(w)

            x = []
            for seq in sequences:
                x.append(data.get_vector_sequence(seq))

            y_pred = model.predict(branched_bi_gru_lstm.get_x(np.asarray(x)))
            y_pred = map(lambda v: labels_index_inverted[v.argmax()], y_pred)

            for idx, a in enumerate(y_pred):
                if a == data.irrelevant_class:
                    continue
                annotation = create_annotation(idx, idx, a, sentence)
                sentence.addChunk(annotation)

        writer.writeDocument(document)

    reader.close()
    writer.close()
Example #20
def RPN_trainloader():
	# Keep only one in ten windows with no intersection
	neg_count = 0
	for i, image_loc in enumerate(image_list):
		print("NEW IMAGE")
		name = image_loc[:image_loc.index(".")]

		cur_image = np.asarray(Image.open(os.path.join(IMAGE_DIR, image_loc)))
		bboxes = json.loads(open(os.path.join(BBOX_DIR, name + ".json"), "r").read())

		rectArr = list(map(lambda x: utils.Rectangle(int(min(x[0], x[1])),
													 int(min(x[2], x[3])),
													 int(max(x[0], x[1])),
													 int(max(x[2], x[3]))), bboxes))

		image = cur_image
		sizes = [(image.shape[1] // 15, image.shape[0] // 20), (image.shape[1] // 30, image.shape[0] // 30),
				 (image.shape[1] // 20, image.shape[0] // 15), (image.shape[1] // 30, image.shape[0] // 40)]


		for width, height in sizes:
			for window in utils.sliding_window(image, (10, 10), windowSize=(width, height)):
				curRect = utils.Rectangle(window[0], window[1], window[0] + width, window[1] + height)
				area = calculateIntersectArea(curRect, rectArr)
				ratio = area / curRect.area
				label = 0
				if ratio > .3:
					label = 1
				if ratio > .7:
					label = 2

				if label == 0:
					neg_count += 1
					if neg_count % 10 != 0:
						continue
				# print(area, curRect.area)
				# if area > 500:
				#     plt.imshow(window[2])
				#     plt.show()
				img = image_loader(Image.fromarray(window[2]))



				# pilTrans = transforms.ToPILImage()
				# pilImg = pilTrans(img.cpu())
				# plt.imshow(np.asarray(pilImg))
				# plt.show()
				yield (img, label)
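calculateIntersectArea is not defined in this snippet. Assuming utils.Rectangle exposes x1/y1/x2/y2 corners, a hypothetical sketch:

def calculateIntersectArea(rect, others):
    # total area of overlap between `rect` and a list of rectangles
    total = 0
    for o in others:
        w = min(rect.x2, o.x2) - max(rect.x1, o.x1)
        h = min(rect.y2, o.y2) - max(rect.y1, o.y1)
        if w > 0 and h > 0:
            total += w * h
    return total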
Example #21
    def process_document(self, document, token_features_generator=None, sequences=None):

        if not token_features_generator:
            token_features_generator = self.get_token_feature_generator()

        if sequences is None:
            sequences = []

        token_features_generator.generateFeatures(document)
        for sentence in document.getSentences():
            sentence_seq = self.process_sentence(sentence)

            for w in sliding_window(sentence_seq, self.window_size, self.window_size):
                sequences.append(w)

        return sequences
Example #22
    def gen_context(self, sent):
        size = self.window_size
        # pad zeros
        pad_word_idx = np.pad(sent, (size, size), 'constant',
                              constant_values=self.padding_index)
        # row i of `wds` is the window pad_word_idx[i:i + size]; rows
        # [:-(size + 1)] are the left contexts and rows [(size + 1):] the
        # right contexts of each word, concatenated side by side
        wds = sliding_window(pad_word_idx, size)
        ctx = np.concatenate((wds[:-(size + 1)], wds[(size + 1):]), axis=1)

        # dynamic window size: randomly mask out the d outermost context slots
        dyn_padding = np.random.randint(self.window_size + 1, size=len(sent))
        for word_ctx, d in zip(ctx, dyn_padding):
            if d > 0:  # guard: word_ctx[-0:] would mask the entire row
                word_ctx[:d] = self.padding_index
                word_ctx[-d:] = self.padding_index

        return ctx
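Here sliding_window must return a 2-D array whose rows are windows, since it is sliced row-wise and concatenated along axis 1. One hedged way to get that behavior with NumPy >= 1.20:

import numpy as np

def sliding_window(a, size):
    # all length-`size` windows of a 1-D array, as rows of a 2-D view
    return np.lib.stride_tricks.sliding_window_view(a, size)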
Example #23
    def detect(self,
               img_name,
               image,
               stepSize=None,
               windowSize=None,
               scale=None,
               minSize=None):
        windowSize = windowSize if windowSize is not None else self.windowSize
        stepSize = stepSize if stepSize is not None else self.stepSize
        scale = scale if scale is not None else self.scale
        minSize = minSize if minSize is not None else self.minSize

        window_num = 0
        polygons_metal = list()
        polygons_thatch = list()
        rects_metal = list()
        rects_thatch = list()

        #loop through pyramid

        for level, resized in enumerate(
                utils.pyramid(image, scale=scale, minSize=minSize)):
            for (x, y, window) in utils.sliding_window(resized,
                                                       stepSize=stepSize,
                                                       windowSize=windowSize):

                #self.debug_scaling(image, img_name, resized, x, y, level):

                # if the window does not meet our desired window size, ignore it
                if window.shape[0] != windowSize[0] or window.shape[
                        1] != windowSize[1]:
                    continue
                window_num += 1

                #save the correctly translated coordinates of this window
                polygon, rectangle = self.get_translated_coords(
                    x, y, level, scale, windowSize)
                polygons_metal.append(polygon)
                rects_metal.append(rectangle)

                polygons_thatch.append(polygon)
                rects_thatch.append(rectangle)
        self.total_window_num += window_num
        rects = {'thatch': rects_thatch, 'metal': rects_metal}
        polygons = {'thatch': polygons_thatch, 'metal': polygons_metal}
        return polygons, rects
Example #24
    def depth_blur(self, img):
        (winW, winH) = (self.winsize, self.winsize)
        alpha = self.attenuation
        for (x, y, window) in utils.sliding_window(img,
                                                   stepSize=self.winsize - 6,
                                                   windowSize=(winW, winH)):
            if window.shape[0] != winH or window.shape[1] != winW:
                continue

            # rows index with height, columns with width (identical here since
            # the window is square); cv2.GaussianBlur needs an odd kernel size
            img[y:y + winH, x:x + winW] = cv2.GaussianBlur(
                img[y:y + winH, x:x + winW], (winW, winH),
                alpha * self.depth_img[int(y + winH / 2),
                                       int(x + winW / 2)])

        return img
Example #25
    def __init__(self,
                 run_name,
                 filename,
                 window,
                 stride,
                 rate,
                 size=-1,
                 start=0,
                 test=False):

        file_in_out, out_file = preprocess(run_name,
                                           filename, ["scale " + str(rate)],
                                           test=test)
        waveform, sample_rate = torchaudio.load(file_in_out)
        channel = waveform[0]  # avoid shadowing the builtin `input`
        windows = sliding_window(channel, window, stride)
        self.x = windows[start:start + size, None, :]
Example #26
def audio_data_to_expectation_of_next_sample(data, labels):
    orig_type = data.dtype
    data_max = data.max()
    data = data / data_max
    expectations = data
    window_size = 512
    padding = np.zeros((data.shape[0], window_size), dtype=data.dtype)
    data = np.concatenate((padding, data), axis=1)

    def denormalize(normalized_data):
        return (normalized_data * data_max).astype(orig_type)

    chunks = sliding_window(
        data,
        size=window_size)[:, :-1]  # drop the last one, which has no prediction
    # todo include the label data somehow
    # todo maybe also include the index of the next sample?
    return chunks, expectations, denormalize
Example #27
    def detect_roofs(self, image):
        # loop over the image pyramid
        for resized in utils.pyramid(image, scale=1.5):
            # loop over the sliding window for each layer of the pyramid
            for (x, y, window) in utils.sliding_window(resized, stepSize=32, windowSize=(winW, winH)):
                # if the window does not meet our desired window size, ignore it
                if window.shape[0] != winH or window.shape[1] != winW:
                    continue

                # THIS IS WHERE YOU WOULD PROCESS YOUR WINDOW, SUCH AS APPLYING A
                # MACHINE LEARNING CLASSIFIER TO CLASSIFY THE CONTENTS OF THE
                # WINDOW

                # since we do not have a classifier, we'll just draw the window
                clone = resized.copy()
                cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)
                cv2.imshow("Window", clone)
                cv2.waitKey(1)
                time.sleep(0.025)
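Most detection examples in this collection assume the classic step-and-crop generator popularized by the PyImageSearch tutorial this snippet mirrors; a minimal sketch:

def sliding_window(image, stepSize, windowSize):
    # slide left-to-right and top-to-bottom; windowSize is (width, height)
    for y in range(0, image.shape[0], stepSize):
        for x in range(0, image.shape[1], stepSize):
            yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])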
Example #28
def detect(origin_img, hog, clf):
    windows = []
    for img, scale in pyramid(origin_img):
        points = []
        features = []
        for (x1, y1, window) in sliding_window(img, 8, (128, 128)):
            if window.shape[0] == 128 and window.shape[1] == 128:
                features.append(hog.compute(window).reshape(-1))
                points.append([x1, y1])

        if len(features) == 0:
            continue

        Y = clf.predict(features)
        points = np.asarray(points)[Y==1] * scale
        w = np.concatenate((points, points + 128*scale), axis=1).astype(int)
        if w.shape[0] > 0:
            windows.append(w.tolist())
    return windows
Example #29
 def detect(self, image):
     clone = image.copy()
     image = rgb2gray(image)
     detections = []  # store the detections
     downscale_power = 0  # current downscale exponent
     # iterate over progressively downscaled copies of the image
     for im_scaled in pyramid(image,
                              downscale=self.downscale,
                              min_size=self.window_size):
         if im_scaled.shape[0] < self.window_size[1] or im_scaled.shape[
                 1] < self.window_size[0]:
             # stop once the scaled image is smaller than the window
             break
         for (x, y, im_window) in sliding_window(im_scaled,
                                                 self.window_step_size,
                                                 self.window_size):
             if im_window.shape[0] != self.window_size[
                     1] or im_window.shape[1] != self.window_size[0]:
                 continue
             feature_vector = hog(im_window, block_norm="L1")  # compute the HOG features
             X = np.array([feature_vector])
             prediction = self.clf.predict(X)
             if prediction == 1:
                 x1 = int(x * (self.downscale**downscale_power))
                 y1 = int(y * (self.downscale**downscale_power))
                 detections.append(
                     (x1, y1, x1 + int(self.window_size[0] *
                                       (self.downscale**downscale_power)),
                      y1 + int(self.window_size[1] *
                               (self.downscale**downscale_power))))
         downscale_power += 1  # move to the next scale

     clone_before_nms = clone.copy()  # for drawing the results before NMS
     for (x1, y1, x2, y2) in detections:
         cv2.rectangle(clone_before_nms, (x1, y1), (x2, y2), (0, 255, 0),
                       thickness=2)  # draw the box
     detections = non_max_suppression(np.array(detections),
                                      self.threshold)  # apply NMS
     clone_after_nms = clone
     # draw the detections that survived NMS
     for (x1, y1, x2, y2) in detections:
         cv2.rectangle(clone_after_nms, (x1, y1), (x2, y2), (0, 255, 0),
                       thickness=2)
     return clone_before_nms, clone_after_nms
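non_max_suppression is imported from elsewhere in Examples #8 and #29. A Malisiewicz-style sketch of greedy NMS over (x1, y1, x2, y2) boxes, assuming that is indeed what the helper does:

import numpy as np

def non_max_suppression(boxes, overlap_thresh=0.3):
    # keep the box with the largest y2, drop boxes that overlap it too much, repeat
    if len(boxes) == 0:
        return np.array([])
    boxes = boxes.astype("float")
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(y2)
    pick = []
    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[idxs[:last]]
        idxs = np.delete(idxs, np.concatenate(
            ([last], np.where(overlap > overlap_thresh)[0])))
    return boxes[pick].astype("int")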
Example #30
def _greedy_motifs_search_template(
    dna: List[str], k: int, t: int, to_profile
) -> List[str]:
    assert len(dna) == t
    best_motifs = []
    best_score = float("inf")
    # join the character windows of the first sequence into candidate k-mers
    kmers = ("".join(cs) for cs in sliding_window(dna[0], k))
    for kmer in kmers:
        motifs = [kmer]
        for seq in dna[1:]:
            prof = to_profile(motifs)
            motif = most_probable_kmer(seq, k, prof)
            motifs.append(motif)

        if score(motifs) < best_score:
            best_score = score(motifs)
            best_motifs = motifs

    return best_motifs
Example #31
def stream_train_images(dir_path, true_rectangles_dict, window_size=(128, 128), window_step=32, visualize=False):
    # define the window width and height
    winW, winH = window_size

    for image_path in list_images(dir_path):
        if not is_image_file(image_path):
            continue

        # Read the image
        image = scipy.misc.imread(image_path)
        parent_dir_path, image_name = os.path.split(image_path)
        parent_dir_name = os.path.split(parent_dir_path)[-1]
        image_name_key = os.path.join(parent_dir_name, image_name)
        true_rectangles = true_rectangles_dict[image_name_key]

        if visualize:
            clone = image.copy()
            for rect in true_rectangles:
                cv2.rectangle(clone, (rect[0], rect[1]), (rect[2], rect[3]), RED, thickness=2)

        for (x, y, window) in sliding_window(image, step_size=window_step, window_size=(winW, winH)):
            # if the window does not meet our desired window size, ignore it
            if window.shape[0] != winH or window.shape[1] != winW:
                continue

            if visualize:
                copy = clone.copy()
                cv2.rectangle(copy, (x, y), (x + winW, y + winH), BLUE, thickness=2)
                cv2.imshow(image_path, copy)
                cv2.waitKey(1)

            if all(bb_intersection_over_union((x, y, x + winW, y + winH), rect) == 0 for rect in true_rectangles):
                if visualize:
                    cv2.rectangle(clone, (x, y), (x + winW, y + winH), GREEN, thickness=2)
                    cv2.imshow(image_path, clone)
                    cv2.waitKey(1)

                yield image_name, window

        if visualize:
            cv2.destroyAllWindows()
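bb_intersection_over_union above is the standard IoU. A self-contained sketch for (x1, y1, x2, y2) boxes:

def bb_intersection_over_union(box_a, box_b):
    # intersection area divided by union area, in [0, 1]
    xa, ya = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    xb, yb = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, xb - xa) * max(0, yb - ya)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = float(area_a + area_b - inter)
    return inter / union if union > 0 else 0.0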
Example #32
def motif_enumeration(dna: List[str], k: int, d: int) -> Set[str]:
    """Search for all (k, d)-motifs in dna with brute force.
    O(t * n * X * t * n)
        where t = len(dna)
              n = len(dna[0])
              X = time complexity of neighbors()

    >>> motif_enumeration({"ATTTGGC", "TGCCTTA", "CGGTATC", "GAAAATT"}, 3, 1) == {"ATA", "ATT", "GTT", "TTT"}
    True
    """
    seen = set()
    res = set()
    for seq in dna:
        for seed in sliding_window(seq, k):
            if seed in seen:
                continue
            seen.add(seed)
            for pattern in neighbors(seed, d):
                if all(hamming_distance_str(pattern, s) <= d for s in dna):
                    res.add(pattern)
    return res
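neighbors(seed, d) generates the d-neighborhood of a k-mer. The standard recursive construction, sketched here under the assumption that seed is handled as a string, reusing the module's hamming_distance_str:

def neighbors(pattern: str, d: int) -> Set[str]:
    # all strings within Hamming distance d of pattern
    if d == 0:
        return {pattern}
    if len(pattern) == 1:
        return {"A", "C", "G", "T"}
    res = set()
    for suffix in neighbors(pattern[1:], d):
        if hamming_distance_str(pattern[1:], suffix) < d:
            res.update(c + suffix for c in "ACGT")
        else:
            res.add(pattern[0] + suffix)
    return res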
Example #33
File: views.py Project: JudoWill/judosite
def practice_list(request, club = None):

    club = get_object_or_404(Club, Slug = club)
    practices = club.practice_set.all().annotate(NumPeople = Count('person')).order_by('-Date')
    if practices.count() > 10:
        chart = GChart(ctype = 'line')
        data = sliding_window(practices)
        chart.dataset(data[:500]).axes.type('xy')
    

    if request.method == 'POST':
        form = PracticeModelForm(request.POST)
        if form.is_valid():  # validate before saving
            new_practice = form.save(commit=False)
            new_practice.Club = club
            new_practice.save()
            return HttpResponseRedirect(new_practice.get_absolute_url())
            
    else:
        form = PracticeModelForm()

    return render_to_response('Dojo/Practice_object_list.html', locals(),
                              context_instance = RequestContext(request))
Example #34
def find_bbox(mean, evecs, image, width, height, is_upper, jaw_split, show=False):
    """Finds a bounding box around the four upper or lower incisors.
    A sliding window is moved over the given image. The window which matches best
    with the given appearance model is returned.

    Args:
        mean: PCA mean.
        evecs: PCA eigenvectors.
        image: The dental radiograph on which the incisors should be located.
        width (int): The default width of the search window.
        height (int): The default height of the search window.
        is_upper (bool): Whether to look for the upper (True) or lower (False) incisors.
        jaw_split (Path): The jaw split.

    Returns:
        A bounding box around what looks like four incisors.
        The region of the image selected by the bounding box.

    """
    h, w = image.shape

    # [b1, a1]---------------
    # -----------------------
    # -----------------------
    # -----------------------
    # ---------------[b2, a2]

    if is_upper:
        b1 = int(w/2 - w/10)
        b2 = int(w/2 + w/10)
        a1 = int(np.max(jaw_split.get_part(b1, b2), axis=0)[1]) - 350
        a2 = int(np.max(jaw_split.get_part(b1, b2), axis=0)[1])
    else:
        b1 = int(w/2 - w/12)
        b2 = int(w/2 + w/12)
        a1 = int(np.min(jaw_split.get_part(b1, b2), axis=0)[1])
        a2 = int(np.min(jaw_split.get_part(b1, b2), axis=0)[1]) + 350

    search_region = [(b1, a1), (b2, a2)]

    best_score = float("inf")
    best_score_bbox = [(-1, -1), (-1, -1)]
    best_score_img = np.zeros((500, 400))
    for wscale in np.arange(0.8, 1.3, 0.1):
        for hscale in np.arange(0.7, 1.3, 0.1):
            winW = int(width * wscale)
            winH = int(height * hscale)
            for (x, y, window) in sliding_window(image, search_region, step_size=36, window_size=(winW, winH)):
                # if the window does not meet our desired window size, ignore it
                if window.shape[0] != winH or window.shape[1] != winW:
                    continue

                reCut = cv2.resize(window, (width, height))

                X = reCut.flatten()
                Y = project(evecs, X, mean)
                Xacc = reconstruct(evecs, Y, mean)

                score = np.linalg.norm(Xacc - X)
                if score < best_score:
                    best_score = score
                    best_score_bbox = [(x, y), (x + winW, y + winH)]
                    best_score_img = reCut

                if show:
                    window = [(x, y), (x + winW, y + winH)]
                    Plotter.plot_autoinit(image, window, score, jaw_split, search_region, best_score_bbox,
                                          title="wscale="+str(wscale)+" hscale="+str(hscale))

    return (best_score_bbox, best_score_img)
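find_bbox scores each window by its PCA reconstruction error through project and reconstruct. Assuming evecs holds the eigenvectors as columns, minimal sketches are:

import numpy as np

def project(evecs, X, mean):
    # coordinates of X in the PCA basis
    return np.dot(X - mean, evecs)

def reconstruct(evecs, Y, mean):
    # map PCA coordinates back to image space
    return np.dot(Y, evecs.T) + mean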
Example #35
if __name__ == '__main__':
    if len(sys.argv) != 2:
        logger.critical("Please provide a test image path!")
        sys.exit()
    else:
        image_path = sys.argv[1]
        # 全局模型
        overallModel_file = config["model"]["overallModel_file"]
        with open(overallModel_file, 'rb') as fid:
            classifier1 = cPickle.load(fid)
            # logger.info("overall model imported successfully.")
        # 局部模型
        localModel_file = config["model"]["localModel_file"]
        with open(localModel_file, 'rb') as fid:
            classifier2 = cPickle.load(fid)
            # logger.info("local model imported successfully.")

    image = cv2.imread(image_path)
    pred = classifier1.predict_proba(process_image(
        image, feature.getFeature))[0]
    # threshold_window = 1 - min(0.5, pred[1]) * 5 / 7.0  # window threshold tied to the overall probability
    threshold_window = 0.6
    result = list()
    for (x, y, window) in sliding_window(image, stepSize, (winW, winH)):
        prediction = classifier2.predict_proba(
            process_image(window, feature.getFeature))[0]
        if prediction[1] >= threshold_window:
            result.append([x, y, x + winW, y + winH])
    print(result)