# Assumed imports -- this snippet relies on NumPy plus module-level helpers
# (DELTA_TAU, _signal_properties, _search_volume, link and the project's own
# `convolve`, which takes a scale sigma as its second argument) defined elsewhere.
from numpy import asarray, exp, arange, nonzero, diff, append, unique
from numpy import double as np_double

def segment_signal(signal, numScales, deltaTau=None, kMin=None, linkType=None):
	if deltaTau is None: deltaTau = DELTA_TAU
	if kMin is None: kMin = pow((1 - exp(-2 * deltaTau)), -0.5)

	signal = asarray(signal).flatten()
	signalLength, maxDiff, numLeadingZeros, numTrailingZeros = _signal_properties(signal)

	# Calculate scale sigma's
	scaleSigmaArray = exp(arange(0, numScales) * deltaTau)

	# Setup initial node set
	nodeIds = nonzero(diff(signal, axis=0))[0] + 1
	if nodeIds[-1] != signalLength: 
		nodeIds = append(nodeIds, signalLength)
	
	# Loop!
	baseSignal = asarray(signal, dtype=np_double)
	prevScaleSignal, prevSegmentEnds = baseSignal, nodeIds
	nodeMapping, segmentEnds  = {}, { 0: nodeIds }
	for scaleIndex in range(1, numScales):
		scaleSignal = convolve(baseSignal, scaleSigmaArray[scaleIndex])

		d, dcp, r = _search_volume(scaleSigmaArray, scaleIndex, kMin)
		
		parents, scaleSegmentEnds = link(nodeIds, d, dcp, r, prevScaleSignal, scaleSignal, signalLength, \
										 maxDiff, prevSegmentEnds, numLeadingZeros, numTrailingZeros, linkType=linkType)	

		nodeMapping[scaleIndex] = list(zip(nodeIds, parents))  # materialise; zip is a one-shot iterator in Python 3
		segmentEnds[scaleIndex] = scaleSegmentEnds

		prevScaleSignal, prevSegmentEnds, nodeIds = scaleSignal, scaleSegmentEnds, unique(parents)

	return nodeMapping, segmentEnds
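A minimal usage sketch (not from the original repository), assuming segment_signal and its helpers are importable and the input is a short piecewise-constant signal:

sig = [0, 0, 1, 1, 1, 3, 3, 3, 0, 0, 2, 2]           # hypothetical step-like signal
nodeMapping, segmentEnds = segment_signal(sig, numScales=6)
print(segmentEnds[0])    # candidate segment boundaries at the finest scale
print(segmentEnds[5])    # boundaries that survive to the coarsest scale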
Example #3
# Assumed import -- max_gauss_cdf, max_gauss_pdf and the continuous `convolve`
# used below come from the surrounding module and are not shown here.
import numpy as np

def accuracy(nt_mean, nt_var, nt_count, t_mean, t_var, repetitions):
    nt_mean *= repetitions
    nt_var *= repetitions
    nt_std = np.sqrt(nt_var)
    t_mean *= repetitions
    t_var *= repetitions
    t_std = np.sqrt(t_var)
    return convolve(
        lambda t: max_gauss_cdf(nt_mean - t_mean, nt_std, nt_count, t),
        lambda t: max_gauss_pdf(0, t_std, 1, t),
        0
    )
Example #4
# Assumed imports -- `cv` is the project's convolution module, `mask.g_3` is a
# project-defined Gaussian kernel, and `cp` is the standard copy module.
import math
import copy as cp
import numpy as np
import convolution as cv
import mask

def pyrContract(current_img):
    # Convolves input with the gaussian mask
    blur_img = cv.convolve(cp.copy(current_img), mask.g_3)

    height = math.floor(current_img.shape[0] / 2)
    width = math.floor(current_img.shape[1] / 2)
    channel = current_img.shape[2]

    aux = np.zeros((height, width, channel), np.uint8)

    for i in range(height):
        for j in range(width):
            for k in range(channel):
                # Skip every other pixel of input
                aux.itemset((i, j, k), blur_img.item(i * 2, j * 2, k))

    return aux
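The triple loop above simply keeps every second pixel of the blurred image. Assuming cv.convolve returns a NumPy array with the same shape as its input, the same decimation can be written with slicing (a sketch, not part of the original project):

def pyrContractVectorized(current_img):
    blur_img = cv.convolve(cp.copy(current_img), mask.g_3)
    h = current_img.shape[0] // 2
    w = current_img.shape[1] // 2
    # keep every other row and column of the blurred image
    return blur_img[:2 * h:2, :2 * w:2, :].astype(np.uint8)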
Example #5
# Variant with optional node mapping and a minimum node interval; it relies on the
# same assumed imports as the version above, plus numpy's ndarray and the module's
# _initial_node_set helper.
def segment_signal(signal, numScales, deltaTau=None, kMin=None, linkType=None, doNodeMapping=False, minNodeInterval=None):
	assert type(signal) == list or (type(signal) == ndarray and signal.ndim == 1)

	if deltaTau is None: deltaTau = DELTA_TAU
	if kMin is None: kMin = pow((1 - exp(-2 * deltaTau)), -0.5)

	if type(signal) == list: signal = asarray(signal).flatten()
	signalLength, maxDiff, numLeadingZeros, numTrailingZeros = _signal_properties(signal)

	# Calculate scale sigma's
	scaleSigmaArray = exp(arange(0, numScales) * deltaTau)

	# Setup initial node set
	nodeIds = _initial_node_set(signal, signalLength, minNodeInterval)
	
	# Setup initial loop vars
	segmentEnds  = { 0: nodeIds }
	if doNodeMapping: nodeMapping = {}

	# Loop!
	baseSignal = asarray(signal, dtype=np_double)
	prevScaleSignal, prevSegmentEnds = baseSignal, nodeIds
	for scaleIndex in range(1, numScales):
		scaleSignal = convolve(baseSignal, scaleSigmaArray[scaleIndex])

		d, dcp, r = _search_volume(scaleSigmaArray, scaleIndex, kMin)
		
		parents, scaleSegmentEnds = link(nodeIds, d, dcp, r, prevScaleSignal, scaleSignal, signalLength, \
										 maxDiff, prevSegmentEnds, numLeadingZeros, numTrailingZeros, linkType=linkType)	

		
		if doNodeMapping: nodeMapping[scaleIndex] = list(zip(nodeIds, parents))  # materialise; zip is a one-shot iterator in Python 3
		segmentEnds[scaleIndex] = scaleSegmentEnds

		prevScaleSignal, prevSegmentEnds, nodeIds = scaleSignal, scaleSegmentEnds, unique(parents)

	return (segmentEnds, nodeMapping) if doNodeMapping else segmentEnds
Example #6
def feed_forward(self, X):
    # cache the input (typically reused by the layer's backward pass)
    self.X = X
    # convolve the input with this layer's kernel and add the bias
    out = convolve(
        X, self.kernel, padding=self.padding,
        stride=self.stride) + self.bias
    return out
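The layer delegates the sliding-window arithmetic to the imported convolve helper. As a reference for its padding and stride parameters, the usual output-size formula looks like this (a sketch under the standard convention, not taken from this repository):

def conv_output_size(in_size, kernel_size, padding, stride):
    # standard output size of a strided, zero-padded convolution
    return (in_size + 2 * padding - kernel_size) // stride + 1

print(conv_output_size(32, 3, 1, 1))  # 32-pixel input, 3x3 kernel, padding 1, stride 1 -> 32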
Example #7
import cv2
import numpy as np
from matplotlib import pyplot as plt
from convolution import convolve

img = cv2.imread('My Images/github.jpeg')
#blur = cv2.blur(img,(5,5))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

kernel = np.ones((5,5), np.float32)/25
dst = convolve(img, kernel)

plt.subplot(121), plt.imshow(img, cmap='gray'), plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(dst, cmap='gray'), plt.title('Blurred')
plt.xticks([]), plt.yticks([])
plt.show()
Example #8
import numpy as np
import matplotlib.pyplot as plt
from stimuli import events2neural
from convolution import convolve

TR = 2
n_vols = 240
duration = 3/TR

all_tr_times = np.arange(n_vols) * TR  # acquisition times in seconds

neural1 = events2neural(".././cond001.txt", TR, n_vols)
neural2 = events2neural(".././cond002.txt", TR, n_vols)
neural3 = events2neural(".././cond003.txt", TR, n_vols)
neural4 = events2neural(".././cond004.txt", TR, n_vols)

convolved1 = convolve(neural1, TR, n_vols, duration)
np.savetxt("conv001.txt", convolved1)
convolved2 = convolve(neural2, TR, n_vols, duration)
np.savetxt("conv002.txt", convolved2)
convolved3 = convolve(neural3, TR, n_vols, duration)
np.savetxt("conv003.txt", convolved3)
convolved4 = convolve(neural4, TR, n_vols, duration)
np.savetxt("conv004.txt", convolved4)

plt.subplot(221)
plt.plot(all_tr_times, convolved1)
plt.plot(all_tr_times, neural1)
plt.title("Condition 1")

plt.subplot(222)
plt.plot(all_tr_times, convolved2)
Example #9
# Assumed setup -- the original script loads the input image (the first CLI
# argument) with PIL before this point.
import itertools
import os
import sys
import numpy as np
from PIL import Image
import convolution

image = Image.open(sys.argv[1])

width, height = image.size
totalPixels = width * height

a = [[1, 1, 2, 2, 2, 1, 1],
     [1, 2, 2, 4, 2, 2, 1],
     [2, 2, 4, 8, 4, 2, 2],
     [2, 4, 8, 16, 8, 4, 2],
     [2, 2, 4, 8, 4, 2, 2],
     [1, 2, 2, 4, 2, 2, 1],
     [1, 1, 2, 2, 2, 1, 1]]


print(a)
smoothingMask = np.array(a, np.float64)
print(smoothingMask)
for i, j in itertools.product(range(smoothingMask.shape[0]), range(smoothingMask.shape[1])):
  smoothingMask[i,j] /= smoothingMask.shape[0] *smoothingMask.shape[1]
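# Note (not in the original script): dividing by the element count does not make the
# weights sum to 1 (here they sum to well above 1), so this is not a true weighted
# average; a conventional smoothing kernel is normalised by its sum instead,
# e.g. smoothingMask /= smoothingMask.sum()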



pixels = image.load()
convolution.convolve(pixels, image.size, smoothingMask, False)


outDir = sys.argv[2] + \
    '/gaussian/{inputFile}/'.format(inputFile=os.path.basename(sys.argv[1]))
if not os.path.exists(outDir):
    os.makedirs(outDir)
image.save(outDir+'_gaussian.jpg')
Example #10
import cv2 as cv
import numpy as np
from convolution import convolve


src = cv.imread("Lena.png", cv.IMREAD_GRAYSCALE)
print(src.dtype)
kernel = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
Wx = np.array([[0.25, 0, -0.25], [0.5, 0, -0.5], [0.25, 0, -0.25]])
Wy = np.array([[-0.25, -0.5, -0.25], [0, 0, 0], [0.25, 0.5, 0.25]])
# Pre-allocate the output; the custom convolve presumably fills it in place
# (passing None would leave dstImg unset for imshow below).
dstImg = np.zeros_like(src)
convolve(src, dstImg, kernel)

# cv.imshow("Source", src)
cv.imshow("Destination", dstImg)
cv.waitKey(0)
cv.destroyAllWindows()

# image = cv.imread("D:\dreams.jpg", cv.IMREAD_GRAYSCALE)
# # # image = np.ones((5, 7), np.uint8)
# # image = np.array([[35,22,73,44,5,6,7], [122,120,84,8,9,12,9], [21,12,53,4,5,6,7], [2,0,4,8,9,12,9], [1,2,3,4,5,6,7]])
# # height, width= image.shape
# # print(height, width)

# # square = np.zeros((height, width), np.uint8)
# # square[0] = image[0]
# # square[height-1] = image[height-1]
# # square[:height, 0] = image[:height, 0]
# # square[:height, width-1] = image[:height, width-1]

# Wx = np.array([[0.25, 0, -0.25], [0.5, 0, -0.5], [0.25, 0, -0.25]])
Example #12
# Assumed extra imports -- cv2, copy (as cp), NumPy and the project's `mask` module
# are used below but were not shown in this snippet; the `ut` timing helper also
# comes from a project module that is not shown here.
import cv2
import copy as cp
import numpy as np
import convolution as cv
import gaussian_pyramid as gPyr
import place_pyramid as pPyr
import blending as bl
import fourier as ft
import mask

# Python uses the BGR color scheme
input = cv2.imread('input/p1-1-0.png')

# 2.1

filter = [mask.g_3, mask.g_7, mask.g_15]

for i in range(len(filter)):
    time = ut.time()
    output = cv.convolve(cp.copy(input), filter[i])
    print("Convolution time[", i, "]: ", time.elapsed())
    cv2.imwrite('output/p1-2-1-{}.png'.format(i), output)

    time = ut.time()
    # cv2.filter2D performs correlation, so the kernel is flipped in both axes
    # to make the result comparable with a true convolution
    output = cv2.filter2D(cp.copy(input), -1, np.flip(np.flip(filter[i], 0), 1))
    print("OpenCV Convolution time[", i, "]: ", time.elapsed())
    print("")

# 2.2

output = gPyr.gaussianPyramid(cp.copy(input), 3)

for i in range(len(output)):
    cv2.imwrite('output/p1-2-2-{}.png'.format(i), output[i])
Example #13
from PIL import Image
import matplotlib.pyplot as plt
import imageUtils as imgutil
import convolution
import numpy as np

img = Image.open('linia.png').convert('L')
binimg = imgutil.binarize(img, 128)
#mask = np.matrix([[1, 2, 1],[2,4,2],[1,2,1]], dtype=np.uint8)
mask = np.array([[1, 2, 4, 2, 1], [2, 4, 8, 4, 2], [1, 2, 4, 2, 1]],
                dtype=np.uint8)
convimg = convolution.convolve(binimg, mask)
imgplot = imgutil.imshow_gray(imgutil.invert(convimg))
plt.show()
Example #14
from skimage import io, color
import numpy as np
import matplotlib.pyplot as plt
from skimage import exposure
from skimage.filters import threshold_otsu

from convolution import convo as convolve

img = io.imread('data.jpg')  # Load the image
img = color.rgb2gray(img)

kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
thresh = threshold_otsu(img)
binary = img > thresh
edges = convolve(binary, kernel)

plt.imshow(edges, cmap=plt.cm.gray)
plt.axis('off')
plt.show()