def blurrImage(nameImage, name, plot=False, path=None):
    print("Blurring image...")
    image = cv2.imread(nameImage)
    if len(image.shape) == 3 and image.shape[2] == 3:
        # print("COLOR")
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image
    kernel = (7, 7)
    cleaned = cv2.GaussianBlur(gray, kernel, 0)

    if path is not None:
        new_path = altsep.join((path, "Blurred"))
        if not exists(new_path):
            tl.makeDir(new_path)
        dst = altsep.join((new_path, (name + ".png")))
        img_color = cv2.cvtColor(cleaned, cv2.COLOR_GRAY2BGR)
        cv2.imwrite(dst, img_color)

    if plot:
        cv2.imshow("Original '{}'".format(name), image)
        cv2.waitKey(2000)
        cv2.destroyAllWindows()

        cv2.imshow("Cleaned '{}'".format(name), cleaned)
        cv2.waitKey(2000)
        cv2.destroyAllWindows()
    return cleaned
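A minimal usage sketch of blurrImage (the input file name below is hypothetical, not part of the original snippet):

# Hypothetical call: grayscale and Gaussian-blur a scan; pass a folder as `path`
# to also save the result under <path>/Blurred/<name>.png
blurred = blurrImage("fingerprint_01.png", "fingerprint_01")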
def cleanImage(bin_img, name, plot=False, path=None):
    print("Cleaning image...")
    n = 3
    n_2 = 3
    iterations = 2  # 3
    iterations_2 = 2
    # OpenCV expects structuring elements as arrays rather than bare tuples
    kernel = np.ones((n, n), np.uint8)
    kernel2 = np.ones((n_2, n_2), np.uint8)
    cleaned = bin_img.copy()

    # cleaned = cv2.dilate(cleaned, kernel2, iterations=iterations_2)
    # Close small gaps, then remove small speckles, then lightly erode the strokes
    cleaned = cv2.morphologyEx(cleaned,
                               cv2.MORPH_CLOSE,
                               kernel,
                               iterations=iterations)
    cleaned = cv2.morphologyEx(cleaned,
                               cv2.MORPH_OPEN,
                               kernel,
                               iterations=iterations)
    cleaned = cv2.erode(cleaned, kernel2, iterations=iterations_2)

    if path is not None:
        new_path = altsep.join((path, "Cleaned"))
        if not exists(new_path):
            tl.makeDir(new_path)
        dst = altsep.join((new_path, (name + ".png")))
        cv2.imwrite(dst, cleaned)

    if plot:
        cv2.imshow("Cleaned '{}'".format(name), cleaned)
        cv2.waitKey(2000)
        cv2.destroyAllWindows()

    return cleaned
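The kernels above are plain square arrays of ones; cv2.getStructuringElement is the usual OpenCV helper when a shaped element is preferred. A small illustrative sketch, not part of the original code:

kernel_rect = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))        # same as np.ones((3, 3), np.uint8)
kernel_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))  # elliptical alternative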
def zhangSuen(image, name, plot=False, path=None):
    "the Zhang-Suen Thinning Algorithm"
    Image_Thinned = image.copy()  # deepcopy to protect the original image
    changing1 = changing2 = 1  #  the points to be removed (set as 0)
    while changing1 or changing2:  #  iterates until no further changes occur in the image
        # Step 1
        changing1 = []
        rows, columns = Image_Thinned.shape  # x for rows, y for columns
        for x in range(1, rows - 1):  # No. of  rows
            for y in range(1, columns - 1):  # No. of columns
                P2, P3, P4, P5, P6, P7, P8, P9 = n = neighbours(
                    x, y, Image_Thinned)
                if (Image_Thinned[x][y] == 1
                        and  # Condition 0: Point P1 in the object regions
                        2 <= sum(n) <= 6 and  # Condition 1: 2<= N(P1) <= 6
                        transitions(n) == 1 and  # Condition 2: S(P1)=1
                        P2 * P4 * P6 == 0 and  # Condition 3
                        P4 * P6 * P8 == 0):  # Condition 4
                    changing1.append((x, y))
        for x, y in changing1:
            Image_Thinned[x][y] = 0
        # Step 2
        changing2 = []
        for x in range(1, rows - 1):
            for y in range(1, columns - 1):
                P2, P3, P4, P5, P6, P7, P8, P9 = n = neighbours(
                    x, y, Image_Thinned)
                if (Image_Thinned[x][y] == 1 and  # Condition 0
                        2 <= sum(n) <= 6 and  # Condition 1
                        transitions(n) == 1 and  # Condition 2
                        P2 * P4 * P8 == 0 and  # Condition 3
                        P2 * P6 * P8 == 0):  # Condition 4
                    changing2.append((x, y))
        for x, y in changing2:
            Image_Thinned[x][y] = 0

    if path is not None:
        new_path = altsep.join((path, "ZhangSuen"))
        if not exists(new_path):
            tl.makeDir(new_path)
        dst = altsep.join((new_path, (name + ".png")))
        cv2.imwrite(dst, Image_Thinned)

    if plot:
        cv2.imshow("Thinning '{}'".format(name), Image_Thinned)
        cv2.waitKey(1000)
        cv2.destroyAllWindows()
    return Image_Thinned
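zhangSuen() relies on neighbours() and transitions() helpers that are not shown in this snippet; a minimal sketch following the standard formulation (P2..P9 listed clockwise around P1, counting the 0 -> 1 transitions in that circular sequence):

def neighbours(x, y, image):
    # Return the 8 neighbours of pixel P1 = (x, y) in clockwise order P2..P9
    img = image
    x_1, y_1, x1, y1 = x - 1, y - 1, x + 1, y + 1
    return [img[x_1][y], img[x_1][y1], img[x][y1], img[x1][y1],    # P2, P3, P4, P5
            img[x1][y], img[x1][y_1], img[x][y_1], img[x_1][y_1]]  # P6, P7, P8, P9


def transitions(neighbours):
    # Count the number of 0 -> 1 transitions in the ordered sequence P2, P3, ..., P9, P2
    n = neighbours + neighbours[0:1]
    return sum((n1, n2) == (0, 1) for n1, n2 in zip(n, n[1:]))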
Example #4
def extract(video_in, out_path, face_path, pad):
    '''
    This function detects the faces that appear in each frame of the input video using
    the face_cascade scheme and saves the cropped face regions as images
    :param video_in: full or relative path of the video file
    :param out_path: path where the extracted face images will be stored
    :param face_path: path to the XML schema (Haar cascade) for faces
    :param pad: padding, in pixels, added around each detected face before cropping
    :return: number of frames processed
    '''

    face_cascade = cv2.CascadeClassifier(face_path)
    img_ext = '.png'
    video_name = splitext(basename(video_in))[0]

    '''
    cv2.CAP_ANY
    cv2.CAP_VFW
    cv2.CAP_QT
    cv2.CAP_DSHOW
    cv2.CAP_MSMF 
    cv2.CAP_WINRT
    cv2.CAP_FFMPEG
    cv2.CAP_PROP_FOURCC
    '''
    video = cv2.VideoCapture(video_in, cv2.CAP_FFMPEG)
    # video = cv2.VideoCapture(video_in, 6)

    num = 0
    while video.isOpened():
        ret, frame = video.read()

        if ret:
            faces = face_cascade.detectMultiScale(frame,
                                                  scaleFactor=1.3,
                                                  minNeighbors=5,
                                                  minSize=(30, 30),
                                                  flags=cv2.CASCADE_SCALE_IMAGE)

            for (x, y, w, h) in faces:
                # Add the padding (if any) to the ROI, clamping to the frame border
                # so that faces near the image edge do not produce negative indices
                half_pad = int(pad / 2)
                y0, x0 = max(0, y - half_pad), max(0, x - half_pad)
                roi_color = frame[y0:y + h + half_pad, x0:x + w + half_pad]
                # cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)  # BGR
                cv2.imwrite(altsep.join((out_path, video_name + '_' + str(num) + '_' + str(half_pad) + img_ext)), roi_color)
            # cv2.imshow(video_name, frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
        num += 1
    # num_ext = len([name for name in listdir(out_path) if isfile(join(out_path, name))])
    print("Extracted {} images from '{}'".format(num, video_name))

    video.release()
    cv2.destroyAllWindows()

    return num
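A hedged usage sketch; the video path and output folder are hypothetical, while the cascade file is the one bundled with opencv-python:

# Hypothetical usage: extract padded face crops from a local video into ./Faces
import os
os.makedirs("./Faces", exist_ok=True)
cascade_xml = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
n_frames = extract("session_01.avi", "./Faces", cascade_xml, pad=20)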
def image_enhance(gray, name, plot=False, path=None):
    print("Enhancing ridges...")
    blksze = 16
    thresh = 0.1
    print(type(gray))
    normim, mask = ridge_segment(gray, blksze,
                                 thresh)  # normalise the image and find a ROI

    gradientsigma = 1
    blocksigma = 7
    orientsmoothsigma = 7
    orientim = ridge_orient(
        normim, gradientsigma, blocksigma,
        orientsmoothsigma)  # find orientation of every pixel

    blksze = 38
    windsze = 5
    minWaveLength = 5
    maxWaveLength = 15
    freq, medfreq = ridge_freq(
        normim, mask, orientim, blksze, windsze, minWaveLength,
        maxWaveLength)  #find the overall frequency of ridges

    freq = medfreq * mask
    kx = 0.65
    ky = 0.65
    newim = ridge_filter(normim, orientim, freq, kx,
                         ky)  # create gabor filter and do the actual filtering

    img_enhanced = (newim < -3).astype(float)

    if path is not None:
        new_path = altsep.join((path, "Enhanced"))
        if not exists(new_path):
            tl.makeDir(new_path)
        dst = altsep.join((new_path, (name + ".png")))
        # Scale the binary float mask to 8-bit first; cvtColor/imwrite do not handle float64
        img_color = cv2.cvtColor((img_enhanced * 255).astype(np.uint8), cv2.COLOR_GRAY2BGR)
        cv2.imwrite(dst, img_color)

    if plot:
        cv2.imshow("Enhanced '{}'".format(name), img_enhanced)
        cv2.waitKey(2000)
        cv2.destroyAllWindows()

    return img_enhanced
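ridge_segment, ridge_orient, ridge_freq and ridge_filter are external helpers not shown here; a minimal usage sketch of image_enhance itself (the input file name is hypothetical):

# Hypothetical usage: enhance the ridges of a grayscale fingerprint scan
gray = cv2.imread("fingerprint_01.png", cv2.IMREAD_GRAYSCALE)
enhanced = image_enhance(gray, "fingerprint_01", plot=False)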
def process(skeleton, name, label):
    print("Minutiae extraction...")
    # img = cv2.cvtColor(skeleton, cv2.COLOR_GRAY2BGR)
    (h, w) = skeleton.shape[:2]

    temp = []
    temps = np.zeros(3)
    data = np.zeros(shape=(h, w))

    # if path is not None:
    filename = 'minutiae_' + name + "_" + str(label) + '.csv'
    full_name = altsep.join(("./Result", filename))
    file = open(full_name, 'w')
    # file.write('# (x, y) position of minutiae and type as class (0: termination, 1: bifurcation)\n')
    file.write("Posisi X" + ";" + "Posisi Y" + ";" + "Type Minutiae" + ";" +
               "Nama Class" + '\n')
    for i in range(h):
        for j in range(w):
            if skeleton[i, j] == 255:
                # If the pixel value is 255, analyze its neighbours to decide
                # whether it is a ridge termination or a bifurcation

                window = skeleton[i - 1:i + 2, j - 1:j + 2]
                neighbours = sum(window.ravel()) // 255

                if neighbours == 2:
                    temps[0] = i
                    temps[1] = j
                    temps[2] = 0

                    temp.append(temps.copy())  # copy: temps is reused for the next minutia
                    # if path is not None:
                    file.write(
                        str(i) + ";" + str(j) + ";" + str(0) + ";" +
                        str(label) + "\n")
                    # cv2.circle(img, (j, i), 1, (0, 255, 0), 1)

                if neighbours > 3:
                    temps[0] = i
                    temps[1] = j
                    temps[2] = 1
                    file.write(
                        str(i) + ";" + str(j) + ";" + str(1) + ";" +
                        str(label) + "\n")

                    temp.append(temps.copy())  # copy: temps is reused for the next minutia
                    # if path is not None:
                    # file.write(str(i) + ',' + str(j) + ',1\n')
                    # cv2.circle(img, (j, i), 1, (255, 0, 0), 1)

    # print(data)
    # if plot:
    #     cv2.imshow("Minutiae '{}'".format(name), img)
    #     cv2.waitKey(2000)
    #     cv2.destroyAllWindows()
    file.close()
    return temp
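A hedged usage sketch on a synthetic skeleton; the example array and the folder handling are assumptions, not part of the original code:

# Hypothetical usage on a tiny synthetic skeleton (ridge pixels are 255);
# process() writes its CSV into ./Result, so that folder must exist beforehand
import os
os.makedirs("./Result", exist_ok=True)

skeleton_img = np.zeros((32, 32), np.uint8)
skeleton_img[16, 8:24] = 255                           # one horizontal ridge segment
minutiae = process(skeleton_img, "synthetic", label=0)  # its two endpoints are terminations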
Example #7
def getSequences(path):
    '''
    Auxiliary function that extracts folder names from a given path
    :param path: source path
    :return: array with folder names
    '''
    sequences = [altsep.join((path, f)) for f in listdir(path)
                 if isdir(join(path, f)) and basename(join(path, f)) != 'Results']

    return sequences
def thinImage(image, name, plot=False, path=None):
    print("Thinning image...")
    # Scale the binary image to 0/255 and convert to 8-bit without modifying the caller's array
    image8 = (image * 255).astype(np.uint8)
    thinned = thinning(image8)

    if path is not None:
        new_path = altsep.join((path, "Thinned"))
        if not exists(new_path):
            tl.makeDir(new_path)
        dst = altsep.join((new_path, (name + ".png")))
        cv2.imwrite(dst, thinned)

    if plot:
        cv2.imshow("Thinning '{}'".format(name), thinned)
        cv2.waitKey(2000)
        cv2.destroyAllWindows()

    return thinned
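thinning() is an external helper not defined in this snippet; if opencv-contrib-python is available, a hedged equivalent wrapper could look like:

# Requires opencv-contrib-python; Zhang-Suen thinning from the ximgproc module
def thinning(binary8):
    return cv2.ximgproc.thinning(binary8, thinningType=cv2.ximgproc.THINNING_ZHANGSUEN)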
Example #9
def getSamples(path):
    '''
    This function returns a list of sample names
    :param path: source path
    :return: list of full paths to the samples
    '''
    samples = [
        altsep.join((path, f)) for f in listdir(path) if isfile(join(path, f))
    ]

    return samples
Example #10
def getSamples(path, ext=''):
    '''
    Auxiliary function that extracts file names from a given path based on extension
    :param path: source path
    :param ext: file extension
    :return: array with samples
    '''
    samples = [
        altsep.join((path, f)) for f in listdir(path)
        if isfile(join(path, f)) and f.endswith(ext)
    ]

    if len(samples) == 0:
        print("ERROR!!! ARRAY OF SAMPLES IS EMPTY (check file extension)")

    return samples
Example #11
def plot_confusion_matrix(plot_path,
                          cm,
                          classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt_name = altsep.join((plot_path, "".join((title, ".png"))))
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j,
                 i,
                 format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label', labelpad=0)

    plt.savefig(plt_name)
    plt.show()
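A hedged usage sketch with scikit-learn's confusion_matrix; the label vectors and the ./Plots folder are made up for illustration:

import os
from sklearn.metrics import confusion_matrix

os.makedirs("./Plots", exist_ok=True)
y_true = ["real", "attack", "real", "real", "attack"]
y_pred = ["real", "attack", "attack", "real", "attack"]
cm = confusion_matrix(y_true, y_pred, labels=["real", "attack"])
plot_confusion_matrix("./Plots", cm, classes=["real", "attack"], title="Example confusion matrix")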
def process(skeleton, name, plot=False, path=None):
    print("Minutiae extraction...")
    img = cv2.cvtColor(skeleton, cv2.COLOR_GRAY2BGR)
    (h, w) = skeleton.shape[:2]
    if path is not None:
        filename = 'minutiae_' + name + '.txt'
        full_name = altsep.join((path, filename))
        file = open(full_name, 'w')
        file.write(
            '# (x, y) position of minutiae and type as class (0: termination, 1: bifurcation)\n'
        )
    for i in range(h):
        for j in range(w):
            if skeleton[i, j] == 255:
                # If the pixel value is 255, analyze its neighbours to decide
                # whether it is a ridge termination or a bifurcation

                window = skeleton[i - 1:i + 2, j - 1:j + 2]
                neighbours = sum(window.ravel()) // 255

                if neighbours == 2:
                    # If exactly 2 pixels of the window are white (counting the pixel under
                    # analysis itself), this is a termination: it is written to the output
                    # file and drawn on the image as a green (0, 255, 0) circle.
                    if path is not None:
                        file.write(str(i) + ',' + str(j) + ',0\n')
                    cv2.circle(img, (j, i), 1, (0, 255, 0), 1)

                if neighbours > 3:
                    # If more than 3 pixels of the window are white (counting the pixel under
                    # analysis itself), this is a bifurcation: it is written to the output
                    # file and drawn on the image as a (255, 0, 0) circle (blue in BGR).
                    if path is not None:
                        file.write(str(i) + ',' + str(j) + ',1\n')
                    cv2.circle(img, (j, i), 1, (255, 0, 0), 1)
    if path is not None:
        file.close()

    if plot:
        cv2.imshow("Minutiae '{}'".format(name), img)
        cv2.waitKey(2000)
        cv2.destroyAllWindows()
def process(skeleton, name, plot=False, path=None):
    print("Minutiae extraction...")
    img = cv2.cvtColor(skeleton, cv2.COLOR_GRAY2BGR)
    (h, w) = skeleton.shape[:2]
    if path is not None:
        filename = name+'.txt'
        full_name = altsep.join((path, filename))
        file = open(full_name, 'w')
        #file.write('# (x, y) position of minutiae and type as class (0: termination, 1: bifurcation)\n')

    for i in range(h):
        for j in range(w):
            if skeleton[i, j] == 255:
                # If the pixel value is 255, analyze its neighbours to decide
                # whether it is a ridge termination or a bifurcation
                
                window = skeleton[i - 1:i + 2, j - 1:j + 2]
                neighbours = sum(window.ravel()) // 255
                # Clamp the slice starts to 0 so border pixels do not wrap via negative indices
                left = skeleton[max(0, i - 8):i + 9, max(0, j - 40):j]
                right = skeleton[max(0, i - 8):i + 9, j + 1:j + 40]
                left_neighbours = sum(left.ravel()) // 255
                right_neighbours = sum(right.ravel()) // 255
                
                if neighbours != 2 and neighbours < 4:
                    continue
                
                isolate = 0
                for k in range(1, 10):
                    # Sum the pixels on the square ring of radius k around (i, j);
                    # an empty ring means the point is an isolated speck of noise
                    xleft = max(i - k, 0)
                    xright = min(i + k + 1, h - 1)
                    yleft = max(j - k, 0)
                    yright = min(j + k + 1, w - 1)
                    x = sum(skeleton[xleft, yleft:yright]) + sum(skeleton[xright, yleft:yright])
                    x += sum(skeleton[xleft:xright, yleft]) + sum(skeleton[xleft:xright, yright])
                    if x == 0:
                        isolate = 1
                        break
                
                if isolate:
                    continue
                    
                if neighbours == 2 and left_neighbours>30 and right_neighbours>30:
                    # If exactly 2 pixels of the window are white (counting the pixel under
                    # analysis itself), this is a termination: it is written to the output
                    # file and drawn on the image as a green (0, 255, 0) circle.
                    if path is not None:
                        #.write(str(i) + ',' + str(j) + ',0\n')
                        file.write(str(i) + ',' + str(j) + '\n')
                    cv2.circle(img, (j, i), 1, (0, 255, 0), 1)

                if (neighbours == 4 and is_ok_4(window)) or (neighbours > 4 and is_ok_5(window)):
                    # If more than 3 pixels of the window are white (counting the pixel under
                    # analysis itself), this is a bifurcation: it is written to the output
                    # file and drawn on the image as a (255, 0, 0) circle (blue in BGR).
                    if path is not None:
                        #file.write(str(i) + ',' + str(j) + ',1\n')
                        file.write(str(i) + ',' + str(j) + '\n')
                    cv2.circle(img, (j, i), 1, (255, 0, 0), 1)
                
    if path is not None:
        file.close()

    if plot:
        img2 = cv2.resize(img, (img.shape[1] * 2, img.shape[0] * 2))
        cv2.imshow("Minutiae '{}'".format(name), img2)
        cv2.waitKey(10000)
        cv2.destroyAllWindows()
Example #14
    conf_seq = {0: '.gif', 1: '.gif', 2: '.jpg', 3: '.ppm'}

    # For the Horn & Schunck algorithm
    n_iter = 50  # 10 - 100
    lam_pond = 1  # 0.1 - 60

    ext = conf_seq[seq_idx]
    window = [5, 9, 15]
    n = 5
    k_gauss = (n, n)
    plt_step = 3

    # Get the frames of the sequence and make results folder
    seq_selected = sequences[seq_idx]
    name_sequence = basename(seq_selected)
    results_path = altsep.join((args["results"], name_sequence))

    if not exists(results_path) and save:
        tl.makeDir(results_path)

    frames = tl.natSort(tl.getSamples(seq_selected, ext))
    # print(frames)

    for win in window:
        start = time.time()

        # Compute the optical flow using all frames of the sequence and measure times
        for fr_idx in range(len(frames) - 1):
            if ext == '.gif':
                img1 = Image.open(frames[fr_idx])
                img1_rgb = img1.convert('RGB')
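For reference, a hedged sketch of the Horn & Schunck iteration that n_iter and lam_pond (taken here as the regularisation weight alpha) parameterise; it is illustrative only, not the implementation the fragment goes on to call:

import numpy as np
from scipy.ndimage import convolve

def horn_schunck(im1, im2, alpha=1.0, n_iter=50):
    # im1, im2: consecutive grayscale frames as float arrays
    kx = np.array([[-1.0, 1.0], [-1.0, 1.0]]) * 0.25
    ky = np.array([[-1.0, -1.0], [1.0, 1.0]]) * 0.25
    kt = np.ones((2, 2)) * 0.25
    k_avg = np.array([[1/12, 1/6, 1/12],
                      [1/6,  0.0, 1/6],
                      [1/12, 1/6, 1/12]])

    # Spatio-temporal image derivatives
    Ix = convolve(im1, kx) + convolve(im2, kx)
    Iy = convolve(im1, ky) + convolve(im2, ky)
    It = convolve(im2, kt) - convolve(im1, kt)

    u = np.zeros_like(im1)
    v = np.zeros_like(im1)
    for _ in range(n_iter):
        # Jacobi-style update of the flow field under the smoothness constraint
        u_avg = convolve(u, k_avg)
        v_avg = convolve(v, k_avg)
        common = (Ix * u_avg + Iy * v_avg + It) / (alpha ** 2 + Ix ** 2 + Iy ** 2)
        u = u_avg - Ix * common
        v = v_avg - Iy * common
    return u, v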
Example #15
def getSamples(path, ext=''):
    samples = [
        altsep.join((path, f)) for f in listdir(path)
        if isfile(join(path, f)) and f.endswith(ext)
    ]
    return samples
Example #16
def prepareData(paths, descriptor, plot_path, ratio=1.0):
    labels = []
    data = []
    numData = int(countFiles(paths) * ratio)
    first_r = True
    first_a = True
    numLabels = {}

    if ratio != 1.0:
        print("{} examples will be processed for TESTING with ratio = {}\n".
              format(numData, ratio))
        for path in paths:
            images = getSamples(path)
            # Randomize data for testing and get the first numData files
            images = np.random.permutation(images)[:numData]

            for image in images:
                img = cv2.imread(image)
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                hist, bins, lbp_img = descriptor.describe(gray)

                # extract the label from the image path, then update the
                # label and data lists
                lbl = image.split("/")[-2]
                labels.append(lbl)
                data.append(hist)

                if first_r and lbl == "real" or first_a and lbl == "attack":
                    plt_name = altsep.join((plot_path, "".join(
                        (lbl,
                         "_example_{}_{}.png".format(descriptor.numPoints,
                                                     descriptor.radius)))))
                    fig = plt.figure()
                    plt.subplot(131), plt.imshow(gray, cmap='gray')
                    plt.axis('off'), plt.title("Original image (gray)")

                    plt.subplot(132), plt.imshow(lbp_img, cmap='gray')
                    plt.axis('off'), plt.title("LBP")

                    plt.subplot(133), plt.hist(lbp_img.ravel(),
                                               int(lbp_img.max() + 2),
                                               density=True,
                                               color='red')
                    # plt.title("LBP histogram")

                    fig.suptitle("{} image using LBP ({},{})".format(
                        lbl, descriptor.numPoints, descriptor.radius),
                                 fontsize=14)

                    plt.tight_layout()
                    plt.savefig(plt_name)
                    plt.show()

                    if first_r and lbl == "real":
                        first_r = False
                    elif first_a and lbl == "attack":
                        first_a = False

        numLabels = Counter(labels)
        # print(numLabels.keys())
        print("Data array contains {} items ({} real and {} attack)\n"
              "Labels array contains {} items\n".format(
                  len(data), numLabels['real'], numLabels['attack'],
                  len(labels)))
        # input("Press Enter to continue...")
    else:
        print("{} examples will be processed for TRAINING with ratio = {}\n".
              format(numData, ratio))
        for path in paths:
            images = getSamples(path)

            for image in images:
                img = cv2.imread(image)
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                hist, bins, lbp_img = descriptor.describe(gray)

                # extract the label from the image path, then update the
                # label and data lists
                lbl = image.split("/")[-2]
                labels.append(lbl)
                data.append(hist)

                if first_r and lbl == "real" or first_a and lbl == "attack":
                    plt_name = altsep.join((plot_path, "".join(
                        (lbl,
                         "_example_{}_{}.png".format(descriptor.numPoints,
                                                     descriptor.radius)))))
                    fig = plt.figure()
                    plt.subplot(131), plt.imshow(gray, cmap='gray')
                    plt.axis('off'), plt.title("Original image (gray)")

                    plt.subplot(132), plt.imshow(lbp_img, cmap='gray')
                    plt.axis('off'), plt.title("LBP")

                    plt.subplot(133), plt.hist(lbp_img.ravel(),
                                               int(lbp_img.max() + 2),
                                               density=True,
                                               color='red')
                    # plt.title("LBP histogram")

                    fig.suptitle("{} image using LBP ({},{})".format(
                        lbl, descriptor.numPoints, descriptor.radius),
                                 fontsize=14)

                    plt.tight_layout()
                    plt.savefig(plt_name)
                    plt.show()

                    if first_r and lbl == "real":
                        first_r = False
                    elif first_a and lbl == "attack":
                        first_a = False
        numLabels = Counter(labels)
        # print(numLabels.keys())
        print("Data array contains {} items ({} real and {} attack)\n"
              "Labels array contains {} items\n".format(
                  len(data), numLabels['real'], numLabels['attack'],
                  len(labels)))
        # input("Press Enter to continue...")
    return data, labels
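prepareData() assumes a descriptor object exposing numPoints and radius attributes and a describe(gray) method returning (hist, bins, lbp_img). A minimal sketch of such a class built on scikit-image's local_binary_pattern; the class itself is an assumption, not part of the original code:

import numpy as np
from skimage.feature import local_binary_pattern

class LocalBinaryPatterns:
    # Hypothetical descriptor matching the interface used by prepareData()
    def __init__(self, numPoints, radius):
        self.numPoints = numPoints
        self.radius = radius

    def describe(self, gray, eps=1e-7):
        # Uniform LBP image, then a normalized histogram of its codes
        lbp_img = local_binary_pattern(gray, self.numPoints, self.radius, method="uniform")
        hist, bins = np.histogram(lbp_img.ravel(),
                                  bins=np.arange(0, self.numPoints + 3),
                                  range=(0, self.numPoints + 2))
        hist = hist.astype("float")
        hist /= (hist.sum() + eps)
        return hist, bins, lbp_img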
Example #17
def getFiles(path):
    samples = [altsep.join((path, f)) for f in listdir(path)
               if isfile(join(path, f)) and f.endswith('.txt')]
    return samples