Example #1
0
File: views.py Project: krishna23444/sbi
def testing(request):
    """Debug view comparing SigNet fc2 features of bundled sample signatures.

    Renders test.html with similarity scores (100 - Euclidean distance)
    for a same-user pair, a fraud pair and a different-user pair.
    NOTE(review): relies on module-level `cv2`, `np`, `preprocess_signature`,
    `model` and `render` — confirm they are imported at module scope.
    """
    user1_sigs = []
    for idx in [1, 2]:
        user1_sigs.append(cv2.imread('temp_data/m%d-min.jpg' % idx, 0))
    user2_sigs = []
    for idx in [2, 3]:
        user2_sigs.append(cv2.imread('temp_data/s%d-min.jpg' % idx, 0))

    canvas_size = (952, 1360)
    processed_user1_sigs = np.array(
        [preprocess_signature(s, canvas_size) for s in user1_sigs])
    processed_user2_sigs = np.array(
        [preprocess_signature(s, canvas_size) for s in user2_sigs])

    user1_features = model.get_feature_vector_multiple(
        processed_user1_sigs, layer='fc2')
    user2_features = model.get_feature_vector_multiple(
        processed_user2_sigs, layer='fc2')

    # Euclidean distances for the three comparison scenarios.
    dist_same = np.linalg.norm(user1_features[0] - user1_features[1])
    dist_fraud = np.linalg.norm(user1_features[0] - user2_features[0])
    dist_other = np.linalg.norm(user1_features[0] - user2_features[1])

    diffSameUser = str(100.00 - dist_same)
    diffFraud = str(100.00 - dist_fraud)
    diffDifferentUser = str(100.00 - dist_other)

    print(dist_same)
    print(dist_fraud)
    print(dist_other)

    # All pairwise distances (user2 outer, user1 inner — same order as before).
    dists = [np.linalg.norm(v2 - v1)
             for v2 in user2_features
             for v1 in user1_features]
    print(dists)

    return render(
        request, 'test.html', {
            'sameUser': diffSameUser,
            'fraud': diffFraud,
            'differentUser': diffDifferentUser
        })
Example #2
0
File: augment.py Project: tupm2208/sig
def cut_block(img_path, canvas_size=(952, 1360)):
    """Blank out one random square block inside a signature image.

    Reads `img_path`, preprocesses it onto `canvas_size`, picks a random
    foreground pixel (found via Otsu binarization) and zeroes a square
    window of random width 35-40 px around it, then saves the result into
    a `cut/` sibling folder and returns the modified image.

    NOTE(review): `imread`, `preprocess_signature`, `cv2`, `np`, `random`,
    `os` and `Path` come from module scope. If the image has no foreground
    pixels, random.randrange raises ValueError (empty range), as before.
    """
    original = imread(img_path, flatten=1)
    processed = preprocess_signature(original, canvas_size)
    threshold, binarized_image = cv2.threshold(
        processed, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # r holds row indices, c holds column indices of foreground pixels.
    r, c = np.where(binarized_image > 0)

    h, w = processed.shape
    kernel_width = random.randint(35, 40)
    half = kernel_width // 2

    i = 0
    while i != 1:
        # NOTE(review): the step of `half` means only every `half`-th
        # foreground pixel can be chosen — presumably intended, confirm.
        num = random.randrange(0, len(r), half)
        # BUG FIX: rows are bounded by the height h and columns by the
        # width w; the original compared r against w and c against h
        # (swapped), silently clipping blocks at the bottom edge and
        # rejecting valid right-side positions.
        if (r[num] - half >= 0 and r[num] + half <= h
                and c[num] - half >= 0 and c[num] + half <= w):
            i += 1
            processed[r[num] - half:r[num] + half,
                      c[num] - half:c[num] + half] = 0

    padding_folder = os.path.join(Path(img_path).parent, 'cut')

    os.makedirs(padding_folder, exist_ok=True)
    cv2.imwrite(os.path.join(padding_folder, Path(img_path).name), processed)
    return processed
def get_features(img):
    """Return the fc2 feature vector(s) for one signature image.

    Loads the SigNet weights, preprocesses `img` onto the shared canvas
    and runs it through the network as a one-element batch.
    NOTE(review): `canvas_size`, `preprocess_signature`, `signet` and
    `CNNModel` come from module scope.
    """
    weights_path = '../models/signet.pkl'
    net = CNNModel(signet, weights_path)

    batch = np.array([preprocess_signature(img, canvas_size)])
    return net.get_feature_vector_multiple(batch, layer='fc2')
Example #4
0
def extract_signature(image_path):
    """Extract the SigNet feature vector of one signature image.

    Args:
        image_path: path of the signature image (read as grayscale, mode 'L').

    Returns:
        The feature vector produced by the module-level `model`.

    NOTE(review): relies on module globals `imread`, `preprocess_signature`,
    `tf` and `model` (TensorFlow 1.x API).
    """
    signature = imread(image_path, mode='L')

    canvas_size = (952, 1360)
    processed_signature = preprocess_signature(signature, canvas_size)

    # BUG FIX: the original created a tf.Session and never closed it,
    # leaking the session's resources on every call; the context manager
    # guarantees it is closed.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        features = model.get_feature_vector(sess, processed_signature)
    return features
Example #5
0
def extract_signatures_multiple(image_paths_list):
    """Extract SigNet feature vectors for several signature images at once.

    Args:
        image_paths_list: iterable of image file paths (read as grayscale).

    Returns:
        Batched feature vectors from `model.get_feature_vector_multiple`.

    NOTE(review): relies on module globals `imread`, `preprocess_signature`,
    `canvas_size`, `tf` and `model` (TensorFlow 1.x API).
    """
    signatures = [
        imread(image_path, mode='L') for image_path in image_paths_list
    ]
    processed_signatures = [
        preprocess_signature(signature, canvas_size)
        for signature in signatures
    ]

    # BUG FIX: close the session deterministically instead of leaking it
    # (the original never called sess.close()).
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        features = model.get_feature_vector_multiple(sess,
                                                     processed_signatures)
    return features
Example #6
0
def get_dataset(data_path):
    """Load every signature image in `data_path` as a training set.

    Each image is read as grayscale, preprocessed onto the module-level
    `canvas_size`, and given a trailing channel axis. Labels are parsed
    from characters 6:8 of the file name and shifted to be zero-based.

    Returns:
        (train_X, train_y): numpy arrays of images and string labels.

    NOTE(review): relies on module globals `cv2`, `preprocess_signature`,
    `canvas_size` and `numpy`; assumes file names encode the user id at
    positions 6:8 — confirm against the dataset's naming scheme.
    """
    train_X = []
    train_y = []
    for name in os.listdir(data_path):
        original = cv2.imread(os.path.join(data_path, name), cv2.CV_8UC1)
        processed = preprocess_signature(original, canvas_size)
        newImg = numpy.expand_dims(processed, 2)
        # BUG FIX: removed the blocking cv2.imshow/cv2.waitKey(0) debug
        # pop-up that stalled the loop on every image in any
        # non-interactive run.
        train_X.append(newImg)
        train_y.append(str(int(name[6:8]) - 1))
        print(name[6:8])
    train_X = numpy.asarray(train_X)
    train_y = numpy.asarray(train_y)
    print(train_X.shape)
    return train_X, train_y
Example #7
0
def read_new_dataset(path):
    """Build (train_X, train_y) from a directory-per-class image layout.

    Each subfolder of `path` is one class; every image inside it is read
    as grayscale, preprocessed onto the module-level `canvas_size`, given
    a trailing channel axis, and labelled with the folder's index (as a
    string).
    NOTE(review): relies on module globals `cv2`, `preprocess_signature`,
    `canvas_size` and `numpy`.
    """
    samples = []
    labels = []
    class_dirs = [os.path.join(path, entry) for entry in os.listdir(path)]
    for label, class_dir in enumerate(class_dirs):
        for fname in os.listdir(class_dir):
            image_path = os.path.join(class_dir, fname)
            raw = cv2.imread(image_path, cv2.CV_8UC1)
            prepped = preprocess_signature(raw, canvas_size)
            samples.append(numpy.expand_dims(prepped, 2))
            labels.append(str(label))
    train_X = numpy.asarray(samples)
    train_y = numpy.asarray(labels)
    print(train_X.shape)
    return train_X, train_y
Example #8
0
def get_dataset(path: str, prep_path: str):
    """Preprocess every signature image under `path` and save the results.

    Walks the directory tree, preprocesses each image onto the module-level
    `canvas_size`, and writes it as a PNG under `prep_path` into a folder
    named '<first letter of file name> (<first number in path>)'.

    NOTE(review): relies on module globals `imread`, `preprocess_signature`,
    `canvas_size` and `toimage` (deprecated scipy.misc helper). The y/x
    naming scheme assumes file names start with a class letter and contain
    two numbers — confirm against the dataset layout.
    """
    for dirpath, dirs, _ in os.walk(path):
        for directory in dirs:
            for _, _, files in os.walk(os.path.join(dirpath, directory)):
                for filename in files:
                    fname = os.path.join(dirpath, directory, filename)
                    processed = preprocess_signature(imread(fname, flatten=1),
                                                     canvas_size)
                    x = re.findall(r'\d+', fname)
                    y = filename[0]

                    out_dir = os.path.join(prep_path,
                                           y + ' (' + x[0] + ')')
                    # exist_ok avoids the racy exists()-then-makedirs check.
                    os.makedirs(out_dir, exist_ok=True)
                    # BUG FIX: the original passed `cmax=...` (the Ellipsis
                    # object), which toimage cannot use for scaling;
                    # grayscale images span 0-255.
                    toimage(processed, cmin=0.0, cmax=255.0).save(
                        os.path.join(out_dir, x[1] + '.png'))
Example #9
0
def add_feature_vector_from_a_image(image, canvas, sets_processed):
    """Append the feature vector for `image` to `sets_processed`.

    Uses the module-level `images_dictionary` as a cache keyed by image
    path; on a miss, the image is loaded, shrunk to fit `img_max_size`,
    preprocessed onto `canvas`, run through the model, and cached.
    NOTE(review): depends on module globals `images_dictionary`,
    `img_max_size`, `imread`, `imresize`, `preprocess_signature` and
    `model`.
    """
    if image in images_dictionary:
        sets_processed.append(images_dictionary[image])
        return

    original = imread(image, flatten=1)
    # Shrink each axis that exceeds its configured maximum; rescaling the
    # whole image by a percentage keeps the aspect ratio. The shape is
    # re-read after each resize, exactly like the original two-step check.
    for axis in (0, 1):
        size = original.shape[axis]
        limit = img_max_size[axis]
        if size > limit:
            diff = size - limit
            percentage = (100*diff)/size
            original = imresize(original, 100-percentage)

    processed = preprocess_signature(original, canvas)
    images_dictionary[image] = model.get_feature_vector(processed)[0]
    sets_processed.append(images_dictionary[image])
Example #10
0
File: views.py Project: krishna23444/sbi
def verify(request):
    """Django view: verify an uploaded signature against the bank's copy.

    On POST, downloads the stored signature for `acc_no` from the bank
    API, saves both images as grayscale PNGs, extracts SigNet fc2
    features for each, and renders result.html with a similarity score
    (100 - Euclidean distance) and a verdict. On GET, renders the form.

    NOTE(review): the API key is hard-coded — move it to settings/env.
    Relies on module globals `requests`, `Image`, `cv2`, `np`,
    `preprocess_signature`, `model`, `render` and `HttpResponseForbidden`.
    """
    score = 0
    inference = ""
    if request.method != 'POST':
        return render(request, 'verify.html')

    print(request.POST)
    try:
        acc_no = request.POST['acc_no']
        sig = request.FILES.get('signature', False)
    except ValueError:
        return HttpResponseForbidden('Malformed xml form')
    print(acc_no)
    url = "http://apiplatformcloudse-gseapicssbisecond-uqlpluu8.srv.ravcloud.com:8001/DigitalSignInfo/4916519817/" + acc_no + "/SIGNATURE"
    headers = {
        'api-key': "c0716b7d-c9f2-4587-bebc-b7daf71aafbc",
    }
    response = requests.request("GET", url, headers=headers)
    print(sig)
    fu = 'backend/static/' + acc_no + 'U.png'
    imgU = acc_no + 'U.png'
    fd = 'backend/static/' + acc_no + 'D.png'
    imgD = acc_no + 'D.png'
    img = Image.open(sig).convert('LA')
    img.save(fu)
    # BUG FIX: the downloaded image bytes must be written in binary mode
    # ('wb', not 'w'); the with-block also guarantees the handle is closed.
    with open(fd, 'wb') as f:
        f.write(response.content)

    img = Image.open(fd).convert('LA')
    img.save(fd)
    print(cv2.imread(fd, 0))
    user1_sigs = [cv2.imread(fu, 0)]
    user2_sigs = [cv2.imread(fd, 0)]
    print(user1_sigs)
    print(user2_sigs)
    canvas_size = (952, 1360)
    processed_user1_sigs = np.array(
        [preprocess_signature(sig, canvas_size) for sig in user1_sigs])
    processed_user2_sigs = np.array(
        [preprocess_signature(sig, canvas_size) for sig in user2_sigs])
    user1_features = model.get_feature_vector_multiple(
        processed_user1_sigs, layer='fc2')
    user2_features = model.get_feature_vector_multiple(
        processed_user2_sigs, layer='fc2')
    print('Euclidean distance between signatures from the same user')
    score = 100 - np.linalg.norm(user1_features[0] - user2_features[0])
    # BUG FIX: the original used two independent `if`s, so a score > 85
    # ("same person") fell through to the final else and was reported as
    # "different users". A single elif chain picks exactly one verdict.
    if score > 85:
        inference = "Signature Belong to Same Person"
    elif 75 < score < 85:
        inference = "Possible Fake"
    else:
        inference = "Signatures Belong to Different Users"
    return render(request, 'result.html', {
        'score': score,
        'result': inference,
        'fu': imgU,
        'fd': imgD
    })
Example #11
0
    Note that loading and compiling the model takes time. It is preferable
    to load and process multiple signatures in the same python session.

"""
from scipy.misc import imread
from preprocess.normalize import preprocess_signature
import signet
from cnn_model import CNNModel
import numpy as np

# Maximum canvas (height, width) in pixels that every signature is
# padded/centered onto before feature extraction.
canvas_size = (952, 1360)  # Maximum signature size

# Load and pre-process the signature
# NOTE(review): scipy.misc.imread is removed in modern SciPy; this example
# assumes an old scipy/pillow pairing (see the assert message below).
original = imread('data/some_signature.png', flatten=1)

processed = preprocess_signature(original, canvas_size)

# Load the model
model_weight_path = 'models/signet.pkl'
model = CNNModel(signet, model_weight_path)

# Use the CNN to extract features
feature_vector = model.get_feature_vector(processed)

# Compare the obtained feature vector to the expected value
# (to confirm same results obtained by the authors)

processed_correct = np.load('data/processed.npy')

# Sanity check: the preprocessing must reproduce the authors' reference
# output exactly, otherwise extracted features are not comparable.
assert np.allclose(processed_correct, processed), "The preprocessed image is different than expected. "+ \
                                                 "Check the version of packages 'scipy' and 'pillow'"
Example #12
0
def get_feature_from_link(path):
    """Return the model feature vector for the signature image at `path`.

    NOTE(review): relies on module-level `imread`, `preprocess_signature`,
    `canvas_size`, `model` and an open TF session `sess`.
    """
    raw = imread(path, flatten=1)
    prepped = preprocess_signature(raw, canvas_size)
    return model.get_feature_vector(sess, prepped)
Example #13
0
def get_feature(sig_img):
    """Return the CNN feature vector for a single signature image array.

    NOTE(review): relies on module-level `preprocess_signature`,
    `canvas_size` and `model`.
    """
    prepped = preprocess_signature(sig_img, canvas_size)
    # The model returns a batch of vectors; unwrap the single row.
    return model.get_feature_vector(prepped)[0]
Example #14
0
def compare_signatures(path1, path2, level):
    """Compare two signature images with the SigNet model.

    Args:
        path1, path2: file paths of the two signature images.
        level: strictness level (0, 1 or 2) selecting the module-level
            decision threshold main_thr_1 / main_thr_2 / main_thr_3.

    Returns:
        Tuple (dist, decision, same_per, forg_per, diff_per): the raw
        distance, 1 if judged the same user else 0, and three percentage
        scores (same / forgery / different) rounded to two decimals.

    NOTE(review): depends on module globals main_thr_*, same_lower/
    same_middle/same_upper, forg_*, diff_*, tf, tf_signet, TF_CNNModel,
    imread, preprocess_signature and np.
    """
    canvas_size = (952, 1360)

    tf.reset_default_graph()

    # Load the model weights into the fresh graph.
    model_weight_path = 'models/signet.pkl'
    model = TF_CNNModel(tf_signet, model_weight_path)

    original1 = imread(path1, flatten=1)
    processed1 = preprocess_signature(original1, canvas_size)

    original2 = imread(path2, flatten=1)
    processed2 = preprocess_signature(original2, canvas_size)

    # BUG FIX: the session is now a context manager so it is always closed
    # (the original leaked it on every call).
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feature_vector1 = model.get_feature_vector(sess, processed1)
        feature_vector2 = model.get_feature_vector(sess, processed2)
    feature_vector1 = feature_vector1.T
    feature_vector2 = feature_vector2.T

    # Element-wise |f1^2 - f2^2|^0.5 (not a true Euclidean distance).
    dist = (abs(feature_vector1**2 - feature_vector2**2))**(0.5)

    # Replace NaNs so they do not poison the sum below.
    for idx, val in enumerate(dist):
        if np.isnan(val):
            dist[idx] = 0

    dist = np.sum(dist)

    main_thr = 0.0

    # BUG FIX: the original compared with `level is 0/1/2`; `is` tests
    # object identity, which is implementation-defined for int literals —
    # use value equality instead.
    if level == 0:
        main_thr = main_thr_1
    elif level == 1:
        main_thr = main_thr_2
    elif level == 2:
        main_thr = main_thr_3

    decision = 1 if dist < main_thr else 0

    same_per = 0.0
    forg_per = 0.0
    diff_per = 0.0

    # Piecewise-linear interpolation of the "same user" likelihood.
    if dist < same_lower:
        same_per = 100 - ((dist - 0) / (same_lower - 0)) * 5.0
    elif dist < same_middle:
        same_per = 95 - ((dist - same_lower) / (same_middle - same_lower)) * 45
    elif dist < same_upper:
        same_per = 50 - ((dist - same_middle) /
                         (same_upper - same_middle)) * 45
    elif dist > 1350:
        same_per = 0
    elif dist > same_upper:
        same_per = 5 - ((dist - same_upper) / (1350 - same_upper)) * 5

    # Piecewise-linear interpolation of the "forgery" likelihood.
    if dist < forg_lower and dist >= 700:
        forg_per = ((dist - 700) / (forg_lower - 700)) * 15
    elif dist < 700:
        forg_per = 0.0
    elif dist < forg_middle:
        forg_per = 15 + ((dist - forg_lower) / (forg_middle - forg_lower)) * 60
    elif dist < forg_upper:
        forg_per = 15 + ((dist - forg_middle) /
                         (forg_upper - forg_middle)) * 60
    elif dist >= 2000:
        forg_per = 0.0
    elif dist > forg_upper:
        forg_per = ((dist - forg_upper) / (2000 - forg_upper)) * 15

    # Piecewise-linear interpolation of the "different user" likelihood.
    if dist <= 1000:
        diff_per = 0.0
    elif dist < diff_lower:
        diff_per = ((dist - 1000) / (diff_lower - 1000)) * 5.0
    elif dist < diff_middle:
        diff_per = 5 + ((dist - diff_lower) / (diff_middle - diff_lower)) * 45
    elif dist < diff_upper:
        diff_per = 50 + ((dist - diff_middle) /
                         (diff_upper - diff_middle)) * 45
    elif dist > diff_upper:
        # BUG FIX: the original interpolated with same_upper here, breaking
        # the diff_* piecewise pattern; use diff_upper like the branch above.
        diff_per = 95 + ((dist - diff_upper) / (3000 - diff_upper)) * 5

    # Hard cutoff: anything this far apart is certainly a different user.
    if dist >= 3000:
        same_per = 0.0
        forg_per = 0.0
        diff_per = 100.0

    same_per = float("{0:.2f}".format(same_per))
    forg_per = float("{0:.2f}".format(forg_per))
    diff_per = float("{0:.2f}".format(diff_per))

    return dist, decision, same_per, forg_per, diff_per
# Visualize each stage of the signature pre-processing pipeline.
# NOTE(review): `ax`, `original`, `normalized`, `resized`, `cropped`,
# `plt`, `imread`, `np` and `preprocess_signature` are defined earlier in
# the notebook/script — this fragment does not stand alone.
ax[0].imshow(original, cmap='Greys_r')
ax[1].imshow(normalized)
ax[2].imshow(resized)
ax[3].imshow(cropped)

ax[0].set_title('Original')
ax[1].set_title('Background removed/centered')
ax[2].set_title('Resized')
ax[3].set_title('Cropped center of the image')

# Load two sample signatures per user.
user1_sigs  = [imread('data/a%d.png' % i) for i in  [1,2]]
user2_sigs  = [imread('data/b%d.png' % i) for i in  [1,2]]

# Maximum canvas (height, width) the preprocessing pads onto.
canvas_size = (952, 1360)

processed_user1_sigs = np.array([preprocess_signature(sig, canvas_size) for sig in user1_sigs])
processed_user2_sigs = np.array([preprocess_signature(sig, canvas_size) for sig in user2_sigs])

# Shows pre-processed samples of the two users

f, ax = plt.subplots(2,2, figsize=(10,6))
ax[0,0].imshow(processed_user1_sigs[0])
ax[0,1].imshow(processed_user1_sigs[1])

ax[1,0].imshow(processed_user2_sigs[0])
ax[1,1].imshow(processed_user2_sigs[1])

# Path to the learned weights
model_weight_path = 'models/signet.pkl'

# Instantiate the model