def __init__(self, dim, samples
             , init_w= tf.random_uniform_initializer(minval= -0.01, maxval= 0.01)
             , ftype= tf.float32, scope= 'dbn'):
    self.dim, self.ftype = dim, ftype
    with tf.variable_scope(scope):
        self.rbm = tuple(
            Rbm(scope= "rbm{}".format(i)
                , dim_v= dim_v
                , dim_h= dim_h
                , samples= samples
                , init_w= init_w
                , ftype= self.ftype)
            for i, (dim_v, dim_h) in enumerate(zip(dim, dim[1:]), 1))
        self.w = tuple(rbm.w for rbm in self.rbm[::-1])
        self.wg = tuple(tf.transpose(w) for w in self.w)
        self.wr = tuple(
            tf.get_variable(name= "wr{}".format(i), shape= (dim_d, dim_a), initializer= init_w)
            for i, (dim_d, dim_a) in enumerate(zip(self.dim, self.dim[1:]), 1))
        self.lr_ = tf.placeholder(name= 'lr_', dtype= self.ftype, shape= ())
        # wake
        self.v_ = self.rbm[0].v_
        with tf.name_scope('wake'):
            recogn = [self.v_]
            for w in self.wr:
                recogn.append(binary(tf.matmul(recogn[-1], w)))
            self.recogn = tuple(recogn)
            recogn = recogn[::-1]
            eps = self.lr_ / tf.cast(tf.shape(self.v_)[0], dtype= self.ftype)
            self.wake = tuple(
                w.assign_add(tf.matmul((sj - pj), sk, transpose_a= True) * eps).op
                for w, sk, sj, pj in zip(
                        self.w, recogn, recogn[1:]
                        , (tf.sigmoid(tf.matmul(s, w)) for w, s in zip(self.wg, recogn))))
        # sleep
        top = self.rbm[-1]
        self.k_, (self.v, self.a) = top.k_, top.gibbs
        with tf.name_scope('sleep'):
            recons = [self.a, self.v]
            for w in self.wg[1:]:
                recons.append(binary(tf.matmul(recons[-1], w)))
            self.recons = tuple(recons)
            recons = recons[::-1]
            eps = self.lr_ / tf.cast(tf.shape(self.a)[0], dtype= self.ftype)
            self.sleep = tuple(
                w.assign_add(tf.matmul(sj, (sk - qk), transpose_a= True) * eps).op
                for w, sj, sk, qk in zip(
                        self.wr, recons, recons[1:]
                        , (tf.sigmoid(tf.matmul(s, w)) for w, s in zip(self.wr, recons))))
        # the waking world is the amnesia of dream.
        self.v = self.recons[-1]
        with tf.name_scope('ances'):
            self.a = self.rbm[-1].h
            ances = [self.a]
            for w in self.wg:
                ances.append(binary(tf.matmul(ances[-1], w)))
            self.ances = ances[-1]
    self.step = 0
def gibbs(x):
    x = list(x)
    # update odd layers
    for i, (xl, xr, wl, wr) in enumerate(zip(x[::2], x[2::2], self.w, self.w[1:])):
        x[1+(2*i)] = binary(tf.matmul(xl, wl) + tf.matmul(xr, wr, transpose_b= True))
    # update first layer
    x[0] = binary(tf.matmul(x[1], self.w[0], transpose_b= True))
    # update even layers
    for i, (xl, xr, wl, wr) in enumerate(zip(x[1::2], x[3::2], self.w[1:], self.w[2:])):
        x[2+(2*i)] = binary(tf.matmul(xl, wl) + tf.matmul(xr, wr, transpose_b= True))
    # update last layer
    x[-1] = binary(tf.matmul(x[-2], self.w[-1]))
    return tuple(x)
def _visualize_prediction(self, input, output, target):
    """format and display output and target data on tensorboard"""
    out_b1 = binary(output)
    out_b1 = impose_labels_on_image(input[0, 0, :, :], target[0, :, :], out_b1[0, 1, :, :])
    self.writer.add_image('output', make_grid(out_b1, nrow=8, normalize=False))
def complement(num):
    # flip every bit of the 16-bit representation, then read the result back as an int
    num_in_binary = binary(num, 16)
    num_in_binary = num_in_binary.replace('0', 'x')
    num_in_binary = num_in_binary.replace('1', '0')
    num_in_binary = num_in_binary.replace('x', '1')
    return int(num_in_binary, 2)
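# A minimal usage sketch for complement() above, assuming binary(num, 16) returns a
# 16-character, zero-padded bit string (e.g. binary(5, 16) == '0000000000000101').
# Under that assumption complement() is the 16-bit ones' complement, i.e. 0xFFFF - num.
if __name__ == '__main__':
    assert complement(0) == 0xFFFF
    assert complement(0b0000000000000101) == 0b1111111111111010
    assert complement(0xFFFF) == 0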
def __init__(self, dim, samples
             , init_w= tf.random_uniform_initializer(minval= -0.01, maxval= 0.01)
             , ftype= tf.float32, scope= 'sbn'):
    self.dim, self.ftype, self.scope = dim, ftype, scope
    with tf.variable_scope(scope):
        self.wr = tuple(
            tf.get_variable(name= "wr{}".format(i), shape= (dim_d, dim_a), initializer= init_w)
            for i, (dim_d, dim_a) in enumerate(zip(self.dim, self.dim[1:]), 1))
        self.wg = tuple(
            tf.get_variable(name= "wg{}".format(i), shape= (dim_a, dim_d), initializer= init_w)
            for i, (dim_d, dim_a) in enumerate(zip(self.dim, self.dim[1:]), 1))[::-1]
        self.lr_ = tf.placeholder(name= 'lr_', dtype= self.ftype, shape= ())
        # wake
        self.v_ = tf.placeholder(name= 'v_', dtype= self.ftype, shape= (None, self.dim[0]))
        with tf.name_scope('wake'):
            recogn = [self.v_]
            for w in self.wr:
                recogn.append(binary(tf.matmul(recogn[-1], w)))
            self.recogn = tuple(recogn)
            recogn = recogn[::-1]
            eps = self.lr_ / tf.cast(tf.shape(self.v_)[0], dtype= self.ftype)
            self.wake = tuple(
                w.assign_add(tf.matmul(sk, (sj - pj), transpose_a= True) * eps).op
                for w, sk, sj, pj in zip(
                        self.wg, recogn, recogn[1:]
                        , (tf.sigmoid(tf.matmul(s, w)) for w, s in zip(self.wg, recogn))))
        # sleep
        with tf.name_scope('a'):
            self.a = tf.round(tf.random_uniform(shape= (samples, self.dim[-1])))
        with tf.name_scope('sleep'):
            recons = [self.a]
            for w in self.wg:
                recons.append(binary(tf.matmul(recons[-1], w)))
            self.recons = tuple(recons)
            recons = recons[::-1]
            eps = self.lr_ / tf.cast(tf.shape(self.a)[0], dtype= self.ftype)
            self.sleep = tuple(
                w.assign_add(tf.matmul(sj, (sk - qk), transpose_a= True) * eps).op
                for w, sj, sk, qk in zip(
                        self.wr, recons, recons[1:]
                        , (tf.sigmoid(tf.matmul(s, w)) for w, s in zip(self.wr, recons))))
        # the waking world is the amnesia of dream.
        self.v = self.recons[-1]
    self.step = 0
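# A minimal training-loop sketch for the sigmoid belief net whose __init__ is shown above,
# assuming it belongs to a class named Sbn with a compatible binary() sampler in scope;
# the class name, layer sizes, learning rate, and data source here are illustrative only.
def train_sbn_sketch(batches, lr= 0.01):
    sbn = Sbn(dim= (784, 256, 64), samples= 32)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for batch in batches:                                              # (n, 784) floats
            sess.run(sbn.wake, feed_dict= {sbn.v_: batch, sbn.lr_: lr})    # update wg
            sess.run(sbn.sleep, feed_dict= {sbn.lr_: lr})                  # update wr
        return sess.run(sbn.v)   # fantasy visibles sampled from the generative weights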
def pre(self, sess, wtr, batchit, k= 4, lr= 0.01, steps= 0, step_plot= 0, sleep= 0):
    h2v = lambda x: x
    for rbm in self.rbm:
        # plot function from this rbm down to the bottom
        rbm.plot = plot_fn(rbm.scope)
        plot = lambda sess, wtr, v, step= None, rbm= rbm: rbm.plot(
            sess, wtr, step= rbm.step if step is None else step
            , v= h2v(v))
        # train this rbm
        rbm.pcd(sess, wtr, batchit, k= k, lr= lr, steps= steps, step_plot= step_plot, plot= plot)
        # downward closure of this rbm, to be used by the next plot function
        rbm.h2v = binary(tf.matmul(rbm.h, rbm.w, transpose_b= True))
        h2v = lambda h, rbm= rbm, h2v= h2v: h2v(sess.run(rbm.h2v, feed_dict= {rbm.h: h}))
        # # generate hidden states from this rbm
        # batchit = rbm.gen(sess, k= k, ret_v= False, ret_h= True)
        # upward closure of this rbm, translating visibles to hiddens
        rbm.v2h = binary(rbm.hgv, transform= False, threshold= False)
        v2h = lambda v, rbm= rbm: sess.run(rbm.v2h, feed_dict= {rbm.v_: v})
        batchit = map(v2h, batchit)
    for _ in range(sleep):
        sess.run(self.sleep, feed_dict= {self.k_: k, self.lr_: lr})
def test(img, landmark=None, is_heatmap=False, binary_output=False, model=None):
    """Classify img"""
    net_input = img
    if is_heatmap:
        net_input = heat_map_compute(img, landmark,
                                     landmark_is_01=False,
                                     img_color=True,
                                     radius=occlu_param['radius'])
    if binary_output:
        return [binary(_, threshold=0.5) for _ in classify(model, net_input)]
    return classify(model, net_input)
def gibbs(v, _h):
    h = binary(tf.matmul(v, self.w))
    v = binary(tf.matmul(h, self.w, transpose_b= True))
    # todo real valued v
    # v = tf.sigmoid(tf.matmul(h, self.w, transpose_b= True))
    return v, h
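# A minimal sketch of unrolling the transition above into a k-step Gibbs chain at graph
# construction time; w, v0, and the binary() sampler are assumed to match the RBM that
# owns gibbs(), and the names used here are illustrative only.
def gibbs_chain(v0, w, k):
    v, h = v0, None
    for _ in range(k):
        h = binary(tf.matmul(v, w))                      # sample hiddens given visibles
        v = binary(tf.matmul(h, w, transpose_b= True))   # sample visibles given hiddens
    return v, h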
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
from utils.binary import *
import sys

img = cv2.imread(sys.argv[1], 0)
img_gray = img
img_sobel = cv2.Sobel(img_gray, cv2.CV_8U, 1, 0, ksize=3)
img_threshold = cv2.threshold(img_sobel, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)[1]

r1 = binary(img)
r = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

cv2.namedWindow('ddf', cv2.WINDOW_NORMAL)
cv2.imshow('ddf', r)
cv2.namedWindow('ddf2', cv2.WINDOW_NORMAL)
cv2.imshow('ddf2', r1)
cv2.namedWindow('ddf2er', cv2.WINDOW_NORMAL)
cv2.imshow('ddf2er', img_threshold)
cv2.waitKey(0)
def _white_area(self, img):
    bin = utils.binary(img, None, 240, 255)
    return bin.sum()
def _digit(self, img):
    bin = utils.binary(img, None, 240, 255)
    if bin.sum() < 10:
        return 0
    dist = [(bin - number).sum() for number in self.NUMBERS]
    return dist.index(min(dist))
def problem_36():
    double_palindrome = lambda n: is_palindome(n) and is_palindome(binary(n))
    print(sum(filter(double_palindrome, range(10**6))))
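# A minimal sketch of the two helpers problem_36() assumes (presumably defined elsewhere
# in the source): binary(n) as the bit string of n without the '0b' prefix, and
# is_palindome(x) as a palindrome test on the string form of its argument.
def binary(n):
    return format(n, 'b')

def is_palindome(x):
    s = str(x)
    return s == s[::-1]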