def __init__(self, img_dir, anno_path, batch_size, pl=10, ps=4, h=480, w=272):
    """Set up a batched reader over a pickled annotation list.

    Args:
        img_dir: directory holding the image files.
        anno_path: path to a pickled list of annotation records.
        batch_size: number of samples per batch.
        pl: patch length handed to util.get_patch.
        ps: second util.get_patch parameter — presumably the falloff/step;
            confirm against util.
        h, w: target image height and width.
    """
    self.img_dir = img_dir
    self.batch_size = batch_size
    self.index = 0
    self.height = h
    self.width = w
    self.patch_l = pl
    self.patch_s = ps

    # Load every annotation record up front and shuffle once.
    with open(anno_path, 'rb') as f:
        self.data = pickle.load(f)
    self.volumn = len(self.data)
    random.shuffle(self.data)

    # Precomputed stamps/lookup tables for building target heatmaps.
    self.patch = util.get_patch(pl, ps)
    self.limbs = util.get_limbs()

    # Per-channel RGB statistics used to normalize input images.
    self.mean = np.array([122.35131039, 115.17054545, 107.60200075])
    self.var = np.array([35.77071304, 35.39201422, 37.7260754])

    print('Reader initialized. Data volumn %d, batch size %d.'
          % (self.volumn, self.batch_size))
def __init__(self, img_dir, anno_path, batch_size, pl=10, ps=4, l1=368, l2=46):
    """Set up a batched DirReader over a pickled annotation list.

    Args:
        img_dir: directory holding the image files.
        anno_path: path to a pickled list of annotation records.
        batch_size: number of samples per batch.
        pl: patch length handed to util.get_patch.
        ps: second util.get_patch parameter — presumably the falloff/step;
            confirm against util.
        l1: long-side length (e.g. network input size).
        l2: short-side / output-map length.
    """
    self.img_dir = img_dir
    self.batch_size = batch_size
    self.index = 0
    self.length = l1
    self.short = l2
    self.patch_l = pl
    self.patch_s = ps

    # Load every annotation record up front and shuffle once.
    with open(anno_path, 'rb') as f:
        self.data = pickle.load(f)
    self.volumn = len(self.data)
    random.shuffle(self.data)

    # Precomputed stamps/lookup tables for building target maps.
    self.patch = util.get_patch(self.patch_l, self.patch_s)
    self.ones = np.ones((self.patch_l, self.patch_l, 2))
    self.limbs = util.get_limbs()

    # Per-channel RGB statistics used to normalize input images.
    self.mean = np.array([122.35131039, 115.17054545, 107.60200075])
    self.var = np.array([35.77071304, 35.39201422, 37.7260754])

    print('DirReader initialized. Data volumn %d, batch size %d.'
          % (self.volumn, self.batch_size))
def flat(k_slice, x, y, r):
    """Suppress the response peak of ``k_slice`` around (x, y), in place.

    Technically we subtract a precomputed patch over shrinking square
    windows centered at (x, y), then clamp negatives to zero, so the peak
    is removed from the caller's map.

    Args:
        k_slice: 2-D response map; modified in place.
        x, y: peak coordinates (column, row).
        r: suppression radius in pixels.
    """
    h, w = k_slice.shape
    # NOTE: the original also called util.get_square_patch(x, y, w, h, r)
    # here, but the result was dead (immediately overwritten in the loop).
    patch = util.get_patch(r * 2, 16)
    for i in reversed(range(1, r)):
        left, right, top, down = util.get_square_patch(x, y, w, h, i)
        # Index the patch relative to its own center (r, r) so that
        # windows clipped at the image border still line up.
        k_slice[top:down, left:right] -= patch[r - (y - top):r + (down - y),
                                               r - (x - left):r + (right - x)]
    # BUG FIX: the original did `k_slice = np.maximum(k_slice, 0)`, which
    # only rebinds the local name — the caller's array was never clamped.
    # Clamp in place so the suppression is actually visible to the caller.
    np.maximum(k_slice, 0, out=k_slice)
inflow, dmaps = network.v10() sess = tf.Session() saver = tf.train.Saver() ckpt = tf.train.get_checkpoint_state(model_path) if ckpt: saver.restore(sess, ckpt.model_checkpoint_path) print(ckpt.model_checkpoint_path) else: print('No available ckpt.') limbs = util.get_limbs() connections = util.get_connections() patch = util.get_patch(10, 4) result = [] mean = np.array([122.35131039, 115.17054545, 107.60200075]) var = np.array([35.77071304, 35.39201422, 37.7260754]) for name in names: tic = time.time() src = misc.imread(test_path+name) imgs, lefts, tops, rate = util.multi_resize(src, 368, 24) imgs -= mean imgs /= var rate /= 8 # due to pooling batch_dmaps = sess.run(dmaps, feed_dict={inflow:imgs})[-1] dmap = util.concat_dmaps(batch_dmaps, lefts, tops, 8)
def reconstruct(dmap, kmap, mask_r, pl=10, ps=4):
    """Group detected keypoints into per-person annotations.

    Starting from the keypoint layer with the strongest responses
    (``find_outstander``), performs a BFS over the limb-connection graph,
    matching keypoints between connected layers via the forward/backward
    direction maps in ``dmap``.

    Args:
        dmap: (h, w, C) direction map. Indexing below assumes channels
            [d*2, d*2+1] hold the START->END vector of limb d and the
            reverse END->START vector lives 26 channels later — TODO
            confirm against the network's output layout.
        kmap: (h, w, 14) keypoint response map (one layer per keypoint).
        mask_r: radius used when masking matched responses.
        pl, ps: parameters forwarded to util.get_patch.

    Returns:
        List of dicts, one per detected human, mapping
        keypoint-layer index -> position.
    """
    h, w, _ = dmap.shape
    grid_h, grid_w = util.get_grid(h, w)
    connections = util.get_connections()
    limbs = util.get_limbs()
    patch = util.get_patch(pl, ps)
    humans = []        # the annotation dict built so far for each human
    q = queue.Queue()  # keypoint layers still to be extended from
    used = [False] * 14  # whether each keypoint layer has been explored
    # Seed the search: every peak of the most confident layer starts its
    # own candidate human.
    starters, layer = find_outstander(kmap, mask_r, patch)
    for p in starters:
        dic = {}
        dic[layer] = p
        humans.append(dic)
    q.put(layer)
    used[layer] = True
    debug_cnt = 0
    while True:
        if q.empty():
            break
        layer1 = q.get()
        # Collect layer1's keypoint for every human that already has one,
        # remembering which human each belongs to.
        starters = []
        # NOTE: h here shadows the image height unpacked above (the
        # height is no longer needed past grid construction).
        for i,h in enumerate(humans):
            if layer1 in h.keys():
                starters.append((h[layer1], i))
        for piece in connections[layer1]:
            debug_cnt += 1
            layer2, sign, d_layer = piece
            if used[layer2]:
                continue
            used[layer2] = True
            q.put(layer2)
            k_slice = kmap[:,:,layer2]
            # Channel conventions (see util.get_limbs / get_connections):
            # - limbs store the vector from START to END;
            # - in connections, sign == -1 means layer2 is the START joint;
            # - "forward" means from layer1's view towards layer2,
            #   "backward" from layer2's view towards layer1;
            # - d_layer is the limb whose START->END vector is stored at
            #   channels d_layer*2 (x) and d_layer*2+1 (y), with the
            #   END->START vector 26 channels later.
            # So when sign == 1 (layer1 is START) the forward maps are the
            # START->END channels:
            dx_forward = dmap[:,:,d_layer*2]
            dy_forward = dmap[:,:,d_layer*2+1]
            dx_backward = dmap[:,:,d_layer*2+26]
            dy_backward = dmap[:,:,d_layer*2+26+1]
            # ...and when sign == -1 (layer1 is END) the two directions swap.
            if sign == -1:
                dx_forward, dx_backward = dx_backward, dx_forward
                dy_forward, dy_backward = dy_backward, dy_forward
            # Match layer1 keypoints to layer2 candidates, then fold the
            # assignment back into each human's dict.
            mask_log = group_match(starters, dx_forward, dy_forward, dx_backward, dy_backward, grid_h, grid_w, k_slice, mask_r, patch)
            trans_mask_log(mask_log, layer2, humans)
    return humans
for k in range(14): if k in h.keys(): tmp.append(int(round(h[k][0] / rate))) tmp.append(int(round(h[k][1] / rate))) tmp.append(1) else: tmp.append(0) tmp.append(0) tmp.append(0) ret['keypoint_annotations']['human%d'%(cnt+1)] = tmp return ret if __name__ == '__main__': dmap = np.load('dmap.npy') kmap = util.get_kmap_from_dmap(dmap, util.get_limbs()) humans = reconstruct(dmap, kmap, 5) annos = format(humans, 'sample', 0.05958549222797927461139896373057) # print(annos) for i in range(len(annos['keypoint_annotations'])): k_rev = util.get_key_hmap((772, 950), [annos['keypoint_annotations']['human%d'%(i+1)]], util.get_patch(40,32), r=20) src = misc.imread('ffa97d027dfc2f2fc62692a035535579c5be74e0.jpg') util.cover_key_map(src, k_rev) misc.imsave('vis_anno_%d.jpg'%i, src) # outstander, layer = find_outstander(kmap, 10) # print(layer) # print(outstander)
inflow, kmaps, amaps = network.a4() sess = tf.Session() saver = tf.train.Saver() ckpt = tf.train.get_checkpoint_state(model_path) if ckpt: saver.restore(sess, ckpt.model_checkpoint_path) print(ckpt.model_checkpoint_path) else: print('No available ckpt.') limbs = util.get_limbs() connections = util.get_connections() patch = util.get_patch(16, 8) result = [] mean = np.array([122.35131039, 115.17054545, 107.60200075]) var = np.array([35.77071304, 35.39201422, 37.7260754]) for name in names: tic = time.time() src = misc.imread(test_path + name) imgs, lefts, tops, rate = util.multi_resize(src, 368, 24) imgs -= mean imgs /= var rate /= 8 # due to dowsn sampling batch_k, batch_a = sess.run([kmaps, amaps], feed_dict={inflow: imgs}) batch_k = batch_k[-1]
print(model_path) except Exception as e: print(e) exit(0) else: ckpt = tf.train.get_checkpoint_state(model_path) if ckpt: saver.restore(sess, ckpt.model_checkpoint_path) print(ckpt.model_checkpoint_path) else: print('No available ckpt.') exit(0) limbs = util.get_limbs() connections = util.get_connections() patch = util.get_patch(10, 4) result = [] mean = np.array([122.35131039, 115.17054545, 107.60200075]) var = np.array([35.77071304, 35.39201422, 37.7260754]) cnt = 0 total = len(names) elapse = 0 for name in names: tic = time.time() src = misc.imread(test_path + name) h, w, _ = src.shape imgs, lefts, tops, rate = util.multi_resize(src, 368, 24) # imgs, lefts, tops, rate = util.multi_rect(src, 480, 272, 24)
tmp.append(1) else: tmp.append(0) tmp.append(0) tmp.append(0) ret['keypoint_annotations']['human%d' % (cnt + 1)] = tmp return ret if __name__ == '__main__': amap = np.load('amap.npy') kmap = np.load('kmap.npy') humans = reconstruct(amap, kmap, 10) annos = format(humans, 'ffa97d027dfc2f2fc62692a035535579c5be74e0', 0.05958549222797927461139896373057) # annos = format(humans, 'ffa97d027dfc2f2fc62692a035535579c5be74e0', 1) print(annos) for i in range(len(annos['keypoint_annotations'])): k_rev = util.get_key_hmap( (772, 950), [annos['keypoint_annotations']['human%d' % (i + 1)]], util.get_patch(40, 32), r=20) # k_slice = k_rev[:,:,8].copy() # k_rev[:,:,:] = 0 # k_rev[:,:,8] = k_slice src = misc.imread('ffa97d027dfc2f2fc62692a035535579c5be74e0.jpg') util.cover_key_map(src, k_rev) misc.imsave('vis_anno_%d.jpg' % i, src)