Code Example #1
    def optimize_Q(self):
        batch_size = self._config['minibatch_size']
        gamma = self._config['discount_factor']
        num_actions = self._config['num_actions']

        states, actions, next_states, rewards, dones, infos = self.memory.get_batch(
            batch_size)
        states = normalized(states)
        next_states = normalized(next_states)

        mask = np.zeros((batch_size, num_actions))
        mask[np.arange(batch_size), actions] = 1

        Qs_next = self.get_Q_values(next_states)
        # The Q values of terminal states are 0 by definition, so override them
        Qs_next[dones] = 0

        ys = rewards + gamma * (np.amax(
            np.multiply(Qs_next, mask),
            axis=1,
        ))

        _, loss, loss_summary_str = self._tf_session.run(
            [self._train_op, self._loss_op, self._tf_summary['loss']],
            feed_dict={
                self._train_op_input['states']: states,
                self._train_op_input['actions']: actions,
                self._train_op_input['ys']: ys,
            })

        return (loss, loss_summary_str)
Code Example #2
File: geodesic.py Project: fangde/splocs
    def __call__(self, idx):
        """ 
        computes geodesic distances to all vertices in the mesh
        idx can be either an integer (single vertex index) or a list of vertex indices
        or an array of bools of length n (with n the number of vertices in the mesh) 
        """
        u0 = np.zeros(len(self._verts))
        u0[idx] = 1.0
        # heat method, step 1
        u = self._factored_AtLc(u0).ravel()
        # heat method step 2
        grad_u = 1 / (2 * self._triangle_area)[:,np.newaxis] * (
              self._unit_normal_cross_e01 * u[self._tris[:,2]][:,np.newaxis]
            + self._unit_normal_cross_e12 * u[self._tris[:,0]][:,np.newaxis]
            + self._unit_normal_cross_e20 * u[self._tris[:,1]][:,np.newaxis]
        )
        X = - grad_u / veclen(grad_u)[:,np.newaxis]
        # heat method step 3
        div_Xs = np.zeros(len(self._verts))
        for i1, i2, i3 in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]: # for edge i2 --> i3 facing vertex i1
            vi1, vi2, vi3 = self._tris[:,i1], self._tris[:,i2], self._tris[:,i3]
            e1 = self._verts[vi2] - self._verts[vi1]
            e2 = self._verts[vi3] - self._verts[vi1]
            e_opp = self._verts[vi3] - self._verts[vi2]
            cot1 = 1 / np.tan(np.arccos( 
                (normalized(-e2) * normalized(-e_opp)).sum(axis=1)))
            cot2 = 1 / np.tan(np.arccos(
                (normalized(-e1) * normalized( e_opp)).sum(axis=1)))
            div_Xs += np.bincount(
                vi1.astype(int),
                0.5 * (cot1 * (e1 * X).sum(axis=1) + cot2 * (e2 * X).sum(axis=1)),
                minlength=len(self._verts))
        phi = self._factored_L(div_Xs).ravel()
        phi -= phi.min()
        return phi
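
The constructor that pairs with this __call__ appears in the geodesic.py __init__ snippets later on this page (Code Examples #3, #9, #10, #15). A minimal usage sketch, assuming the class is named GeodesicDistanceComputation and that verts/tris come from an already-loaded triangle mesh (neither the class name nor the mesh loading is shown in the snippet):

    import numpy as np
    # verts: (n, 3) float array of vertex positions
    # tris:  (m, 3) int array of triangle vertex indices
    compute_distance = GeodesicDistanceComputation(verts, tris)
    phi = compute_distance(0)             # geodesic distance from vertex 0 to every vertex
    phi_seed = compute_distance([0, 12])  # distance to the nearest of several seed vertices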
Code Example #3
File: geodesic.py Project: caomw/cmm
 def __init__(self, verts, tris, m=1.0):
     self._verts = verts
     self._tris = tris
     # precompute some stuff needed later on
     e01 = verts[tris[:, 1]] - verts[tris[:, 0]]
     e12 = verts[tris[:, 2]] - verts[tris[:, 1]]
     e20 = verts[tris[:, 0]] - verts[tris[:, 2]]
     self._triangle_area = .5 * veclen(np.cross(e01, e12))
     unit_normal = normalized(np.cross(normalized(e01), normalized(e12)))
     self._un = unit_normal
     self._unit_normal_cross_e01 = np.cross(unit_normal, -e01)
     self._unit_normal_cross_e12 = np.cross(unit_normal, -e12)
     self._unit_normal_cross_e20 = np.cross(unit_normal, -e20)
     # parameters for heat method
     h = np.mean(map(veclen, [e01, e12, e20]))
     t = m * h**2
     # pre-factorize poisson systems
     Lc, vertex_area = compute_mesh_laplacian(verts,
                                              tris,
                                              area_type='lumped_mass')
     A = sparse.spdiags(vertex_area, 0, len(verts), len(verts))
     #self._factored_AtLc = splu((A - t * Lc).tocsc()).solve
     self._factored_AtLc = cholesky((A - t * Lc).tocsc(), mode='simplicial')
     #self._factored_L = splu(Lc.tocsc()).solve
     self._factored_L = cholesky(Lc.tocsc(), mode='simplicial')
Code Example #4
def train(args, model, sess, saver):
    
    if args.fine_tuning :
        saver.restore(sess, args.pre_trained_model)
        print("saved model is loaded for fine-tuning!")
        print("model path is %s"%(args.pre_trained_model))
        
    num_imgs = len(os.listdir(args.train_Sharp_path))
    
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./logs',sess.graph)
#    if args.test_with_train:
#        f = open("valid_logs.txt", 'w')
    
    epoch = 1
    step = num_imgs // args.batch_size
    
    
    blur_imgs = util.image_loader(args.train_Blur_path, args.load_X, args.load_Y)
    sharp_imgs = util.image_loader(args.train_Sharp_path, args.load_X, args.load_Y)
    
    while epoch <= args.max_epoch:
        random_index = np.random.permutation(len(blur_imgs))
        for k in range(step):
            s_time = time.time()
            blur_batch, sharp_batch = util.batch_gen(blur_imgs, sharp_imgs, args.patch_size, args.batch_size, random_index, k, args.augmentation)
            Knoise = np.random.randn(args.batch_size,64)
            for t in range(args.critic_updates):
                _, D_loss = sess.run(
                    [model.D_train, model.D_loss],
                    feed_dict={model.blur: blur_batch, model.sharp: sharp_batch,
                               model.Knoise: Knoise, model.epoch: epoch})
            
            if (k+1) % args.log_freq == 0:
                _, G_loss, gene_K, gene_img, reg_loss, D_loss, gp_loss = sess.run(
                    [model.G_train, model.G_loss, model.gene_K, model.gene_img,
                     model.reg_loss, model.D_loss, model.gp_loss],
                    feed_dict={model.blur: blur_batch, model.sharp: sharp_batch,
                               model.Knoise: Knoise, model.epoch: epoch})
                gene_K = util.normalized(gene_K)
                gene_img = util.normalized(gene_img)
                util.imshow(gene_K[0, :, :, 0], cmap='gray')
                toshow = np.hstack((sharp_batch[0] / 255.0, blur_batch[0] / 255.0, gene_img[0]))
                util.imshow(toshow)
                print("training with %d epoch %d/%d batch, D_loss: %0.2f, gp_loss: %0.2f, G_loss: %0.2f, reg_loss: %0.2f "
                      % (epoch, k + 1, step, D_loss, gp_loss, G_loss, reg_loss))
            else:
                _, G_loss = sess.run(
                    [model.G_train, model.G_loss],
                    feed_dict={model.blur: blur_batch, model.sharp: sharp_batch,
                               model.Knoise: Knoise, model.epoch: epoch})
            
            e_time = time.time()
        
#        if epoch % args.log_freq == 0:
        summary = sess.run(merged,
                           feed_dict={model.blur: blur_batch, model.sharp: sharp_batch,
                                      model.Knoise: Knoise, model.epoch: epoch})
        train_writer.add_summary(summary, epoch)
#            if args.test_with_train:
#                test(args, model, sess, saver, f, epoch, loading = False)
        print("%d training epoch completed" %(epoch))
        print("D_loss : %0.4f, \t G_loss : %0.4f"%(D_loss, G_loss))
        print("Elpased time : %0.4f"%(e_time - s_time))
        saver.save(sess, './model/DeblurrGAN', global_step = epoch, write_meta_graph = False)
        
        epoch += 1

    saver.save(sess, './model/DeblurrGAN_last', write_meta_graph = False)
Code Example #5
File: planefit.py Project: jwgu/camo
def sample_plane(plane, n, width, noise = 0):
  plane_repok(plane)
  e1, e2, _ = np.eye(3)
  v1 = e1 if (plane[0] == 0 and plane[1] == 0) else ut.normalized(np.array([-plane[1], plane[0], 0], 'd'))
  v2 = ut.normalized(np.cross(plane[:3], v1))
  #print 'dot', np.dot(v1, plane[:3]),   np.dot(v2, plane[:3])
  #print 'sample', np.sum(np.dot(np.array([v1, v2]).T, np.random.uniform(-width/2, width/2, (2, n))).T * plane[:3], 1)
  center = -plane[3]*plane[:3]
  #print 'dot2', np.dot(center, plane[:3]) + plane[3], plane
  pts = np.dot(np.array([v1, v2]).T, np.random.uniform(-width/2, width/2, (2, n))).T + center
  #print 'ins', len(plane_inliers(plane, pts, 0.05))
  pts += np.random.randn(*pts.shape)*noise
  return pts
Code Example #6
File: counting.py Project: wrongu/sampling-dynamics
def analytic_marginal_states(net, conditioned_on={}):
    N = count_states(net)
    S = np.zeros(N)

    for i in range(N):
        id_to_state(net, i)
        S[i] = net.probability(conditioned_on)
    return normalized(S, order=1)
Code Example #7
    def get_recent_state(self, current_observation, n=3):
        # append current observation with last n observations
        state = self.memory.get_recent_state(current_observation, n=n)
        state = normalized(state)
        # (n,h,w,c)
        state = np.stack([state], axis=0)

        return state
Code Example #8
File: counting.py Project: wrongu/sampling-dynamics
def analytic_marginal_states(net, conditioned_on={}):
	N = count_states(net)
	S = np.zeros(N)

	for i in range(N):
		id_to_state(net, i)
		S[i]  = net.probability(conditioned_on)
	return normalized(S, order=1)
Code Example #9
File: geodesic.py Project: ziyeshanwai/splocs
 def __init__(self, verts, tris, m=10.0):
     self._verts = verts
     self._tris = tris
     # precompute some stuff needed later on
     e01 = verts[tris[:, 1]] - verts[tris[:, 0]]
     e12 = verts[tris[:, 2]] - verts[tris[:, 1]]
     e20 = verts[tris[:, 0]] - verts[tris[:, 2]]
     self._triangle_area = .5 * veclen(np.cross(e01, e12))
     unit_normal = normalized(np.cross(normalized(e01), normalized(e12)))
     self._unit_normal_cross_e01 = np.cross(unit_normal, e01)
     self._unit_normal_cross_e12 = np.cross(unit_normal, e12)
     self._unit_normal_cross_e20 = np.cross(unit_normal, e20)
     # parameters for heat method
     h = np.mean(map(veclen, [e01, e12, e20]))
     t = m * h**2
     # pre-factorize poisson systems
     Lc, A = compute_mesh_laplacian(verts, tris)
     self._factored_AtLc = splu((A - t * Lc).tocsc()).solve
     self._factored_L = splu(Lc.tocsc()).solve
Code Example #10
File: geodesic.py Project: fangde/splocs
 def __init__(self, verts, tris, m=10.0):
     self._verts = verts
     self._tris = tris
     # precompute some stuff needed later on
     e01 = verts[tris[:,1]] - verts[tris[:,0]]
     e12 = verts[tris[:,2]] - verts[tris[:,1]]
     e20 = verts[tris[:,0]] - verts[tris[:,2]]
     self._triangle_area = .5 * veclen(np.cross(e01, e12))
     unit_normal = normalized(np.cross(normalized(e01), normalized(e12)))
     self._unit_normal_cross_e01 = np.cross(unit_normal, e01)
     self._unit_normal_cross_e12 = np.cross(unit_normal, e12)
     self._unit_normal_cross_e20 = np.cross(unit_normal, e20)
     # parameters for heat method
     h = np.mean(map(veclen, [e01, e12, e20]))
     t = m * h ** 2
     # pre-factorize poisson systems
     Lc, A = compute_mesh_laplacian(verts, tris)
     self._factored_AtLc = splu((A - t * Lc).tocsc()).solve
     self._factored_L = splu(Lc.tocsc()).solve
Code Example #11
        def backproj(p):
            ray = ut.normalized(
                np.dot(mvg.pixel_ray_matrix(scan.R(root), scan.K(root)),
                       ut.homog(p)))
            c = scan.center(root)

            dist = (-plane[3] - np.dot(c, plane[:3])) / np.dot(ray, plane[:3])
            assert dist >= 0
            pt = c + ray * dist
            print planefit.dist_to_plane(plane, pt[np.newaxis, :])
            return pt
Code Example #12
def parse(infile, lang):
    assert lang in ('en', 'es')
    ss, deps = _parser[lang].parse(infile)

    def key((sid, t)):
        return sid

    by_sid = normalized(groupby(deps, key),
                        key=key,
                        filler=lambda sid: (sid, ()),
                        start=1)
    return izip(ss, by_sid)
Code Example #13
File: box.py Project: jwgu/camo
    def __init__(self, face_idx, mesh_pts, texsize=TEXSIZE):
        if texsize is None:
            texsize = 256

        self.face_idx = np.asarray(face_idx)
        self.nfaces = len(face_idx)
        self.mesh_pts = np.asarray(mesh_pts)

        self.texsize = texsize
        self.face_planes, self.face_center, self.face_edges, self.face_pts, self.face_juv = [], [], [], [], []

        u_grid, v_grid = [
            np.array(x, 'd') for x in np.mgrid[:texsize, :texsize]
        ]
        uf = u_grid.flatten()
        vf = v_grid.flatten()
        for j in xrange(len(face_idx)):
            p1 = mesh_pts[face_idx[j][0]]
            p2 = mesh_pts[face_idx[j][1]]
            p3 = mesh_pts[face_idx[j][3]]

            e1 = -p1 + p3
            e2 = -p1 + p2

            n = ut.normalized(np.cross(e1, e2))
            d = -np.dot(p1, n)  #np.dot(p1, n) + d = 0
            self.face_planes.append(np.concatenate([n, [d]]))

            pts = p1 + (uf / (texsize - 1.))[:, na] * e1[na, :] + (
                vf / (texsize - 1.))[:, na] * e2[na, :]

            juv = np.zeros((len(uf), 3), 'l')
            juv[:, 0] = j
            juv[:, 1] = uf
            juv[:, 2] = vf
            self.face_juv.append(juv)
            self.face_pts.append(pts)
            self.face_edges.append((p1, np.array([e1, e2])))
            self.face_center.append(p1 + 0.5 * e1 + 0.5 * e2)

        self.face_center = np.array(self.face_center)
        self.tex2juv = np.vstack(self.face_juv)
        self.juv2tex = np.zeros((len(self.face_idx), texsize, texsize), 'l')
        self.juv2tex[self.tex2juv[:, 0], self.tex2juv[:, 1],
                     self.tex2juv[:, 2]] = range(len(self.tex2juv))
        self.texel_pts = np.vstack(self.face_pts)
        self.face_planes = np.array(self.face_planes)
        self.on_border = ut.lor((self.tex2juv[:, 1] == 0),
                                (self.tex2juv[:, 2] == 0),
                                (self.tex2juv[:, 1] == self.texsize - 1),
                                (self.tex2juv[:, 2] == self.texsize - 1))

        self.ntexels = len(self.tex2juv)
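
The face planes above are stored as [nx, ny, nz, d], following the convention in the inline comment: np.dot(p, n) + d == 0 for every point p on the face. A small self-contained check of that convention (the numbers are illustrative, not from the source):

    import numpy as np
    n = np.array([0.0, 0.0, 1.0])      # unit normal of the plane z == 2
    p1 = np.array([5.0, -3.0, 2.0])    # a point on that plane
    d = -np.dot(p1, n)                 # d == -2, as in the constructor above
    plane = np.concatenate([n, [d]])   # same [n, d] layout as face_planes
    # any other point on the plane satisfies the same equation
    assert abs(np.dot(np.array([1.0, 7.0, 2.0]), n) + d) < 1e-12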
Code Example #14
 def __call__(self, idx):
     """
     computes geodesic distances to all vertices in the mesh
     idx can be either an integer (single vertex index) or a list of vertex indices
     or an array of bools of length n (with n the number of vertices in the mesh)
     """
     u0 = np.zeros(len(self._verts))
     u0[idx] = 1.0
     # heat method, step 1
     u = self._factored_AtLc(u0).ravel()
     # heat method step 2
     grad_u = 1 / (2 * self._triangle_area)[:, np.newaxis] * (
         self._unit_normal_cross_e01 * u[self._tris[:, 2]][:, np.newaxis] +
         self._unit_normal_cross_e12 * u[self._tris[:, 0]][:, np.newaxis] +
         self._unit_normal_cross_e20 * u[self._tris[:, 1]][:, np.newaxis])
     X = -grad_u / veclen(grad_u)[:, np.newaxis]
     # heat method step 3
     div_Xs = np.zeros(len(self._verts))
     for i1, i2, i3 in [(0, 1, 2), (1, 2, 0),
                        (2, 0, 1)]:  # for edge i2 --> i3 facing vertex i1
         # 0 1 2
         # 1 2 0
         # 2 0 1
         vi1, vi2, vi3 = self._tris[:, i1], self._tris[:, i2], self._tris[:, i3]
         e1 = self._verts[vi2] - self._verts[vi1]
         e2 = self._verts[vi3] - self._verts[vi1]
         e_opp = self._verts[vi3] - self._verts[vi2]
         cot1 = 1 / np.tan(
             np.arccos((normalized(-e2) * normalized(-e_opp)).sum(axis=1)))
         cot2 = 1 / np.tan(
             np.arccos((normalized(-e1) * normalized(+e_opp)).sum(axis=1)))
         div_Xs += np.bincount(vi1.astype(int),
                               0.5 * (cot1 * (e1 * X).sum(axis=1) + cot2 *
                                      (e2 * X).sum(axis=1)),
                               minlength=len(self._verts))
     phi = self._factored_L(div_Xs).ravel()
     phi -= phi.min()
     return phi
Code Example #15
File: geodesic.py Project: KeeganRen/cmm
 def __init__(self, verts, tris, m=1.0):
     self._verts = verts
     self._tris = tris
     # precompute some stuff needed later on
     e01 = verts[tris[:,1]] - verts[tris[:,0]]
     e12 = verts[tris[:,2]] - verts[tris[:,1]]
     e20 = verts[tris[:,0]] - verts[tris[:,2]]
     self._triangle_area = .5 * veclen(np.cross(e01, e12))
     unit_normal = normalized(np.cross(normalized(e01), normalized(e12)))
     self._un = unit_normal
     self._unit_normal_cross_e01 = np.cross(unit_normal, -e01)
     self._unit_normal_cross_e12 = np.cross(unit_normal, -e12)
     self._unit_normal_cross_e20 = np.cross(unit_normal, -e20)
     # parameters for heat method
     h = np.mean(map(veclen, [e01, e12, e20]))
     t = m * h ** 2
     # pre-factorize poisson systems
     Lc, vertex_area = compute_mesh_laplacian(verts, tris, area_type='lumped_mass')
     A = sparse.spdiags(vertex_area, 0, len(verts), len(verts))
     #self._factored_AtLc = splu((A - t * Lc).tocsc()).solve
     self._factored_AtLc = cholesky((A - t * Lc).tocsc(), mode='simplicial')
     #self._factored_L = splu(Lc.tocsc()).solve
     self._factored_L = cholesky(Lc.tocsc(), mode='simplicial')
Code Example #16
def m_deep_with_shortcut(m,
                         p=None,
                         marg=None,
                         fro=None,
                         to=None,
                         cpt='marginal'):
    """constructs an m_deep_bistable model with a single additional connection from node 'fro' to node 'to'

    (fro must be greater than to to prevent cycles)
    """
    net = m_deep_bistable(m, p, marg)
    if type(fro) is int:
        fro = 'X%d' % fro
    if type(to) is int:
        to = 'X%d' % to
    fro = net.get_node_by_name(fro)
    to = net.get_node_by_name(to)

    if cpt == 'random':
        cpt = np.random.random((2, 2))
        sums = cpt.sum(axis=0)
        cpt[:, 0] /= sums[0]
        cpt[:, 1] /= sums[1]
    elif cpt == 'marginal':
        if p is None: p = net.get_node_by_name('X1').get_table()[0, 0]
        dists = {}

        def store_dists(n, d):
            dists[n] = d

        net.bfs_traverse([fro], store_dists)
        marg = compute_marginal_for_given_p(dists[to], p)
        cpt = np.array([[marg, 1 - marg], [1 - marg, marg]])

    prev_table = to.get_table()
    prev_parents = net.parents(to)
    # making room for a new parent
    shape = tuple([fro.size()] + list(prev_table.shape))
    table = np.zeros(shape)
    # populate new table such that P(to|parents,fro) = P(to|fro)P(to|parents)
    for i in range(fro.size()):
        for j in range(to.size()):
            table[i, ..., j] = cpt[i, j] * prev_table[..., j]

    # renormalize such that P(to|a particular configuration of parents) = 1
    table = normalized(table, axis=-1, order=1)

    net.cpt([fro] + prev_parents + [to], table)
    return net
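
The closing normalized(table, axis=-1, order=1) call makes each conditional slice of the CPT sum to one. A minimal self-contained sketch of that step in plain NumPy (illustrative numbers, and assuming normalized performs an L1 normalization along the given axis, which the snippet itself does not show):

    import numpy as np
    table = np.array([[0.2, 0.6],
                      [0.9, 0.3]])
    # divide each row by its L1 norm so every conditional distribution sums to 1
    table = table / np.abs(table).sum(axis=-1, keepdims=True)
    # table is now [[0.25, 0.75], [0.75, 0.25]]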
Code Example #17
File: box.py Project: abhishah/camo
  def __init__(self, face_idx, mesh_pts, texsize = TEXSIZE):
    if texsize is None:
      texsize = 256
      
    self.face_idx = np.asarray(face_idx)
    self.nfaces = len(face_idx)
    self.mesh_pts = np.asarray(mesh_pts)
    
    self.texsize = texsize
    self.face_planes, self.face_center, self.face_edges, self.face_pts, self.face_juv = [], [], [], [], []
    
    u_grid, v_grid = [np.array(x, 'd') for x in np.mgrid[:texsize, :texsize]]
    uf = u_grid.flatten()
    vf = v_grid.flatten()
    for j in xrange(len(face_idx)):
      p1 = mesh_pts[face_idx[j][0]]
      p2 = mesh_pts[face_idx[j][1]]
      p3 = mesh_pts[face_idx[j][3]]

      e1 = -p1 + p3
      e2 = -p1 + p2

      n = ut.normalized(np.cross(e1, e2))
      d = -np.dot(p1, n) #np.dot(p1, n) + d = 0
      self.face_planes.append(np.concatenate([n, [d]]))
      
      pts = p1 + (uf/(texsize - 1.))[:, na]*e1[na, :] + (vf/(texsize - 1.))[:, na]*e2[na, :]

      juv = np.zeros((len(uf), 3), 'l')
      juv[:, 0] = j
      juv[:, 1] = uf
      juv[:, 2] = vf
      self.face_juv.append(juv)
      self.face_pts.append(pts)
      self.face_edges.append((p1, np.array([e1, e2])))
      self.face_center.append(p1 + 0.5*e1 + 0.5*e2)

    self.face_center = np.array(self.face_center) 
    self.tex2juv = np.vstack(self.face_juv)
    self.juv2tex = np.zeros((len(self.face_idx), texsize, texsize), 'l')
    self.juv2tex[self.tex2juv[:, 0], self.tex2juv[:, 1], self.tex2juv[:, 2]] = range(len(self.tex2juv))
    self.texel_pts = np.vstack(self.face_pts)
    self.face_planes = np.array(self.face_planes)
    self.on_border = ut.lor((self.tex2juv[:, 1] == 0),
                            (self.tex2juv[:, 2] == 0),
                            (self.tex2juv[:, 1] == self.texsize-1),
                            (self.tex2juv[:, 2] == self.texsize-1))
    
    self.ntexels = len(self.tex2juv)
Code Example #18
File: camo.py Project: jwgu/camo
def greedy_project(scan, mesh, stable_angle=np.radians(70)):
    filled = np.zeros(mesh.ntexels, 'bool')
    texel_colors = np.zeros((mesh.ntexels, 3))

    visible_by_view = np.zeros((scan.length, mesh.ntexels), 'bool')
    stable_by_view = np.zeros((scan.length, mesh.ntexels), 'bool')

    for frame in xrange(scan.length):
        visible = mesh.texel_visible(scan, frame)
        visible_by_view[frame] = visible

        for j in xrange(mesh.nfaces):
            d = np.dot(
                ut.normalized(-mesh.face_center[j] + scan.center(frame)),
                mesh.face_planes[j][:3])
            assert -1 <= d <= 1
            angle = np.arccos(abs(d))
            print frame, j, np.rad2deg(angle), (angle <= stable_angle)
            on_face = (mesh.tex2juv[:, 0] == j)
            stable_by_view[frame,
                           on_face] = visible_by_view[frame, on_face] * (
                               angle <= stable_angle)
        print 'stable texels', np.sum(stable_by_view[frame])

    projectable = stable_by_view.copy()
    unstable = np.all(~stable_by_view, axis=0)
    print 'Unstable faces:', np.unique(mesh.tex2juv[unstable, 0])
    projectable[:, unstable] = visible_by_view[:, unstable]

    unused = range(scan.length)
    while np.sum(projectable) > 0:
        # it's OK to project onto a face if (1) it's unfilled and (2) it's either stable, or unstable in every view
        frame = max(ut.shuffled(unused),
                    key=lambda f: np.sum(projectable[f, :]))

        projs = scan.project(frame, mesh.texel_pts)
        ok = projectable[frame]
        print 'chose', frame, np.sum(
            projectable[frame, :]), np.sum(ok), 'projecting to', np.unique(
                mesh.tex2juv[projectable[frame, :], 0])
        texel_colors[ok] = ig.lookup_bilinear(scan.im(frame), projs[ok, 0],
                                              projs[ok, 1])
        projectable[:, ok] = 0

        unused.remove(frame)
        print 'projected', frame, np.sum(filled)

    return [texel_colors]
Code Example #19
File: counting.py Project: wrongu/sampling-dynamics
def sample_marginal_states(net, evidence, samples, when=None):
	"""Computes S[i] = vector of marginal probabilities that net is in state id i.

	If given, when(net) is evaluated to decide whether each sample is included
	"""
	n_states = count_states(net)

	# estimate starting distribution over states by sampling
	S = np.zeros(n_states)

	def do_count_state(i, net):
		if when is None or when(net):
			S[state_to_id(net, net.state_vector())] += 1

	gibbs_sample(net, evidence, do_count_state, samples, 1)

	return normalized(S, order=1)
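
A hedged usage sketch for the sampler above; the model construction and the `when` filter are hypothetical (m_deep_bistable and state_vector do appear elsewhere on this page), so treat this only as an illustration of the call signature:

    # estimate state marginals from 5000 Gibbs samples, counting a sample
    # only when it passes the (here trivial) `when` predicate
    net = m_deep_bistable(4, p=0.9)
    S = sample_marginal_states(net, {}, 5000, when=lambda net: True)
    # S has one entry per state id and sums to 1 (order=1 normalization)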
Code Example #20
File: counting.py Project: wrongu/sampling-dynamics
def sample_marginal_states(net, evidence, samples, when=None):
    """Computes S[i] = vector of marginal probabilities that net is in state id i.

    If given, when(net) is evaluated to decide whether each sample is included
    """
    n_states = count_states(net)

    # estimate starting distribution over states by sampling
    S = np.zeros(n_states)

    def do_count_state(i, net):
        if when is None or when(net):
            S[state_to_id(net, net.state_vector())] += 1

    gibbs_sample(net, evidence, do_count_state, samples, 1)

    return normalized(S, order=1)
Code Example #21
File: models.py Project: wrongu/sampling-dynamics
def m_deep_with_shortcut(m, p=None, marg=None, fro=None, to=None, cpt='marginal'):
	"""constructs an m_deep_bistable model with a single additional connection from node 'fro' to node 'to'

	(fro must be greater than to to prevent cycles)
	"""
	net = m_deep_bistable(m, p, marg)
	if type(fro) is int:
		fro = 'X%d' % fro
	if type(to) is int:
		to = 'X%d' % to
	fro = net.get_node_by_name(fro)
	to = net.get_node_by_name(to)

	if cpt == 'random':
		cpt = np.random.random((2,2))
		sums = cpt.sum(axis=0)
		cpt[:,0] /= sums[0]
		cpt[:,1] /= sums[1]
	elif cpt == 'marginal':
		if p is None: p = net.get_node_by_name('X1').get_table()[0,0]
		dists = {}
		def store_dists(n,d): dists[n] = d
		net.bfs_traverse([fro], store_dists)
		marg = compute_marginal_for_given_p(dists[to], p)
		cpt = np.array([[marg, 1-marg], [1-marg, marg]])

	prev_table = to.get_table()
	prev_parents = net.parents(to)
	# making room for a new parent
	shape = tuple([fro.size()] + list(prev_table.shape))
	table = np.zeros(shape)
	# populate new table such that P(to|parents,fro) = P(to|fro)P(to|parents)
	for i in range(fro.size()):
		for j in range(to.size()):
			table[i,...,j] = cpt[i,j] * prev_table[...,j]

	# renormalize such that P(to|a particular configuration of parents) = 1
	table = normalized(table, axis=-1, order=1)
	
	net.cpt([fro] + prev_parents + [to], table)
	return net
Code Example #22
File: camo.py Project: abhishah/camo
def greedy_project(scan, mesh, stable_angle = np.radians(70)):
  filled = np.zeros(mesh.ntexels, 'bool')
  texel_colors = np.zeros((mesh.ntexels, 3))

  visible_by_view = np.zeros((scan.length, mesh.ntexels), 'bool')
  stable_by_view = np.zeros((scan.length, mesh.ntexels), 'bool')
  
  for frame in xrange(scan.length):
    visible = mesh.texel_visible(scan, frame)
    visible_by_view[frame] = visible
    
    for j in xrange(mesh.nfaces):
      d = np.dot(ut.normalized(-mesh.face_center[j] + scan.center(frame)), mesh.face_planes[j][:3])
      assert -1 <= d <= 1
      angle = np.arccos(abs(d))
      print frame, j, np.rad2deg(angle), (angle <= stable_angle)
      on_face = (mesh.tex2juv[:, 0] == j)
      stable_by_view[frame, on_face] = visible_by_view[frame, on_face]*(angle <= stable_angle)
    print 'stable texels', np.sum(stable_by_view[frame])

  projectable = stable_by_view.copy()
  unstable = np.all(~stable_by_view, axis = 0)
  print 'Unstable faces:', np.unique(mesh.tex2juv[unstable, 0])
  projectable[:, unstable] = visible_by_view[:, unstable]

  unused = range(scan.length)
  while np.sum(projectable) > 0:
    # it's OK to project onto a face if (1) it's unfilled and (2) it's either stable, or unstable in every view
    frame = max(ut.shuffled(unused), key = lambda f : np.sum(projectable[f, :]))

    projs = scan.project(frame, mesh.texel_pts)
    ok = projectable[frame]
    print 'chose', frame, np.sum(projectable[frame, :]), np.sum(ok), 'projecting to', np.unique(mesh.tex2juv[projectable[frame, :], 0])
    texel_colors[ok] = ig.lookup_bilinear(scan.im(frame), projs[ok, 0], projs[ok, 1])
    projectable[:, ok] = 0

    unused.remove(frame)
    print 'projected', frame, np.sum(filled)
      
  return [texel_colors]
Code Example #23
File: meme.py Project: rolando-contribute/aile
    def _fit_1(self, sequence, V, f0, m0, eps, max_iter, relax):
        """Run the Expectation-Maximization algorithm.

        Parameters:
            - sequence: sequence to fit
            - V       : erasure parameters
            - f0      : initial estimate of position frequency matrix
            - m0      : initial estimate of a priori model probability
            - eps     : stop when relative change in expected log-likelihood
                        is less than this
            - max_iter: maximum number of EM iterations
            - relax   : parameter to take probabilities away from zero
        """
        W = f0.shape[0] - 1                    # size of window
        n = len(V)                             # sequence length
        it = 0                                 # current iteration
        f1 = f0                                # current estimation of f
        m1 = m0                                # current estimation of m
        E1 = None                              # current expected log-likelihood

        while True:
            pM = np.ones((n,))
            pB = np.ones((n,))
            for i, X in self._roll(sequence, W):
                pM[i], pB[i] = MEME._score(X, f1)
            pM *= m1
            pB *= 1.0 - m1

            Z = pM/(pM + pB)
            E2 = np.sum(Z*np.log(pM) + (1 - Z)*np.log(pB))
            # Correct for overlapping, enforce Z[i:(i+W)] <= 1 for i=1...n
            # MEME hack to account for dependence between sub-sequences.
            for i in xrange(len(Z)):
                s = Z[i:i+W].sum()
                if s > 1.0:
                    Z[i:i+W] /= s

            # Update m and f
            qB = 0.0
            qM = 0.0
            c = np.zeros(f0.shape, dtype=float)
            for i, X in self._roll(sequence, W):
                ZB = 1.0 - Z[i]
                ZM = Z[i]*V[i]
                for j, k in enumerate(X):
                    c[0    , k] += ZB
                    c[j + 1, k] += ZM
                qB += ZB
                qM += ZM
            m2 = qM/(qB + qM)
            f2 = (1.0 - relax)*util.normalized(c) + relax
            # Check convergence
            if E1:
                err = (E1 - E2)/E1
                if err < eps:
                    break
            E1 = E2
            it += 1
            if it > max_iter:
                if self.logger:
                    self.logger.warning(
                        'MEME.fit: max iterations reached without convergence (err={0})'.format(err))
                break

            f1 = f2
            m1 = m2

        return f2, m2, E2, Z
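
The loop labelled as a MEME hack rescales overlapping windows so that no window of width W claims a total responsibility greater than one. A tiny self-contained illustration of that correction (values are made up):

    import numpy as np
    Z = np.array([0.7, 0.6, 0.2])
    W = 2
    for i in range(len(Z)):
        s = Z[i:i+W].sum()
        if s > 1.0:
            Z[i:i+W] /= s
    # Z is now approximately [0.538, 0.462, 0.2];
    # every window of width 2 sums to at most 1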
Code Example #24
File: counting.py Project: wrongu/sampling-dynamics
def eig_steadystate(A):
	# steady state distribution of A_ff transition matrix is largest eigenvector (eigenvalue=1)
	w,v = np.linalg.eig(A)
	inds = np.argsort(w)
	S_steady_state = np.abs(v[:,inds[-1]])
	return normalized(S_steady_state, order=1)
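
A self-contained check of the comment's claim, assuming the transition matrix is column-stochastic (columns sum to one) so that its stationary distribution is the L1-normalized eigenvector for eigenvalue 1:

    import numpy as np
    # A[i, j] = P(next state = i | current state = j)
    A = np.array([[0.9, 0.3],
                  [0.1, 0.7]])
    w, v = np.linalg.eig(A)
    pi = np.abs(v[:, np.argsort(w)[-1]])   # eigenvector of the largest eigenvalue (1.0)
    pi /= pi.sum()                         # same effect as normalized(..., order=1)
    assert np.allclose(A.dot(pi), pi)      # stationary: A @ pi == pi
    # pi is approximately [0.75, 0.25]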
Code Example #25
def label_box(seq,
              root=0,
              side_len1=None,
              side_len2=None,
              side_len3=None,
              y_flip=True,
              mode='normal'):
    print seq

    if type(seq) == type(''):
        scan = dset.Scan(seq, None)
    else:
        scan = seq
        seq = scan.path

    if mode == 'normal':
        _, _, tracks = dset.read_bundler(scan.bundle_file, scan.full_shape)
        pts = np.array([t[0] for t in tracks])

        proj = scan.project(root, pts)

        w = 1

        pylab.clf()
        im_with_pts = ig.draw_pts(scan.im(root), proj, width=w)
        pylab.imshow(im_with_pts)
        rect = ut.bbox2d(pylab.ginput(2, timeout=-1))
        #rect = (1782.005828476269, 1431.7364696086595, 529.75936719400488, 354.40549542048279)
        print rect

        ok = ut.land(rect[0] <= proj[:, 0], proj[:, 0] <= rect[0] + rect[2],
                     rect[1] <= proj[:, 1], proj[:, 1] <= rect[1] + rect[3])
        pts_in_box = pts[ok]
        thresh = pylab.dist(scan.center(root), scan.center(root + 1)) / 50.
        plane, _ = planefit.fit_plane_ransac(pts_in_box, thresh)
        if plane[1] < 0 and y_flip:
            plane *= -1

        ins = planefit.plane_inliers(plane, pts, thresh)

        pylab.clf()
        colors = np.zeros_like(pts)
        colors[:, 0] = 255
        colors[ins] = (0, 255, 0)

        im_ins = ig.draw_pts(scan.im(root),
                             map(ut.itup, proj),
                             map(ut.itup, colors),
                             width=w)
        pylab.clf()
        pylab.imshow(im_ins)

        if not input('ok? '):
            return

        print 'click 2 points (used to recalibrate the plane)'
        rect = ut.bbox2d(pylab.ginput(2, timeout=-1))
        ok = ut.land(rect[0] <= proj[:, 0], proj[:, 0] <= rect[0] + rect[2],
                     rect[1] <= proj[:, 1], proj[:, 1] <= rect[1] + rect[3])
        pts_in_box = pts[ok]
        print 'plane before', plane
        plane[3] = -np.median(np.dot(pts_in_box, plane[:3]))
        print 'plane after', plane[3]

        if 1:
            print 'hack'
            im_ins = scan.im(root)

        pylab.clf()
        pylab.imshow(im_ins)
        print 'click 3 base points'
        px = pylab.ginput(3, timeout=-1)

        #px = [(2270.2989175686921, 1482.9937552039967), (2297.2764363030801, 1555.8330557868442), (2405.1865112406322, 1550.4375520399667)]

        def backproj(p):
            ray = ut.normalized(
                np.dot(mvg.pixel_ray_matrix(scan.R(root), scan.K(root)),
                       ut.homog(p)))
            c = scan.center(root)

            dist = (-plane[3] - np.dot(c, plane[:3])) / np.dot(ray, plane[:3])
            assert dist >= 0
            pt = c + ray * dist
            print planefit.dist_to_plane(plane, pt[np.newaxis, :])
            return pt

        sc = 1.
        while 1:
            cb = np.array(map(backproj, px))
            v1 = cb[0] - cb[1]
            v2 = cb[2] - cb[1]

            if side_len1 is None:
                side_len1 = 0.5 * (np.linalg.norm(v1) + np.linalg.norm(v2))
            if side_len2 is None:
                side_len2 = side_len1
            if side_len3 is None:
                side_len3 = side_len1

            a1 = sc * side_len1
            a2 = sc * side_len2
            a3 = sc * side_len3

            print 'side1', a1, 'side2', a2, 'side3', a3, 'thresh =', thresh, \
                  'v1 =', np.linalg.norm(v1), 'v2 = ', np.linalg.norm(v2)

            R = np.zeros((3, 3))
            cr = ut.normalized(np.cross(v1, plane[:3]))
            cr *= np.sign(np.dot(cr, v2))
            R[0] = a1 * ut.normalized(v1)
            R[1] = a2 * ut.normalized(cr)
            R[2] = a3 * ut.normalized(plane[:3])
            print ut.normax(R, 1)

            mesh_pts = []
            for zi in xrange(2):
                for yi in xrange(2):
                    for xi in xrange(2):
                        mesh_pts.append(cb[1] + R[0] * xi + R[1] * yi +
                                        R[2] * zi)
            face_idx = -1 + np.array(
                [[1, 2, 4, 3],
                 np.array([1, 2, 4, 3]) + 4, [1, 2, 2 + 4, 1 + 4],
                 [2, 4, 4 + 4, 2 + 4], [4, 3, 3 + 4, 4 + 4],
                 [3, 1, 1 + 4, 3 + 4]])
            mesh = box.Mesh(face_idx, mesh_pts, texsize=128)

            # show a preview
            scan_ = dset.Scan(seq)
            ig.show(
                [[1 + i,
                  box.draw_faces(mesh, scan_, i, hires=0),
                  scan_.im(i)] for i in [root, root + 1]])
            if input('ok? '):
                box.save_mesh(ut.pjoin(seq, 'cube.mat'), mesh)
                break
            else:
                sc = float(input('scale? '))
                time.sleep(2)

    else:
        mesh = box.load_from_mat(ut.pjoin(seq, 'cube.mat'))

    scan = dset.Scan(seq, use_cams_file=False)

    print 'Already marked as bad:'
    good_cams_file = os.path.join(scan.path, 'good_cams.txt')
    if os.path.exists(good_cams_file):
        inds = map(int, open(good_cams_file, 'r').read().split())
        file_ids = map(scan.file_index, scan.im_files)
        bad = sorted(set(file_ids) - set(inds))
        print '\n'.join(map(str, bad))

    if 1:
        ig.show([[
            scan.file_index(scan.im_files[frame]),
            box.draw_faces(mesh, scan, frame, hires=0)
        ] for frame in xrange(scan.length)])

    inp = input('Bad cameras (as string): ')
    if inp != 'skip':
        bad_cams = map(int, inp.split())
        all_idx = map(scan.file_index, scan.im_files)
        good_cams = sorted(set(all_idx) - set(bad_cams))
        ut.write_lines(ut.pjoin(seq, 'good_cams.txt'), map(str, good_cams))
Code Example #26
File: counting.py Project: wrongu/sampling-dynamics
def eig_steadystate(A):
    # steady state distribution of A_ff transition matrix is largest eigenvector (eigenvalue=1)
    w, v = np.linalg.eig(A)
    inds = np.argsort(w)
    S_steady_state = np.abs(v[:, inds[-1]])
    return normalized(S_steady_state, order=1)
Code Example #27
File: planefit.py Project: jwgu/camo
def plane_from_3(pts):
  assert pts.shape == (3, 3)
  w = ut.normalized(np.cross(pts[1] - pts[0], pts[2] - pts[0]))
  return np.array(list(w) + [-np.dot(w, pts[0])])