Example No. 1
 def forward(self, x):
     x = torch.relu(self.conv1(x))
     x = torch.relu(self.conv2(x))
     before_pool = x
     if self.pooling:
         x = self.pool(x)
     return x, before_pool
Example No. 2
 def forward(self, from_down, from_up):
     """ Forward pass
     Arguments:
         from_down: tensor from the encoder pathway
         from_up: upconv'd tensor from the decoder pathway
     """
     from_up = self.upconv(from_up)
     if self.merge_mode == "concat":
         x = torch.cat((from_up, from_down), 1)
     else:
         x = from_up + from_down
     x = torch.relu(self.conv1(x))
     x = torch.relu(self.conv2(x))
     return x
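Note on Example No. 2: when merge_mode == "concat", conv1 must accept the channels of from_up and from_down stacked together. Below is a minimal sketch of a constructor consistent with that forward pass; the class name, kernel sizes, and channel arithmetic are assumptions for illustration, not taken from the source:

import torch
import torch.nn as nn

class UpConvBlock(nn.Module):
    # Illustrative only: the original class definition is not shown above.
    def __init__(self, in_channels, out_channels, merge_mode="concat"):
        super().__init__()
        self.merge_mode = merge_mode
        # transposed convolution: doubles spatial size, maps in_channels -> out_channels
        self.upconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
        # "concat" stacks decoder and encoder features along dim 1,
        # so conv1 sees twice as many input channels as in the "add" case
        conv1_in = 2 * out_channels if merge_mode == "concat" else out_channels
        self.conv1 = nn.Conv2d(conv1_in, out_channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)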
Example No. 3
 def forward(self, x):
     out = torch.relu(self.bn1(self.conv1(x)))
     out = self.bn2(self.conv2(out))
     out += self.shortcut(x)
     out = torch.relu(out)
     return out
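Example No. 3 is the standard ResNet basic-block forward pass (two 3x3 convolutions plus a shortcut). A minimal sketch of the layer definitions it assumes follows; the channel sizes and the 1x1 projection shortcut are illustrative assumptions, not taken from the source:

import torch.nn as nn

class BasicBlock(nn.Module):
    # Illustrative only: the original constructor is not shown above.
    def __init__(self, in_planes, planes, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # identity shortcut unless the shape changes, in which case project with a 1x1 conv
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes),
            )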
Example No. 4
 def decode(self, text, dec_hidden):
     # [batch_size, seq_len]
     embeded = self.embedding(text)
     embeded = torch.relu(embeded)
     output, dec_hidden = self.dec_LSTM(embeded, dec_hidden)
     return output
Example No. 5
    def run(self, mol_tree_batch, mol_tree_batch_lg, n_trees, tree_vec):
        times = []
        times.append((116,time.time()))
        node_offset = np.cumsum([0] + mol_tree_batch.batch_num_nodes)
        root_ids = node_offset[:-1]
        n_nodes = mol_tree_batch.number_of_nodes()
        n_edges = mol_tree_batch.number_of_edges()

        times.append((122,time.time()))
        mol_tree_batch.ndata.update({
            'x': self.embedding(mol_tree_batch.ndata['wid']),
            'h': torch.cuda.FloatTensor(n_nodes, self.hidden_size).fill_(0),
            'new': torch.cuda.ByteTensor(n_nodes).fill_(1)  # whether it's a newly generated node
        })

        times.append((129,time.time()))
        mol_tree_batch.edata.update({
            's': torch.cuda.FloatTensor(n_edges, self.hidden_size).fill_(0),
            'm': torch.cuda.FloatTensor(n_edges, self.hidden_size).fill_(0),
            'r': torch.cuda.FloatTensor(n_edges, self.hidden_size).fill_(0),
            'z': torch.cuda.FloatTensor(n_edges, self.hidden_size).fill_(0),
            'src_x': torch.cuda.FloatTensor(n_edges, self.hidden_size).fill_(0),
            'dst_x': torch.cuda.FloatTensor(n_edges, self.hidden_size).fill_(0),
            'rm': torch.cuda.FloatTensor(n_edges, self.hidden_size).fill_(0),
            'accum_rm': torch.cuda.FloatTensor(n_edges, self.hidden_size).fill_(0)
        })

        times.append((141,time.time()))
        mol_tree_batch.apply_edges(
            func=lambda edges: {'src_x': edges.src['x'], 'dst_x': edges.dst['x']},
        )

        # input tensors for stop prediction (p) and label prediction (q)
        p_inputs = []
        p_targets = []
        q_inputs = []
        q_targets = []

        times.append((152,time.time()))
        # Predict root
        mol_tree_batch.pull(
            root_ids,
            dec_tree_node_msg,
            dec_tree_node_reduce,
            dec_tree_node_update,
        )
        
        times.append((161,time.time()))
        # Extract hidden states and store them for stop/label prediction
        h = mol_tree_batch.nodes[root_ids].data['h']
        x = mol_tree_batch.nodes[root_ids].data['x']
        p_inputs.append(torch.cat([x, h, tree_vec], 1))
        # If the out degree is 0 we don't generate any edges at all
        root_out_degrees = mol_tree_batch.out_degrees(root_ids).cuda()
        q_inputs.append(torch.cat([h, tree_vec], 1))
        q_targets.append(mol_tree_batch.nodes[root_ids].data['wid'])

        times.append((171,time.time()))
        # Traverse the tree and predict on children
        for eid, p in dfs_order(mol_tree_batch, root_ids):
            u, v = mol_tree_batch.find_edges(eid)
            
            p_target_list = torch.cuda.LongTensor(root_out_degrees.shape).fill_(0)
            p_target_list[root_out_degrees > 0] = 1 - p.cuda()
            p_target_list = p_target_list[root_out_degrees >= 0]
            p_targets.append(torch.tensor(p_target_list).cuda())

            root_out_degrees -= (root_out_degrees == 0).long().cuda()
            root_out_degrees -= torch.tensor(np.isin(root_ids, v).astype('int64')).cuda()

            mol_tree_batch_lg.pull(
                eid,
                dec_tree_edge_msg,
                dec_tree_edge_reduce,
                self.dec_tree_edge_update,
            )
            is_new = mol_tree_batch.nodes[v].data['new']
            mol_tree_batch.pull(
                v,
                dec_tree_node_msg,
                dec_tree_node_reduce,
                dec_tree_node_update,
            )
            # Extract
            n_repr = mol_tree_batch.nodes[v].data
            h = n_repr['h']
            x = n_repr['x']
            tree_vec_set = tree_vec[root_out_degrees >= 0]
            wid = n_repr['wid']
            p_inputs.append(torch.cat([x, h, tree_vec_set], 1))
            # Only newly generated nodes are needed for label prediction
            # NOTE: The following works since the uncomputed messages are zeros.

            q_input = torch.cat([h, tree_vec_set], 1)[is_new]
            q_target = wid[is_new]
            if q_input.shape[0] > 0:
                q_inputs.append(q_input)
                q_targets.append(q_target)
        p_targets.append(torch.zeros((root_out_degrees == 0).sum()).long().cuda())

        times.append((214,time.time()))
        # Batch compute the stop/label prediction losses
        p_inputs = torch.cat(p_inputs, 0)
        p_targets = cuda(torch.cat(p_targets, 0))
        q_inputs = torch.cat(q_inputs, 0)
        q_targets = torch.cat(q_targets, 0)

        times.append((221,time.time()))
        q = self.W_o(torch.relu(self.W(q_inputs)))
        p = self.U_s(torch.relu(self.U(p_inputs)))[:, 0]

        times.append((225,time.time()))
        p_loss = F.binary_cross_entropy_with_logits(
            p, p_targets.float(), size_average=False
        ) / n_trees
        q_loss = F.cross_entropy(q, q_targets, size_average=False) / n_trees
        p_acc = ((p > 0).long() == p_targets).sum().float() / p_targets.shape[0]
        q_acc = (q.max(1)[1] == q_targets).float().sum() / q_targets.shape[0]

        times.append((233,time.time()))
        self.q_inputs = q_inputs
        self.q_targets = q_targets
        self.q = q
        self.p_inputs =  p_inputs
        self.p_targets = p_targets
        self.p = p
        
        #print("Dec Profile:")
        #for i in range(len(times)-1):
        #    print("\t%d: %f" % (times[i][0], 
        #                        times[i+1][1]-times[i][1]))

        return q_loss, p_loss, q_acc, p_acc
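On PyTorch 1.0 and later, the size_average=False keyword used in the loss calls above is deprecated; reduction="sum" is the equivalent spelling. A drop-in fragment, assuming the same p, p_targets, q, q_targets, and n_trees as above:

import torch.nn.functional as F

# same sums as size_average=False, just with the modern keyword
p_loss = F.binary_cross_entropy_with_logits(p, p_targets.float(), reduction="sum") / n_trees
q_loss = F.cross_entropy(q, q_targets, reduction="sum") / n_trees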
Example No. 6
    def step(self, input, decoder_state):
        """
        Inputs Shapes:
            input: (Variable) batch_size x len_tgt (to be transposed)
            context: (Variable) batch_size x len_src x d_model
            mask_src (Tensor) batch_size x len_src
            buffer (list of Tensors): one batch_size x (len_tgt - 1) x d_model tensor per layer, cached for recomputing self-attention
        Outputs Shapes:
            out: batch_size x len_tgt x d_model
            coverage: batch_size x len_tgt x len_src

        """
        context = decoder_state.context
        buffers = decoder_state.attention_buffers
        src = decoder_state.src.transpose(
            0, 1) if decoder_state.src is not None else None
        atbs = decoder_state.tgt_atb

        if decoder_state.input_seq is None:
            decoder_state.input_seq = input
        else:
            # concatenate the last input to the previous input sequence
            decoder_state.input_seq = torch.cat(
                [decoder_state.input_seq, input], 0)
        input = decoder_state.input_seq.transpose(0, 1)
        input_ = input[:, -1].unsqueeze(1)
        """ Embedding: batch_size x 1 x d_model """
        emb = self.word_lut(input_)
        """ Adding positional encoding """
        if self.time == 'positional_encoding':
            emb = emb * math.sqrt(self.model_size)
            emb = self.time_transformer(emb, t=input.size(1))
        else:
            # prev_h = buffer[0] if buffer is None else None
            # emb = self.time_transformer(emb, prev_h)
            # buffer[0] = emb[1]
            raise NotImplementedError

        if isinstance(emb, tuple):
            emb = emb[0]
        # emb should be batch_size x 1 x dim

        if self.use_feature:
            atb_emb = self.attribute_embeddings(atbs).unsqueeze(
                1)  # B x H to B x 1 x H
            emb = torch.cat([emb, atb_emb], dim=-1)
            emb = torch.relu(self.feature_projector(emb))

        emb = emb.transpose(0, 1)

        # batch_size x 1 x len_src
        if context is not None:
            if self.encoder_type == "audio":
                if src.data.dim() == 3:
                    if self.encoder_cnn_downsampling:
                        long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(
                            onmt.Constants.PAD)
                        mask_src = long_mask[:, 0:context.size(0) *
                                             4:4].unsqueeze(1)
                    else:
                        mask_src = src.narrow(2, 0, 1).squeeze(2).eq(
                            onmt.Constants.PAD).unsqueeze(1)
                elif self.encoder_cnn_downsampling:
                    long_mask = src.eq(onmt.Constants.PAD)
                    mask_src = long_mask[:,
                                         0:context.size(0) * 4:4].unsqueeze(1)
                else:
                    mask_src = src.eq(onmt.Constants.PAD).unsqueeze(1)
            else:
                mask_src = src.eq(onmt.Constants.PAD).unsqueeze(1)
        else:
            mask_src = None

        len_tgt = input.size(1)
        mask_tgt = input.data.eq(
            onmt.Constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
        mask_tgt = torch.gt(mask_tgt, 0)
        mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)

        output = emb.contiguous()

        for i, layer in enumerate(self.layer_modules):
            buffer = buffers[i] if i in buffers else None
            assert (output.size(0) == 1)

            output, coverage, buffer = layer.step(output,
                                                  context,
                                                  mask_tgt,
                                                  mask_src,
                                                  buffer=buffer)

            decoder_state.update_attention_buffer(buffer, i)

        # From Google T2T
        # if normalization is done in layer_preprocess, then it should also be done
        # on the output, since the output can grow very large, being the sum of
        # a whole stack of unnormalized layer outputs.
        output = self.postprocess_layer(output)

        return output, coverage
Example No. 7
 def comparison(x):
     val = torch.relu(x)
     return torch.add(val, val)
Example No. 8
 def pattern(x):
     return torch.neg(x) + torch.relu(x)
Example No. 9
 def pattern(x):
     return torch.relu(x)
Example No. 10
 def forward(self, x):
     return torch.relu(self.l1(x.view(x.size(0), -1)))
Example No. 11
 def forward(self, x):
     x = torch.relu(self.fc1(x))
     x = torch.relu(self.fc2(x))
     x = torch.relu(self.fc3(x))
     actions_value = torch.sigmoid(self.out(x))
     return actions_value
Example No. 12
	def forward(self, state, action):
		x = torch.cat([state, action], 1)
		x = torch.relu(self.linear1(x))
		x = torch.relu(self.linear2(x))
		x = self.linear3(x)
		return x
Example No. 13
	def forward(self, state):
		x = torch.relu(self.linear1(state))
		x = torch.relu(self.linear2(x))
		x = self.linear3(x)
		return x
Example No. 14
 def decode(self, z):
     z = self.decoder_hidden(z)
     z = torch.relu(z)
     z = self.decoder_out(z)
     return torch.tanh(z)
Example No. 15
 def encode(self, x):
     x = self.encoder_hidden(x)
     x = torch.relu(x)
     return self.encoder_mu(x), self.encoder_sigma(x)
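Examples No. 14 and 15 look like the decode/encode halves of a VAE. Below is a hypothetical piece of glue code connecting them with the reparameterization trick; it assumes encoder_sigma predicts a log-variance, which the source does not show:

import torch

def vae_forward(model, x):
    # Hypothetical, not from the source: model is assumed to expose the
    # encode/decode methods of Examples No. 15 and 14.
    mu, log_var = model.encode(x)
    std = torch.exp(0.5 * log_var)         # sigma = exp(0.5 * log_var), by assumption
    z = mu + std * torch.randn_like(std)   # reparameterization trick
    return model.decode(z), mu, log_var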
Example No. 16
 def forward(self):
     y = torch.relu(self.x)
     return y
Example No. 17
 def forward(self, state):
     a = t.relu(self.fc1(state))
     a = t.relu(self.fc2(a))
     return self.fc3(a)
Example No. 18
 def forward(self, x):
     val = torch.neg(x) + torch.relu(x)
     return torch.add(val, val)
Example No. 19
 def forward(self, x):
     x = x.view(x.size(0), -1)
     x = torch.relu(self.l1(x))
     x = torch.relu(self.l2(x))
     return x
Example No. 20
 def replacement(x):
     return torch.relu(x)
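The pattern functions (Examples No. 8 and 9) and the replacement function (Example No. 20) are the typical inputs to torch.fx's subgraph rewriter. A minimal sketch of how such a pair is applied to a traced module follows; the module M below is made up for illustration:

import torch
from torch.fx import symbolic_trace, subgraph_rewriter

class M(torch.nn.Module):
    def forward(self, x):
        # contains the subgraph that the pattern below matches
        return torch.neg(x) + torch.relu(x)

def pattern(x):
    return torch.neg(x) + torch.relu(x)

def replacement(x):
    return torch.relu(x)

traced = symbolic_trace(M())
# rewrite every occurrence of pattern with replacement, in place
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
print(traced.code)  # the neg + relu subgraph is now a single relu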
Example No. 21
    def composite(self, model, rays, z_samp, coarse=True, far=False, sb=0):
        """
        Render RGB and depth for each ray using NeRF alpha-compositing formula,
        given sampled positions along each ray (see sample_*)
        :param model should return (B, (r, g, b, sigma)) when called with (B, (x, y, z))
        should also support 'coarse' boolean argument
        :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)
        :param z_samp z positions sampled for each ray (B, K)
        :param coarse whether to evaluate using coarse NeRF
        :param sb super-batch dimension; 0 = disable
        :return weights (B, K), rgb (B, 3), depth (B)
        """
        with profiler.record_function("renderer_composite"):
            B, K = z_samp.shape

            deltas = z_samp[:, 1:] - z_samp[:, :-1]  # (B, K-1)
            if far:
                delta_inf = 1e10 * torch.ones_like(
                    deltas[:, :1])  # infty (B, 1)
            else:
                delta_inf = rays[:, -1:] - z_samp[:, -1:]
            deltas = torch.cat([deltas, delta_inf], -1)  # (B, K)

            # (B, K, 3)
            points = rays[:,
                          None, :3] + z_samp.unsqueeze(2) * rays[:, None, 3:6]
            points = points.reshape(-1, 3)  # (B*K, 3)

            use_viewdirs = hasattr(model,
                                   "use_viewdirs") and model.use_viewdirs

            val_all = []
            if sb > 0:
                points = points.reshape(
                    sb, -1, 3)  # (SB, B'*K, 3) B' is real ray batch size
                eval_batch_size = (self.eval_batch_size - 1) // sb + 1
                eval_batch_dim = 1
            else:
                eval_batch_size = self.eval_batch_size
                eval_batch_dim = 0

            split_points = torch.split(points,
                                       eval_batch_size,
                                       dim=eval_batch_dim)
            if use_viewdirs:
                dim1 = K
                viewdirs = rays[:, None, 3:6].expand(-1, dim1, -1)  # (B, K, 3)
                if sb > 0:
                    viewdirs = viewdirs.reshape(sb, -1, 3)  # (SB, B'*K, 3)
                else:
                    viewdirs = viewdirs.reshape(-1, 3)  # (B*K, 3)
                split_viewdirs = torch.split(viewdirs,
                                             eval_batch_size,
                                             dim=eval_batch_dim)
                for pnts, dirs in zip(split_points, split_viewdirs):
                    val_all.append(model(pnts, coarse=coarse, viewdirs=dirs))
            else:
                for pnts in split_points:
                    val_all.append(model(pnts, coarse=coarse))
            points = None
            viewdirs = None
            # (B*K, 4) OR (SB, B'*K, 4)
            out = torch.cat(val_all, dim=eval_batch_dim)
            out = out.reshape(B, K, -1)  # (B, K, 4 or 5)

            rgbs = out[..., :3]  # (B, K, 3)
            sigmas = out[..., 3]  # (B, K)
            if self.training and self.noise_std > 0.0:
                sigmas = sigmas + torch.randn_like(sigmas) * self.noise_std

            # compute the gradients in log space of the alphas, for NV TV occupancy regularizer
            alphas = 1 - torch.exp(-deltas * torch.relu(sigmas))  # (B, K)
            deltas = None
            sigmas = None
            alphas_shifted = torch.cat(
                [torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10],
                -1)  # (B, K+1) = [1, a1, a2, ...]
            T = torch.cumprod(alphas_shifted, -1)  # (B, K+1)
            weights = alphas * T[:, :-1]  # (B, K)
            alphas = None
            alphas_shifted = None

            rgb_final = torch.sum(weights.unsqueeze(-1) * rgbs, -2)  # (B, 3)
            depth_final = torch.sum(weights * z_samp, -1)  # (B)
            if self.white_bkgd:
                # White background
                pix_alpha = weights.sum(dim=1)  # (B), pixel alpha
                rgb_final = rgb_final + 1 - pix_alpha.unsqueeze(-1)  # (B, 3)
            return (
                weights,
                rgb_final,
                depth_final,
            )
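In equation form, the compositing step of Example No. 21 computes, for each ray with samples i = 1, ..., K (this restates the code above, with sigma passed through relu before use):

\alpha_i = 1 - \exp\big(-\delta_i \, \mathrm{relu}(\sigma_i)\big), \qquad T_i = \prod_{j < i} (1 - \alpha_j), \qquad w_i = \alpha_i T_i

\mathrm{rgb\_final} = \sum_{i=1}^{K} w_i \, \mathrm{rgb}_i, \qquad \mathrm{depth\_final} = \sum_{i=1}^{K} w_i \, z_i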
Example No. 22
 def forward(self, x):
     x = a_non_torch_leaf(x, x)
     return torch.relu(x + 3.0)
Example No. 23
    def forward(self, x):
        for layer in self.layers[:-1]:
            x = torch.relu(layer(x))

        return self.out_func(self.layers[-1](x))
Example No. 24
 def forward(self, x):
   x = torch.relu(self.fc1(x))
   x = torch.relu(self.fc2(x))
   x = torch.sigmoid(self.fc3(x)) # .softmax(self.fc3(x), dim=1)
   return x, self.value
Example No. 25
    def forward(self, x, new_weights=True):
        for layer in self.layers[:-1]:
            x = torch.relu(layer(x, new_weights))

        return self.out_func(self.layers[-1](x))
Example No. 26
    def decode(self, mol_vec):
        assert mol_vec.shape[0] == 1

        mol_tree = MolTree(None) 

        init_hidden = torch.cuda.FloatTensor(1, self.hidden_size).fill_(0)

        root_hidden = torch.cat([init_hidden, mol_vec], 1)
        root_hidden = F.relu(self.W(root_hidden))
        root_score = self.W_o(root_hidden)
        _, root_wid = torch.max(root_score, 1)
        root_wid = root_wid.view(1)

        mol_tree.add_nodes(1)   # root
        mol_tree.nodes[0].data['wid'] = root_wid
        mol_tree.nodes[0].data['x'] = self.embedding(root_wid)
        mol_tree.nodes[0].data['h'] = init_hidden
        mol_tree.nodes[0].data['fail'] = cuda(torch.tensor([0]))
        mol_tree.nodes_dict[0] = root_node_dict = create_node_dict(
                self.vocab.get_smiles(root_wid))

        stack, trace = [], []
        stack.append((0, self.vocab.get_slots(root_wid)))

        all_nodes = {0: root_node_dict}
        h = {}
        first = True
        new_node_id = 0
        new_edge_id = 0

        for step in range(MAX_DECODE_LEN):
            u, u_slots = stack[-1]
            udata = mol_tree.nodes[u].data
            x = udata['x']
            h = udata['h']

            # Predict stop
            p_input = torch.cat([x, h, mol_vec], 1)
            p_score = torch.sigmoid(self.U_s(torch.relu(self.U(p_input))))
            backtrack = (p_score.item() < 0.5)

            if not backtrack:
                # Predict next clique.  Note that the prediction may fail due
                # to lack of assemblable components
                mol_tree.add_nodes(1)
                new_node_id += 1
                v = new_node_id
                mol_tree.add_edges(u, v)
                uv = new_edge_id
                new_edge_id += 1
                
                if first:
                    mol_tree.edata.update({
                        's': torch.cuda.FloatTensor(1, self.hidden_size).fill_(0),
                        'm': torch.cuda.FloatTensor(1, self.hidden_size).fill_(0),
                        'r': torch.cuda.FloatTensor(1, self.hidden_size).fill_(0),
                        'z': torch.cuda.FloatTensor(1, self.hidden_size).fill_(0),
                        'src_x': torch.cuda.FloatTensor(1, self.hidden_size).fill_(0),
                        'dst_x': torch.cuda.FloatTensor(1, self.hidden_size).fill_(0),
                        'rm': torch.cuda.FloatTensor(1, self.hidden_size).fill_(0),
                        'accum_rm': torch.cuda.FloatTensor(1, self.hidden_size).fill_(0),
                    })
                    first = False

                mol_tree.edges[uv].data['src_x'] = mol_tree.nodes[u].data['x']
                # Keeping dst_x at 0 is fine since h on the new edge doesn't depend on it.

                # DGL doesn't dynamically maintain a line graph.
                mol_tree_lg = mol_tree.line_graph(backtracking=False, shared=True)

                mol_tree_lg.pull(
                    uv,
                    dec_tree_edge_msg,
                    dec_tree_edge_reduce,
                    self.dec_tree_edge_update.update_zm,
                )
                mol_tree.pull(
                    v,
                    dec_tree_node_msg,
                    dec_tree_node_reduce,
                )

                vdata = mol_tree.nodes[v].data
                h_v = vdata['h']
                q_input = torch.cat([h_v, mol_vec], 1)
                q_score = torch.softmax(self.W_o(torch.relu(self.W(q_input))), -1)
                _, sort_wid = torch.sort(q_score, 1, descending=True)
                sort_wid = sort_wid.squeeze()

                next_wid = None
                for wid in sort_wid.tolist()[:5]:
                    slots = self.vocab.get_slots(wid)
                    cand_node_dict = create_node_dict(self.vocab.get_smiles(wid))
                    if (have_slots(u_slots, slots) and can_assemble(mol_tree, u, cand_node_dict)):
                        next_wid = wid
                        next_slots = slots
                        next_node_dict = cand_node_dict
                        break

                if next_wid is None:
                    # Failed to add an actual child; v is a spurious node,
                    # so we mark it.
                    vdata['fail'] = cuda(torch.tensor([1]))
                    backtrack = True
                else:
                    next_wid = cuda(torch.tensor([next_wid]))
                    vdata['wid'] = next_wid
                    vdata['x'] = self.embedding(next_wid)
                    mol_tree.nodes_dict[v] = next_node_dict
                    all_nodes[v] = next_node_dict
                    stack.append((v, next_slots))
                    mol_tree.add_edge(v, u)
                    vu = new_edge_id
                    new_edge_id += 1
                    mol_tree.edges[uv].data['dst_x'] = mol_tree.nodes[v].data['x']
                    mol_tree.edges[vu].data['src_x'] = mol_tree.nodes[v].data['x']
                    mol_tree.edges[vu].data['dst_x'] = mol_tree.nodes[u].data['x']

                    # DGL doesn't dynamically maintain a line graph.
                    mol_tree_lg = mol_tree.line_graph(backtracking=False, shared=True)
                    mol_tree_lg.apply_nodes(
                        self.dec_tree_edge_update.update_r,
                        uv
                        )

            if backtrack:
                if len(stack) == 1:
                    break   # At root, terminate

                pu, _ = stack[-2]
                u_pu = mol_tree.edge_id(u, pu)

                mol_tree_lg.pull(
                    u_pu,
                    dec_tree_edge_msg,
                    dec_tree_edge_reduce,
                    self.dec_tree_edge_update,
                )
                mol_tree.pull(
                    pu,
                    dec_tree_node_msg,
                    dec_tree_node_reduce,
                )
                stack.pop()

        effective_nodes = mol_tree.filter_nodes(lambda nodes: nodes.data['fail'] != 1)
        effective_nodes, _ = torch.sort(effective_nodes)
        return mol_tree, all_nodes, effective_nodes
Example No. 27
 def forward(self, x, y):
     return y + torch.relu(self.nm(x))
Example No. 28
 def forward(self, t: torch.Tensor) -> torch.Tensor:
     t = torch.relu(self.l1(t))
     t = torch.relu(self.l2(t))
     t = torch.relu(self.l3(t))
     t = self.out(t)
     return t
Example No. 29
 def forward(self, x):
     # x:[batch,channel,seq_len,]-> x:[batch,channel,seq_len]
     x = self.layers(x)
     # return :[batch,seq_len]
     return self.out(self.out_bn(torch.relu(self.conv1d(self.bn(x))))).squeeze()
Example No. 30
def testBIWI(model,modelin=args.model,outfile=args.out):
    if modelin != "":
        model.load_state_dict(torch.load(modelin))
    model.eval()

    # load 3dmm data
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float()
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float()
    shape = mu_lm
    shape[:,2] = shape[:,2] * -1

    loader = dataloader.BIWILoader()
    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    for sub in range(len(loader)):
        batch = loader[sub]

        x_cam_gt = batch['x_cam_gt']
        x_w_gt = batch['x_w_gt']
        f_gt = batch['f_gt']
        x_img = batch['x_img']
        x_img_gt = batch['x_img_gt']
        M = x_img_gt.shape[0]

        one  = torch.ones(M,1,68)
        x_img_one = torch.cat([x_img,one],dim=1)

        # run the model
        out, trans, transfeat = model(x_img_one)
        alphas = out[:,:199].mean(0)
        f = torch.relu(out[:,199]).mean()
        K = torch.zeros((3,3)).float()
        K[0,0] = f
        K[1,1] = f
        K[2,2] = 1
        K[0,2] = 320
        K[1,2] = 240
        Xc,R,T = util.EPnP(x_img,shape,K)

        # apply 3DMM model from predicted parameters
        reproj_errors2 = util.getReprojError2(x_img,shape,R,T,K)
        reproj_errors3 = util.getReprojError3(x_cam_gt,shape,R,T)
        rel_errors = util.getRelReprojError3(x_cam_gt,shape,R,T)

        reproj_error = reproj_errors2.mean()
        reconstruction_error = reproj_errors3.mean()
        rel_error = rel_errors.mean()
        f_error = torch.abs(f_gt - f) / f_gt

        seterror_2d.append(reproj_error.cpu().data.item())
        seterror_3d.append(reconstruction_error.cpu().data.item())
        seterror_rel3d.append(rel_error.cpu().data.item())
        seterror_relf.append(f_error.cpu().data.item())

        print(f"fgt: {f_gt.mean().item():.3f}  | f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")
        #end for

    matdata = {}
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Example No. 31
 def forward(self, x):
     return torch.neg(x) + torch.relu(x)
Example No. 32
 def forward(self, x):
     y = torch.relu(x)
     return torch.neg(y) - y
Example No. 33
"""
Dependencies:
torch: 0.4
matplotlib
"""
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# fake data
x = torch.linspace(-5, 5, 200)  # x data (tensor), shape=(200,)
x = Variable(x)
x_np = x.data.numpy()   # numpy array for plotting

# following are popular activation functions
y_relu = torch.relu(x).data.numpy()
y_sigmoid = torch.sigmoid(x).data.numpy()
y_tanh = torch.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()  # softplus lives in torch.nn.functional; there is no torch.softplus
# y_softmax = torch.softmax(x, dim=0).data.numpy()  # softmax is special: it outputs a probability distribution, so it is not plotted here

# use matplotlib to visualize these activation functions
plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')

plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
plt.ylim((-0.2, 1.2))