Example #1
def sample(x_gen):
    n_comps = 10
    logits = x_gen[:, 0:n_comps, :, :]
    sel = torch.argmax(logits,  # -
                       # torch.log(- torch.log(self.float_tensor(logits.size()).uniform_(1e-5, 1-1e-5))),
                       dim=1, keepdim=True)
    one_hot = torch.zeros(logits.size())
    if torch.cuda.is_available():
        one_hot = one_hot.cuda()
    one_hot.scatter_(1, sel, 1.0)

    mean_x_r = torch.sum(x_gen[:, n_comps:2 * n_comps, :, :] * one_hot, 1, keepdim=True)
    # u_r = self.float_tensor(mean_x_r.size()).uniform_(1e-5, 1 - 1e-5)
    x_r = F.hardtanh(mean_x_r,  # + torch.exp(log_scale_r) * (torch.log(u_r) - torch.log(1. - u_r)),
                     min_val=0., max_val=1.)

    mean_x_g = torch.sum(x_gen[:, 2 * n_comps:3 * n_comps, :, :] * one_hot, 1, keepdim=True) + \
               torch.tanh(torch.sum(x_gen[:, 3 * n_comps:4 * n_comps] * one_hot, 1, keepdim=True)) * x_r
    # u_g = self.float_tensor(mean_x_g.size()).uniform_(1e-5, 1 - 1e-5)
    x_g = F.hardtanh(mean_x_g,  # + torch.exp(log_scale_g) * (torch.log(u_g) - torch.log(1. - u_g)),
                     min_val=0., max_val=1.)

    mean_x_b = torch.sum(x_gen[:, 4 * n_comps:5 * n_comps, :, :] * one_hot, 1, keepdim=True) + \
               torch.tanh(torch.sum(x_gen[:, 5 * n_comps:6 * n_comps] * one_hot, 1, keepdim=True)) * x_r + \
               torch.tanh(
                   torch.sum(x_gen[:, 6 * n_comps:7 * n_comps, :, :] * one_hot, 1, keepdim=True)) * x_g
    # u_b = self.float_tensor(mean_x_b.size()).uniform_(1e-5, 1 - 1e-5)
    x_b = F.hardtanh(mean_x_b,  # + torch.exp(log_scale_b) * (torch.log(u_b) - torch.log(1. - u_b)),
                     min_val=0., max_val=1.)

    sample = torch.cat([x_r, x_g, x_b], 1)
    return sample
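
Note that the argmax above picks the most likely mixture component deterministically; the commented-out term sketches the Gumbel noise that would turn it into a stochastic draw from the component distribution. A minimal sketch of that variant (the helper name gumbel_argmax and the eps value are assumptions, not part of the original project):

import torch

def gumbel_argmax(logits, dim=1, eps=1e-5):
    # Add Gumbel(0, 1) noise, -log(-log(U)), to the logits before the argmax,
    # which samples a component from the categorical defined by the logits.
    u = torch.empty_like(logits).uniform_(eps, 1 - eps)
    return torch.argmax(logits - torch.log(-torch.log(u)), dim=dim, keepdim=True)
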
Example #2
 def get_mask(self, log_alpha, re_sample=True):
     if self.training:
         if self.u is None or re_sample:
             self.u = u = torch.rand(log_alpha.size(), device=self.args.device)
         else:
             u = self.u
         s_u = torch.log(u + self.constant_eps) - torch.log(1 - u + self.constant_eps)
         s = torch.sigmoid((s_u + log_alpha + self.constant_eps) / self.beta)
         s_bar = s * (self.zeta - self.gamma) + self.gamma
         mask = nfunc.hardtanh(s_bar, min_val=0, max_val=1)
     else:
         s = torch.sigmoid(log_alpha / self.beta)
         s_bar = s * (self.zeta - self.gamma) + self.gamma
         mask = nfunc.hardtanh(s_bar, min_val=0, max_val=1)
     if self.args.dataset in ["image", 'imagenet']:
         mask = nfunc.interpolate(mask, size=(224, 224), mode='nearest')
     if self.args.dataset in ["image64", 'imagenet64']:
         if 'pool' in self.args.layer:
             mask = nfunc.interpolate(mask, size=(64, 64), mode='nearest')
     if 'caltech' in self.args.dataset:
         if 'pool' in self.args.layer:
             if "incept" == self.args.arch:
                 mask = nfunc.interpolate(mask, size=(299, 299), mode='nearest')
             else:
                 mask = nfunc.interpolate(mask, size=(256, 256), mode='nearest')
     return mask
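
This mask and the hard-concrete gates in later examples share the same recipe: form a sigmoid relaxation of a Bernoulli gate, stretch it slightly past [0, 1], then clip it back with hardtanh so exact zeros and ones occur with non-zero probability. A self-contained sketch of the pattern (the beta/gamma/zeta values are the usual defaults from the L0-regularization literature, assumed here):

import torch
import torch.nn.functional as F

def hard_concrete_gate(log_alpha, beta=2.0 / 3.0, gamma=-0.1, zeta=1.1, eps=1e-6):
    u = torch.rand_like(log_alpha)                      # u ~ Uniform(0, 1)
    s = torch.sigmoid((torch.log(u + eps) - torch.log(1 - u + eps) + log_alpha) / beta)
    s_bar = s * (zeta - gamma) + gamma                  # stretch to (gamma, zeta)
    return F.hardtanh(s_bar, min_val=0.0, max_val=1.0)  # clip back to [0, 1]
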
Example #3
    def forward(self, input):
        if self.wbit == 32:
            input_q = input
        else:
            n_lv = 2**self.wbit
            scale = n_lv // 2 - 1

            # gradient friendly
            a = F.softplus(self.a)  # keep the learnable value positive
            c = F.softplus(self.c)

            if self.channel_wise == 1:
                input = input.div(a[:, None, None, None])
                input = F.hardtanh(input, -1, 1)

                scale = torch.ones_like(self.a.data).mul(scale)
                input_q = WeightQuant.apply(input, scale)
                input_q = input_q.mul(c[:, None, None, None])
            else:
                input = input.div(a)
                input = F.hardtanh(input, -1, 1)

                input_q = RoundQuant.apply(input, scale)
                input_q = input_q.mul(c)
        return input_q
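
RoundQuant and WeightQuant are not defined in this snippet; they are presumably straight-through estimators that quantize in the forward pass and let gradients pass through unchanged. A minimal sketch of such a function, under that assumption:

import torch

class RoundSTE(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, scale):
        # Snap to the nearest of `scale` uniformly spaced levels.
        return torch.round(x * scale) / scale

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through: treat rounding as the identity for gradients.
        return grad_output, None
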
Example #4
    def forward(self, mono_feature, non_mono_feature):
        y = self.non_mono_fc_in(non_mono_feature)
        y = F.relu(y)

        if self.compress_non_mono:
            non_mono_feature = self.non_mono_feature_extractor(
                non_mono_feature)
            non_mono_feature = F.hardtanh(non_mono_feature,
                                          min_val=0.0,
                                          max_val=1.0)

        x = self.mono_fc_in(torch.cat([mono_feature, non_mono_feature], dim=1))
        x = F.relu(x)
        for i in range(int(len(self.mono_submods_out))):
            x = self.mono_submods_out[i](x)
            x = F.hardtanh(x, min_val=0.0, max_val=1.0)

            y = self.non_mono_submods_out[i](y)
            y = F.hardtanh(y, min_val=0.0, max_val=1.0)

            x = self.mono_submods_in[i](torch.cat([x, y], dim=1))
            x = F.relu(x)

            y = self.non_mono_submods_in[i](y)
            y = F.relu(y)

        x = self.mono_fc_last(x)

        y = self.non_mono_fc_last(y)

        out = x + y
        if self.normalize_regression:
            out = F.sigmoid(out)
        return out
Example #5
 def forward(self, x):
     out = self.bn1(self.conv1(x))
     out = F.hardtanh(out)
     out = self.bn2(self.conv2(out))
     out += self.shortcut(x)
     out = F.hardtanh(out)
     return out
Example #6
 def closure_wrapper():
     loss = closure()
     for group in optimizer.param_groups:
         for p in group['params']:
             if p.grad is not None:
                 hardtanh(p.grad, min, max, inplace=True)
     return loss
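
The wrapper clamps every parameter gradient in place right after the closure computes them, which is useful with optimizers such as LBFGS that re-invoke the closure internally. A self-contained usage sketch (the model, data, and the [-1, 1] bounds are placeholders, not the original values):

import torch
import torch.nn.functional as F

model = torch.nn.Linear(4, 1)
inputs, targets = torch.randn(8, 4), torch.randn(8, 1)
optimizer = torch.optim.LBFGS(model.parameters())

def closure():
    optimizer.zero_grad()
    loss = F.mse_loss(model(inputs), targets)
    loss.backward()
    return loss

def closure_wrapper():
    loss = closure()
    for group in optimizer.param_groups:
        for p in group['params']:
            if p.grad is not None:
                F.hardtanh(p.grad, -1.0, 1.0, inplace=True)  # clip gradients to [-1, 1]
    return loss

optimizer.step(closure_wrapper)
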
Example #7
    def forward(self, x):
        if self.alpha is None:
            return x

        if self.training and self.init_state == 0:
            Qp = 2**(self.nbits - 1) - 1
            self.xmax.data.copy_(x.abs().max())
            self.alpha.data.copy_(self.xmax / Qp)
            # self.alpha[self.index].data.copy_(2 * x.abs().mean() / math.sqrt(Qp))
            # self.xmax[self.index].data.copy_(self.alpha[self.index] * Qp)
            self.init_state.fill_(1)

        self.xmax.data.copy_(self.xmax.clamp(self.qmin, self.qmax))
        self.alpha.data.copy_(self.alpha.clamp(self.dmin, self.dmax))
        Qp = (self.xmax / self.alpha).item()
        g = 1.0 / math.sqrt(x.numel() * Qp)
        alpha = grad_scale(self.alpha, g)
        xmax = grad_scale(self.xmax, g)

        if self.signed:
            x = F.hardtanh(x / xmax.abs(), -1, 1) * xmax.abs()
            # x = round_pass((torch.clamp(x/xmax, -1, 1)*xmax)/alpha) * alpha
        else:
            x = F.hardtanh(x / xmax.abs(), 0, 1) * xmax.abs()
            # x = round_pass((torch.clamp(x/xmax, 0, 1)*xmax)/alpha) * alpha
        x = x / alpha.abs()
        x = round_pass(x) * alpha.abs()

        return x
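
grad_scale and round_pass are not shown here; in LSQ-style quantizers they are usually small detach() tricks: the value is unchanged (or rounded) in the forward pass while the backward pass sees a scaled (or identity) gradient. A sketch of the common definitions, stated as an assumption about this project's helpers:

def grad_scale(x, scale):
    # Value is x; the gradient w.r.t. x is multiplied by `scale`.
    y = x * scale
    return (x - y).detach() + y

def round_pass(x):
    # Value is round(x); the gradient passes through as if rounding were the identity.
    return (x.round() - x).detach() + x
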
Example #8
    def forward(self, agent_state: torch.FloatTensor, other_agent_states: torch.FloatTensor,
                other_agent_actions: Optional[torch.FloatTensor] = None, action: Optional[torch.FloatTensor] = None,  min_std=0.05, scale=1,):
        assert min_std > 0 and scale >= 0, (min_std, scale)

        if self.continuous_actions:
            action_mean = self.actor(agent_state)
            std = F.hardtanh(self.std, min_val=min_std, max_val=scale)
            dist = torch.distributions.Normal(action_mean, std)
        else:
            action_probs = self.actor(agent_state)
            dist = torch.distributions.Categorical(probs=action_probs)

        if action is None:
            action = dist.sample().to(device)

        if action.ndim > 1:
            action = action.squeeze().to(device)
        if other_agent_actions is None:
            if self.continuous_actions:
                other_agent_action_mean = self.actor(agent_state)
                std = F.hardtanh(self.std, min_val=min_std, max_val=scale)
                other_agent_dist = torch.distributions.Normal(other_agent_action_mean, std)
            else:
                other_action_probs = self.actor(other_agent_states)
                other_agent_dist = torch.distributions.Categorical(probs=other_action_probs)
            other_agent_actions = other_agent_dist.sample().to(device)

        critic_value = self.critic(agent_state, other_agent_states, other_agent_actions, action)

        log_probs = dist.log_prob(action)

        dist_entropy = dist.entropy().mean()
        return action, log_probs, dist_entropy, critic_value
Example #9
def calc_targets(feat, label, noise, predict, opts=None):
    clip_low = opts["value_low"]
    clip_high = opts["value_high"]
    compress_function = opts["compress_function"]

    if clip_low and clip_high and predict is not None:
        predict = F.hardtanh(predict, clip_low, clip_high)
    if compress_function and predict is not None:
        predict = compress_function(predict)

    with torch.no_grad():
        feat = feat.sqrt()
        label = label.sqrt()
        target = label / feat
        target[torch.isinf(target)] = 0.
        target[torch.isnan(target)] = 0.
        if clip_low and clip_high:
            target = F.hardtanh(target, clip_low, clip_high)
        if compress_function:
            target = compress_function(target)
        ideal = target * feat
    if predict is None:
        predict = target
    enhanced = feat * predict
    return {"target": target, "predict": predict, "enhanced": enhanced,
            "ideal": ideal, "real_data": ideal, "fake_data": enhanced}
Example #10
    def forward(self, data):
        pos = data.pos  # xyz

        # ref: https://github.com/yanx27/Pointnet_Pointnet2_pytorch/blob/master/data_utils/S3DISDataLoader.py
        rgb = data.x  # rgb and 3 additional features

        batch = torch.max(data.batch) + 1

        pos_list = []
        rgb_list = []
        for i in range(batch):
            pos_list.append(pos[data.batch == i])
            rgb_list.append(rgb[data.batch == i])

        pos = torch.stack(pos_list).permute(0, 2, 1).contiguous()
        rgb = torch.stack(rgb_list).permute(0, 2, 1).contiguous()
        if self.more_features:
            point_cloud = torch.cat([pos, rgb], dim=1)
        else:
            point_cloud = pos

        x, trans, trans_feat = self.feat(point_cloud)
        x = F.hardtanh(self.bn1(self.conv1(x)))
        x = F.hardtanh(self.bn2(self.conv2(x)))
        x = F.hardtanh(self.bn3(self.conv3(x)))
        x = self.conv4(x)
        x = x.transpose(2,1).contiguous()
        x = F.log_softmax(x.view(-1,self.k), dim=-1)
        x = x.view(-1, self.k)

        return {
            'out': x,
            'trans': trans,
            'trans_feat': trans_feat,
        }
Example #11
 def cpt_gate(self, semantic_score: T) -> Tuple[T, T]:
     assert semantic_score.size()[0] > 4
     score = semantic_score[1:-1]  # (num_score - 2)
     fwd_score = torch.cat([torch.zeros(score.size(0)), score], dim=0)
     bwd_score = torch.cat([score, torch.zeros(score.size(0))], dim=0)
      fwd_score_hat = torch.stack(
          [fwd_score[i:i + score.size(0)] for i in range(score.size(0) - 1, 0, -1)],
          dim=0)
      bwd_score_hat = torch.stack(
          [bwd_score[i:i + score.size(0)] for i in range(1, score.size(0))],
          dim=0)
      if self.hard:
          fwd_gate = (F.hardtanh((fwd_score_hat - score[None, :]) / self.resolution * 2 + 1) + 1) / 2
          bwd_gate = (F.hardtanh((bwd_score_hat - score[None, :]) / self.resolution * 2 + 1) + 1) / 2
      else:
          fwd_gate = F.sigmoid((fwd_score_hat - score[None, :]) / self.resolution * 10 + 5)
          bwd_gate = F.sigmoid((bwd_score_hat - score[None, :]) / self.resolution * 10 + 5)
     fwd_gate = torch.cumprod(fwd_gate, dim=0)  # seq x seq - 1
     bwd_gate = torch.cumprod(bwd_gate, dim=0)  # seq x seq - 1
     return (fwd_gate, bwd_gate)
Example #12
    def forward(self, input):
        # 1. input data and weight quantization
        with torch.no_grad():
            self.delta_w = self.weight.abs().max() / self.h_lvl_w * self.scaler_dw
            if self.training:
                self.counter.data += 1
                self.delta_x = input.abs().max() / self.h_lvl_i
                self.delta_in_sum.data += self.delta_x
            else:
                self.delta_x = self.delta_in_sum.data / self.counter.data

        input_clip = F.hardtanh(input, min_val=-self.h_lvl_i * self.delta_x.item(),
                                max_val=self.h_lvl_i * self.delta_x.item())
        input_quan = quantize_input(input_clip, self.delta_x)  # * self.delta_x  # convert to voltage
        weight_quan = quantize_weight(self.weight, self.delta_w)  # * self.delta_w
        if self.bias is not None:
            bias_quan = quantize_weight(self.bias, self.delta_x)
        else:
            bias_quan = None

        output_crxb = F.linear(input_quan, weight_quan, bias_quan)

        with torch.no_grad():
            if self.training:
                self.delta_i = output_crxb.abs().max() / self.h_lvl_a
                self.delta_out_sum.data += self.delta_i
            else:
                self.delta_i = self.delta_out_sum.data / self.counter.data
            self.delta_y = self.delta_w * self.delta_x * self.delta_i
        #         print('adc LSB ration:', self.delta_i/self.max_i_LSB)
        output_clip = F.hardtanh(output_crxb, min_val=-self.h_lvl_a * self.delta_i.item(),
                                 max_val=self.h_lvl_a * self.delta_i.item())
        output_adc = adc(output_clip, self.delta_i, 1.)

        return output_adc
Example #13
 def forward(self, x):
     out = F.hardtanh(self.conv1(x))
     out = F.hardtanh(self.conv2(out))
     out = F.hardtanh(self.conv3(out))
     out = out.view(-1, 16)
     out = F.hardtanh(self.fc1(out))
     out = self.fc2(out)
     return out
Example #14
 def forward(self, x):
     out = self.bn1(self.conv1(x))
     out += self.shortcut(x) 
     out = F.hardtanh(out, inplace=True)
     x1 = out
     out = self.bn2(self.conv2(out))
     out += x1 
     out = F.hardtanh(out, inplace=True)
     return out  
Example #15
 def sample_z(self, batch_size, sample=True):
     """Sample the hard-concrete gates for training and use a deterministic value for testing"""
     if sample:
         eps = self.get_eps(self.floatTensor(batch_size, self.dim_z))
         z = self.quantile_concrete(eps).view(batch_size, self.dim_z, 1, 1)
         return F.hardtanh(z, min_val=0, max_val=1)
     else:  # mode
         pi = F.sigmoid(self.qz_loga).view(1, self.dim_z, 1, 1)
         return F.hardtanh(pi * (limit_b - limit_a) + limit_a, min_val=0, max_val=1)
Example #16
    def forward(self, x):
        if self.act_func:
            x = x * (F.hardtanh(x + 3, 0, 6) / 6)

        if len(self.bits) == 1 and self.bits[0] == 32:
            return x
        else:
            x = F.hardtanh(x, 0, 1)
            x = RoundQuant.apply(x, self.n_lvs)
            return x
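
The activation in the first branch, x * hardtanh(x + 3, 0, 6) / 6, is the hard-swish nonlinearity written out by hand; recent PyTorch releases expose it directly as F.hardswish. A quick equivalence check (assuming a PyTorch version that provides F.hardswish):

import torch
import torch.nn.functional as F

x = torch.randn(1000)
manual = x * (F.hardtanh(x + 3, 0.0, 6.0) / 6)
assert torch.allclose(manual, F.hardswish(x))
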
Example #17
    def forward(self, belief, state):
        x = F.hardtanh(self.belief1(belief))
        x = F.hardtanh(self.belief2(x))
        x = F.hardtanh(self.belief_interm(x))
        x = torch.cat((x, state), 0)

        x = F.hardtanh(self.bs1(x))
        x = F.hardtanh(self.bs2(x))
        x = F.softmax(self.bs4(self.bs3(x)))
        return x
Example #18
    def forward(self, x):
        # x = f.hardtanh(self.bn1(self.fc1(x)))
        # x = f.hardtanh(self.bn2(self.fc2(x)))
        # x = self.bn3(self.fc3(x))

        x = f.hardtanh(self.fc1(x))
        x = f.hardtanh(self.fc2(x))
        x = self.fc3(x)

        return x
Example #19
    def update(self, inputs: Tensor, denominator: Tensor) -> Tensor:

        # Step 5: Apply denominator
        # inputs has shape (m, R) if self.flow == 'source_to_target' else (n, R)
        # denominator has shape (m) if self.flow == 'source_to_target' else (n)
        inputs /= denominator.unsqueeze(-1)

        if self.flow == 'source_to_target':
            return F.hardtanh(inputs + self.vertex_translate)
        else:
            return F.hardtanh(inputs + self.edge_translate)
Example #20
    def forward(self, x):
        B, D, N = x.size()
        if self.tnet:
            trans = self.stn(x)
        else:
            trans = None

        x = x.transpose(2, 1)
        if D == 6:
            x, feature = x.split(3, dim=2)
        elif D == 9:
            x, feature = x.split([3, 6], dim=2)
        if self.tnet:
            x = torch.bmm(x, trans)
        if D > 3:
            x = torch.cat([x, feature], dim=2)
        x = x.transpose(2, 1)
        if self.use_bn:
            x = F.hardtanh(self.bn1(self.conv1(x)))
        else:
            x = F.hardtanh(self.conv1(x))

        if self.tnet and self.feature_transform:
            trans_feat = self.fstn(x)
            x = x.transpose(2, 1)
            x = torch.bmm(x, trans_feat)
            x = x.transpose(2, 1)
        else:
            trans_feat = None

        pointfeat = x
        if self.use_bn:
            x = F.hardtanh(self.bn2(self.conv2(x)))
            x = self.bn3(self.conv3(x))
        else:
            x = F.hardtanh(self.conv2(x))
            x = self.conv3(x)

        if self.pool == 'max':
            x = torch.max(x, 2, keepdim=True)[0]
        elif self.pool == 'mean':
            x = torch.mean(x, 2, keepdim=True)
        elif self.pool == 'ema-max':
            if self.use_bn:
                x = torch.max(x, 2, keepdim=True)[0] + offset_map[N]
            else:
                x = torch.max(x, 2, keepdim=True)[0] - 0.3
            x = x.view(-1, 1024)
        x = x.view(-1, 1024)
        if self.global_feat:
            return x, trans, trans_feat
        else:
            x = x.view(-1, 1024, 1).repeat(1, 1, N)
            return torch.cat([x, pointfeat], 1), trans, trans_feat
Example #21
    def features(self, x):

        x = F.max_pool2d(F.hardtanh(self.conv1(x)), 2)
        x = F.max_pool2d(F.hardtanh(self.conv2(x)), 2)
        x = F.max_pool2d(F.hardtanh(self.conv3(x)), 2)
        x_tanh = self.conv4(x)
        x = F.hardtanh(x_tanh)

        x = x.view(-1, 256)
        x = self.dropout(x)
        return x
Example #22
 def sample_z(self, batch_size, sample=True):
     # Sample the hard-concrete gates for training and use a deterministic value for testing
     # training
     if sample:
         eps = self.get_eps(self.floatTensor(batch_size, self.feature))
         z = self.quantile_concrete(eps)
         return F.hardtanh(z, min_val=0, max_val=1)
     # testing
     else:
         pi = torch.sigmoid(self.qz_loga).view(1, self.feature).expand(batch_size, self.feature)
         return F.hardtanh(pi * (limit_b - limit_a) + limit_a, min_val=0, max_val=1)
Example #23
    def sample_weight(self):
        if self.training:
            z = self.quantile_concrete(
                self.get_eps(self.floatTensor(self.loga.size())))
            mask = F.hardtanh(z, min_val=0, max_val=1)
        else:
            pi = torch.sigmoid(self.loga)
            mask = F.hardtanh(pi * (LIMIT_B - LIMIT_A) + LIMIT_A,
                              min_val=0,
                              max_val=1)

        return mask * self.weight
Example #24
    def forward(self, x):
        if self.act_func:
            x = x * (F.hardtanh(x + 3, 0, 6) / 6)

        if self.n_lv == 0:
            return x
        else:
            a = F.softplus(self.a)
            x = x - self.b
            x = F.hardtanh(x / a, 0, 1)
            x = RoundQuant.apply(x, self.n_lv)
            return x
Example #25
    def forward(self, predict, target):
        num = 0
        for i in range(1, len(target)):
            for j in range(i):
                #num += torch.sign(predict[i][0] - predict[j][0]) * torch.sign(target[i] - target[j])
                num += F.hardtanh(predict[i] - predict[j]) * \
                       F.hardtanh(target[i] - target[j])

        x = 1 - 2 * num / (len(target) * (len(target) - 1))
        #print(torch.autograd.grad(x, predict))
        return x
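
In this ranking loss, hardtanh acts as a clipped, differentiable stand-in for sign(), so concordant and discordant pairs still produce gradients. A vectorized sketch of the same pairwise sum (assuming 1-D predict and target tensors; the function name is hypothetical):

import torch
import torch.nn.functional as F

def soft_rank_loss(predict, target):
    dp = F.hardtanh(predict[:, None] - predict[None, :])   # clipped pairwise differences
    dt = F.hardtanh(target[:, None] - target[None, :])
    n = target.numel()
    # Sum over the strictly lower triangle (j < i), matching the nested loops above.
    mask = torch.ones(n, n, device=target.device).tril(diagonal=-1).bool()
    num = (dp * dt)[mask].sum()
    return 1 - 2 * num / (n * (n - 1))
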
Example #26
    def forward(self, x, blur_sigma=None, epsilon=None):
        """
        Parameters
        ----------
        x : torch.Tensor
            [n,H,W] pre-conv image probabilities
        blur_sigma : float | None
            amount of blur. 'None' means use value from __init__ call
        epsilon : float | None
            amount of pixel noise. 'None' means use value from __init__ call

        Returns
        -------
        x : torch.Tensor
            [n,H,W] post-conv image probabilities
        """
        if self.is_cuda:
            x = x.cuda()

        if blur_sigma is None:
            H_blur = self.H_blur
            blur_sigma = self.blur_sigma
        else:
            blur_sigma = check_float_tesnor(blur_sigma, self.device)
            H_blur = blur_filter(self.blur_fsize,
                                 blur_sigma,
                                 device=self.device)

        if epsilon is None:
            epsilon = self.epsilon
        else:
            epsilon = check_float_tesnor(epsilon, self.device)

        # unsqueeze
        x = x.unsqueeze(1)
        # apply broaden
        for i in range(self.nbroad):
            x = F.conv2d(x, self.H_broaden, padding=1)
        x = F.hardtanh(x, 0., 1.)
        # return if no blur
        if blur_sigma == 0:
            x = x.squeeze(1)
            return x
        # apply blur
        for i in range(2):
            x = F.conv2d(x, H_blur, padding=self.blur_pad)
        x = F.hardtanh(x, 0., 1.)
        # apply pixel noise
        if epsilon > 0:
            x = (1 - epsilon) * x + epsilon * (1 - x)
        # squeeze
        x = x.squeeze(1)

        return x
Example #27
File: linear.py Project: pnnl/slim
 def effective_W(self):
     if self.training:
         z = self.quantile_concrete(
             self.get_eps(
                 self.floatTensor(self.in_features, self.out_features)))
         mask = F.hardtanh(z, min_val=0, max_val=1)
     else:
         pi = F.sigmoid(self.qz_loga)
         mask = F.hardtanh(pi * (self.limit_b - self.limit_a) +
                           self.limit_a,
                           min_val=0,
                           max_val=1)
     return mask * self.weight
Example #28
    def forward(self, input):
        #print('input shape is: ' + str(input.shape)) (10, 40960)
        # generate a random matrix
        #print(self.loss_prob)
        r = torch.rand((input.shape[0], self.pieces)) > self.loss_prob
        #print(r)
        # then extend it to the block random
        # u = np.concatenate((np.repeat(r.numpy(),self.block_size_y,axis = 1),np.ones((self.pieces,1))),axis = 1)
        u = torch.tensor(np.repeat(r.numpy(), self.block_size_x,
                                   axis=1)).float().cuda()
        input = input * u
        (input0, input1, input2, input3, input4) = torch.chunk(input, 5, 1)
        #print('shape of input0 is ' + str(input0.shape))
        s0 = F.linear(input0, self.weight0, self.bias)
        s1 = F.linear(input1, self.weight1, self.bias)
        s2 = F.linear(input2, self.weight2, self.bias)
        s3 = F.linear(input3, self.weight3, self.bias)
        s4 = F.linear(input4, self.weight4, self.bias)

        s0 = F.hardtanh(s0, self.lower_bound, self.upper_bound)
        s1 = F.hardtanh(s1, self.lower_bound, self.upper_bound)
        s2 = F.hardtanh(s2, self.lower_bound, self.upper_bound)
        s3 = F.hardtanh(s3, self.lower_bound, self.upper_bound)
        s4 = F.hardtanh(s4, self.lower_bound, self.upper_bound)
        s0 = torch.round((s0 - self.lower_bound) /
                         self.delta) * self.delta + self.lower_bound
        s1 = torch.round((s1 - self.lower_bound) /
                         self.delta) * self.delta + self.lower_bound
        s2 = torch.round((s2 - self.lower_bound) /
                         self.delta) * self.delta + self.lower_bound
        s3 = torch.round((s3 - self.lower_bound) /
                         self.delta) * self.delta + self.lower_bound
        s4 = torch.round((s4 - self.lower_bound) /
                         self.delta) * self.delta + self.lower_bound
        '''
        print(torch.max(s0))
        print(torch.min(s0))
        print(torch.max(s1))
        print(torch.min(s1))
        print(torch.max(s2))
        print(torch.min(s2))
        print(torch.max(s3))
        print(torch.min(s3))
        print(torch.max(s4))
        print(torch.min(s4))
        '''
        #print('mask shape is ' + str(self.weight.shape))
        #print(self.weight.shape)
        #print(self.block_size_y)
        #print(self.block_size_x)
        return s0 + s1 + s2 + s3 + s4
Example #29
    def forward(self, x, meta_net):

        # out = binary_modules.BinActive().apply(x)
        out = self.conv1(x)
        out = self.bn1(out)
        out = F.hardtanh(out)

        # out = binary_modules.BinActive().apply(out)
        out = self.conv2(out)
        out = self.bn2(out)

        out += self.shortcut(x)
        out = F.hardtanh(out)
        return out
Example #30
    def forward(self, x):
        if self.act_func:
            x = x * (F.hardtanh(x + 3, 0, 6) / 6)

        if len(self.bits) == 1 and self.bits[0] == 32:
            return x
        else:
            a = F.softplus(self.a)
            c = F.softplus(self.c)
            x = x + self.b
            x = F.hardtanh(x / a, 0, 1)
            x = RoundQuant.apply(x, self.n_lvs) * c
            x = x + self.d
            return x
Example #31
    def forward(self, input, lengths=None):
        "See :obj:`onmt.modules.EncoderBase.forward()`"
        # (batch_size, 1, nfft, t)
        # layer 1
        input = self.batch_norm1(self.layer1(input[:, :, :, :]))

        # (batch_size, 32, nfft/2, t/2)
        input = F.hardtanh(input, 0, 20, inplace=True)

        # (batch_size, 32, nfft/2/2, t/2)
        # layer 2
        input = self.batch_norm2(self.layer2(input))

        # (batch_size, 32, nfft/2/2, t/2)
        input = F.hardtanh(input, 0, 20, inplace=True)

        batch_size = input.size(0)
        length = input.size(3)
        input = input.view(batch_size, -1, length)
        input = input.transpose(0, 2).transpose(1, 2)

        output, hidden = self.rnn(input)

        return hidden, output