def _get_random_data(n, **tkwargs):
    train_x1 = torch.linspace(0, 0.95, n + 1, **tkwargs) + 0.05 * torch.rand(
        n + 1, **tkwargs
    )
    train_x2 = torch.linspace(0, 0.95, n, **tkwargs) + 0.05 * torch.rand(n, **tkwargs)
    train_y1 = torch.sin(train_x1 * (2 * math.pi)) + 0.2 * torch.randn_like(train_x1)
    train_y2 = torch.cos(train_x2 * (2 * math.pi)) + 0.2 * torch.randn_like(train_x2)
    return train_x1.unsqueeze(-1), train_x2.unsqueeze(-1), train_y1, train_y2
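The linspace-plus-jitter pattern above spreads inputs roughly evenly over [0, 1] without putting them on an exact grid. A minimal sketch of the same idea, assuming only torch is imported:

import torch

n = 5
# evenly spaced anchors in [0, 0.95], each perturbed by up to 0.05
x = torch.linspace(0, 0.95, n) + 0.05 * torch.rand(n)
print(x)  # 5 points in [0, 1], approximately sorted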
Example 2
			def forward(self, variableInput, variableFlow):
				if not hasattr(self, 'tensorGrid') or self.tensorGrid.size(0) != variableInput.size(0) or self.tensorGrid.size(2) != variableInput.size(2) or self.tensorGrid.size(3) != variableInput.size(3):
					torchHorizontal = torch.linspace(-1.0, 1.0, variableInput.size(3)).view(1, 1, 1, variableInput.size(3)).expand(variableInput.size(0), 1, variableInput.size(2), variableInput.size(3))
					torchVertical = torch.linspace(-1.0, 1.0, variableInput.size(2)).view(1, 1, variableInput.size(2), 1).expand(variableInput.size(0), 1, variableInput.size(2), variableInput.size(3))

					self.tensorGrid = torch.cat([ torchHorizontal, torchVertical ], 1).cuda()
				# end

				variableFlow = torch.cat([ variableFlow[:, 0:1, :, :] / ((variableInput.size(3) - 1.0) / 2.0), variableFlow[:, 1:2, :, :] / ((variableInput.size(2) - 1.0) / 2.0) ], 1)

				variableGrid = torch.autograd.Variable(data=self.tensorGrid, volatile=not self.training) + variableFlow

				return torch.nn.functional.grid_sample(input=variableInput, grid=variableGrid.clamp(-1.0, 1.0).permute(0, 2, 3, 1), mode='bilinear')
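The tensorGrid above is an identity sampling grid in normalized [-1, 1] coordinates; adding the (normalized) flow and calling grid_sample performs backward warping. A minimal sketch of the zero-flow identity case, assuming a recent PyTorch:

import torch
import torch.nn.functional as F

n, c, h, w = 1, 3, 4, 4
img = torch.rand(n, c, h, w)

# identity grid in normalized coordinates: channel 0 is x, channel 1 is y
ys, xs = torch.meshgrid(torch.linspace(-1, 1, h), torch.linspace(-1, 1, w), indexing="ij")
grid = torch.stack([xs, ys], dim=-1).unsqueeze(0)  # (1, h, w, 2)

out = F.grid_sample(img, grid, mode="bilinear", align_corners=True)
assert torch.allclose(out, img, atol=1e-6)  # zero flow reproduces the input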
Example 3
 def _setUp(self, double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi)).squeeze(-1)
     train_yvar = torch.tensor(0.1 ** 2, device=device, dtype=dtype)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     self.train_yvar = train_yvar
     self.bounds = torch.tensor([[0.0], [1.0]], device=device, dtype=dtype)
     model_st = SingleTaskGP(self.train_x, self.train_y)
     self.model_st = model_st.to(device=device, dtype=dtype)
     self.mll_st = ExactMarginalLogLikelihood(
         self.model_st.likelihood, self.model_st
     )
     self.mll_st = fit_gpytorch_model(self.mll_st, options={"maxiter": 5})
     model_fn = FixedNoiseGP(
         self.train_x, self.train_y, self.train_yvar.expand_as(self.train_y)
     )
     self.model_fn = model_fn.to(device=device, dtype=dtype)
     self.mll_fn = ExactMarginalLogLikelihood(
         self.model_fn.likelihood, self.model_fn
     )
     self.mll_fn = fit_gpytorch_model(self.mll_fn, options={"maxiter": 5})
Example 4
    def __call__(self, spec_f):

        spec_f, is_variable = _check_is_variable(spec_f)
        n_fft = spec_f.size(2)

        m_min = 0. if self.f_min == 0 else 2595 * np.log10(1. + (self.f_min / 700))
        m_max = 2595 * np.log10(1. + (self.f_max / 700))

        m_pts = torch.linspace(m_min, m_max, self.n_mels + 2)
        f_pts = (700 * (10**(m_pts / 2595) - 1))

        bins = torch.floor(((n_fft - 1) * 2) * f_pts / self.sr).long()

        fb = torch.zeros(n_fft, self.n_mels)
        for m in range(1, self.n_mels + 1):
            f_m_minus = bins[m - 1].item()
            f_m = bins[m].item()
            f_m_plus = bins[m + 1].item()

            if f_m_minus != f_m:
                fb[f_m_minus:f_m, m - 1] = (torch.arange(f_m_minus, f_m) - f_m_minus) / (f_m - f_m_minus)
            if f_m != f_m_plus:
                fb[f_m:f_m_plus, m - 1] = (f_m_plus - torch.arange(f_m, f_m_plus)) / (f_m_plus - f_m)

        fb = Variable(fb)
        spec_m = torch.matmul(spec_f, fb)  # (c, l, n_fft) dot (n_fft, n_mels) -> (c, l, n_mels)
        return spec_m if is_variable else spec_m.data
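The constants 2595 and 700 above are the standard HTK mel-scale conversion. A small self-contained sketch of both directions, assuming only torch:

import torch

def hz_to_mel(f):
    return 2595.0 * torch.log10(1.0 + f / 700.0)

def mel_to_hz(m):
    return 700.0 * (10.0 ** (m / 2595.0) - 1.0)

f = torch.tensor([0.0, 1000.0, 8000.0])
assert torch.allclose(mel_to_hz(hz_to_mel(f)), f)  # round-trip recovers the input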
def main():
    x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
    y = x.pow(2) + 0.2 * torch.rand(x.size())

    x = Variable(x)
    y = Variable(y)

    net = RegreNN(1,1)
    optm = torch.optim.SGD(net.parameters(),lr=0.5e-1)
    loss_func = torch.nn.MSELoss()

    plt.ion()
    for i in range(600):
        v = net(x)
        loss = loss_func(v,y)
        optm.zero_grad()
        loss.backward()
        optm.step()

        if i % 100 == 0:
            print(loss)
            plt.cla()
            plt.scatter(x.data.numpy(), y.data.numpy())
            plt.plot(x.data.numpy(), v.data.numpy(), 'r-', lw=5)
            plt.text(0.5, 0, 'Loss=%.4f' % loss.data[0], fontdict={'size': 20, 'color':  'red'})
            plt.pause(0.1)

    plt.ioff()
    plt.show()
 def __init__(self, n_bins=15):
     """
     n_bins (int): number of confidence interval bins
     """
     super(_ECELoss, self).__init__()
     bin_boundaries = torch.linspace(0, 1, n_bins + 1)
     self.bin_lowers = bin_boundaries[:-1]
     self.bin_uppers = bin_boundaries[1:]
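Slicing the boundaries with [:-1] and [1:] pairs them into n_bins adjacent confidence intervals. A quick sketch, assuming only torch:

import torch

bounds = torch.linspace(0, 1, 4 + 1)  # 5 boundaries -> 4 bins
lowers, uppers = bounds[:-1], bounds[1:]
for lo, up in zip(lowers, uppers):
    print(f"bin ({float(lo):.2f}, {float(up):.2f}]")  # (0.00, 0.25], (0.25, 0.50], ...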
Example 7
 def forward(ctx, theta, size):
     assert type(size) == torch.Size
     N, C, H, W = size
     ctx.size = size
     if theta.is_cuda:
         AffineGridGenerator._enforce_cudnn(theta)
         assert False
     ctx.is_cuda = False
     base_grid = theta.new(N, H, W, 3)
     linear_points = torch.linspace(-1, 1, W) if W > 1 else torch.Tensor([-1])
     base_grid[:, :, :, 0] = torch.ger(torch.ones(H), linear_points).expand_as(base_grid[:, :, :, 0])
     linear_points = torch.linspace(-1, 1, H) if H > 1 else torch.Tensor([-1])
     base_grid[:, :, :, 1] = torch.ger(linear_points, torch.ones(W)).expand_as(base_grid[:, :, :, 1])
     base_grid[:, :, :, 2] = 1
     ctx.base_grid = base_grid
     grid = torch.bmm(base_grid.view(N, H * W, 3), theta.transpose(1, 2))
     grid = grid.view(N, H, W, 2)
     return grid
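The base grid built here matches what torch.nn.functional.affine_grid produces for an identity theta (with align_corners=True), apart from the homogeneous third column. A minimal comparison sketch, assuming a recent PyTorch:

import torch
import torch.nn.functional as F

N, C, H, W = 1, 1, 3, 5
theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])  # identity affine transform
grid = F.affine_grid(theta, (N, C, H, W), align_corners=True)

xs = torch.linspace(-1, 1, W).expand(H, W)               # x varies along width
ys = torch.linspace(-1, 1, H).unsqueeze(1).expand(H, W)  # y varies along height
manual = torch.stack([xs, ys], dim=-1).unsqueeze(0)
assert torch.allclose(grid, manual)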
Example 8
    def __init__(self, policy, cmdl):
        """Assumes policy returns an autograd.Variable"""
        self.name = "CP"
        self.cmdl = cmdl
        self.policy = policy

        self.dtype = dtype = TorchTypes(cmdl.cuda)
        self.support = torch.linspace(cmdl.v_min, cmdl.v_max, cmdl.atoms_no)
        self.support = self.support.type(dtype.FloatTensor)
Example 9
 def _getModel(self, double=False, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     train_y = torch.sin(train_x.view(-1) * (2 * math.pi)) + noise
     model = SingleTaskGP(train_x, train_y)
     mll = ExactMarginalLogLikelihood(model.likelihood, model)
     return mll.to(device=device, dtype=dtype)
Example 10
    def act(self, state, epsilon, exp_model, evaluation=False):
        # self.T += 1
        self.dqn.eval()
        orig_state = state[:, :, -1:]
        state = torch.from_numpy(state).float().transpose_(0, 2).unsqueeze(0)
        q_values_distributions = self.dqn(Variable(state, volatile=True)).cpu().data[0]
        # TODO: Log Q-Value distributions
        # print(q_values_distributions)
        values = torch.linspace(self.args.v_min, self.args.v_max, steps=self.args.atoms)
        values = values.view(1, self.args.atoms)
        values = values.expand(self.args.actions, self.args.atoms)

        # print(values, q_values_distributions, torch.sum(q_values_distributions * values, dim=1))
        q_value_expectations = torch.sum(q_values_distributions * values, dim=1)
        q_values_numpy = q_value_expectations.numpy()

        extra_info = {}

        if self.args.optimistic_init and not evaluation:
            raise NotImplementedError
            q_values_pre_bonus = np.copy(q_values_numpy)
            if not self.args.ucb:
                for a in range(self.args.actions):
                    _, info = exp_model.bonus(orig_state, a, dont_remember=True)
                    action_pseudo_count = info["Pseudo_Count"]
                    # TODO: Log the optimism bonuses
                    optimism_bonus = self.args.optimistic_scaler / np.sqrt(action_pseudo_count + 0.01)
                    self.log("Bandit/Action_{}".format(a), optimism_bonus, step=self.T)
                    q_values_numpy[a] += optimism_bonus
            else:
                action_counts = []
                for a in range(self.args.actions):
                    _, info = exp_model.bonus(orig_state, a, dont_remember=True)
                    action_pseudo_count = info["Pseudo_Count"]
                    action_counts.append(action_pseudo_count)
                total_count = sum(action_counts)
                for ai, a in enumerate(action_counts):
                    # TODO: Log the optimism bonuses
                    optimism_bonus = self.args.optimistic_scaler * np.sqrt(2 * np.log(max(1, total_count)) / (a + 0.01))
                    self.log("Bandit/UCB/Action_{}".format(ai), optimism_bonus, step=self.T)
                    q_values_numpy[ai] += optimism_bonus

            extra_info["Action_Bonus"] = q_values_numpy - q_values_pre_bonus

        extra_info["Q_Values"] = q_values_numpy

        if np.random.random() < epsilon:
            action = np.random.randint(low=0, high=self.args.actions)
        else:
            action = int(np.argmax(q_values_numpy))
            # action = q_values.max(0)[1][0]  # Torch...

        extra_info["Action"] = action

        return action, extra_info
Example 11
    def gaussian_robustness(self, loader, device, sigmas_range=(0, 0.5, 30)):
        '''Compute the robustness in the presence of Gaussian noise.

        We first define the accuracy at a given input noise level sigma
        as a function rho(sigma). Then, we define the robustness to be the
        area under the curve of rho when sigma is in the given range.
        Note: rho(sigma) is independent of the ground truth labels
            and the noise is generated using `utils.rand.ring()`.

        Args:
            loader: A torch.utils.data.DataLoader without shuffling.
            device: The device to do the forward passes on.
            sigmas_range: (min_sigma, max_sigma, num_sigmas)
                The noise levels are `linspace(*sigmas_range)`.
                The generated noise will be multiplied by `self.input_range()`.

        Returns:
            (robustness: The robustness of the classifier under Gaussian noise,
             (sigmas: The noise levels, accuracies: the accuracies))
        '''
        self.to(device).eval()

        # compute the output predictions for the clean images
        count = 0
        clean_labels = []
        with torch.no_grad():
            for images, _ in loader:
                labels = self.forward(images.to(device)).argmax(dim=-1)
                clean_labels.append(labels)
                count += images.size(0)

        # get the noise levels (sigmas)
        input_range = self.input_range()
        sigmas = torch.linspace(*sigmas_range, device=device)
        accuracies = torch.zeros(sigmas.numel(), device=device)
        kwargs = {
            'device': device,
            'dtype': images.dtype,
            'size': images[0, ...].size(),
            'tolerance': float((sigmas[1] - sigmas[0]) / 2),
        }

        # seeding the device to get deterministic output
        with RNG(seed=0, devices=[device]):
            for i, sigma in enumerate(verbosify(sigmas)):
                sigma = float(sigma * input_range)
                for (images, _), labels in zip(loader, clean_labels):
                    noise = ring(batch=images.size(0), sigma=sigma, **kwargs)
                    out = self.forward(noise.add_(images.to(device)))
                    accuracies[i] += int(out.argmax(dim=-1).eq(labels).sum())

        accuracies /= count
        robustness = trapz(accuracies, x=sigmas) / (sigmas[-1] - sigmas[0])
        return robustness, (sigmas, accuracies)
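The robustness score is the area under the accuracy-vs-sigma curve, normalized by the sigma range so that a classifier that stays perfectly accurate scores 1. A tiny numerical sketch of that normalization, using torch.trapz in place of whatever trapz the snippet imports:

import torch

sigmas = torch.linspace(0.0, 0.5, 30)
accuracies = torch.ones_like(sigmas)         # hypothetical: always correct
auc = torch.trapz(accuracies, x=sigmas)      # area under the curve = 0.5
robustness = auc / (sigmas[-1] - sigmas[0])  # normalized to 1.0
print(float(robustness))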
Example 12
def _get_random_data(batch_shape, num_outputs, n=10, **tkwargs):
    train_x = torch.linspace(0, 0.95, n, **tkwargs).unsqueeze(-1) + 0.05 * torch.rand(
        n, 1, **tkwargs
    ).repeat(batch_shape + torch.Size([1, 1]))
    train_y = torch.sin(train_x * (2 * math.pi)) + 0.2 * torch.randn(
        n, num_outputs, **tkwargs
    ).repeat(batch_shape + torch.Size([1, 1]))

    if num_outputs == 1:
        train_y = train_y.squeeze(-1)
    return train_x, train_y
Example 13
def _get_random_mt_data(**tkwargs):
    train_x = torch.linspace(0, 0.95, 10, **tkwargs) + 0.05 * torch.rand(10, **tkwargs)
    train_y1 = torch.sin(train_x * (2 * math.pi)) + torch.randn_like(train_x) * 0.2
    train_y2 = torch.cos(train_x * (2 * math.pi)) + torch.randn_like(train_x) * 0.2
    train_i_task1 = torch.full_like(train_x, dtype=torch.long, fill_value=0)
    train_i_task2 = torch.full_like(train_x, dtype=torch.long, fill_value=1)
    full_train_x = torch.cat([train_x, train_x])
    full_train_i = torch.cat([train_i_task1, train_i_task2])
    full_train_y = torch.cat([train_y1, train_y2])
    train_X = torch.stack([full_train_x, full_train_i.type_as(full_train_x)], dim=-1)
    train_Y = full_train_y
    return train_X, train_Y
def main():
    LR = 1e-2

    BATCH_SIZE=4

    x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
    y = x.pow(2) + 0.2 * torch.rand(x.size())


    torch_dataset = Data.TensorDataset(data_tensor=x, target_tensor=y)
    loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2, )

    net_SGD = Net()
    net_Momentum = Net()
    net_RMSprop = Net()
    net_Adam = Net()
    nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

    loss_func = torch.nn.MSELoss()

    optimizer_list = [
        torch.optim.SGD(net_SGD.parameters(),lr=LR),
        torch.optim.SGD(net_Momentum.parameters(),lr=LR, momentum=0.8),
        torch.optim.RMSprop(net_RMSprop.parameters(),lr=LR, alpha=0.9),
        torch.optim.Adam(net_Adam.parameters(),lr=LR, betas=(0.9, 0.99)),
    ]

    losses_his = [[], [], [], []]  # record loss

    for epoch in range(BATCH_SIZE):  # note: BATCH_SIZE doubles as the epoch count here
        for step, (batch_x, batch_y) in enumerate(loader):
            b_x = Variable(batch_x)
            b_y = Variable(batch_y)
            for net, opt, l_his in zip(nets, optimizer_list, losses_his):
                output = net(b_x)  # get output for every net
                loss = loss_func(output, b_y)  # compute loss for every net
                opt.zero_grad()  # clear gradients for next train
                loss.backward()  # backpropagation, compute gradients
                opt.step()  # apply gradients
                l_his.append(loss.data[0])  # loss recoder

    labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
    for i, l_his in enumerate(losses_his):
        plt.plot(l_his, label=labels[i])
    plt.legend(loc='best')
    plt.xlabel('Steps')
    plt.ylabel('Loss')
    plt.ylim((0, 0.2))
    plt.show()
    def _get_categorical(self, next_states, rewards, mask):
        batch_sz = next_states.size(0)
        gamma = self.gamma
        rewards = rewards.data

        # Compute probabilities p(x, a)
        probs = self.target_policy(next_states).data
        argmax_a = torch.mul(
            probs, self.support.expand_as(probs)).sum(2).max(1)[1].squeeze(1)
        action_mask = argmax_a.unsqueeze(2).expand(batch_sz, 1, self.atoms_no)
        qa_probs = probs.gather(1, action_mask).squeeze()

        # Mask gamma and reshape it torgether with rewards to fit p(x,a).
        rewards = rewards.unsqueeze(1).expand_as(qa_probs)
        gamma = (mask.float() * gamma).unsqueeze(1).expand_as(qa_probs)

        # Compute projection of the application of the Bellman operator.
        bellman_op = rewards + gamma * self.support.unsqueeze(0).expand_as(rewards)
        bellman_op = torch.clamp(bellman_op, self.v_min, self.v_max)

        # Compute categorical indices for distributing the probability
        m = torch.zeros(batch_sz, self.atoms_no).type(self.dtype.FloatTensor)
        b = (bellman_op - self.v_min) / self.delta_z
        l = b.floor().type(self.dtype.LongTensor)
        u = b.ceil().type(self.dtype.LongTensor)

        # Distribute probability
        """
        for i in range(batch_sz):
            for j in range(self.atoms_no):
                uidx = u[i][j]
                lidx = l[i][j]
                m[i][lidx] = m[i][lidx] + qa_probs[i][j] * (uidx - b[i][j])
                m[i][uidx] = m[i][uidx] + qa_probs[i][j] * (b[i][j] - lidx)
        for i in range(batch_sz):
            m[i].index_add_(0, l[i], qa_probs[i] * (u[i].float() - b[i]))
            m[i].index_add_(0, u[i], qa_probs[i] * (b[i] - l[i].float()))

        """
        # Optimized by https://github.com/tudor-berariu
        offset = torch.linspace(0, ((batch_sz - 1) * self.atoms_no), batch_sz)\
            .type(self.dtype.LongTensor)\
            .unsqueeze(1).expand(batch_sz, self.atoms_no)

        m.view(-1).index_add_(0, (l + offset).view(-1),
                              (qa_probs * (u.float() - b)).view(-1))
        m.view(-1).index_add_(0, (u + offset).view(-1),
                              (qa_probs * (b - l.float())).view(-1))
        return Variable(m.type(self.dtype.FloatTensor))
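The commented-out loops and the vectorized version compute the same projection: index_add_ on the flattened m needs a per-row offset of atoms_no so that row i's indices land in the flat slice [i * atoms_no, (i + 1) * atoms_no). A standalone sketch of the offset trick on toy numbers, assuming only torch:

import torch

batch_sz, atoms_no = 2, 5
m = torch.zeros(batch_sz, atoms_no)
l = torch.tensor([[0, 1], [2, 3]])          # per-row target atom indices
p = torch.tensor([[0.7, 0.3], [0.4, 0.6]])  # probability mass to distribute

offset = (torch.arange(batch_sz) * atoms_no).unsqueeze(1)  # [[0], [5]]
m.view(-1).index_add_(0, (l + offset).view(-1), p.view(-1))
print(m)
# tensor([[0.7, 0.3, 0.0, 0.0, 0.0],
#         [0.0, 0.0, 0.4, 0.6, 0.0]])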
def get_precision_recall(args, score, label, num_samples, beta=1.0, sampling='log', predicted_score=None):
    '''
    :param args:
    :param score: anomaly scores
    :param label: anomaly labels
    :param num_samples: the number of threshold samples
    :param beta: weight of recall relative to precision in the F-beta score
    :param sampling: threshold sampling scheme, 'log' or linear
    :param predicted_score: optional predicted scores subtracted from `score`
    :return: precision, recall, and F-beta scores
    '''
    if predicted_score is not None:
        score = score - torch.FloatTensor(predicted_score).squeeze().to(args.device)

    maximum = score.max()
    if sampling=='log':
        # Sample thresholds logarithmically,
        # spaced between 10**0 and the maximum anomaly score
        th = torch.logspace(0, float(torch.log10(maximum)), num_samples).to(args.device)
    else:
        # Sample thresholds linearly,
        # equally spaced between 0 and the maximum anomaly score
        th = torch.linspace(0, maximum, num_samples).to(args.device)

    precision = []
    recall = []

    for i in range(len(th)):
        anomaly = (score > th[i]).float()
        idx = anomaly * 2 + label
        tn = (idx == 0.0).sum().item()  # tn
        fn = (idx == 1.0).sum().item()  # fn
        fp = (idx == 2.0).sum().item()  # fp
        tp = (idx == 3.0).sum().item()  # tp

        p = tp / (tp + fp + 1e-7)
        r = tp / (tp + fn + 1e-7)

        if p != 0 and r != 0:
            precision.append(p)
            recall.append(r)

    precision = torch.FloatTensor(precision)
    recall = torch.FloatTensor(recall)


    f1 = (1 + beta ** 2) * (precision * recall).div(beta ** 2 * precision + recall + 1e-7)

    return precision, recall, f1
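Note that torch.logspace treats its start and end as exponents, so the 'log' branch sweeps thresholds from 10**0 = 1 up to the maximum score, while the linear branch starts at 0. A minimal illustration, assuming only torch:

import torch

maximum = torch.tensor(100.0)
log_th = torch.logspace(0, float(torch.log10(maximum)), 5)  # 1, ~3.16, 10, ~31.6, 100
lin_th = torch.linspace(0, float(maximum), 5)               # 0, 25, 50, 75, 100
print(log_th, lin_th)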
Example 17
    def loss_categorical(self, transitions):
        num_atoms = self.config.num_atoms
        batch_size = len(transitions)
        batch = Transition(*zip(*transitions))

        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), dtype=torch.uint8, device=self.config.device)

        state_batch = torch.cat(batch.state).to(torch.float32)
        action_batch = torch.cat(batch.action)
        reward_batch = torch.cat(batch.reward).to(torch.float32)
        
        next_states = [s for s in batch.next_state if s is not None]

        with torch.no_grad():
            non_final_next_state = torch.cat(next_states).to(torch.float32)

            best_actions = self._get_Q(self.model, non_final_next_state).argmax(dim=1)

            self.target_model.reset_noise()
            p_next = self.target_model(non_final_next_state, ApplySoftmax.NORMAL)

            p_next_best = torch.full((batch_size, num_atoms), 1.0 / num_atoms, device=self.config.device, dtype=torch.float32)
            p_next_best[non_final_mask] = p_next[range(len(non_final_next_state)), best_actions]

            gamma = torch.zeros(batch_size, num_atoms).to(self.config.device)
            gamma[non_final_mask] = self.config.gamma ** self.config.num_multi_step_reward

            Tz = (reward_batch.unsqueeze(1) + gamma * self.support.unsqueeze(0)).clamp(self.Vmin, self.Vmax)
            b = (Tz - self.Vmin) / self.delta_z
            l = b.floor().long()
            u = b.ceil().long()

            l[(l == u) * (0 < l)] -= 1
            # the values in l has already changed, so same index is not processed for u
            u[(l == u) * (u < num_atoms - 1)] += 1

            m = torch.zeros(batch_size, num_atoms).to(self.config.device, dtype=torch.float32)
            offset = torch.linspace(0, ((batch_size-1) * num_atoms), batch_size).unsqueeze(1).expand(batch_size, num_atoms).to(l)
            m.view(-1).index_add_(0, (l + offset).view(-1), (p_next_best * (u.float() - b)).view(-1))
            m.view(-1).index_add_(0, (u + offset).view(-1), (p_next_best * (b - l.float())).view(-1))

        self.model.reset_noise()
        log_p = self.model(state_batch, ApplySoftmax.LOG)
        log_p_a = log_p[range(batch_size), action_batch.squeeze()]

        return -torch.sum(m * log_p_a, dim=1)
Example 18
def plot_cont_traversal(model: infogan.InfoGAN, c, nrow, nstep=9):
    values = torch.linspace(-2, 2, nstep).to(model.device)
    latent = model.sample_latent(nrow).repeat(nstep, 1)
    for r in range(nrow):
        latent[r::nrow, model.cont_idx[c]] = values
    samples = model.gen(latent).detach()
    fig, axs = plot_grid(samples, nrow=nrow, figsize=(nstep, nrow),
                         gridspec_kw=dict(wspace=0, hspace=0))
    # plt.suptitle(f"$c_{{{c + 2}}}$: Continuous (-2 to 2)")

    for i in [0, -1]:
        _prep_ax(axs[i, 0])
    axs[0,  0].set_xlabel(f'${values[ 0]:+g}$', ha='center', va='bottom', size=_TICK_LABEL_SIZE)
    axs[-1, 0].set_xlabel(f'${values[-1]:+g}$', ha='center', va='bottom', size=_TICK_LABEL_SIZE)

    ypos = axs[0, 0].get_position().y1

    fig.text(.5, ypos, f'$c_{{{c + 2}}}$', ha='center', va='bottom', size=_VAR_LABEL_SIZE)
    def __init__(self, policy, target_policy, cmdl):
        self.name = "Categorical-PI"
        self.policy = policy
        self.target_policy = target_policy
        self.lr = cmdl.lr
        self.gamma = cmdl.gamma
        self.cmdl = cmdl

        self.optimizer = optim.Adam(self.policy.parameters(), lr=self.lr)
        self.optimizer.zero_grad()
        self.grads_decoupled = False

        self.dtype = dtype = TorchTypes(cmdl.cuda)
        self.v_min, self.v_max = v_min, v_max = cmdl.v_min, cmdl.v_max
        self.atoms_no = atoms_no = cmdl.atoms_no
        self.support = torch.linspace(v_min, v_max, atoms_no)

        self.support = self.support.type(dtype.FloatTensor)
        self.delta_z = (cmdl.v_max - cmdl.v_min) / (cmdl.atoms_no - 1)
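With a linspace support, delta_z is exactly the spacing between adjacent atoms. A quick numeric check with illustrative values for v_min, v_max, and atoms_no:

import torch

v_min, v_max, atoms_no = -10.0, 10.0, 51
support = torch.linspace(v_min, v_max, atoms_no)
delta_z = (v_max - v_min) / (atoms_no - 1)  # 0.4
assert torch.allclose(support[1:] - support[:-1],
                      torch.full((atoms_no - 1,), delta_z))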
Example 20
 def _setUp(self, double=False, cuda=False, expand=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     dtype = torch.double if double else torch.float
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype).unsqueeze(-1)
     train_y = torch.sin(train_x * (2 * math.pi)).squeeze(-1)
     noise = torch.tensor(NOISE, device=device, dtype=dtype)
     self.train_x = train_x
     self.train_y = train_y + noise
     if expand:
         self.train_x = self.train_x.expand(-1, 2)
         ics = torch.tensor([[0.5, 1.0]], device=device, dtype=dtype)
     else:
         ics = torch.tensor([[0.5]], device=device, dtype=dtype)
     self.initial_conditions = ics
     self.f_best = self.train_y.max().item()
     model = SingleTaskGP(self.train_x, self.train_y)
     self.model = model.to(device=device, dtype=dtype)
     self.mll = ExactMarginalLogLikelihood(self.model.likelihood, self.model)
     self.mll = fit_gpytorch_model(self.mll, options={"maxiter": 1})
Example 21
    def __init__(self, scale, taps, samplerate):
        super(SincLayer, self).__init__()
        self.samplerate = int(samplerate)
        self.taps = taps
        self.scale = scale

        # each filter requires two parameters to define the filter bandwidth
        filter_parameters = torch.FloatTensor(len(scale), 2)

        self.linear = nn.Parameter(
            torch.linspace(-math.pi, math.pi, steps=taps), requires_grad=False)
        self.window = nn.Parameter(
            torch.hamming_window(self.taps), requires_grad=False)

        for i, band in enumerate(scale):
            start = self.samplerate / band.start_hz
            stop = self.samplerate / band.stop_hz
            filter_parameters[i, 0] = start
            filter_parameters[i, 1] = stop

        self.filter_parameters = nn.Parameter(filter_parameters)
Example 22
def plot_cont_cont_traversal(model: infogan.InfoGAN, c1, c2, nstep=9):
    values = torch.linspace(-1.5, 1.5, nstep).to(model.device)
    latent = model.sample_latent(1).repeat(nstep ** 2, 1)
    for s in range(nstep):
        latent[s::nstep, model.cont_idx[c2]] = values
        latent[s * nstep:(s + 1) * nstep, model.cont_idx[c1]] = values
    samples = model.gen(latent).detach()
    fig, axs = plot_grid(samples, nrow=nstep, figsize=(nstep, nstep),
                         gridspec_kw=dict(wspace=0, hspace=0))
    # plt.suptitle(rf"$c_{{{c1 + 2}}} \times c_{{{c2 + 2}}}$: Continuous (-2 to 2)")

    for i in [(0, 0), (0, -1), (-1, 0)]:
        _prep_ax(axs[i])
    axs[ 0, 0].set_xlabel(f'${values[ 0]:+g}$', ha='center', va='bottom', size=_TICK_LABEL_SIZE)
    axs[-1, 0].set_xlabel(f'${values[-1]:+g}$', ha='center', va='bottom', size=_TICK_LABEL_SIZE)
    axs[ 0, 0].set_ylabel(f'${values[ 0]:+g}$', ha='right', va='center', rotation=0, size=_TICK_LABEL_SIZE)
    axs[ 0,-1].set_ylabel(f'${values[-1]:+g}$', ha='right', va='center', rotation=0, size=_TICK_LABEL_SIZE)

    xpos = axs[ 0, 0].get_position().x0
    ypos = axs[ 0, 0].get_position().y1

    fig.text(.5, ypos, f'$c_{{{c1 + 2}}}$', ha='center', va='bottom', size=_VAR_LABEL_SIZE)
    fig.text(xpos, .5, f'$c_{{{c2 + 2}}}$', ha='right', va='center', size=_VAR_LABEL_SIZE)
Example 23
 def _get_model(self, cuda=False, dtype=torch.float):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     state_dict = {
         "mean_module.constant": torch.tensor([-0.0066]),
         "covar_module.raw_outputscale": torch.tensor(1.0143),
         "covar_module.base_kernel.raw_lengthscale": torch.tensor([[-0.99]]),
         "covar_module.base_kernel.lengthscale_prior.concentration": torch.tensor(
             3.0
         ),
         "covar_module.base_kernel.lengthscale_prior.rate": torch.tensor(6.0),
         "covar_module.outputscale_prior.concentration": torch.tensor(2.0),
         "covar_module.outputscale_prior.rate": torch.tensor(0.1500),
     }
     train_x = torch.linspace(0, 1, 10, device=device, dtype=dtype)
     train_y = torch.sin(train_x * (2 * math.pi))
     noise = torch.tensor(NEI_NOISE, device=device, dtype=dtype)
     train_y += noise
     train_yvar = torch.full_like(train_y, 0.25 ** 2)
     train_x = train_x.view(-1, 1)
     model = FixedNoiseGP(train_X=train_x, train_Y=train_y, train_Yvar=train_yvar)
     model.load_state_dict(state_dict)
     model.to(train_x)
     model.eval()
     return model
Example 24
parser.add_argument('--version', type=str, choices=['standard','steer','normal'], default='steer')
args = parser.parse_args()
torch.manual_seed(6)
if args.adjoint:
    from torchdiffeq import odeint_adjoint as odeint
    from torchdiffeq import odeint_adjoint_stochastic_end_v3 as odeint_stochastic_end_v3
    from torchdiffeq import odeint_adjoint_stochastic_end_normal as odeint_stochastic_end_normal
else:
    from torchdiffeq import odeint_stochastic_end_v3
    from torchdiffeq import odeint

device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')

true_y0 = torch.tensor([0.])
#true_y0 = torch.tensor([1.])
t = torch.linspace(0., 15., args.data_size)
test_t = torch.linspace(0., 25., args.data_size)
true_A = torch.tensor([[-0.1, 2.0], [-2.0, -0.1]])


class Lambda(nn.Module):

    def forward(self, t, y):
        t = t.unsqueeze(0)
        equation = -1000*y + 3000 - 2000 * torch.exp(-t) + 1000 * torch.sin(t)
        #equation = -1000*y + 3000 - 2000 * torch.exp(-t)
        #equation = -100*y + 300 - 200 * torch.exp(-1*t)
        #equation = -1*y*args.stiffness_ratio + 3*args.stiffness_ratio - 2*args.stiffness_ratio * torch.exp(-1*t)# - 2*args.stiffness_ratio * torch.exp(-10000*t)

        #equation = 0.12*y #-2000*torch.exp(-t)#+ 3000 - 2000 * torch.exp(-t)
        #equation = 10 * torch.sin(t)
        return equation
def get_region_boxes(output,
                     conf_thresh,
                     num_classes,
                     anchors,
                     num_anchors,
                     only_objectness=1,
                     validation=False):
    anchor_step = len(anchors) // num_anchors
    if output.dim() == 3:
        output = output.unsqueeze(0)
    batch = output.size(0)
    assert (output.size(1) == (5 + num_classes) * num_anchors)
    h = output.size(2)
    w = output.size(3)

    t0 = time.time()
    all_boxes = []
    output = output.view(batch * num_anchors, 5 + num_classes,
                         h * w).transpose(0, 1).contiguous().view(
                             5 + num_classes, batch * num_anchors * h * w)

    grid_x = torch.linspace(0, w - 1, w).repeat(h, 1).repeat(
        batch * num_anchors, 1, 1).view(batch * num_anchors * h * w).cuda()
    grid_y = torch.linspace(0, h - 1, h).repeat(w, 1).t().repeat(
        batch * num_anchors, 1, 1).view(batch * num_anchors * h * w).cuda()
    xs = torch.sigmoid(output[0]) + grid_x
    ys = torch.sigmoid(output[1]) + grid_y

    anchor_w = torch.Tensor(anchors).view(num_anchors,
                                          anchor_step).index_select(
                                              1, torch.LongTensor([0]))
    anchor_h = torch.Tensor(anchors).view(num_anchors,
                                          anchor_step).index_select(
                                              1, torch.LongTensor([1]))
    anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h * w).view(
        batch * num_anchors * h * w).cuda()
    anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h * w).view(
        batch * num_anchors * h * w).cuda()
    ws = torch.exp(output[2]) * anchor_w
    hs = torch.exp(output[3]) * anchor_h

    det_confs = torch.sigmoid(output[4])

    cls_confs = torch.nn.Softmax(dim=1)(Variable(
        output[5:5 + num_classes].transpose(0, 1))).data
    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1)
    t1 = time.time()

    sz_hw = h * w
    sz_hwa = sz_hw * num_anchors
    det_confs = convert2cpu(det_confs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids)
    xs = convert2cpu(xs)
    ys = convert2cpu(ys)
    ws = convert2cpu(ws)
    hs = convert2cpu(hs)
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
    t2 = time.time()

    for b in range(batch):
        boxes = []
        for cy in range(h):
            for cx in range(w):
                for i in range(num_anchors):
                    ind = b * sz_hwa + i * sz_hw + cy * w + cx
                    det_conf = det_confs[ind]
                    if only_objectness:
                        conf = det_confs[ind]
                    else:
                        conf = det_confs[ind] * cls_max_confs[ind]

                    if conf > conf_thresh:
                        bcx = xs[ind]
                        bcy = ys[ind]
                        bw = ws[ind]
                        bh = hs[ind]
                        cls_max_conf = cls_max_confs[ind]
                        cls_max_id = cls_max_ids[ind]
                        box = [
                            bcx / w, bcy / h, bw / w, bh / h, det_conf,
                            cls_max_conf, cls_max_id
                        ]
                        if (not only_objectness) and validation:
                            for c in range(num_classes):
                                tmp_conf = cls_confs[ind][c]
                                if c != cls_max_id and det_confs[
                                        ind] * tmp_conf > conf_thresh:
                                    box.append(tmp_conf)
                                    box.append(c)
                        boxes.append(box)
        all_boxes.append(boxes)
    t3 = time.time()
    if False:
        print('---------------------------------')
        print('matrix computation : %f' % (t1 - t0))
        print('        gpu to cpu : %f' % (t2 - t1))
        print('      boxes filter : %f' % (t3 - t2))
        print('---------------------------------')
    return all_boxes
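The grid_x / grid_y construction above is an old-style way of building a per-cell coordinate mesh; the repeat/view dance is equivalent to a flattened meshgrid. A small equivalence sketch, assuming a recent PyTorch:

import torch

h, w = 2, 3
grid_x = torch.linspace(0, w - 1, w).repeat(h, 1).view(h * w)
grid_y = torch.linspace(0, h - 1, h).repeat(w, 1).t().contiguous().view(h * w)

ys, xs = torch.meshgrid(torch.arange(h, dtype=torch.float),
                        torch.arange(w, dtype=torch.float), indexing="ij")
assert torch.equal(grid_x, xs.reshape(-1))  # x varies fastest: 0, 1, 2, 0, 1, 2
assert torch.equal(grid_y, ys.reshape(-1))  # y varies slowest: 0, 0, 0, 1, 1, 1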
Example 26
    def forward(self,
                feed_dict_q,
                feed_dict_k=None,
                metadata=None,
                is_eval=False,
                cluster_result=None,
                index=None,
                is_viewpoint_eval=False,
                feed_dicts_N=None,
                forward_type=None):
        """
        Input:
            feed_dict_q: a batch of query images and bounding boxes
            feed_dict_k: a batch of key images and bounding boxes
            is_eval: return momentum embeddings (used for clustering)
            cluster_result: cluster assignments, centroids, and density
            index: indices for training samples
        Output:
            logits, targets, proto_logits, proto_targets
        """

        mode = self.mode
        hyp_N = feed_dict_q["objects"][0].item()

        rel_viewpoint = metadata["rel_viewpoint"]

        if mode == "node":
            rel_viewpoint = None

        if is_viewpoint_eval and mode == "spatial":
            with torch.no_grad():
                k = self.encoder_q(feed_dict_q)
                k = self.merge_pose_with_scene_embeddings(
                    k, rel_viewpoint)  #merge
                for batch_ind in range(len(k)):
                    k[batch_ind][1] = self.spatial_viewpoint_transformation(
                        k[batch_ind][1]
                    )  # Do viewpoint transformation on spatial embeddings
                k = stack_features_across_batch(k, mode)
                k = nn.functional.normalize(k, dim=1)
            return k

        if is_eval:
            with torch.no_grad():
                # the output from encoder is a list of features from the batch where each batch element (image)
                # might contain different number of objects
                k = self.encoder_q(feed_dict_q)

                # encoder output features in the list are stacked to form a tensor of features across the batch
                k = stack_features_across_batch(k, mode)

                # normalize feature across the batch
                k = nn.functional.normalize(k, dim=1)
            return k

        # k_o : spatial embeddings before viewpoint transformation
        # k_t : spatial embeds after viewpoint transformation

        # update the key encoder
        k_o = self.encoder_q(feed_dict_k)  # calculate the embeddings

        # Do viewpoint transformation on embeddings if the pose is fed as input
        if mode == "spatial" and rel_viewpoint is not None:
            k_t = self.merge_pose_with_scene_embeddings(
                k_o, rel_viewpoint)  #merge pose with spatial embeddings
            for batch_ind in range(len(k_t)):
                k_t[batch_ind][1] = self.spatial_viewpoint_transformation(
                    k_t[batch_ind]
                    [1])  # Do viewpoint transformation on spatial embeddings

        k_o = stack_features_across_batch(k_o, mode)
        k_o = nn.functional.normalize(k_o, dim=1)

        k_t = stack_features_across_batch(k_t, mode)
        k_t = nn.functional.normalize(k_t, dim=1)

        q = self.encoder_q(feed_dict_q)  # queries: NxC
        q = stack_features_across_batch(q, mode)
        q = nn.functional.normalize(q, dim=1)

        if forward_type == "scene":
            # compute logits
            # Einstein sum is more intuitive
            # positive logits: Nx1
            l_pos = torch.einsum('nc,nc->n', [q, k_t]).unsqueeze(-1)
            # negative logits: Nxr
            l_neg = torch.einsum('nc,ck->nk',
                                 [q, self.queue_scene.clone().detach()])
            # logits: Nx(1+r)
            logits = torch.cat([l_pos, l_neg], dim=1)
            # apply temperature
            logits /= self.T
            # labels: positive key indicators
            labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()

            # dequeue and enqueue
            #             self._dequeue_and_enqueue_scene(k_t.clone().detach())
            self._dequeue_and_enqueue_scene(k_o.clone().detach())

            return logits, labels, None, None

            index = convert_indices(index, hyp_N, mode)

            # prototypical contrast
            if cluster_result is not None:
                proto_labels = []
                proto_logits = []
                for n, (im2cluster, prototypes, density) in enumerate(
                        zip(cluster_result['im2cluster'],
                            cluster_result['centroids'],
                            cluster_result['density'])):
                    # get positive prototypes
                    pos_proto_id = im2cluster[index]
                    pos_prototypes = prototypes[pos_proto_id]

                    # sample negative prototypes
                    all_proto_id = [i for i in range(im2cluster.max())]

                    #print(len(pos_prototypes), len(all_proto_id))
                    neg_proto_id = set(all_proto_id) - set(
                        pos_proto_id.tolist())
                    neg_proto_id = sample(
                        neg_proto_id,
                        self.scene_r)  #sample r negative prototypes
                    neg_prototypes = prototypes[neg_proto_id]

                    proto_selected = torch.cat(
                        [pos_prototypes, neg_prototypes], dim=0)

                    # compute prototypical logits
                    logits_proto = torch.mm(q, proto_selected.t())

                    # targets for prototype assignment
                    labels_proto = torch.linspace(
                        0, q.size(0) - 1, steps=q.size(0)).long().cuda()

                    # scaling temperatures for the selected prototypes
                    temp_proto = density[torch.cat(
                        [pos_proto_id,
                         torch.LongTensor(neg_proto_id).cuda()],
                        dim=0)]
                    logits_proto /= temp_proto

                    proto_labels.append(labels_proto)
                    proto_logits.append(logits_proto)
                return logits, labels, proto_logits, proto_labels

        elif forward_type == "view":

            self.queue_view = torch.randn(self.dim, self.view_r).cuda()
            self.queue_view_ptr[0] = 0

            negative_view_index = [feed_n[1] for feed_n in feed_dicts_N]

            # getting encoding for scene_negatives
            for feed_dict_ in feed_dicts_N:
                #                 with torch.no_grad():
                k_n = self.encoder_q(feed_dict_[0])
                # encoder output features in the list are stacked to form a tensor of features across the batch
                k_n = stack_features_across_batch(k_n, mode)
                # normalize feature across the batch
                scene_negatives = nn.functional.normalize(k_n, dim=1)
                # append negatives to queue_view
                self._dequeue_and_enqueue_view(scene_negatives)

            self._dequeue_and_enqueue_view(k_o)

            # positive logits: Nx1
            l_pos = torch.einsum('nc,nc->n', [q, k_t]).unsqueeze(-1)
            # negative logits: Nxr
            l_neg = torch.einsum('nc,ck->nk', [q, self.queue_view])
            # logits: Nx(1+r)
            logits = torch.cat([l_pos, l_neg], dim=1)
            # apply temperature
            logits /= self.T
            # labels: positive key indicators
            labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()

            return logits, labels, None, None

        else:
            raise ValueError("Forward type of the mode must be defined")
Example 27
def loss(input, target, border=10):
    return torch.sum(
        torch.square(input[:, :, border:-border, border:-border] -
                     target[:, :, border:-border, border:-border]))


cap = cv2.VideoCapture("IMG_0004.mp4")
# Stores the last k frames of the video
k = 25
prev_k_frames = []

# grid_sample expects absolute (x, y) sampling coordinates. Rather than make the model
# learn the identity mapping itself, we add a baseline identity grid back at the end.
shape = 223
single_vector_x = torch.linspace(-1, 1, steps=shape).reshape(1, -1)
single_vector_y = torch.linspace(-1, 1, steps=shape).reshape(-1, 1)
baseline = torch.zeros(1, shape, shape, 2)
baseline[:, :, :, 0] = single_vector_x
baseline[:, :, :, 1] = single_vector_y
baseline = baseline.to(torch.device("cuda:0"))
frame_count = 0
# Opens video and trains model
while (cap.isOpened()):
    ret, frame = cap.read()
    if frame is None:
        break
    frame = Image.fromarray(frame)
    #frame = frame.convert('1')
    frame = frame.resize((shape, shape))
Example 28
    def train(self):

        if self.T - self.target_sync_T > self.args.target:
            self.sync_target_network()
            self.target_sync_T = self.T

        info = {}

        for _ in range(self.args.iters):
            self.dqn.eval()

            batch, indices, is_weights = self.replay.Sample_N(self.args.batch_size, self.args.n_step, self.args.gamma)
            columns = list(zip(*batch))

            states = Variable(torch.from_numpy(np.array(columns[0])).float().transpose_(1, 3))
            actions = Variable(torch.LongTensor(columns[1]))
            terminal_states = Variable(torch.FloatTensor(columns[5]))
            rewards = Variable(torch.FloatTensor(columns[2]))
            # Have to clip rewards for DQN
            rewards = torch.clamp(rewards, -1, 1)
            steps = Variable(torch.FloatTensor(columns[4]))
            new_states = Variable(torch.from_numpy(np.array(columns[3])).float().transpose_(1, 3))

            target_dqn_qvals = self.target_dqn(new_states).cpu()
            # Make a new variable with those values so that these are treated as constants
            target_dqn_qvals_data = Variable(target_dqn_qvals.data)

            q_value_gammas = (Variable(torch.ones(terminal_states.size()[0])) - terminal_states)
            inter = Variable(torch.ones(terminal_states.size()[0]) * self.args.gamma)
            # print(steps)
            q_value_gammas = q_value_gammas * torch.pow(inter, steps)

            values = torch.linspace(self.args.v_min, self.args.v_max, steps=self.args.atoms)
            values = Variable(values)
            values = values.view(1, 1, self.args.atoms)
            values = values.expand(self.args.batch_size, self.args.actions, self.args.atoms)
            # print(values)

            q_value_gammas = q_value_gammas.view(self.args.batch_size, 1, 1)
            q_value_gammas = q_value_gammas.expand(self.args.batch_size, self.args.actions, self.args.atoms)
            # print(q_value_gammas)
            gamma_values = q_value_gammas * values
            # print(gamma_values)
            rewards = rewards.view(self.args.batch_size, 1, 1)
            rewards = rewards.expand(self.args.batch_size, self.args.actions, self.args.atoms)
            # print(rewards)
            operator_q_values = rewards + gamma_values
            # print(operator_q_values)

            clipped_operator_q_values = torch.clamp(operator_q_values, self.args.v_min, self.args.v_max)

            delta_z = (self.args.v_max - self.args.v_min) / (self.args.atoms - 1)
            # Using the notation from the categorical paper
            b_j = (clipped_operator_q_values - self.args.v_min) / delta_z
            # print(b_j)
            lower_bounds = torch.floor(b_j)
            upper_bounds = torch.ceil(b_j)

            # Work out the max action
            atom_values = Variable(torch.linspace(self.args.v_min, self.args.v_max, steps=self.args.atoms))
            atom_values = atom_values.view(1, 1, self.args.atoms)
            atom_values = atom_values.expand(self.args.batch_size, self.args.actions, self.args.atoms)

            # Sum over the atoms dimension
            target_expected_qvalues = torch.sum(target_dqn_qvals_data * atom_values, dim=2)
            # Get the maximum actions index across the batch size
            max_actions = target_expected_qvalues.max(dim=1)[1].view(-1)

            # Project back onto the original support for the max actions
            q_value_distribution_targets = torch.zeros(self.args.batch_size, self.args.atoms)

            # Distributions for the max actions
            # print(target_dqn_qvals_data, max_actions)
            q_value_max_actions_distribs = target_dqn_qvals_data.index_select(dim=1, index=max_actions)[:,0,:]
            # print(q_value_max_actions_distribs)

            # Lower_bounds_actions
            lower_bounds_actions = lower_bounds.index_select(dim=1, index=max_actions)[:,0,:]
            upper_bounds_actions = upper_bounds.index_select(dim=1, index=max_actions)[:,0,:]
            b_j_actions = b_j.index_select(dim=1, index=max_actions)[:,0,:]

            lower_bound_values_to_add = q_value_max_actions_distribs * (upper_bounds_actions - b_j_actions)
            upper_bound_values_to_add = q_value_max_actions_distribs * (b_j_actions - lower_bounds_actions)
            # print(lower_bounds_actions)
            # print(lower_bound_values_to_add)
            # Naive looping
            for b in range(self.args.batch_size):
                for l, pj in zip(lower_bounds_actions.data.type(torch.LongTensor)[b], lower_bound_values_to_add[b].data):
                    q_value_distribution_targets[b][l] += pj
                for u, pj in zip(upper_bounds_actions.data.type(torch.LongTensor)[b], upper_bound_values_to_add[b].data):
                    q_value_distribution_targets[b][u] += pj

            self.dqn.train()
            if self.args.gpu:
                actions = actions.cuda()
                # q_value_targets = q_value_targets.cuda()
                q_value_distribution_targets = q_value_distribution_targets.cuda()
            model_predictions = self.dqn(states).index_select(1, actions.view(-1))[:,0,:]
            q_value_distribution_targets = Variable(q_value_distribution_targets)
            # print(q_value_distribution_targets)
            # print(model_predictions) 

            # Cross entropy loss
            ce_loss = -torch.sum(q_value_distribution_targets * torch.log(model_predictions), dim=1)
            ce_batch_loss = ce_loss.mean()

            info = {}

            self.log("DQN/X_Entropy_Loss", ce_batch_loss.data[0], step=self.T)

            # Update
            self.optimizer.zero_grad()
            ce_batch_loss.backward()

            # Taken from pytorch clip_grad_norm
            # Remove once the pip version it up to date with source
            gradient_norm = clip_grad_norm(self.dqn.parameters(), self.args.clip_value)
            if gradient_norm is not None:
                info["Norm"] = gradient_norm

            self.optimizer.step()

            if "States" in info:
                states_trained = info["States"]
                info["States"] = states_trained + columns[0]
            else:
                info["States"] = columns[0]

        # Pad out the states to be of size batch_size
        if len(info["States"]) < self.args.batch_size:
            old_states = info["States"]
            padding = [old_states[0]] * (self.args.batch_size - len(old_states))
            info["States"] = old_states + padding

        return info
Example 29
    def learn(self, mem):
        # Sample transitions
        idxs, states, actions, returns, next_states, nonterminals, weights = mem.sample(
            self.batch_size)

        # Calculate current state probabilities (online network noise already sampled)
        log_ps = self.online_net(
            states, log=True)  # Log probabilities log p(s_t, ·; θonline)
        log_ps_a = log_ps[range(self.batch_size),
                          actions]  # log p(s_t, a_t; θonline)

        with torch.no_grad():
            # Calculate nth next state probabilities
            pns = self.online_net(
                next_states)  # Probabilities p(s_t+n, ·; θonline)
            dns = (self.support.expand_as(pns) * pns
                   )  # Distribution d_t+n = (z, p(s_t+n, ·; θonline))
            argmax_indices_ns = dns.sum(2).argmax(
                1
            )  # Perform argmax action selection using online network: argmax_a[(z, p(s_t+n, a; θonline))]
            self.target_net.reset_noise()  # Sample new target net noise
            pns = self.target_net(
                next_states)  # Probabilities p(s_t+n, ·; θtarget)
            pns_a = pns[range(
                self.batch_size
            ), argmax_indices_ns]  # Double-Q probabilities p(s_t+n, argmax_a[(z, p(s_t+n, a; θonline))]; θtarget)

            # Compute Tz (Bellman operator T applied to z)
            Tz = returns.unsqueeze(1) + nonterminals * (
                self.discount**self.n) * self.support.unsqueeze(
                    0)  # Tz = R^n + (γ^n)z (accounting for terminal states)
            Tz = Tz.clamp(min=self.Vmin,
                          max=self.Vmax)  # Clamp between supported values
            # Compute L2 projection of Tz onto fixed support z
            b = (Tz - self.Vmin) / self.delta_z  # b = (Tz - Vmin) / Δz
            l, u = b.floor().to(torch.int64), b.ceil().to(torch.int64)
            # Fix disappearing probability mass when l = b = u (b is int)
            l[(u > 0) * (l == u)] -= 1
            u[(l < (self.atoms - 1)) * (l == u)] += 1

            # Distribute probability of Tz
            m = states.new_zeros(self.batch_size, self.atoms)
            offset = (torch.linspace(0, ((self.batch_size - 1) * self.atoms),
                                     self.batch_size).unsqueeze(1).expand(
                                         self.batch_size,
                                         self.atoms).to(actions))
            m.view(-1).index_add_(
                0, (l + offset).view(-1),
                (pns_a *
                 (u.float() - b)).view(-1))  # m_l = m_l + p(s_t+n, a*)(u - b)
            m.view(-1).index_add_(
                0, (u + offset).view(-1),
                (pns_a *
                 (b - l.float())).view(-1))  # m_u = m_u + p(s_t+n, a*)(b - l)

        loss = -torch.sum(
            m * log_ps_a,
            1)  # Cross-entropy loss (minimises DKL(m||p(s_t, a_t)))
        self.online_net.zero_grad()
        (weights * loss).mean().backward(
        )  # Backpropagate importance-weighted minibatch loss
        self.optimiser.step()

        mem.update_priorities(idxs,
                              loss.detach().cpu().numpy()
                              )  # Update priorities of sampled transitions
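The two index tweaks above handle the edge case where Tz lands exactly on an atom, so b is integral and l == u; without them the update p*(u - b) + p*(b - l) would deposit zero mass. A toy check of that fix, assuming only torch:

import torch

atoms = 5
b = torch.tensor([2.0])                   # Tz landed exactly on atom 2
l, u = b.floor().long(), b.ceil().long()  # l == u == 2: naive update adds zero mass
l[(u > 0) * (l == u)] -= 1                # shift l down to 1
u[(l < (atoms - 1)) * (l == u)] += 1      # no-op: l != u after the first fix

m = torch.zeros(atoms)
p = torch.tensor([1.0])
m.index_add_(0, l, p * (u.float() - b))  # adds p * (2 - 2) = 0 at atom 1
m.index_add_(0, u, p * (b - l.float()))  # adds p * (2 - 1) = 1 at atom 2
print(m)  # tensor([0., 0., 1., 0., 0.]), so no probability mass is lost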
Example 30
    def forward(self, gt_depths):
        """
        In the forward function we accept a Tensor of input data and we must return
        a Tensor of output data. We can use Modules defined in the constructor as
        well as arbitrary operators on Tensors.
        """

        #### Calculate current coding functions based on learned parameters
        ModFs_func = torch.zeros(self.N,
                                 1,
                                 device=device,
                                 dtype=dtype,
                                 requires_grad=False)
        DemodFs_func = torch.zeros(self.N,
                                   self.K,
                                   device=device,
                                   dtype=dtype,
                                   requires_grad=False)
        ModFs = torch.zeros(self.N,
                            1,
                            device=device,
                            dtype=dtype,
                            requires_grad=False)
        DemodFs = torch.zeros(self.N,
                              self.K,
                              device=device,
                              dtype=dtype,
                              requires_grad=False)
        p = torch.linspace(0, 2 * math.pi, self.N, device=device)
        for order in range(0, self.order):
            ModFs_func[:, 0] += self.alpha_mod[0, order] * torch.cos(
                (p + self.phi_mod[0, order]) * (order + 1))
            for k in range(0, self.K):
                DemodFs_func[:, k] += self.alpha_demod[k, order] * torch.cos(
                    (p + self.phi_demod[k, order]) * (order + 1))

        # Normalize ModFs and DemodFs
        min_ModFs_func, _ = torch.min(ModFs_func, dim=0)
        ModFs = ModFs_func - min_ModFs_func  # ModFs can't be lower than zero (negative light)
        min_DemodFs_func, _ = torch.min(DemodFs_func, dim=0)
        max_DemodFs_func, _ = torch.max(DemodFs_func, dim=0)
        DemodFs = (DemodFs_func - min_DemodFs_func) / (
            max_DemodFs_func - min_DemodFs_func)  # DemodFs can only be 0->1

        #################### Simulation
        ## Set area under the curve of outgoing ModF to the totalEnergy
        ModFs = ModFs.repeat(1, self.K)
        ModFs_scaled = Utils.ScaleMod(ModFs,
                                      device,
                                      tau=self.tauMin,
                                      pAveSource=self.pAveSourcePerPixel)
        # Calculate correlation functions (NxK matrix) and normalize it (zero mean, unit variance)
        CorrFs = Utils.GetCorrelationFunctions(ModFs_scaled,
                                               DemodFs,
                                               device,
                                               dt=self.dt)
        NormCorrFs = (CorrFs.t() - torch.mean(CorrFs, 1)) / torch.std(
            CorrFs, 1)
        NormCorrFs = NormCorrFs.t()
        # Compute brightness values
        BVals = Utils.ComputeBrightnessVals(ModFs=ModFs_scaled, DemodFs=DemodFs, CorrFs=CorrFs, depths=gt_depths, \
                pAmbient=self.pAveAmbientPerPixel, beta=self.meanBeta, T=self.T, tau=self.tau, dt=self.dt, gamma=self.gamma)

        #### Add noise
        # Calculate variance
        noiseVar = BVals * self.gamma + math.pow(self.readNoise * self.gamma,
                                                 2)
        # Add noise to all brightness values
        BVals = Utils.GetClippedBSamples(nSamples=1,
                                         BMean=BVals,
                                         BVar=noiseVar,
                                         device=device)
        BVals = BVals.permute(0, 3, 1,
                              2)  # Put channel dimension at right position

        # Normalize BVals
        BVals_mean = torch.mean(BVals)
        BVals_std = torch.std(BVals)
        BVals = (BVals - BVals_mean) / BVals_std

        #### CNN Network
        out = CNN.network(self, self.architecture, BVals)

        decodedDepths = torch.squeeze(out, 1)  # Remove channel dimension
        return decodedDepths
    def load(self, file_path):
        self.network_training.load_state_dict(torch.load(file_path))
        self.network_release.load_state_dict(
            self.network_training.state_dict())


if __name__ == "__main__":
    input_dimension = 16
    critic = Critic(input_dimension,
                    lr=1e-3,
                    lr_step_size=100,
                    lr_gamma=0.5,
                    weight_decay=1e-7,
                    lag=10)

    x_data = torch.linspace(0., 10.,
                            input_dimension * 11).view(-1, input_dimension)
    y_data = torch.linspace(0., 10., 11).view(-1, 1)
    epochs = 100

    for epoch in range(epochs):
        critic.fit(x_data, y_data)

        preds = critic(x_data)
        SSQ = ((preds - y_data) * (preds - y_data)).sum()

        preds_trainer = critic.network_training(x_data)
        SSQ_trainer = ((preds_trainer - y_data) *
                       (preds_trainer - y_data)).sum()

        print("epoch %d, error %f, (%f)" % (epoch, SSQ, SSQ_trainer))
Example 32
def eval_conditional_density(
    density: Any,
    condition: Tensor,
    limits: Tensor,
    dim1: int,
    dim2: int,
    resolution: int = 50,
    eps_margins1: Union[Tensor, float] = 1e-32,
    eps_margins2: Union[Tensor, float] = 1e-32,
) -> Tensor:
    r"""
    Return the unnormalized conditional along `dim1, dim2` given parameters `condition`.

    We compute the unnormalized conditional by evaluating the joint distribution:
        $p(x1 | x2) = p(x1, x2) / p(x2) \propto p(x1, x2)$

    Args:
        density: Probability density function with `.log_prob()` method.
        condition: Parameter set that all dimensions other than `dim1` and `dim2`
            will be fixed to. Should have shape (1, dim_theta), e.g. a sample
            from the posterior distribution. The entries at `dim1` and `dim2`
            will be ignored.
        limits: Bounds within which to evaluate the density. Shape (dim_theta, 2).
        dim1: First dimension along which to evaluate the conditional.
        dim2: Second dimension along which to evaluate the conditional.
        resolution: Resolution of the grid along which the conditional density is
            evaluated.
        eps_margins1: We evaluate the posterior along `dim1` from
            `limits[dim1, 0] + eps_margins1` to `limits[dim1, 1] - eps_margins1`.
            This avoids evaluating exactly at the prior bounds.
        eps_margins2: We evaluate the posterior along `dim2` from
            `limits[dim2, 0] + eps_margins2` to `limits[dim2, 1] - eps_margins2`.
            This avoids evaluating exactly at the prior bounds.

    Returns: Conditional probabilities. If `dim1 == dim2`, this will have shape
        (resolution,). If `dim1 != dim2`, it will have shape (resolution, resolution).
    """

    condition = ensure_theta_batched(condition)

    theta_grid_dim1 = torch.linspace(
        float(limits[dim1, 0] + eps_margins1),
        float(limits[dim1, 1] - eps_margins1),
        resolution,
    )
    theta_grid_dim2 = torch.linspace(
        float(limits[dim2, 0] + eps_margins2),
        float(limits[dim2, 1] - eps_margins2),
        resolution,
    )

    if dim1 == dim2:
        repeated_condition = condition.repeat(resolution, 1)
        repeated_condition[:, dim1] = theta_grid_dim1

        log_probs_on_grid = density.log_prob(repeated_condition)
    else:
        repeated_condition = condition.repeat(resolution**2, 1)
        repeated_condition[:, dim1] = theta_grid_dim1.repeat(resolution)
        repeated_condition[:, dim2] = torch.repeat_interleave(
            theta_grid_dim2, resolution)

        log_probs_on_grid = density.log_prob(repeated_condition)
        log_probs_on_grid = torch.reshape(log_probs_on_grid,
                                          (resolution, resolution))

    # Subtract maximum for numerical stability.
    return torch.exp(log_probs_on_grid - torch.max(log_probs_on_grid))
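
# A minimal usage sketch (not from the original source): evaluating the
# conditional of a 3-D standard normal along dims 0 and 1, with dim 2 fixed.
# It assumes `ensure_theta_batched` passes an already-batched condition through.
import torch
from torch.distributions import MultivariateNormal

density = MultivariateNormal(torch.zeros(3), torch.eye(3))
condition = torch.tensor([[0.0, 0.0, 0.5]])  # entries at dim1/dim2 are ignored
limits = torch.tensor([[-3.0, 3.0]] * 3)     # shape (dim_theta, 2)
probs = eval_conditional_density(density, condition, limits, dim1=0, dim2=1)
print(probs.shape)  # torch.Size([50, 50]); max-normalized, so probs.max() == 1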
Esempio n. 33
0
    def plot(self,
             axes=None,
             block=False,
             title=None,
             plotting=False,
             Ndiv=41,
             showtickslabels=True,
             showticks=True,
             xlabel=None,
             ylabel=None,
             clear_axes=True,
             legend=False,
             labelsize=None,
             normalize=False,
             colorbar=False,
             color=None,
             label=None,
             local_axes=None,
             x_next=None,
             alpha_next=None):

        if not plotting:
            return None

        # NOTE: this early return makes the `self.dim == 2` branch further
        # below unreachable; only the 1-D case is ever plotted
        if self.dim > 1:
            return None

        if local_axes is None and axes is None:
            self.fig, (local_axes) = plt.subplots(1,
                                                  1,
                                                  sharex=True,
                                                  figsize=(10, 7))
        elif local_axes is None:
            local_axes = axes
        elif axes is None:
            pass  # If the internal axes already have some value, and no new axes passed, do nothing
        elif local_axes is not None and axes is not None:
            local_axes = axes

        local_pp = PlotProbability()

        if x_next is not None and alpha_next is not None:
            x_next_local = x_next
            alpha_next_local = alpha_next
        else:
            x_next_local = None
            alpha_next_local = 1.0

        test_x_vec = torch.linspace(0.0, 1.0, Ndiv)[:, None]
        test_x_vec = test_x_vec.unsqueeze(
            1
        )  # Make this [Ntest x q x dim] = [n_batches x n_design_points x dim], with q=1 -> Double-check in the documentation!
        var_vec = self.forward(X=test_x_vec).detach().cpu().numpy()

        if self.dim == 1:
            local_axes = local_pp.plot_acquisition_function(
                var_vec=var_vec,
                xpred_vec=test_x_vec.squeeze(1),
                x_next=x_next_local,
                acqui_next=alpha_next_local,
                xlabel=xlabel,
                ylabel=ylabel,
                title=title,
                legend=legend,
                axes=local_axes,
                clear_axes=clear_axes,
                xlim=np.array([0., 1.]),
                block=block,
                labelsize=labelsize,
                showtickslabels=showtickslabels,
                showticks=showticks,
                what2plot=self.iden,
                color=color,
                ylim=None)
            plt.pause(0.25)

        elif self.dim == 2:
            if self.x_next is not None:
                Xs = np.atleast_2d(self.x_next)
            else:
                Xs = self.x_next
            local_axes = local_pp.plot_GP_2D_single(
                var_vec=var_vec,
                Ndiv_dim=Ndiv * np.ones(self.dim, dtype=np.int64),
                Xs=Xs,
                Ys=self.alpha_next,
                x_label=xlabel,
                y_label=ylabel,
                title=title,
                axes=local_axes,
                clear_axes=clear_axes,
                legend=legend,
                block=block,
                colorbar=colorbar,
                color_Xs="gold")
            plt.pause(0.25)

        return local_axes
Esempio n. 34
0
'''
    use: comment out 2 out of 3 in the main codes and run
'''
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

batch_size = 200

x = torch.unsqueeze(torch.linspace(-5, 5, batch_size), dim=1)
y = x.pow(2) + torch.rand(x.size())

x, y = Variable(x), Variable(y)


def save():
    model = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1),
    )
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    loss_fn = torch.nn.MSELoss()

    plt.ion()
    plt.show()

    for epoch in range(1000 + 1):
        y_pred = model(x)
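        # (the excerpt is cut off here; a standard completion of this loop,
        #  mirroring the fuller snippet later on this page, would compute
        #  loss = loss_fn(y_pred, y), then optimizer.zero_grad(),
        #  loss.backward(), optimizer.step())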
Esempio n. 35
0
    def gen_encoder_output_proposals(self, memory, memory_padding_mask,
                                     spatial_shapes):
        """Generate proposals from encoded memory.

        Args:
            memory (Tensor): The output of the encoder,
                has shape (bs, num_key, embed_dim). num_key equals
                the number of points on the feature maps from
                all levels.
            memory_padding_mask (Tensor): Padding mask for memory,
                has shape (bs, num_key).
            spatial_shapes (Tensor): The shapes of all feature maps,
                has shape (num_level, 2).

        Returns:
            tuple: A tuple of feature map and bbox prediction.

                - output_memory (Tensor): The input of the decoder, \
                    has shape (bs, num_key, embed_dim). num_key equals \
                    the number of points on the feature maps from \
                    all levels.
                - output_proposals (Tensor): The normalized proposals \
                    after an inverse sigmoid, have shape \
                    (bs, num_keys, 4).
        """

        N, S, C = memory.shape
        proposals = []
        _cur = 0
        for lvl, (H, W) in enumerate(spatial_shapes):
            mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view(
                N, H, W, 1)
            valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
            valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)

            grid_y, grid_x = torch.meshgrid(
                torch.linspace(0,
                               H - 1,
                               H,
                               dtype=torch.float32,
                               device=memory.device),
                torch.linspace(0,
                               W - 1,
                               W,
                               dtype=torch.float32,
                               device=memory.device))
            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)

            scale = torch.cat([valid_W.unsqueeze(-1),
                               valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2)
            grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale
            wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
            proposal = torch.cat((grid, wh), -1).view(N, -1, 4)
            proposals.append(proposal)
            _cur += (H * W)
        output_proposals = torch.cat(proposals, 1)
        output_proposals_valid = ((output_proposals > 0.01) &
                                  (output_proposals < 0.99)).all(-1,
                                                                 keepdim=True)
        output_proposals = torch.log(output_proposals / (1 - output_proposals))
        output_proposals = output_proposals.masked_fill(
            memory_padding_mask.unsqueeze(-1), float('inf'))
        output_proposals = output_proposals.masked_fill(
            ~output_proposals_valid, float('inf'))

        output_memory = memory
        output_memory = output_memory.masked_fill(
            memory_padding_mask.unsqueeze(-1), float(0))
        output_memory = output_memory.masked_fill(~output_proposals_valid,
                                                  float(0))
        output_memory = self.enc_output_norm(self.enc_output(output_memory))
        return output_memory, output_proposals
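
# A small standalone check (not part of the original class): the proposals are
# stored as logits via the inverse sigmoid above, so applying a sigmoid
# recovers the normalized (cx, cy, w, h) values in the valid range (0.01, 0.99).
import torch
p = torch.tensor([0.25, 0.50, 0.05, 0.05])
logits = torch.log(p / (1 - p))  # same transform as output_proposals
assert torch.allclose(torch.sigmoid(logits), p)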
Esempio n. 36
0
    def forward(self, states):
        # append a normalized 0..1 index column so the linear layer can see
        # each row's position within the batch
        ns = torch.linspace(0, 1, states.size(0)).view((-1, 1))
        x = torch.cat((states, ns), 1)
        return self.linear(x)
Esempio n. 37
0
    # (this excerpt begins mid-function; ModFs_func is assumed to be
    #  initialized like its demodulation counterpart below)
    ModFs_func = torch.zeros(model.N,
                             1,
                             device=device,
                             dtype=dtype,
                             requires_grad=False)
    DemodFs_func = torch.zeros(model.N,
                               model.K,
                               device=device,
                               dtype=dtype,
                               requires_grad=False)
    ModFs = torch.zeros(model.N,
                        1,
                        device=device,
                        dtype=dtype,
                        requires_grad=False)
    DemodFs = torch.zeros(model.N,
                          model.K,
                          device=device,
                          dtype=dtype,
                          requires_grad=False)
    p = torch.linspace(0, 2 * math.pi, model.N, device=device)
    for order in range(0, model.order):
        ModFs_func[:, 0] += model.alpha_mod[0, order] * torch.cos(
            (p + model.phi_mod[0, order]) * (order + 1))
        for k in range(0, model.K):
            DemodFs_func[:, k] += model.alpha_demod[k, order] * torch.cos(
                (p + model.phi_demod[k, order]) * (order + 1))

    # Normalize ModFs and DemodFs
    min_ModFs_func, _ = torch.min(ModFs_func, dim=0)
    ModFs = ModFs_func - min_ModFs_func  # ModFs can't be lower than zero (negative light)
    min_DemodFs_func, _ = torch.min(DemodFs_func, dim=0)
    max_DemodFs_func, _ = torch.max(DemodFs_func, dim=0)
    DemodFs = (DemodFs_func - min_DemodFs_func) / (
        max_DemodFs_func - min_DemodFs_func)  # DemodFs can only be 0->1
    ModFs = ModFs.repeat(1, model.K)
Esempio n. 38
0
def test(
        data,
        weights=None,
        batch_size=32,
        imgsz=640,
        conf_thres=0.001,
        iou_thres=0.6,  # for NMS
        save_json=False,
        single_cls=False,
        augment=False,
        verbose=False,
        model=None,
        dataloader=None,
        save_dir=Path(''),  # for saving images
        save_txt=False,  # for auto-labelling
        save_hybrid=False,  # for hybrid auto-labelling
        save_conf=False,  # save auto-label confidences
        plots=True,
        log_imgs=0):  # number of logged images

    # li = []
    # temp_path = "/content/drive/MyDrive/00Colab Notebooks/07Datasets/COCO/cocomax/"
    # for i in os.listdir(temp_path):
    #   li.append(cv2.imread(temp_path + i))
    # logger.info("log:{}{}".format(len(li), li[0].shape))

    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device

    else:  # called directly
        ############### commented out
        # set_logging()
        device = select_device(opt.device, batch_size=batch_size)

        # Directories
        save_dir = Path(
            increment_path(Path(opt.project) / opt.name,
                           exist_ok=opt.exist_ok))  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(
            parents=True, exist_ok=True)  # make dir

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size

        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
        #     model = nn.DataParallel(model)

    # Half
    half = device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    logger.info("gpu_model:{}".format(gpu_used.gpuinfo()))
    # Configure
    model.eval()
    ############### commented out
    # is_coco = data.endswith('coco.yaml')  # is COCO dataset
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    check_dataset(data)  # check
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95,
                          10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()
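    # iouv = [0.50, 0.55, ..., 0.95]: ten IoU thresholds in steps of 0.05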

    # Logging
    log_imgs, wandb = min(log_imgs, 100), None  # ceil
    try:
        import wandb  # Weights & Biases
    except ImportError:
        log_imgs = 0

    # Dataloader
    if not training:
        # the next two lines are for debugging
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
        _ = model(img.half() if half else img
                  ) if device.type != 'cpu' else None  # run once
        path = data['test'] if opt.task == 'test' else data[
            'val']  # path to val/test images
        ################## added
        path = "/content/drive/MyDrive/00Colab Notebooks/07Datasets/COCO/cocomax"
        logger.info(path)
        # dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True)[0]
        dataloader = create_dataloader(path,
                                       imgsz,
                                       batch_size,
                                       model.stride.max(),
                                       opt,
                                       pad=0.5,
                                       rect=True)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = {
        k: v
        for k, v in enumerate(
            model.names if hasattr(model, 'names') else model.module.names)
    }
    # coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R',
                                 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1, t_ex1, t_ex2, load_time = 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., []
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []

    time_veri.time_start()
    for batch_i, (img, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc=s)):
        time_veri2.start_point(time_veri2.t_point0)

        # preprocessing
        time_veri.start_point(time_veri.t_point1)
        gpu_used.gpu_clear()

        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width
        time_veri.end_point(time_veri.t_point1)
        # gpu_used.gpuinfo()

        with torch.no_grad():
            time_veri.start_point(time_veri.t_point2)
            # Run model
            inf_out, train_out = model(
                img, augment=augment)  # inference and training outputs
            time_veri.end_point(time_veri.t_point2)

            ################################### commented out
            # # Compute loss
            # if training:
            #     loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3]  # box, obj, cls

            # Run NMS
            time_veri.start_point(time_veri.t_point3)
            targets[:, 2:] *= torch.Tensor([width, height, width,
                                            height]).to(device)  # to pixels
            lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)
                  ] if save_hybrid else []  # for autolabelling
            output = non_max_suppression(inf_out,
                                         conf_thres=conf_thres,
                                         iou_thres=iou_thres,
                                         labels=lb)
            time_veri.end_point(time_veri.t_point3)

        # Statistics per image
        time_veri.start_point(time_veri.t_point4)
        for si, pred in enumerate(output):
            ############################## commented out
            # labels = targets[targets[:, 0] == si, 1:]
            # nl = len(labels)
            # tcls = labels[:, 0].tolist() if nl else []  # target class
            # path = Path(paths[si])
            # seen += 1

            # if len(pred) == 0:
            #     if nl:
            #         stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
            #     continue

            # Predictions: resize to the original image size

            predn = pred.clone()
            # scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1])  # native-space pred
            ######################## capture the original coordinates (return value of scale_coords)
            ex_coords = scale_coords(img[si].shape[1:], predn[:, :4],
                                     shapes[si][0],
                                     shapes[si][1])  # native-space pred
            ############################### added
            # continue
        time_veri.end_point(time_veri.t_point4)
        time_veri2.end_point(time_veri2.t_point0)

################ ################ ################ ################ ################ ################ ################ ################ ################ ################ ################ ################ ################ ################ ################ ################ ################
# # Append to text file
# if save_txt:
#     gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]]  # normalization gain whwh
#     for *xyxy, conf, cls in predn.tolist():
#         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
#         line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
#         with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
#             f.write(('%g ' * len(line)).rstrip() % line + '\n')

# # W&B logging
# if plots and len(wandb_images) < log_imgs:
#     box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
#                  "class_id": int(cls),
#                  "box_caption": "%s %.3f" % (names[cls], conf),
#                  "scores": {"class_score": conf},
#                  "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
#     boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
#     wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))

# # Append to pycocotools JSON dictionary
# if save_json:
#     # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
#     image_id = int(path.stem) if path.stem.isnumeric() else path.stem
#     box = xyxy2xywh(predn[:, :4])  # xywh
#     box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
#     for p, b in zip(pred.tolist(), box.tolist()):
#         jdict.append({'image_id': image_id,
#                       'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
#                       'bbox': [round(x, 3) for x in b],
#                       'score': round(p[4], 5)})

# # Assign all predictions as incorrect
# correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
# if nl:
#     detected = []  # target indices
#     tcls_tensor = labels[:, 0]

#     # target boxes
#     tbox = xywh2xyxy(labels[:, 1:5])
#     scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1])  # native-space labels
#     if plots:
#         confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1))

#     # Per target class
#     for cls in torch.unique(tcls_tensor):
#         ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # prediction indices
#         pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # target indices

#         # Search for detections
#         if pi.shape[0]:
#             # Prediction to target ious
#             ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1)  # best ious, indices

#             # Append detections
#             detected_set = set()
#             for j in (ious > iouv[0]).nonzero(as_tuple=False):
#                 d = ti[i[j]]  # detected target
#                 if d.item() not in detected_set:
#                     detected_set.add(d.item())
#                     detected.append(d)
#                     correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
#                     if len(detected) == nl:  # all targets already located in image
#                         break

# # Append statistics (correct, conf, pcls, tcls)
# stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
################ ################ ################ ################ ################ ################ ################ ################ ################ ################ ################ ################ ################ ################

##################### added
# time_veri.end_point(time_veri.t_point4)
# time_veri2.start_point(time_veri2.t_point0)
# continue

# # Plot images
# if plots and batch_i < 3:
#     f = save_dir / f'test_batch{batch_i}_labels.jpg'  # labels
#     Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
#     f = save_dir / f'test_batch{batch_i}_pred.jpg'  # predictions
#     Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start()


##################################### added ##################################
    time_veri.time_end()
    logger.info("log:{}".format("processing stopped early (timing run complete)"))

    logger.info("for文全体:{}".format(time_veri.t_end - time_veri.t_start))
    temp = (time_veri.t_end - time_veri.t_start)
    logger.info("for文の中:{}".format(time_veri2.time_sum_list(return_all=True)))
    logger.info("ミニバッチのロード時間:{}".format(temp - time_veri2.all))
    logger.info("①~④の処理時間:{}sum:{}".format(time_veri.time_sum_list(),
                                           time_veri.all))

    logger.info("for文の中対fps:{}".format(1 / (time_veri2.all / 512)))
    temp = time_veri.t_end - time_veri.t_start
    logger.info("for文全体対fps_all:{}".format(1 / (temp / 512)))

    logger.info("gpu_exit:{}".format(gpu_used.gpuinfo()))
    logger.info("gpu_max,list:{}{}".format(gpu_used.used_max,
                                           gpu_used.used_list))
    import sys
    sys.exit()
    ##################################### added ##################################

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats,
                                              plot=plots,
                                              save_dir=save_dir,
                                              names=names)
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(
            1)  # [P, R, mAP@.5, mAP@.5:.95]
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64),
                         minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3
              for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print(
            'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g'
            % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        if wandb and wandb.run:
            wandb.log({"Images": wandb_images})
            wandb.log({
                "Validation": [
                    wandb.Image(str(f), caption=f.name)
                    for f in sorted(save_dir.glob('test*.jpg'))
                ]
            })

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights
                 ).stem if weights is not None else ''  # weights
        anno_json = '../coco/annotations/instances_val2017.json'  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [
                    int(Path(x).stem) for x in dataloader.dataset.img_files
                ]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print(f'pycocotools unable to run: {e}')

    # Return results
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    model.float()  # for training
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map,
            *(loss.cpu() / len(dataloader)).tolist()), maps, t
Esempio n. 39
0
#coding: utf-8
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2*torch.rand(x.size())  # noisy y data (tensor), shape=(100, 1)
x, y = Variable(x, requires_grad=False), Variable(y, requires_grad=False)


def save():
    net = torch.nn.Sequential(
        torch.nn.Linear(1,30),
        torch.nn.ReLU(),
        torch.nn.Linear(30,30),
        torch.nn.ReLU(),
        torch.nn.Linear(30,1)
    )

    # optm = torch.optim.SGD(net.parameters(),lr=5e-2)
    optm = torch.optim.Adam(net.parameters())

    loss_func = torch.nn.MSELoss()

    for i in range(100):
        pred = net(x)
        loss = loss_func(pred,y)
        optm.zero_grad()
        loss.backward()
        optm.step()
    torch.save(net, 'net.pkl')  # save the entire network
Esempio n. 40
0
    def __init__(self, model_config: AttrDict, model_name: str):
        super().__init__()

        assert model_config.INPUT_TYPE in ["rgb",
                                           "bgr"], "Input type not supported"
        trunk_config = copy.deepcopy(model_config.TRUNK.VISION_TRANSFORMERS)

        logging.info("Building model: Vision Transformer from yaml config")
        # Hacky workaround
        trunk_config = AttrDict(
            {k.lower(): v
             for k, v in trunk_config.items()})

        img_size = trunk_config.image_size
        patch_size = trunk_config.patch_size
        in_chans = 3
        embed_dim = trunk_config.hidden_dim
        depth = trunk_config.num_layers
        num_heads = trunk_config.num_heads
        mlp_ratio = 4.0
        qkv_bias = trunk_config.qkv_bias
        qk_scale = trunk_config.qk_scale
        drop_rate = trunk_config.dropout_rate
        attn_drop_rate = trunk_config.attention_dropout_rate
        drop_path_rate = trunk_config.drop_path_rate
        hybrid_backbone_string = None
        # TODO Implement hybrid backbones
        if "HYBRID" in trunk_config.keys():
            hybrid_backbone_string = trunk_config.HYBRID
        norm_layer = nn.LayerNorm

        self.num_features = (
            self.embed_dim
        ) = embed_dim  # num_features for consistency with other models

        # TODO : Enable Hybrid Backbones
        if hybrid_backbone_string:
            self.patch_embed = globals()[hybrid_backbone_string](
                out_dim=embed_dim, img_size=img_size)
        # if hybrid_backbone is not None:
        #     self.patch_embed = HybridEmbed(
        #         hybrid_backbone,
        #         img_size=img_size,
        #         in_chans=in_chans,
        #         embed_dim=embed_dim,
        #     )
        else:
            self.patch_embed = PatchEmbed(
                img_size=img_size,
                patch_size=patch_size,
                in_chans=in_chans,
                embed_dim=embed_dim,
            )
        num_patches = self.patch_embed.num_patches

        self.class_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embedding = nn.Parameter(
            torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
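        # e.g. depth=4, drop_path_rate=0.3 gives dpr = [0.0, 0.1, 0.2, 0.3]:
        # deeper blocks are dropped with higher probability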
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[i],
                norm_layer=norm_layer,
            ) for i in range(depth)
        ])
        self.norm = norm_layer(embed_dim)

        # NOTE as per official impl, we could have a pre-logits
        # representation dense layer + tanh here
        # self.repr = nn.Linear(embed_dim, representation_size)
        # self.repr_act = nn.Tanh()

        trunc_normal_(self.pos_embedding, std=0.02)
        trunc_normal_(self.class_token, std=0.02)
        self.apply(self._init_weights)
Esempio n. 41
0
    def __init__(self, model_config, model_name):
        super().__init__()
        trunk_config = copy.deepcopy(model_config.TRUNK.CONVIT)
        trunk_config.update(model_config.TRUNK.VISION_TRANSFORMERS)

        logging.info("Building model: ConViT from yaml config")
        # Hacky workaround
        trunk_config = AttrDict(
            {k.lower(): v
             for k, v in trunk_config.items()})

        image_size = trunk_config.image_size
        patch_size = trunk_config.patch_size
        classifier = trunk_config.classifier
        assert image_size % patch_size == 0, "Input shape indivisible by patch size"
        assert classifier in ["token", "gap"], "Unexpected classifier mode"
        n_gpsa_layers = trunk_config.n_gpsa_layers
        class_token_in_local_layers = trunk_config.class_token_in_local_layers
        mlp_dim = trunk_config.mlp_dim
        embed_dim = trunk_config.hidden_dim
        locality_dim = trunk_config.locality_dim
        attention_dropout_rate = trunk_config.attention_dropout_rate
        dropout_rate = trunk_config.dropout_rate
        drop_path_rate = trunk_config.drop_path_rate
        num_layers = trunk_config.num_layers
        locality_strength = trunk_config.locality_strength
        num_heads = trunk_config.num_heads
        qkv_bias = trunk_config.qkv_bias
        qk_scale = trunk_config.qk_scale
        use_local_init = trunk_config.use_local_init

        hybrid_backbone = None
        if "hybrid" in trunk_config.keys():
            hybrid_backbone = trunk_config.hybrid

        in_chans = 3
        # TODO: Make this configurable
        norm_layer = nn.LayerNorm

        self.classifier = classifier
        self.n_gpsa_layers = n_gpsa_layers
        self.class_token_in_local_layers = class_token_in_local_layers
        # For consistency with other models
        self.num_features = self.embed_dim = self.hidden_dim = embed_dim
        self.locality_dim = locality_dim

        # Hybrid backbones not tested
        if hybrid_backbone is not None:
            self.patch_embed = HybridEmbed(
                hybrid_backbone,
                img_size=image_size,
                in_chans=in_chans,
                embed_dim=embed_dim,
            )
        else:
            self.patch_embed = PatchEmbed(
                img_size=image_size,
                patch_size=patch_size,
                in_chans=in_chans,
                embed_dim=embed_dim,
            )

        seq_length = (image_size // patch_size)**2
        self.seq_length = seq_length

        self.class_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embedding = nn.Parameter(torch.zeros(1, seq_length,
                                                      embed_dim))
        self.pos_drop = nn.Dropout(p=dropout_rate)

        if class_token_in_local_layers:
            seq_length += 1

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_layers)]

        layers = []
        for i in range(num_layers):
            if i < self.n_gpsa_layers:
                if locality_strength > 0:
                    layer_locality_strength = locality_strength
                else:
                    layer_locality_strength = 1 / (i + 1)
                layers.append(
                    AttentionBlock(
                        attention_module=GPSA,
                        embed_dim=embed_dim,
                        num_heads=num_heads,
                        mlp_dim=mlp_dim,
                        qkv_bias=qkv_bias,
                        qk_scale=qk_scale,
                        dropout_rate=dropout_rate,
                        attention_dropout_rate=attention_dropout_rate,
                        drop_path_rate=dpr[i],
                        norm_layer=norm_layer,
                        locality_strength=layer_locality_strength,
                        locality_dim=self.locality_dim,
                        use_local_init=use_local_init,
                    ))
            else:
                layers.append(
                    AttentionBlock(
                        attention_module=SelfAttention,
                        embed_dim=embed_dim,
                        num_heads=num_heads,
                        mlp_dim=mlp_dim,
                        qkv_bias=qkv_bias,
                        qk_scale=qk_scale,
                        dropout_rate=dropout_rate,
                        attention_dropout_rate=attention_dropout_rate,
                        drop_path_rate=dpr[i],
                        norm_layer=norm_layer,
                    ))
        self.blocks = nn.ModuleList(layers)
        self.norm = norm_layer(embed_dim)

        trunc_normal_(self.pos_embedding, std=0.02)
        trunc_normal_(self.class_token, std=0.02)
        self.apply(self._init_weights)
Esempio n. 42
0
"""
View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou

Dependencies:
torch: 0.1.11
"""
import torch
import torch.utils.data as Data

torch.manual_seed(1)    # reproducible

BATCH_SIZE = 5
# BATCH_SIZE = 8

x = torch.linspace(1, 10, 10)       # this is x data (torch tensor)
y = torch.linspace(10, 1, 10)       # this is y data (torch tensor)

torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,      # torch TensorDataset format
    batch_size=BATCH_SIZE,      # mini batch size
    shuffle=True,               # random shuffle for training
    num_workers=2,              # subprocesses for loading data
)
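
# with 10 samples and BATCH_SIZE = 5, each epoch below yields 2 shuffled mini-batches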

for epoch in range(3):   # train entire dataset 3 times
    for step, (batch_x, batch_y) in enumerate(loader):  # for each training step
        # train your data...
        print('Epoch: ', epoch, '| Step: ', step, '| batch x: ',
              batch_x.numpy(), '| batch y: ', batch_y.numpy())
Esempio n. 43
0
import torch
import torch.utils.data as Data

BATCH_SIZE = 5

x = torch.linspace(1, 10, 10)  # this is x data (torch tensor)
y = torch.linspace(10, 1, 10)  # this is y data (torch tensor)

torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=2,
)

if __name__ == '__main__':
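    # the __main__ guard is required when num_workers > 0 under the spawn
    # start method (Windows/macOS); workers re-import this module on startup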

    for epoch in range(3):
        for step, (batch_x, batch_y) in enumerate(loader):
            # training....
            print('Epoch: ', epoch, '| Step: ', step, '| batch x: ',
                  batch_x.numpy(), '| batch y: ', batch_y.numpy())
Esempio n. 44
0
def get_region_boxes_v2(output,
                        n_models,
                        conf_thresh,
                        num_classes,
                        anchors,
                        num_anchors,
                        only_objectness=1,
                        validation=False):
    cs = n_models
    nA = num_anchors
    nC = num_classes
    anchor_step = int(len(anchors) / num_anchors)
    if len(output.shape) == 3:
        output = output.unsqueeze(0)
    batch = output.shape[0]
    assert (output.shape[1] == (5 + num_classes) * num_anchors)
    nH = h = output.shape[2]
    nW = w = output.shape[3]
    assert (batch % n_models == 0)
    bs = batch // n_models

    t0 = time.time()
    all_boxes = []
    # import pdb; pdb.set_trace()
    cls = output.view(output.shape[0], nA, (5 + nC), nH, nW)
    cls = cls.index_select(2,
                           torch.linspace(5, 5 + nC - 1, nC).long()).squeeze()
    cls = cls.view(bs, cs,
                   nA * nC * nH * nW).transpose(1, 2).contiguous().view(
                       bs * nA * nC * nH * nW, cs)
    normfn = torch.nn.Softmax(dim=1)
    print("=======time:", float(round((time.time() - t0) * 1000)))
    # cls = torch.nn.Softmax(dim=1)(Variable(cls)).data
    cls = normfn(Variable(cls)).data
    cls_confs = cls.view(bs, nA * nC * nH * nW,
                         cs).transpose(1, 2).contiguous().view(
                             bs * cs * nA, nC, nH * nW).transpose(1, 2).view(
                                 bs * cs * nA * nH * nW, nC)

    output = output.view(batch * num_anchors, 5 + num_classes,
                         h * w).transpose(0, 1).contiguous().view(
                             5 + num_classes, batch * num_anchors * h * w)

    grid_x = torch.linspace(0, w - 1, w).repeat(h, 1).repeat(
        batch * num_anchors, 1, 1).view(batch * num_anchors * h * w)
    grid_y = torch.linspace(0, h - 1, h).repeat(w, 1).t().repeat(
        batch * num_anchors, 1, 1).view(batch * num_anchors * h * w)
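    # YOLOv2-style decoding: box centers are sigmoid offsets added to the cell
    # grid; widths/heights (below) scale the anchors by exp(t)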
    xs = torch.sigmoid(output[0]) + grid_x
    ys = torch.sigmoid(output[1]) + grid_y

    anchor_w = torch.Tensor(anchors).view(num_anchors,
                                          anchor_step).index_select(
                                              1, torch.LongTensor([0]))
    anchor_h = torch.Tensor(anchors).view(num_anchors,
                                          anchor_step).index_select(
                                              1, torch.LongTensor([1]))
    anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h * w).view(
        batch * num_anchors * h * w)
    anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h * w).view(
        batch * num_anchors * h * w)
    ws = torch.exp(output[2]) * anchor_w
    hs = torch.exp(output[3]) * anchor_h

    det_confs = torch.sigmoid(output[4])

    # cls_confs = torch.nn.Softmax()(Variable(output[5:5+num_classes].transpose(0,1))).data
    #print("==========",cls_confs.shape)
    #cls_confs=torch.sigmoid(cls_confs)

    print("==========", cls_confs.shape)
    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)  #33800L, 1L
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1)
    t1 = time.time()

    sz_hw = h * w
    sz_hwa = sz_hw * num_anchors
    det_confs = convert2cpu(det_confs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids)
    xs = convert2cpu(xs)
    ys = convert2cpu(ys)
    ws = convert2cpu(ws)
    hs = convert2cpu(hs)
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
    t2 = time.time()
    #det_confs shape:[1,batch*num_anchors*h*w]
    #print(batch,h,w,det_confs.shape,cls_max_confs.shape,cls_confs.shape,num_classes)
    #(1280, 13, 13, (1081600L,), (1081600L,), (1081600L, 1L), 1)
    for b in range(batch):  #40
        boxes = []
        for cy in range(h):  #13
            for cx in range(w):  #13
                for i in range(num_anchors):  #5
                    ind = b * sz_hwa + i * sz_hw + cy * w + cx
                    det_conf = det_confs[ind]
                    if only_objectness:
                        conf = det_confs[ind]
                    else:  ##object confidence * cls confidence
                        conf = det_confs[ind] * cls_max_confs[ind]
                    #print(b,cy,cx,i,conf)
                    if conf > conf_thresh:
                        bcx = xs[ind]
                        bcy = ys[ind]
                        bw = ws[ind]
                        bh = hs[ind]
                        cls_max_conf = cls_max_confs[ind]
                        cls_max_id = cls_max_ids[ind]
                        box = [
                            bcx / w, bcy / h, bw / w, bh / h, det_conf,
                            cls_max_conf, cls_max_id
                        ]
                        if (not only_objectness) and validation:
                            for c in range(num_classes):
                                tmp_conf = cls_confs[ind][c]
                                if c != cls_max_id and tmp_conf > conf_thresh:  #det_confs[ind]*tmp_conf > conf_thresh:
                                    box.append(tmp_conf)
                                    box.append(c)
                        boxes.append(box)
            #print("====================================",len(boxes))
        all_boxes.append(boxes)
    t3 = time.time()
    if False:
        print('---------------------------------')
        print('matrix computation : %f' % (t1 - t0))
        print('        gpu to cpu : %f' % (t2 - t1))
        print('      boxes filter : %f' % (t3 - t2))
        print('---------------------------------')
    return all_boxes
Esempio n. 45
0
"""
Know more, visit 莫烦Python: https://morvanzhou.github.io/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou

Dependencies:
torch: 0.1.11
"""
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# fake data
x = torch.linspace(-5, 5, 200)  # x data (tensor), shape=(200,)
x = Variable(x)
x_np = x.data.numpy()

# following are popular activation functions
y_relu = F.relu(x).data.numpy()
y_sigmoid = F.sigmoid(x).data.numpy()
y_tanh = F.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()
# y_softmax = F.softmax(x)  # softmax is a special activation: it outputs a probability distribution

# plt to visualize these activation function
plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')
Esempio n. 46
0
"""
Dependencies:
torch: 0.1.11
matplotlib
"""
import torch
import torch.utils.data as Data
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

LR = 0.01
BATCH_SIZE = 32
EPOCH = 12

# fake dataset
x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
y = x.pow(2) + 0.1*torch.normal(torch.zeros(*x.size()))

# plot dataset
plt.scatter(x.numpy(), y.numpy())
plt.show()

# put dateset into torch dataset
torch_dataset = Data.TensorDataset(data_tensor=x, target_tensor=y)
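# NOTE: data_tensor=/target_tensor= are the pre-0.4 keyword names; newer
# PyTorch versions take positional tensors: Data.TensorDataset(x, y)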
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2,)


# default network
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
Esempio n. 47
0
# (`grid` here comes from an earlier notebook cell that is not part of this excerpt)
G = torch.bmm(grid[:, :, 0, 1].unsqueeze(2), grid[:, 0, :, 0].unsqueeze(1))
G.size()
plt.imshow(G.squeeze())

'''
Grid created by Jason for cropping
'''

# the grid is created by an inner product across a linspace in x & y
N = 1
W, H = 128, 128
scale = 0.7

theta2 = torch.from_numpy(np.array([[0.0, 0.5, 0.2]])).type(torch.float32)

base_grid = [torch.linspace(-1, 1, W).expand(N, W),
             torch.linspace(-1, 1, H).expand(N, H)]
m_x = torch.max(torch.zeros_like(base_grid[0]),
                1 - torch.abs(theta2[:, 1].unsqueeze(1) - base_grid[0])).unsqueeze(2)
m_y = torch.max(torch.zeros_like(base_grid[1]),
                1 - torch.abs(theta2[:, 2].unsqueeze(1) - base_grid[1])).unsqueeze(1)

# the final mask is a batch-matrix-multiple of each inner product
# the dimensions expected here are m_x = [N, W, 1] & m_y = [N, 1, H]
M = torch.bmm(m_x, m_y)
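# M has shape [N, W, H]: a separable "tent" mask peaking where the base grids
# equal the offsets theta2[:, 1] (x) and theta2[:, 2] (y)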

M_clip = torch.clamp(M, M.max() * scale, M.max())

plt.rcParams['figure.figsize'] = 8, 8

fig, axis = plt.subplots(nrows=1, ncols=2)
Esempio n. 48
0
    def __init__(self,
                 image_size,
                 border_size,
                 input_nc=3,
                 output_nc=3,
                 ngf=64,
                 norm_layer=nn.BatchNorm2d,
                 use_dropout=False,
                 n_blocks=6,
                 padding_type='reflect'):
        assert (n_blocks >= 0)
        super(FDS, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        # Encoder
        encoder = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
            norm_layer(ngf),
            nn.ReLU(True)
        ]

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            encoder += [
                nn.Conv2d(ngf * mult,
                          ngf * mult * 2,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias=use_bias),
                norm_layer(ngf * mult * 2),
                nn.ReLU(True)
            ]

        mult = 2**n_downsampling

        for i in range(int(n_blocks / 2)):
            encoder += [
                ResnetBlock(ngf * mult,
                            padding_type=padding_type,
                            norm_layer=norm_layer,
                            use_dropout=use_dropout,
                            use_bias=use_bias)
            ]

        # 1x1 conv to reduce dimensionality
        encoder += [
            nn.Conv2d(256, 16, kernel_size=1, padding=0),
            nn.BatchNorm2d(16)
        ]

        self.encoder = nn.Sequential(*encoder)

        # Decoder
        decoder = []
        decoder += [
            nn.Conv2d(16, 256, kernel_size=1, padding=0),
            nn.BatchNorm2d(256)
        ]

        for i in range(int(n_blocks / 2)):
            decoder += [
                ResnetBlock(ngf * mult,
                            padding_type=padding_type,
                            norm_layer=norm_layer,
                            use_dropout=use_dropout,
                            use_bias=use_bias)
            ]

        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            decoder += [  #nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),kernel_size=3, stride=2,padding=1, output_padding=1,bias=use_bias),
                nn.Upsample(scale_factor=2, mode='nearest'),
                nn.Conv2d(ngf * mult,
                          int(ngf * mult / 2),
                          kernel_size=3,
                          stride=1,
                          padding=1,
                          bias=use_bias),  # output_padding=1,
                norm_layer(int(ngf * mult / 2)),
                nn.ReLU(True)
            ]
        decoder += [nn.ReflectionPad2d(3)]
        decoder += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        decoder += [nn.Tanh()]

        self.decoder = nn.Sequential(*decoder)

        self.drop_out = nn.Dropout(p=0.1)
        self.drop_out50 = nn.Dropout(p=0.5)

        # small grid to apply warp on the feature maps
        yy = torch.linspace(-1, 1, int(image_size[0] / 4))
        xx = torch.linspace(-1, 1, int(
            (image_size[1] + (border_size * 2)) / 4))
        grid_x, grid_y = torch.meshgrid(yy, xx)
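        # note: the grid_x/grid_y names are swapped relative to meshgrid's
        # output order, but the cat below still places the horizontal
        # coordinate first, matching F.grid_sample's (x, y) convention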
        self.grid_small = torch.cat(
            [grid_y.unsqueeze(2), grid_x.unsqueeze(2)], dim=2).cuda()
Esempio n. 49
0
def test(model=None, args=None, params=None):
    if model is not None:
        device = next(model.parameters()).device
    else:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        model = torch.load(os.path.join('weights', 'best.pt'),
                           device)['model'].float().fuse().eval()

    half = device.type != 'cpu'
    if half:
        model.half()

    model.eval()

    iou_v = torch.linspace(0.5, 0.95, 10).to(device)
    n_iou = iou_v.numel()

    file_names = []
    with open('../Dataset/Dubai/test.txt') as f:
        for file_name in f.readlines():
            file_names.append('../Dataset/Dubai/images/' + file_name.rstrip() +
                              '.jpg')
    loader, _ = input_fn(file_names, args.image_size, args.batch_size // 2,
                         model.head.stride.max(), names, params)

    seen = 0
    s = ('%10s' * 3) % ('precision', 'recall', 'mAP')
    p, r, f1, mp, mr, map50, mean_ap, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    stats, ap, ap_class = [], [], []
    for img, targets, paths, shapes in tqdm.tqdm(loader, desc=s):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()
        img /= 255.0
        targets = targets.to(device)
        nb, _, height, width = img.shape
        wh_wh = torch.Tensor([width, height, width, height]).to(device)

        with torch.no_grad():
            t = util.time_synchronized()
            inf_out, train_out = model(img)
            t0 += util.time_synchronized() - t
            t = util.time_synchronized()
            output = util.non_max_suppression(inf_out, 0.001)
            t1 += util.time_synchronized() - t

        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            t_cls = labels[:, 0].tolist() if nl else []
            seen += 1

            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, n_iou, dtype=torch.bool),
                                  torch.Tensor(), torch.Tensor(), t_cls))
                continue

            pred_n = pred.clone()
            util.scale_coords(img[si].shape[1:], pred_n[:, :4], shapes[si][0],
                              shapes[si][1])
            correct = torch.zeros(pred.shape[0],
                                  n_iou,
                                  dtype=torch.bool,
                                  device=device)
            if nl:
                detected = []
                t_cls_tensor = labels[:, 0]
                t_box = util.wh2xy(labels[:, 1:5]) * wh_wh
                util.scale_coords(img[si].shape[1:], t_box, shapes[si][0],
                                  shapes[si][1])

                for cls in torch.unique(t_cls_tensor):
                    ti = (cls == t_cls_tensor).nonzero(as_tuple=False).view(-1)
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)

                    if pi.shape[0]:
                        iou_list, i = util.box_iou(pred_n[pi, :4],
                                                   t_box[ti]).max(1)

                        detected_set = set()
                        for j in (iou_list > iou_v[0]).nonzero(as_tuple=False):
                            d = ti[i[j]]
                            if d.item() not in detected_set:
                                detected_set.add(d.item())
                                detected.append(d)
                                correct[pi[j]] = iou_list[j] > iou_v
                                if len(detected) == nl:
                                    break

            stats.append(
                (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), t_cls))

    stats = [numpy.concatenate(x, 0) for x in zip(*stats)]
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = util.ap_per_class(*stats)
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1)
        mp, mr, map50, mean_ap = p.mean(), r.mean(), ap50.mean(), ap.mean()

    print('%10.3g' * 3 % (mp, mr, mean_ap))

    if model is None:
        t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1))
        s = f'Speed: {t[0]:.1f}/{t[1]:.1f}/{t[2]:.1f} ms inference/nms/total'
        print(
            f'{s} per {args.image_size}x{args.image_size} image at batch-size {args.batch_size}'
        )

    model.float()
    return mp, mr, map50, mean_ap
Esempio n. 50
0
def read_sdf(file_path, target_grid_res, target_bounding_box_min,
             target_bounding_box_max, target_voxel_size):

    with open(file_path) as file:
        line = file.readline()

        # Get grid resolutions
        grid_res = line.split()
        grid_res_x = int(grid_res[0])
        grid_res_y = int(grid_res[1])
        grid_res_z = int(grid_res[2])

        # Get bounding box min
        line = file.readline()
        bounding_box_min = line.split()
        bounding_box_min_x = float(bounding_box_min[0])
        bounding_box_min_y = float(bounding_box_min[1])
        bounding_box_min_z = float(bounding_box_min[2])

        line = file.readline()
        voxel_size = float(line)

        # max bounding box (add 0.0001 to avoid rounding error)
        bounding_box_max_x = bounding_box_min_x + voxel_size * (grid_res_x - 1)  # + 0.0001
        bounding_box_max_y = bounding_box_min_y + voxel_size * (grid_res_y - 1)  # + 0.0001
        bounding_box_max_z = bounding_box_min_z + voxel_size * (grid_res_z - 1)  # + 0.0001

        min_bounding_box_min = min(bounding_box_min_x, bounding_box_min_y,
                                   bounding_box_min_z)
        print(bounding_box_min_x, bounding_box_min_y, bounding_box_min_z)
        max_bounding_box_max = max(bounding_box_max_x, bounding_box_max_y,
                                   bounding_box_max_z)
        print(bounding_box_max_x, bounding_box_max_y, bounding_box_max_z)
        max_dist = max(bounding_box_max_x - bounding_box_min_x,
                       bounding_box_max_y - bounding_box_min_y,
                       bounding_box_max_z - bounding_box_min_z)

        max_grid_res = max(grid_res_x, grid_res_y, grid_res_z)

        # Allocate the source grid with a placeholder value, then fill it with
        # the SDF values from the file (values are stored in z-major order)
        grid = [[[2 for _ in range(grid_res_z)] for _ in range(grid_res_y)]
                for _ in range(grid_res_x)]

        for i in range(grid_res_z):
            for j in range(grid_res_y):
                for k in range(grid_res_x):
                    grid[k][j][i] = float(file.readline())

        grid = Tensor(grid)

        target_grid = Tensor(target_grid_res, target_grid_res, target_grid_res)

        linear_space_x = torch.linspace(0, target_grid_res - 1,
                                        target_grid_res)
        linear_space_y = torch.linspace(0, target_grid_res - 1,
                                        target_grid_res)
        linear_space_z = torch.linspace(0, target_grid_res - 1,
                                        target_grid_res)
        first_loop = linear_space_x.repeat(
            target_grid_res * target_grid_res,
            1).t().contiguous().view(-1).unsqueeze_(1)
        second_loop = linear_space_y.repeat(
            target_grid_res,
            target_grid_res).t().contiguous().view(-1).unsqueeze_(1)
        third_loop = linear_space_z.repeat(target_grid_res *
                                           target_grid_res).unsqueeze_(1)
        loop = torch.cat((first_loop, second_loop, third_loop), 1).cuda()

        min_x = Tensor([bounding_box_min_x]).repeat(
            target_grid_res * target_grid_res * target_grid_res, 1)
        min_y = Tensor([bounding_box_min_y]).repeat(
            target_grid_res * target_grid_res * target_grid_res, 1)
        min_z = Tensor([bounding_box_min_z]).repeat(
            target_grid_res * target_grid_res * target_grid_res, 1)
        bounding_min_matrix = torch.cat((min_x, min_y, min_z), 1)

        move_to_center_x = Tensor([
            (max_dist - (bounding_box_max_x - bounding_box_min_x)) / 2
        ]).repeat(target_grid_res * target_grid_res * target_grid_res, 1)
        move_to_center_y = Tensor([
            (max_dist - (bounding_box_max_y - bounding_box_min_y)) / 2
        ]).repeat(target_grid_res * target_grid_res * target_grid_res, 1)
        move_to_center_z = Tensor([
            (max_dist - (bounding_box_max_z - bounding_box_min_z)) / 2
        ]).repeat(target_grid_res * target_grid_res * target_grid_res, 1)
        move_to_center_matrix = torch.cat(
            (move_to_center_x, move_to_center_y, move_to_center_z), 1)

        # Get the position of the grid points in the refined grid
        points = bounding_min_matrix + target_voxel_size * max_dist / (
            target_bounding_box_max -
            target_bounding_box_min) * loop - move_to_center_matrix
        # Snap points that fall outside the source bounding box to a corner
        # of the box so the interpolation below stays in range
        if points[(points[:, 0] < bounding_box_min_x)].shape[0] != 0:
            points[(points[:, 0] < bounding_box_min_x)] = Tensor(
                [bounding_box_max_x, bounding_box_max_y,
                 bounding_box_max_z]).view(1, 3)
        if points[(points[:, 1] < bounding_box_min_y)].shape[0] != 0:
            points[(points[:, 1] < bounding_box_min_y)] = Tensor(
                [bounding_box_max_x, bounding_box_min_y,
                 bounding_box_min_z]).view(1, 3)
        if points[(points[:, 2] < bounding_box_min_z)].shape[0] != 0:
            points[(points[:, 2] < bounding_box_min_z)] = Tensor(
                [bounding_box_max_x, bounding_box_min_y,
                 bounding_box_min_z]).view(1, 3)
        if points[(points[:, 0] > bounding_box_max_x)].shape[0] != 0:
            points[(points[:, 0] > bounding_box_max_x)] = Tensor(
                [bounding_box_max_x, bounding_box_min_y,
                 bounding_box_min_z]).view(1, 3)
        if points[(points[:, 1] > bounding_box_max_y)].shape[0] != 0:
            points[(points[:, 1] > bounding_box_max_y)] = Tensor(
                [bounding_box_max_x, bounding_box_min_y,
                 bounding_box_min_z]).view(1, 3)
        if points[(points[:, 2] > bounding_box_max_z)].shape[0] != 0:
            points[(points[:, 2] > bounding_box_max_z)] = Tensor(
                [bounding_box_max_x, bounding_box_min_y,
                 bounding_box_min_z]).view(1, 3)
        voxel_min_point_index_x = torch.floor(
            (points[:, 0].unsqueeze_(1) - min_x) /
            voxel_size).clamp(max=grid_res_x - 2)
        voxel_min_point_index_y = torch.floor(
            (points[:, 1].unsqueeze_(1) - min_y) /
            voxel_size).clamp(max=grid_res_y - 2)
        voxel_min_point_index_z = torch.floor(
            (points[:, 2].unsqueeze_(1) - min_z) /
            voxel_size).clamp(max=grid_res_z - 2)
        voxel_min_point_index = torch.cat(
            (voxel_min_point_index_x, voxel_min_point_index_y,
             voxel_min_point_index_z), 1)
        voxel_min_point = bounding_min_matrix + voxel_min_point_index * voxel_size

        # Compute the sdf value of the grid points in the refined grid
        target_grid = calculate_sdf_value(
            grid, points, voxel_min_point, voxel_min_point_index, voxel_size,
            grid_res_x, grid_res_y,
            grid_res_z).view(target_grid_res, target_grid_res, target_grid_res)
        return target_grid
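
A hedged usage sketch for read_sdf; the file name and sizes below are placeholders, and since the function calls .cuda() internally a GPU build of torch is assumed:

# Resample a raw .sdf grid onto a 64^3 grid spanning [-1, 1]^3 (placeholders)
target_res = 64
bbox_min, bbox_max = -1.0, 1.0
voxel = (bbox_max - bbox_min) / (target_res - 1)
sdf_grid = read_sdf('shape.sdf', target_res, bbox_min, bbox_max, voxel)
print(sdf_grid.shape)  # torch.Size([64, 64, 64])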
Example 51
"""
Dependencies:
torch
matplotlib
"""
import torch
import torch.utils.data as Data
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

LR = 0.01
BATCH_SIZE = 32
EPOCH = 12

# fake dataset
x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
y = x.pow(2) + 0.1 * torch.normal(torch.zeros(*x.size()))

# plot dataset
plt.scatter(x.numpy(), y.numpy())
plt.show()

# put the dataset into a torch Dataset wrapper
torch_dataset = Data.TensorDataset(x, y)  # older torch used data_tensor=/target_tensor= keywords
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=2,
)
Example 52
def make_mesh():
    domain = torch.linspace(-2.5, 2.5, 50)
    z1, z2 = torch.meshgrid((domain, domain))
    z = torch.stack((z1, z2), dim=-1)
    return z1, z2, z
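
A hedged sketch of how such a mesh is typically consumed: evaluate a 2-D density over the stacked grid z and contour-plot it. The standard-normal density here is only a stand-in:

import torch
import matplotlib.pyplot as plt

z1, z2, z = make_mesh()
dist = torch.distributions.MultivariateNormal(torch.zeros(2), torch.eye(2))
density = dist.log_prob(z).exp()  # shape (50, 50), one value per grid point
plt.contourf(z1.numpy(), z2.numpy(), density.numpy())
plt.show()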
Example 53

    c -= learning_rate * grad_c
    d -= learning_rate * grad_d
t1 = time.time()
print(f'Result: y = {a} + {b} x + {c} x^2 + {d} x^3')

y1 = np.array([a + b * x + c * x**2 + d * x**3 for x in x])

fig, axes = plot_make()
sns.scatterplot(x=x, y=y, ax=axes, linewidth=0, s=0.1)
sns.scatterplot(x=x, y=y1, ax=axes, linewidth=0, s=0.1)

#%%

dtype = torch.float
device = torch.device("cuda:0")
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)

a = torch.randn((), device=device, dtype=dtype)
b = torch.randn((), device=device, dtype=dtype)
c = torch.randn((), device=device, dtype=dtype)
d = torch.randn((), device=device, dtype=dtype)

learning_rate = 1e-6

for t in range(2000):
    # Forward pass: compute predicted y
    y_pred = a + b * x + c * x**2 + d * x**3

    # Compute and print loss
    loss = (y_pred - y).pow(2).sum().item()

    # Gradients of the squared-error loss w.r.t. each coefficient
    grad_y_pred = 2.0 * (y_pred - y)
    grad_a = grad_y_pred.sum()
    grad_b = (grad_y_pred * x).sum()
    grad_c = (grad_y_pred * x**2).sum()
    grad_d = (grad_y_pred * x**3).sum()

    # Gradient-descent update
    a -= learning_rate * grad_a
    b -= learning_rate * grad_b
    c -= learning_rate * grad_c
    d -= learning_rate * grad_d
Example 54
    def __init__(self, reg_max=16):
        super(Integral, self).__init__()
        self.reg_max = reg_max
        self.register_buffer('project',
                             torch.linspace(0, self.reg_max, self.reg_max + 1))
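
A hedged sketch (following the Generalized Focal Loss formulation this buffer comes from) of how 'project' is typically consumed: softmax over the reg_max + 1 bins per box side, then a dot product with the buffer gives the expected offset. The names below are illustrative only:

import torch
import torch.nn.functional as F

def integral_forward(logits, project, reg_max=16):
    # logits: (N, 4 * (reg_max + 1)), one discrete distribution per box side
    probs = F.softmax(logits.reshape(-1, reg_max + 1), dim=1)
    # expectation over bin indices = probs . [0, 1, ..., reg_max]
    return (probs @ project).reshape(-1, 4)

project = torch.linspace(0, 16, 17)
offsets = integral_forward(torch.randn(8, 4 * 17), project)  # -> (8, 4)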
Example 55

# PyTorch mini-batch data loading
import torch
import torch.utils.data as Data  # needed for splitting data into batches

# set the random seed
torch.manual_seed(33)
# set the batch size
BATCH_SIZE = 8

x = torch.linspace(1, 16, 16)       # 16 points from 1 to 16
y = torch.linspace(16, 1, 16)       # 16 points from 16 down to 1

print(x)
print(y)

# wrap x and y as a Tensor dataset
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,      # torch TensorDataset format
    batch_size=BATCH_SIZE,      # size of each mini-batch
    shuffle=True,               # whether to shuffle the data
    num_workers=2,              # subprocesses used for data loading
)
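
The snippet stops after building the loader; a minimal sketch of the usual continuation, drawing mini-batches for a few epochs (the main-guard matters because num_workers=2 spawns worker processes on some platforms):

if __name__ == '__main__':
    for epoch in range(3):
        for step, (batch_x, batch_y) in enumerate(loader):
            print('epoch:', epoch, '| step:', step,
                  '| batch_x:', batch_x.numpy(),
                  '| batch_y:', batch_y.numpy())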
Example 56
def make_fields(protein_dict,
                channels=['CA'],
                bin_size=2.0,
                num_bins=50,
                return_bins=False):
    """
    This function takes a protein dict (from load_pdb function) and outputs a
        large tensor containing many atomic "fields" for the protein.

    The fields describe the atomic "density" (an exponentially decaying function
        of number of atoms in a voxel) of any particular atom type.

    Parameters:
        protein_dict (dict, required): dictionary from the load_pdb function

        channels (list-like, optional): the different atomic densities we want fields for
            theoretically these different fields provide different chemical information
            full list of available channels is in protein_dict['atom_type_set']

        bin_size (float, optional): the side-length (angstrom) of a given voxel in the box
            that atomic densities are placed in

        num_bins (int, optional): the number of bins (voxels) along each side
            of the cubic field tensor (i.e., num_bins is the box side length)


    Returns:
        dictionary: a mapping from each channel in channels to its atomic
            density field (a num_bins x num_bins x num_bins array)
    """
    # sets of allowed filters to build channels with
    residue_filters = protein_dict['residue_set']
    atom_filters = protein_dict['atom_type_set']
    residue_property_filters = np.array(['acidic', 'basic', 'polar', 'nonpolar',\
                                         'charged', 'amphipathic'])
    other_filters = np.array(['backbone', 'sidechains'])

    # consolidate into one set of filters
    filter_set = {'atom':atom_filters, 'residue':residue_filters,\
                  'residue_property':residue_property_filters, 'other':other_filters}

    # construct a single empty field, then initialize a dictionary with one
    # empty field for every channel we are going to calculate the density for
    empty_field = torch.zeros((num_bins, num_bins, num_bins))
    fields = {channel: empty_field for channel in channels}

    # create linearly spaced grid (default is -49 to 49 in steps of 2)
    grid_1d = torch.linspace(start=-num_bins / 2 * bin_size + bin_size / 2,
                             end=num_bins / 2 * bin_size - bin_size / 2,
                             steps=num_bins)

    # This makes three 3D meshgrids in for the x, y, and z positions
    # These cubes will be flattened, then used as a reference coordinate system
    # to place the actual channel densities into
    xgrid, ygrid, zgrid = grid_positions(grid_1d)

    for channel_index, channel in enumerate(channels):

        # validate the channel against the allowed filter sets
        channel_allowed = check_channel(channel, filter_set)

        if not channel_allowed:
            # allowed channels: anything in the protein's atom_type_set or
            # residue_set, a residue property, or 'backbone'/'sidechains'
            raise ValueError(
                f"The channel {channel} is not allowed for this protein.")

        # Extract positions of atoms that are part of the current channel
        atom_positions = find_channel_atoms(channel, protein_dict, filter_set)
        # print('This is channel ', channel)
        atom_positions = torch.FloatTensor(atom_positions)

        # xgrid.view(-1, 1) is 125,000 long, because it's viewing a 50x50x50 cube in one column
        # then you repeat that column horizontally for each atom
        xx_xx = xgrid.view(-1, 1).repeat(1, len(atom_positions))
        yy_yy = ygrid.view(-1, 1).repeat(1, len(atom_positions))
        zz_zz = zgrid.view(-1, 1).repeat(1, len(atom_positions))
        # at this point we've created 3 arrays that are 125,000 long
        # and as wide as the number of atoms that are the current channel type
        # these 3 arrays just contain the flattened x,y,z positions of our 50x50x50 box

        # now do the same thing as above, just with the ACTUAL atomic position data
        posx_posx = atom_positions[:, 0].contiguous().view(1, -1).repeat(
            len(xgrid.view(-1)), 1)
        posy_posy = atom_positions[:, 1].contiguous().view(1, -1).repeat(
            len(ygrid.view(-1)), 1)
        posz_posz = atom_positions[:, 2].contiguous().view(1, -1).repeat(
            len(zgrid.view(-1)), 1)
        # three tensors of the same size, with actual atomic coordinates

        # normalizes the atomic positions with respect to the center of the box
        # and calculates density of atoms in each voxel
        sigma = 0.5 * bin_size
        density = torch.exp(-((xx_xx - posx_posx)**2 + (yy_yy - posy_posy)**2 +
                              (zz_zz - posz_posz)**2) / (2 * (sigma)**2))

        # Normalize so each atom density sums to one
        density /= torch.sum(density, dim=0)

        # Sum densities and reshape to original shape
        sum_densities = torch.sum(density, dim=1).view(xgrid.shape)

        # zero-density columns produce 0/0 = NaN in the normalization; zero them
        sum_densities[sum_densities != sum_densities] = 0

        # add two empty dimensions to make it 1x1x50x50x50, needed for CNN
        # sum_densities = sum_densities.unsqueeze(0)
        # sum_densities = sum_densities.unsqueeze(0)

        fields[channel] = sum_densities.numpy()

    if return_bins:
        return fields, num_bins
    else:
        return fields
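
A hedged usage sketch; load_pdb is the companion function the docstring refers to (not shown here), and the .pdb path is a placeholder:

protein_dict = load_pdb('example.pdb')
fields = make_fields(protein_dict, channels=['CA', 'backbone'])
print(fields['CA'].shape)  # (50, 50, 50) density array for the CA channel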
Example 57
"""
Dependencies:
torch: 0.1.11
matplotlib
"""
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

N_SAMPLES = 20
N_HIDDEN = 300

# training data
x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
y = x + 0.3*torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
x, y = Variable(x), Variable(y)

# test data
test_x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
test_y = test_x + 0.3*torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
test_x, test_y = Variable(test_x, volatile=True), Variable(test_y, volatile=True)

# show data
plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta', s=50, alpha=0.5, label='train')
plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='cyan', s=50, alpha=0.5, label='test')
plt.legend(loc='upper left')
plt.ylim((-2.5, 2.5))
plt.show()
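
A minimal sketch of the pair of networks this train/test split is typically used to compare, one plain and one regularized with dropout:

net_overfitting = torch.nn.Sequential(
    torch.nn.Linear(1, N_HIDDEN),
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDEN, N_HIDDEN),
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDEN, 1),
)

net_dropped = torch.nn.Sequential(
    torch.nn.Linear(1, N_HIDDEN),
    torch.nn.Dropout(0.5),  # drop 50% of activations during training
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDEN, N_HIDDEN),
    torch.nn.Dropout(0.5),
    torch.nn.ReLU(),
    torch.nn.Linear(N_HIDDEN, 1),
)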
Example 58
def get_region_boxes(output,
                     netshape,
                     conf_thresh,
                     num_classes,
                     anchors,
                     num_anchors,
                     only_objectness=1,
                     validation=False,
                     use_cuda=True):
    device = torch.device("cuda" if use_cuda else "cpu")
    anchors = anchors.to(device)
    anchor_step = anchors.size(0) // num_anchors
    if output.dim() == 3:
        output = output.unsqueeze(0)
    batch = output.size(0)
    assert (output.size(1) == (5 + num_classes) * num_anchors)
    h = output.size(2)
    w = output.size(3)
    cls_anchor_dim = batch * num_anchors * h * w
    if netshape[0] != 0:
        nw, nh = netshape
    else:
        nw, nh = w, h

    t0 = time.time()
    all_boxes = []
    output = output.view(batch * num_anchors, 5 + num_classes,
                         h * w).transpose(0, 1).contiguous().view(
                             5 + num_classes, cls_anchor_dim)

    grid_x = torch.linspace(0, w - 1,
                            w).repeat(batch * num_anchors, h,
                                      1).view(cls_anchor_dim).to(device)
    grid_y = torch.linspace(0, h - 1, h).repeat(w, 1).t().repeat(
        batch * num_anchors, 1, 1).view(cls_anchor_dim).to(device)
    ix = torch.LongTensor(range(0, 2)).to(device)
    anchor_w = anchors.view(num_anchors,
                            anchor_step).index_select(1, ix[0]).repeat(
                                batch, h * w).view(cls_anchor_dim)
    anchor_h = anchors.view(num_anchors,
                            anchor_step).index_select(1, ix[1]).repeat(
                                batch, h * w).view(cls_anchor_dim)

    xs, ys = output[0].sigmoid() + grid_x, output[1].sigmoid() + grid_y
    ws = output[2].exp() * anchor_w.detach()
    hs = output[3].exp() * anchor_h.detach()
    det_confs = output[4].sigmoid()

    # softmax over the class dimension (the transposed input is 2D, so dim=1)
    cls_confs = torch.nn.Softmax(dim=1)(output[5:5 + num_classes].transpose(
        0, 1)).detach()
    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1)
    t1 = time.time()

    sz_hw = h * w
    sz_hwa = sz_hw * num_anchors
    det_confs = convert2cpu(det_confs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids)
    xs, ys = convert2cpu(xs), convert2cpu(ys)
    ws, hs = convert2cpu(ws), convert2cpu(hs)
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))

    t2 = time.time()
    for b in range(batch):
        boxes = []
        for cy in range(h):
            for cx in range(w):
                for i in range(num_anchors):
                    ind = b * sz_hwa + i * sz_hw + cy * w + cx
                    det_conf = det_confs[ind]
                    conf = det_conf * (cls_max_confs[ind]
                                       if not only_objectness else 1.0)

                    if conf > conf_thresh:
                        bcx = xs[ind]
                        bcy = ys[ind]
                        bw = ws[ind]
                        bh = hs[ind]
                        cls_max_conf = cls_max_confs[ind]
                        cls_max_id = cls_max_ids[ind]
                        box = [
                            bcx / w, bcy / h, bw / nw, bh / nh, det_conf,
                            cls_max_conf, cls_max_id
                        ]
                        if (not only_objectness) and validation:
                            for c in range(num_classes):
                                tmp_conf = cls_confs[ind][c]
                                if c != cls_max_id and det_confs[
                                        ind] * tmp_conf > conf_thresh:
                                    box.append(tmp_conf)
                                    box.append(c)
                        boxes.append(box)
        all_boxes.append(boxes)
    t3 = time.time()
    if False:  # flip to True to print a timing breakdown
        print('---------------------------------')
        print('matrix computation : %f' % (t1 - t0))
        print('        gpu to cpu : %f' % (t2 - t1))
        print('      boxes filter : %f' % (t3 - t2))
        print('---------------------------------')
    return all_boxes
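
A hedged usage sketch for a YOLOv2-style head (13x13 grid, 5 anchors, 20 classes); the anchor values are illustrative only, and the convert2cpu/convert2cpu_long helpers plus the time import are assumed to come from the same module as the function:

anchors = torch.Tensor([1.32, 1.73, 3.19, 4.01, 5.06,
                        8.10, 9.47, 4.84, 11.24, 10.01])
output = torch.randn(1, 5 * (5 + 20), 13, 13)  # fake network output
boxes = get_region_boxes(output, (0, 0), conf_thresh=0.5, num_classes=20,
                         anchors=anchors, num_anchors=5, use_cuda=False)
print(len(boxes[0]), 'boxes above threshold in image 0')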
Example 59
"""
View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou

Dependencies:
torch: 0.1.11
matplotlib
"""
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt

# fake data
x = torch.linspace(-5, 5, 200)  # x data (tensor), shape=(200,)
x = Variable(x)
x_np = x.data.numpy()   # numpy array for plotting

# following are popular activation functions
y_relu = F.relu(x).data.numpy()
y_sigmoid = F.sigmoid(x).data.numpy()
y_tanh = F.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()
# y_softmax = F.softmax(x)  # softmax is special: it maps inputs to a probability distribution


# plt to visualize these activation function
plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
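
The listing is cut off here; a sketch of the remaining panels in the same style:

plt.legend(loc='best')

plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
plt.ylim((-0.2, 1.2))
plt.legend(loc='best')

plt.subplot(223)
plt.plot(x_np, y_tanh, c='red', label='tanh')
plt.ylim((-1.2, 1.2))
plt.legend(loc='best')

plt.subplot(224)
plt.plot(x_np, y_softplus, c='red', label='softplus')
plt.ylim((-0.2, 6))
plt.legend(loc='best')

plt.show()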
Example 60
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from model import AE
from calc_uncertainty import uncertainty
import random
import math

### Make meshgrid
end, d = 5, 101

x1 = torch.linspace(0, end, d)
y1 = torch.linspace(0, end, d)
xx, yy = np.meshgrid(np.linspace(0, end, d),
                     np.linspace(0, end, d))  # linspace matches x1/y1: d is a point count, not a step
uncertainty_data = torch.from_numpy(np.dstack(np.meshgrid(x1, y1)))

### Make training data
z = random.randint(1, 10)
ind_x = list(range(z + 80, z + 90, 1))
# ind_y = [int((a-50)*(a-50)*0.2 + a + 1) for a in ind_x]
ind_y = [int(-1 * a + 101)
         for a in ind_x]  # adjust this mapping to change the shape of the training data
index = [a + b * d for (a, b) in zip(ind_x, ind_y)]
print(ind_x, ind_y, index)
# index = torch.randperm(d*d)[:20] --> if you want random locations for the training data
index = torch.randperm(d * d)[:16]  # overrides the structured index above with 16 random grid cells

uncertainty_data = uncertainty_data.reshape((d * d, 2))
train_data = uncertainty_data[index, :]
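
A quick sanity-check sketch, not part of the original listing: visualize which grid points were picked as training data (plt is already imported above):

plt.scatter(uncertainty_data[:, 0].numpy(), uncertainty_data[:, 1].numpy(),
            s=2, c='lightgray', label='grid')
plt.scatter(train_data[:, 0].numpy(), train_data[:, 1].numpy(),
            s=20, c='red', label='train')
plt.legend()
plt.show()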