def get_pred_ear_figure(self, ear_true, labels, n_images=6):
     ear_true = ear_true.to(self.device)
     # run prediction
     self.eval()
     with torch.no_grad():
         ear_pred = self.forward(ear_true)
     self.train()
     img_true = torch.dstack(ear_true[:n_images].unbind())
     img_pred = torch.dstack(ear_pred[:n_images].unbind())
     img = torch.hstack((img_true, img_pred))
     return img
Example #2
 def get_pred_ear_figure(self, ear_true, ear_pred, n_cols=8):
     bs = ear_true.shape[0]
     n_rows = (bs + n_cols - 1) // n_cols  # ceil division so a partial final row is kept
     imgs = []
     for i in range(n_rows):
         sl = slice(i * n_cols, min((i + 1) * n_cols, bs))
         img_true = torch.dstack(ear_true[sl].unbind())
         img_pred = torch.dstack(ear_pred[sl].unbind())
         img = torch.hstack((img_true, img_pred))
         imgs.append(img)
     img = torch.hstack(imgs)
     return img
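Both variants of get_pred_ear_figure above lean on the same torch.dstack idiom: unbind a batch of [H, W] images, depth-stack them into one [H, W, N] volume, then hstack the true/predicted volumes side by side. A minimal sketch of that shape arithmetic (the 6x32x32 batch is an illustrative assumption):

import torch

batch = torch.rand(6, 32, 32)             # hypothetical batch of 6 grayscale 32x32 images
strip = torch.dstack(batch.unbind())      # six [32, 32] slices -> [32, 32, 6]
pair = torch.hstack((strip, strip))       # concatenate along dim 1 -> [32, 64, 6]
print(strip.shape, pair.shape)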
Example #3
import torch
import matplotlib.pyplot as plt


def contour_plot(xmin, xmax, ymin, ymax, pred_fxn, ngrid=33):
    """
    Make a contour plot of pred_fxn over the rectangle [xmin, xmax] x [ymin, ymax].
    @param xmin: lowest value of x in the plot
    @param xmax: highest value of x in the plot
    @param ymin: ditto for y
    @param ymax: ditto for y
    @param pred_fxn: prediction function that takes an (n x d) tensor as input
                     and returns an (n x 1) tensor of predictions as output
    @param ngrid: number of points to use in contour plot per axis
    """
    # Build grid
    xgrid = torch.linspace(xmin, xmax, ngrid)
    ygrid = torch.linspace(ymin, ymax, ngrid)
    (xx, yy) = torch.meshgrid(xgrid, ygrid, indexing='ij')  # explicit indexing; 'ij' matches the old default

    # Get predictions
    features = torch.dstack((xx, yy)).reshape(-1, 2)
    predictions = pred_fxn(features)

    # Arrange predictions into grid and plot
    zz = predictions.reshape(xx.shape)
    C = plt.contour(xx, yy, zz, cmap='rainbow')
    plt.clabel(C)
    #plt.show()

    return plt.gcf()
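A hedged usage sketch for contour_plot, assuming a toy quadratic prediction function (toy_pred_fxn is illustrative, not part of the original snippet):

import torch
import matplotlib.pyplot as plt

def toy_pred_fxn(features):
    # features: (n, 2) -> (n, 1) bowl-shaped scores
    return (features ** 2).sum(dim=1, keepdim=True)

fig = contour_plot(-2.0, 2.0, -2.0, 2.0, toy_pred_fxn, ngrid=33)
plt.show()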
Example #4
 def tensor_indexing_ops(self):
     x = torch.randn(2, 4)
     y = torch.randn(2, 4, 2)
     t = torch.tensor([[0, 0], [1, 0]])
     mask = x.ge(0.5)
     i = [0, 1]
     return (
         torch.cat((x, x, x), 0),
         torch.concat((x, x, x), 0),
         torch.conj(x),
         torch.chunk(x, 2),
         torch.dsplit(y, i),
         torch.column_stack((x, x)),
         torch.dstack((x, x)),
         torch.gather(x, 0, t),
         torch.hsplit(x, i),
         torch.hstack((x, x)),
         torch.index_select(x, 0, torch.tensor([0, 1])),
         torch.masked_select(x, mask),
         torch.movedim(x, 1, 0),
         torch.moveaxis(x, 1, 0),
         torch.narrow(x, 0, 0, 2),
         torch.nonzero(x),
         torch.permute(x, (0, 1)),
         torch.reshape(x, (-1, )),
     )
Example #5
 def tensor_indexing_ops(self):
     x = torch.randn(2, 4)
     y = torch.randn(4, 4)
     t = torch.tensor([[0, 0], [1, 0]])
     mask = x.ge(0.5)
     i = [0, 1]
      return (
         torch.cat((x, x, x), 0),
         torch.concat((x, x, x), 0),
         torch.conj(x),
         torch.chunk(x, 2),
         torch.dsplit(torch.randn(2, 2, 4), i),
         torch.column_stack((x, x)),
         torch.dstack((x, x)),
         torch.gather(x, 0, t),
         torch.hsplit(x, i),
         torch.hstack((x, x)),
         torch.index_select(x, 0, torch.tensor([0, 1])),
          x[t],  # advanced indexing (Tensor.index is not a public op)
         torch.masked_select(x, mask),
         torch.movedim(x, 1, 0),
         torch.moveaxis(x, 1, 0),
         torch.narrow(x, 0, 0, 2),
         torch.nonzero(x),
         torch.permute(x, (0, 1)),
         torch.reshape(x, (-1, )),
         torch.row_stack((x, x)),
         torch.select(x, 0, 0),
         torch.scatter(x, 0, t, x),
         x.scatter(0, t, x.clone()),
         torch.diagonal_scatter(y, torch.ones(4)),
         torch.select_scatter(y, torch.ones(4), 0, 0),
         torch.slice_scatter(x, x),
         torch.scatter_add(x, 0, t, x),
         x.scatter_(0, t, y),
         x.scatter_add_(0, t, y),
         # torch.scatter_reduce(x, 0, t, reduce="sum"),
         torch.split(x, 1),
         torch.squeeze(x, 0),
         torch.stack([x, x]),
         torch.swapaxes(x, 0, 1),
         torch.swapdims(x, 0, 1),
         torch.t(x),
         torch.take(x, t),
         torch.take_along_dim(x, torch.argmax(x)),
         torch.tensor_split(x, 1),
         torch.tensor_split(x, [0, 1]),
         torch.tile(x, (2, 2)),
         torch.transpose(x, 0, 1),
         torch.unbind(x),
         torch.unsqueeze(x, -1),
         torch.vsplit(x, i),
         torch.vstack((x, x)),
         torch.where(x),
         torch.where(t > 0, t, 0),
         torch.where(t > 0, t, t),
     )
Example #6
    def _first_stage(self, imgs: torch.Tensor):
        with EvalScope(self.pNet):
            _, c, h, w = imgs.shape

            scale = 12.0 / self.minSize  # initial image-pyramid scale
            min_l = min(h, w)

            b, s, i = [], [], []

            while min_l * scale >= 12.:
                imgs = _nnf.interpolate(imgs,
                                        size=[int(h * scale),
                                              int(w * scale)],
                                        mode='area')
                reg, pro = self.pNet(imgs)

                pro = pro[:, 1]

                strd = 2. / scale
                cell = 12. / scale

                msk = torch.ge(pro, self.pNetThreshold)  # b, h, w

                if msk.any():
                    indices = msk.nonzero()  # n, 3 <- (i, y, x)
                    idx, r, c = indices[:, 0], indices[:, 1], indices[:, 2]
                    pro = pro[msk]

                    reg = reg.permute(0, 2, 3,
                                      1)  # b, h, w, c <- (x1^, y1^, x2^, y2^)
                    reg = reg[msk]

                    x1, y1 = c * strd, r * strd
                    x2, y2 = x1 + cell, y1 + cell

                    bbs = torch.dstack([x1, y1, x2, y2]).squeeze(0)
                    bbs = self._bb_reg(bbs, reg)
                    nms_idx = batched_nms(bbs, pro, idx, self.nmsThreshold)

                    b.append(bbs[nms_idx])
                    s.append(pro[nms_idx])
                    i.append(idx[nms_idx])

                scale = scale * self.factor

            if len(b) > 0:
                b = torch.cat(b, dim=0)
                s = torch.cat(s, dim=0)
                i = torch.cat(i, dim=0)

                nms_idx = batched_nms(b, s, i, self.nmsThreshold)
                b = clip_boxes_to_image(b[nms_idx], size=(w, h)).int()
                i = i[nms_idx]

                return b, i
            else:
                return None
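Note the torch.dstack call in _first_stage: for 1-D coordinate tensors x1, y1, x2, y2 of length n, dstack promotes each to (1, n, 1) and concatenates along the last dim, so squeeze(0) yields the (n, 4) box tensor that batched_nms expects. A quick check of that shape arithmetic:

import torch

x1 = torch.arange(5.); y1 = x1 + 1.; x2 = x1 + 10.; y2 = y1 + 10.
boxes = torch.dstack([x1, y1, x2, y2])        # shape (1, 5, 4)
print(boxes.shape, boxes.squeeze(0).shape)    # -> (1, 5, 4) and (5, 4)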
Example #7
    def _bb_reg(bbs: torch.Tensor, reg: torch.Tensor) -> torch.Tensor:
        w = bbs[:, 2] - bbs[:, 0]
        h = bbs[:, 3] - bbs[:, 1]

        x1 = bbs[:, 0] + w * reg[:, 0]
        y1 = bbs[:, 1] + h * reg[:, 1]
        x2 = bbs[:, 2] + w * reg[:, 2]
        y2 = bbs[:, 3] + h * reg[:, 3]

        return torch.dstack([x1, y1, x2, y2]).squeeze(0)
Example #8
def save_tensor_image(img, name, de_normalize=True):
    if de_normalize:
        img = torch.round(
            torch.dstack((scale(img[0]), scale(img[1]),
                          scale(img[2])))).type(torch.uint8)
        img = Image.fromarray(img.cpu().detach().numpy())
        img.save(name)
    else:
        save_image(img, name)
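The de-normalize branch of save_tensor_image uses torch.dstack to turn three [H, W] channel planes into the [H, W, 3] layout PIL expects. A reduced sketch with a stand-in scale (the real scale is not shown in the snippet; the one below is an assumption that maps a plane to [0, 255]):

import torch
from PIL import Image

def scale(plane):  # illustrative stand-in, not the original helper
    return (plane - plane.min()) / (plane.max() - plane.min() + 1e-8) * 255

img = torch.rand(3, 8, 8)
hwc = torch.round(torch.dstack((scale(img[0]), scale(img[1]), scale(img[2])))).type(torch.uint8)
Image.fromarray(hwc.numpy()).save('check.png')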
Example #9
 def do_generate(cx, cy, width, height, xx, yy):
     distr = tdist.MultivariateNormal(
         torch.tensor([cy, cx], dtype=torch.float),
         torch.tensor(
             [[variance_func(height), 0], [0, variance_func(width)]],
             dtype=torch.float))
     log_probs = distr.log_prob(torch.dstack((yy, xx)))
     probs = torch.exp(log_probs)
     normalized_probs = probs / torch.max(probs) * peak
     return normalized_probs
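do_generate evaluates a MultivariateNormal over a per-pixel coordinate grid; torch.dstack((yy, xx)) pairs the (y, x) coordinates into the trailing event dimension, giving one log-prob per pixel. A hedged usage sketch, assuming do_generate sits at module scope and with illustrative variance_func and peak (neither is defined in the snippet):

import torch
import torch.distributions as tdist

peak = 1.0
def variance_func(size):  # illustrative: variance grows with box size
    return (0.25 * size) ** 2

yy, xx = torch.meshgrid(torch.arange(64.), torch.arange(64.), indexing='ij')
heatmap = do_generate(cx=32.0, cy=32.0, width=20.0, height=20.0, xx=xx, yy=yy)
print(heatmap.shape, heatmap.max())   # (64, 64), peak value 1.0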
Example #10
def build_diag_block(blk_list):
    """Build a block diagonal Tensor from a list of Tensors."""
    if blk_list[0].ndim == 2:
        return torch.block_diag(*blk_list)
    elif blk_list[0].ndim == 3:
        blks = []
        for idx_ant in range(blk_list[0].shape[-1]):
            blks_per_ant = []
            for idx_link in range(len(blk_list)):
                blks_per_ant.append(blk_list[idx_link][:, :, idx_ant])
            blks.append(torch.block_diag(*blks_per_ant))
        return torch.dstack(blks)
    else:
        raise Exception("Invalid input dimension")
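A hedged usage sketch for the 3-D branch of build_diag_block: each per-antenna slice is block-diagonalized independently, then torch.dstack reassembles the results along the trailing antenna axis (the shapes below are illustrative):

import torch

# two per-link blocks, each a 2x2 matrix for each of 3 antennas
blk_list = [torch.ones(2, 2, 3), 2 * torch.ones(2, 2, 3)]
out = build_diag_block(blk_list)
print(out.shape)   # torch.Size([4, 4, 3]): a 4x4 block diagonal per antenna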
Example #11
    def forward(self, preds: Dict[str, torch.Tensor]) -> Dict[str, Any]:
        predictions = preds[self.predictions_key]
        if self.bool2str is not None:
            predictions = predictions.to(dtype=torch.int32)
            predictions = [
                self.bool2str.get(pred, self.bool2str[0])
                for pred in predictions
            ]

        probs = preds[self.probabilities_key]
        probs = torch.dstack((1 - probs, probs))  # dstack takes a sequence of tensors

        return {
            self.predictions_key: predictions,
            self.probabilities_key: probs,
        }
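With the tuple fix above, torch.dstack((1 - probs, probs)) pairs negative- and positive-class probabilities along a new trailing axis; for a 1-D probs of shape (B,), the result is (1, B, 2). A quick check (note that torch.stack((1 - probs, probs), dim=-1) would give (B, 2) without the leading singleton, if that layout is preferred):

import torch

probs = torch.tensor([0.9, 0.2, 0.7])
print(torch.dstack((1 - probs, probs)).shape)   # torch.Size([1, 3, 2])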
Example #12
    def construct_first_embedding(self, date_time):
        # For now only use one attr to construct embedding

        # Get window for embedding
        embedding_window = pd.date_range(
            date_time - pd.Timedelta(cons.EMB_WINDOW, unit="d"), date_time)
        df = self.df_data[self.df_data["date_time"].isin(
            embedding_window)].copy()
        df.set_index("date_time", inplace=True)
        df.sort_index(inplace=True, ascending=False)

        tensors = []
        for node_attr in self.node_attrs:
            node_attrs = []
            for d in df.index:
                attrs = df.at[d, node_attr]
                node_attrs.append(attrs)

            tensor = torch.tensor(node_attrs)
            tensors.append(tensor)

        return torch.dstack(tensors)
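construct_first_embedding depth-stacks one tensor per node attribute; assuming each per-attribute series is 1-D of length T, torch.dstack yields a (1, T, n_attrs) tensor. A shape sketch under that assumption:

import torch

tensors = [torch.arange(7.), torch.arange(7.) * 2]   # two attributes over 7 days
print(torch.dstack(tensors).shape)                   # torch.Size([1, 7, 2])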
Example #13
    def forward(self,
                token_ids,
                token_starts,
                sents_other_feats,
                lm_lengths=None,
                labels=None):

        batch_size = token_ids.size()[0]
        pad_size = token_ids.size()[1]
        # print("batch size",batch_size,"\t\tpad_size",pad_size)
        model_mask = (token_ids != 0).int().to(self.device)
        output = self.model(token_ids.long(), attention_mask=model_mask)

        # Concatenating hidden dimensions of all encoder layers
        model_out = output[-1][0]
        for layers in range(1, self.config.num_hidden_layers + 1, 1):
            model_out = torch.cat((model_out, output[-1][layers]), dim=2)

        # model_out: [batch_size, seq_len, hidden_size * (num_hidden_layers + 1)]
        # Concatenating other features
        if args.add_features == 1:
            model_out = torch.dstack((model_out, sents_other_feats[:, :, 0:2]))
        elif args.add_features == 2:
            model_out = torch.dstack((model_out, sents_other_feats))
        # Fully connected layers with relu and dropouts in between
        pred_logits = torch.relu(self.fc1(self.dropout(model_out).float()))
        pred_logits = torch.relu(self.fc2(self.dropout(pred_logits)))
        pred_logits = torch.sigmoid(self.fc3(self.dropout(pred_logits)))
        pred_logits = torch.squeeze(pred_logits, 2)

        pred_labels = torch.tensor(np.zeros(token_starts.size()),
                                   dtype=torch.float64).to(self.device)
        for b in range(batch_size):
            for w in range(pad_size):
                if (token_starts[b][w] != 0):
                    if (token_starts[b][w] >= pad_size):
                        print(token_starts[b])
                    else:
                        st = token_starts[b][w]
                        end = token_starts[b][w + 1]
                        if (end == 0):
                            end = st + 1
                            while end < pad_size and model_mask[b][end] != 0:  # guard against running past the padded length
                                end = end + 1
                        # For using average or just the first token of a word (in case of word splitting by tokenizer)
                        # pred_labels[b][w] = self.avg(pred_logits[b],st,end)
                        pred_labels[b][w] = pred_logits[b][token_starts[b][w]]

        if labels is not None:
            lm_lengths, lm_sort_ind = lm_lengths.sort(dim=0, descending=True)
            scores = labels[lm_sort_ind]
            targets = pred_labels[lm_sort_ind]
            scores = pack_padded_sequence(scores,
                                          lm_lengths.cpu().int(),
                                          batch_first=True).data
            targets = pack_padded_sequence(targets,
                                           lm_lengths.cpu().int(),
                                           batch_first=True).data
            # print(targets,scores)
            loss_fn = nn.BCELoss()
            loss = loss_fn(targets.float(), scores.float())

            return loss, pred_labels
        else:
            return 0.0, pred_labels
Example #14
w0_train, w1_train = tuple(
    torch.stack(weights).T)  # Weight vectors from each epoch
x_bias_t = torch.t(x_train_bias)
w_opt = (torch.inverse(x_bias_t.mm(x_train_bias)).mm(
    x_train_bias.T)).mm(y_train_n)
best_w0, best_w1 = w_opt.T[0]  # Weights from the analytical solution
padding = 1

w0 = torch.linspace(best_w0 - padding, best_w0 + padding, 100)
w1 = torch.linspace(best_w1 - padding, best_w1 + padding, 100)

# create w0 and w1 grid
w0_grid, w1_grid = torch.meshgrid(w0, w1, indexing='ij')
# calculate J
w = torch.dstack((w0_grid, w1_grid))  # 100x100x2
x_train = x_train_bias  # 50x2 => 100x50x2
y_train = y_train_n  # 50x1 => 100x50x100
y_pred = torch.matmul(x_train,
                      torch.swapaxes(w, 1, 2))  # (50x2) @ (100x2x100) -> 100x50x100
J = ((y_pred - y_train)**2).mean(1)
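A quick shape check of the grid-loss broadcast above, with sizes matching the comments (100 grid points per axis, 50 training samples):

import torch

w = torch.rand(100, 100, 2)      # stands in for torch.dstack((w0_grid, w1_grid))
x = torch.rand(50, 2)
y = torch.rand(50, 1)
y_pred = torch.matmul(x, torch.swapaxes(w, 1, 2))   # (50,2) @ (100,2,100) -> (100,50,100)
J = ((y_pred - y) ** 2).mean(1)                     # -> (100,100) loss surface
print(y_pred.shape, J.shape)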

external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

df = pd.read_csv('https://plotly.github.io/datasets/country_indicators.csv')

available_indicators = df['Indicator Name'].unique()

app.layout = html.Div([
Example #15
        act2.append(sample[0])
        act3.append(sample[1])
        act3up.append(sample[2])
        act4.append(sample[3])
        images.append(sample[4])

    act2 = torch.stack(act2)
    act3 = torch.stack(act3)
    act3up = torch.stack(act3up)
    act4 = torch.stack(act4)
    images = torch.stack(images)

    tact2, q2 = threshold(act2)
    tact3, q3 = threshold(act3)
    tact3up, q3up = threshold(act3up)
    tact4, q4 = threshold(act4)

    qt = torch.dstack((q2, q3, q3up, q4))[0]
    torch.save(qt, './static/qt.pt')

    iou2_3 = iou(tact2, tact3)
    torch.save(iou2_3, './static/tensor2_3.pt')

    iou3_4 = iou(tact3up, tact4)
    torch.save(iou3_4, './static/tensor3_4.pt')

    images = postprocess(images)
    for i in range(0, n_samples):
        im = Image.fromarray(images[i], "RGB")
        im.save('./static/image' + str(i) + '.jpeg')
Example #16
def main(_):
    writer = SummaryWriter(log_dir=opts.tb_log_dir + str(opts.alpha)  + '/' + opts.exp_name)

    torch.manual_seed(0)
    if opts.category in ['horse', 'tiger']:
        dataset = tf_final.TigDogDataset_Final(opts.root_dir, opts.category, transforms=None, normalize=False,
                                               max_length=None, remove_neck_kp=False, split='train',
                                               img_size=opts.img_size, mirror=False, scale=False, crop=False)

        collate_fn = tf_final.TigDog_collate

    directory = opts.tmp_dir + '/' + opts.category + '/'
    if not osp.exists(directory):
        os.makedirs(directory)

    save_counter = 0
    sample_to_vid = {}
    samples_per_vid = {}
    print('Number of videos for ', opts.category, '-', len(dataset))
    i_sample = 0
    for i_sample, sample in enumerate(dataset):
        num_frames = sample['video'].shape[0]
        for i in range(num_frames):
            new_sample = {}
            for k in sample.keys():
                if k in ['video', 'sfm_poses', 'landmarks', 'segmentations', 'bboxes']:
                    new_sample[k] = sample[k][i]

            pkl.dump(new_sample, open(directory + str(save_counter) + '.pkl', 'wb'))
            sample_to_vid[save_counter] = i_sample
            if i_sample in samples_per_vid:
                samples_per_vid[i_sample].append(save_counter)
            else:
                samples_per_vid[i_sample] = [save_counter]
            save_counter += 1
           # if i >= 5:  # 35:  # TODO:fix this
               # break
        #if i_sample >= 3:  # TODO:fix this
           # break

    training_samples = save_counter
    print('Training samples (frames):', training_samples)
    dataset = tigdog_mf.TigDogDataset_MultiFrame(opts.tmp_dir, opts.category, num_frames=opts.num_frames,
                                                 sample_to_vid=sample_to_vid,
                                                 samples_per_vid=samples_per_vid,
                                                 normalize=True, transforms=True,
                                                 remove_neck_kp=True, split='train', img_size=opts.img_size,
                                                 mirror=True, scale=True, crop=True, v2_crop=True, tight_bboxes=True)
    collate_fn = tigdog_mf.TigDog_collate

    dataloader = DataLoader(dataset, opts.batch_size, drop_last=True, shuffle=True,
                            collate_fn=collate_fn, num_workers=2)
    print('Dataloader:', len(dataloader))

    keypoint_model = UNet(opts.num_kps).cuda()
    reconstruct_model = UNet_Reconstruct(3, opts.num_kps).cuda()
    loss_fn_alex = lpips.LPIPS(net='alex').cuda()
    optimizer = optim.Adam(list(keypoint_model.parameters()) + list(reconstruct_model.parameters()), lr=opts.lr, weight_decay=opts.wd)
    std = opts.std
    n_iter = 0
    affine = RandomAffine(degrees=5, shear=(0.0,0.5))
    for epoch in range(opts.epochs):
        avg_loss = 0
        for sample in dataloader:
            input_img_tensor = sample['img'].type(torch.FloatTensor).clone().cuda()
            mask_3channels = torch.unsqueeze(sample['mask'], 2)
            mask_3channels = mask_3channels.repeat(1,1,3,1,1).clone().cuda()
            frame1 = input_img_tensor[:, 0] * mask_3channels[:,0]
            frame2 = input_img_tensor[:, 1] * mask_3channels[:,1]
            source = frame1
            target = frame2
            target_outputs = keypoint_model(target)
            result_x, result_y = xy_outputs(target_outputs, scaling=True, scale=16)
            result_kps = torch.cat([result_x, result_y], dim=1)
            result_kps_vis = torch.stack([result_x, result_y, torch.ones_like(result_y)], dim=-1)
            reconstruct = reconstruct_model(source, result_kps)
            target_mask = sample['mask'][:,1]
            mask_edt = np.stack([compute_dt(m) for m in target_mask])
            result_kps_xy = torch.dstack((result_x, result_y))
            edts_barrier = torch.tensor(mask_edt).float().unsqueeze(1).cuda()
            loss_mask = texture_dt_loss_v(result_kps_xy, edts_barrier)
            loss_reconstruction = loss_fn_alex.forward(reconstruct, change_range(target)).mean()
            loss = loss_reconstruction + (opts.alpha * loss_mask)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if n_iter % opts.vis_every == 0:
                kp_img = utils.kp2im(result_kps_vis[0].detach().cpu().numpy(), target[0].cpu().numpy(), radius=2) / 255
                kp_img = torch.from_numpy(kp_img).permute(2, 0, 1)[None]
                kp_img = kp_img.to(source.device)
                kp_mask = utils.kp2im(result_kps_vis[0].detach().cpu().numpy(), mask_3channels[0,1].cpu().numpy())
                kp_mask = torch.from_numpy(kp_mask).permute(2, 0, 1)[None]
                kp_mask = kp_mask.to(source.device)
                grid = torch.cat([source[:1], target[:1], kp_img[:1], kp_mask, change_range(reconstruct[:1], to_01=True)], dim=3)[0]
                writer.add_image('iter {n} of image (reconstruction, mask, loss) = ({r},{m},{l})   '.format(r=loss_reconstruction,m=loss_mask,l=loss,n=str(n_iter)), grid, n_iter)
            avg_loss += loss.item()
            writer.add_scalar('Loss/train std : ' + str(opts.std), loss, n_iter)
            n_iter += 1
        avg_loss = avg_loss / len(dataloader)
        print('Epoch ', epoch, ' average loss ', avg_loss)
    torch.save({
        'keypoint_state_dict' : keypoint_model.state_dict(),
        'reconstruct_state_dict' : reconstruct_model.state_dict()
        }, opts.model_state_dir + str(opts.alpha))
    writer.close()
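In the training loop above, result_x and result_y are per-keypoint coordinates; assuming each has shape (B, K), torch.dstack((result_x, result_y)) pairs them into the (B, K, 2) layout consumed by texture_dt_loss_v. A shape sketch under that assumption:

import torch

result_x = torch.rand(4, 10)   # hypothetical: batch of 4, 10 keypoints each
result_y = torch.rand(4, 10)
print(torch.dstack((result_x, result_y)).shape)   # torch.Size([4, 10, 2])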
Example #17
        PRUN_GOAL_STD = 25
        PRUN_K_FROM_STD = 1
        PRUN_MAX_ITER = 5

    HPARAM = DEFAULT_POSE_HPARAM()

    # The code below is a simplified version of a real unit vector problem
    hv_layer = HoughVotingLayer(HPARAM)

    # Creating test mask
    mask = torch.ones((5, 5))
    mask[0, :] = 0
    mask[-1, :] = 0
    mask[:, 0] = 0
    mask[:, -1] = 0

    # Selecting center
    h, w = mask.shape
    center = torch.tensor([2, 2])

    # Creating unit vectors
    x_coord = torch.remainder(torch.arange(w * h), w).reshape((h, w)).float()
    y_coord = torch.remainder(torch.arange(w * h), h).reshape((w, h)).float().T
    coord = torch.dstack([y_coord, x_coord])
    diff_norm = torch.norm(center - coord, dim=-1)
    vector = torch.divide((center - coord), torch.unsqueeze(diff_norm, dim=-1))
    vector = vector.permute(2, 0, 1)

    # Determine the center
    center = hv_layer.forward(vector, mask)
    print(f'Center: {center}')
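The unit-vector setup above relies on torch.dstack([y_coord, x_coord]) producing an (h, w, 2) per-pixel coordinate map. A reduced check of the construction (3x3 for brevity):

import torch

h = w = 3
x_coord = torch.remainder(torch.arange(w * h), w).reshape((h, w)).float()
y_coord = torch.remainder(torch.arange(w * h), h).reshape((w, h)).float().T
coord = torch.dstack([y_coord, x_coord])
print(coord.shape)     # torch.Size([3, 3, 2])
print(coord[1, 2])     # tensor([1., 2.]): (y, x) coordinates at row 1, col 2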