Example #1
    def _optimize_q(self, start_states, actions, end_states, rewards, dones):
        # Bootstrapped TD target from the target critic and target policy
        with torch.no_grad():
            targets = rewards + self.gamma * (1 - dones) * self.q_target(
                torch.hstack([end_states, self.policy_target(end_states)])
            )

        estimates = self.q(torch.hstack([start_states, actions]))
        loss = F.smooth_l1_loss(estimates, targets)

        self.q_optimizer.zero_grad()
        loss.backward()
        self.q_optimizer.step()

        return loss.item()
Example #2
def get_image_grad(instances, transfunc, transinst, transtype, eps=1e-1):

    h = instances.size()[-2]
    w = instances.size()[-1]
    assert h == w
    # Build a binary disk mask (the largest inscribed circle on the h x w grid)
    mask = torch.zeros((h, w))
    for i in range(h):
        wl = math.ceil((w - 1) / (2.0) - math.sqrt(((w - 1) / 2.0)**2 -
                                                   (i - (h - 1) / 2.0)**2))
        wr = math.trunc((w - 1) / (2.0) + math.sqrt(((w - 1) / 2.0)**2 -
                                                    (i - (h - 1) / 2.0)**2))
        mask[i][wl:wr + 1] = 1
    mask = mask.cuda()
    mask.unsqueeze_(0).unsqueeze_(0)

    if transtype in [
            'gaussian', 'rotation', 'scaling', 'rotation-brightness',
            'scaling-brightness', 'rotation-brightness-l2',
            'scaling-brightness-l2'
    ]:
        # non-differentiable
        instances = instances.cpu().contiguous()
        deltas = torch.zeros_like(instances)
        for i in range(instances.size()[0]):
            deltas[i] = transfunc(transinst, instances[i], eps) - instances[i]
        deltas /= eps
        deltas = deltas.cuda()
    if transtype == 'gaussian':
        return deltas.unsqueeze(0)
    elif transtype == 'brightness':
        return torch.ones_like(instances).unsqueeze(0)
    elif transtype == 'brightness-contrast':
        nabla_b = torch.ones_like(instances)
        nabla_k = instances.clone()
        return torch.hstack([nabla_b, nabla_k])
    elif transtype == 'rotation':
        return deltas.unsqueeze(0) * mask
    elif transtype == 'scaling':
        return deltas.unsqueeze(0)
    elif transtype == 'rotation-brightness' or transtype == 'rotation-brightness-l2':
        return torch.hstack(
            [deltas.unsqueeze(0) * mask,
             torch.ones_like(instances)])
    elif transtype == 'scaling-brightness' or transtype == 'scaling-brightness-l2':
        return torch.hstack(
            [deltas.unsqueeze(0) * mask,
             torch.ones_like(instances)])
    else:
        raise Exception(f'Unknown transtype: {transtype}')
Example #3
    def generate_input(self) -> Tuple[torch.Tensor, torch.Tensor]:
        noise_input = torch.randn(self.feature_spec['noise'])

        categorical_input = []
        categorical_labels = []
        for n_cat in self.feature_spec['categorical']:
            categorical_input.append(OneHotCategorical(torch.ones(n_cat)/n_cat).sample())
            categorical_labels.append(torch.argmax(categorical_input[-1]))
        categorical_input = torch.hstack(categorical_input)
        gaussian_input = torch.randn(self.feature_spec['gaussian'])
        uniform_input = Uniform(-1, 1).sample((self.feature_spec['uniform'], ))

        gen_input = torch.hstack([noise_input, categorical_input, gaussian_input, uniform_input])
        gen_input = gen_input.to(self.device)
        return gen_input, torch.stack(categorical_labels)
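For context, a minimal sketch of the feature_spec this generator method appears to assume (the keys match the lookups above; the sizes are illustrative, InfoGAN-style, not from the original project):

# Hypothetical spec: 62 noise dims, one 10-way categorical code,
# 2 Gaussian and 2 uniform continuous codes -> gen_input of length 76.
feature_spec = {'noise': 62, 'categorical': [10], 'gaussian': 2, 'uniform': 2}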
Example #4
def quat_2_RT_given_T_in_world(q, T):

    # First ensure that the quaternion is normalized
    norm = q.norm()
    if norm > 0:
        q = q / norm

    # Convert quaternion to rotation matrix
    R = quat_2_rotation_matrix(q)

    # Rearrange and re-sign the rotation matrix entries for the target axis convention
    R = torch.rot90(R, 2).T
    x = torch.tensor([[1, -1, 1], [1, -1, 1], [-1, 1, -1]],
                     device=R.device,
                     dtype=R.dtype)
    R = torch.mul(R, x)

    # Then invert the rotation matrix
    inv_R = torch.inverse(R)

    # Then combine to generate the inverse transformation matrix
    inv_RT = torch.vstack([
        torch.hstack([inv_R, T]),
        torch.tensor([0, 0, 0, 1], device=q.device, dtype=q.dtype)
    ])

    # Then undo the inverse to obtain the correct transformation matrix
    RT = torch.inverse(inv_RT)

    return RT
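A hedged usage sketch (quat_2_rotation_matrix is the module's own helper, assumed to be in scope):

q = torch.tensor([0.0, 0.0, 0.0, 1.0])   # a unit quaternion
T = torch.zeros((3, 1))                   # translation at the world origin, as a column
RT = quat_2_RT_given_T_in_world(q, T)     # -> 4x4 homogeneous transformation matrix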
Example #5
def main():

    # Projection
    n_data = 100
    circle_input, _ = get_circle_data(n_data)
    plane_input, _ = get_plane_data(n_data)

    projection_input = torch.hstack([circle_input, plane_input])

    discr, gener = train_gan(projection_input)

    func = gener
    inputs = gener.generate_generator_input(1)
    jac = get_jacobian(func=func, inputs=inputs).squeeze()
    print("Generator output ID is ", torch.matrix_rank(jac))

    proj_func = ProjectionFunction()
    func = lambda x: proj_func(gener(x))
    jac = get_jacobian(func=func, inputs=inputs).squeeze()
    print("ANN output ID is ", torch.matrix_rank(jac))

    evaluate(
        estimate_id=twonn_dimension,
        get_data=lambda n: get_parabolic_data(n)[1],
    )

    plt.show()
Example #6
    def validation_epoch_end(self, outputs):
        tqdm_dict = {}
        y_score_all = torch.vstack([output['y_pred'] for output in outputs
                                    ]).detach().cpu().numpy()
        y_label_all = torch.hstack([output['y_label'] for output in outputs
                                    ]).detach().cpu().numpy()
        y_label_all = LabelBinarizer().fit([0, 1, 2, 3, 4, 5, 6, 7, 8,
                                            9]).transform(y_label_all)
        avg_val_loss = torch.stack([output['val_loss']
                                    for output in outputs]).mean()

        for i in range(self.opt.num_class):
            # Per-class AUC
            y_score_i = np.expand_dims(y_score_all[:, i], axis=1)
            y_label_i = np.expand_dims(y_label_all[:, i], axis=1)
            auc = roc_auc_score(y_label_i[:, 0],
                                y_score_i[:, 0],
                                average='micro')
            self.log('val auc [Class %d]' % i, auc)

            # Per-class Accuracy
            y_pred_i = np.array(y_score_all.argmax(axis=1) == i, dtype=int)
            acc = accuracy_score(np.squeeze(y_label_i), y_pred_i)
            self.log('val acc [Class %d]' % i, acc)

        self.log('val loss', avg_val_loss)

        ### Manual Logging
        self.log_pkl['epoch_val_loss'].append(avg_val_loss)
        self.log_pkl['epoch_val_score'].append(y_score_all)
        self.log_pkl['epoch_val_label'].append(y_label_all)
        with open(os.path.join(self.opt.log_path, 'results.pkl'),
                  'wb') as handle:
            pickle.dump(self.log_pkl, handle, protocol=pickle.HIGHEST_PROTOCOL)
Example #7
def visualize(model, obj, cam_center, show=True, log=False, experiment=None, epoch=None):
    model = model.eval()
    with torch.no_grad():
        l, x, y, _, _ = obj.contains_2d(cam_center, hyperparams["focal_length"], hyperparams["sensor_bounds"], hyperparams["sensor_size"])
        x_batch = np.reshape(x, (-1,1))
        y_batch = np.reshape(y, (-1,1))
        coord_batch = torch.hstack([torch.tensor(x_batch, dtype=torch.float32), torch.tensor(y_batch, dtype=torch.float32)])
        r = rotation_matrix(cam_center, inverse=True).flatten()
        cam_params = list(r) + cam_center
        view_embedding = angle_features(cam_center)
        ve_batch = torch.tensor([view_embedding]*coord_batch.shape[0], dtype=torch.float32)
        # params_batch = torch.tensor([cam_params]*coord_batch.shape[0], dtype=torch.float32)
        coord_batch = coord_batch.to(device)
        ve_batch = ve_batch.to(device)
        # params_batch = params_batch.to(device)
        model_labels = model.forward(coord_batch, ve_batch)
        model_labels = np.reshape(model_labels.cpu().numpy(), l.shape)

        # show results
        f = plt.figure()
        ax1 = f.add_subplot(121)
        ax2 = f.add_subplot(122)
        ax1.imshow(l)
        ax1.set_title("Ground Truth")
        ax2.imshow(model_labels)
        ax2.set_title("2D Occ Network")

        if show:
            plt.show()
        if log:
            if experiment is None:
                print("Must provide experiment to visualizer in order to log figure")
            else:
                experiment.log_figure(figure=f, figure_name=f"epoch_{epoch}_2d")
                plt.close(f)
Example #8
def valid(model: nn.Module, loss: nn.Module, valid_loader: DataLoader,
          gpu) -> torch.Tensor:
    """
    :param model: torch ML model
    :param loss: loss function
    :param valid_loader: validation set loader
    :param gpu: gpu number (None runs on CPU)
    :return: mean validation loss
    """
    model.eval()
    with torch.no_grad():
        all_losses = []
        for i, (inputs, targets) in enumerate(valid_loader):
            if gpu is not None:
                inputs = inputs.cuda(gpu)
                targets = targets.float().cuda(gpu)

            predictions = model(inputs).squeeze()
            err = loss(predictions, targets)

            all_losses.append(err.detach().cpu())

            # Clean GPU
            if gpu is not None:
                err = err.cpu()
                inputs = inputs.cpu()
                targets = targets.cpu()
                predictions = predictions.cpu()
                torch.cuda.empty_cache()

            print(f'\rValid batch : {i + 1} / {len(valid_loader)}', end='')

        all_losses = torch.hstack(all_losses)

        return all_losses.mean()
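A hedged usage sketch (the criterion and loader names are illustrative, not from the original project):

# Illustrative call; assumes a model whose squeezed output matches the target shape.
criterion = nn.BCEWithLogitsLoss()
gpu = 0 if torch.cuda.is_available() else None
mean_loss = valid(model, criterion, valid_loader, gpu)
print(f'\nMean validation loss: {mean_loss.item():.4f}')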
Example #9
        def forward(self, x: torch.Tensor, m: torch.Tensor) -> torch.Tensor:
            """Feeds data through the network.

            Args:
                x (Tensor): A spectra minibatch.
                m (Tensor): A metadata minibatch corresponding to x.

            Returns:
                A prediction from the network.
            """

            # Max pooling over a (2) window
            x = F.max_pool1d(F.relu(self.conv2(F.relu(self.conv1(x)))), 2)
            x = F.max_pool1d(F.relu(self.conv4(F.relu(self.conv3(x)))), 2)
            x = F.max_pool1d(F.relu(self.conv6(F.relu(self.conv5(x)))), 2)
            x = F.max_pool1d(F.relu(self.conv8(F.relu(self.conv7(x)))), 2)
            x = F.max_pool1d(F.relu(self.conv10(F.relu(self.conv9(x)))), 2)
            x = F.max_pool1d(F.relu(self.conv12(F.relu(self.conv11(x)))), 2)
            x = x.view(-1, self.num_flat_features(x))
            m = self.metadata.forward(m)
            x = torch.hstack((x, m))
            x = F.relu(self.fc1_dropout(self.fc1(x)))
            x = F.relu(self.fc1_dropout(self.fc2(x)))
            x = self.fc3(x)
            return x
Example #10
 def forward(self, state: torch.Tensor,
             action: torch.Tensor) -> torch.Tensor:
     value = torch.hstack((state, action))
     value = F.relu(self.H1(value))
     value = F.relu(self.H2(value))
     value = self.Q(value)
     return value
Example #11
 def cart2hom(self, pts_3d):
     ''' Input: nx3 points in Cartesian coordinates
         Output: nx4 points in homogeneous coordinates, obtained by appending a 1
     '''
     n = pts_3d.shape[0]
     pts_3d_hom = torch.hstack((pts_3d, torch.ones((n, 1), device=device)))
     return pts_3d_hom
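A quick usage sketch (calib stands in for whatever calibration object defines this method; device is the module-level device referenced above):

pts = torch.rand((5, 3), device=device)  # five Cartesian points
pts_hom = calib.cart2hom(pts)            # shape (5, 4); last column is all ones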
Example #12
def activate_PsROI_for_eval(model: PSRoIPooling2D):
    """
    backward once first to speed up eval (possibly due to a hidden conflict with the skimage lib?)
    :return:
    """
    # fake data
    class_num = 21
    group_size = 7
    B, C, H, W, PH, PW = 2, class_num * group_size * group_size, 28, 28, 21, 21
    bottom_data = torch.randn((B, C, H, W)).cuda()

    # rois
    rois = [
        torch.tensor([[0, 0, 112, 112], [7, 75, 503, 442]], dtype=torch.float),
        torch.tensor([[0, 0, 224, 224]], dtype=torch.float)
    ]
    indices = torch.tensor([0, 0, 1])
    rois2 = torch.cat(rois, dim=0)
    indices = torch.reshape(indices, (-1, 1))
    rois2_with_indices = torch.hstack((indices, rois2))
    bottom_rois = rois2_with_indices.cuda()

    x = bottom_data.detach().requires_grad_()
    rois = bottom_rois.detach()

    output = model(x, rois)
    output.sum().backward()
Example #13
    def calculate_deltas(self, scores: Tuple[torch.Tensor, torch.Tensor],
                         delta_metric: Callable) -> torch.Tensor:
        pos, neg = scores
        assert pos.shape[0] == neg.shape[0]
        with torch.no_grad():
            df = pd.DataFrame({
                'score': torch.hstack([pos, neg]).cpu(),
                'label': [1] * pos.shape[0] + [0] * neg.shape[0],
                'pair_id': np.hstack([np.arange(pos.shape[0]),
                                      np.arange(neg.shape[0])])
            })

            df = df.sort_values('score',
                                ascending=False).reset_index(drop=True)
            df['rank'] = df.index.values + 1
            df['log2'] = 1 / np.log2(df['rank'] + 1)
            deltas = (df.sort_values(
                ['pair_id', 'label'],
                ascending=[True, False]).groupby('pair_id')['log2'].agg(
                    lambda x: delta_metric(x.values[0], x.values[1]))).values

        deltas = torch.Tensor(np.abs(deltas))

        return deltas
Example #14
def fit_predict_torch(embeddings, seediness, margins, fitfunc,
                 s_threshold=0.0, p_threshold=0.5):
    pred_labels = -torch.ones(embeddings.shape[0])
    probs = []
    spheres = []
    seediness_copy = seediness.detach().clone()
    count = 0
    #if seediness_copy.shape[0] == 1:
    #    return np.argmax(seediness_copy)
    # Greedily take the highest remaining seed score and carve out its cluster
    while count < int(seediness.shape[0]):
        i = torch.argsort(seediness_copy.squeeze())[-1]
        seedScore = seediness[i]
        if seedScore < s_threshold:
            break
        centroid = embeddings[i]
        sigma = margins[i]
        spheres.append((centroid, sigma))
        f = fitfunc(centroid, sigma)
        pValues = f(embeddings)
        probs.append(pValues.reshape(-1, 1))
        cluster_index = (pValues > p_threshold).reshape(-1) & (seediness_copy > 0).reshape(-1)
        seediness_copy[cluster_index] = -1
        count += torch.sum(cluster_index)
    if len(probs) == 0:
        return pred_labels, 1
    probs = torch.hstack(probs)
    pred_labels = torch.argmax(probs, dim=1)
    return pred_labels, probs.shape[1]
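A minimal fitfunc sketch that would satisfy this interface (a Gaussian kernel; an assumption for illustration, not the original implementation, and all input names below are likewise illustrative):

def gaussian_fitfunc(centroid, sigma):
    # Returns a callable mapping embeddings (N, D) to per-point membership probabilities (N,).
    def f(embeddings):
        dists = torch.sum((embeddings - centroid) ** 2, dim=1)
        return torch.exp(-dists / (2 * sigma ** 2 + 1e-8))
    return f

embeddings = torch.randn(100, 3)      # (N, D) points
seediness = torch.rand(100, 1)        # per-point seed scores
margins = torch.rand(100, 1) + 0.5    # per-point cluster widths
labels, n_clusters = fit_predict_torch(embeddings, seediness, margins, gaussian_fitfunc)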
Example #15
 def tensor_indexing_ops(self):
     x = torch.randn(2, 4)
     y = torch.randn(2, 4, 2)
     t = torch.tensor([[0, 0], [1, 0]])
     mask = x.ge(0.5)
     i = [0, 1]
     return (
         torch.cat((x, x, x), 0),
         torch.concat((x, x, x), 0),
         torch.conj(x),
         torch.chunk(x, 2),
         torch.dsplit(y, i),
         torch.column_stack((x, x)),
         torch.dstack((x, x)),
         torch.gather(x, 0, t),
         torch.hsplit(x, i),
         torch.hstack((x, x)),
         torch.index_select(x, 0, torch.tensor([0, 1])),
         torch.masked_select(x, mask),
         torch.movedim(x, 1, 0),
         torch.moveaxis(x, 1, 0),
         torch.narrow(x, 0, 0, 2),
         torch.nonzero(x),
         torch.permute(x, (0, 1)),
         torch.reshape(x, (-1, )),
     )
Example #16
    def build_reg_and_cls_targets(self, boxes):
        boxes_xyxy = ops.box_convert(boxes, 'cxcywh', 'xyxy') # [B, 4]
        iou_dist = ops.box_iou(self.anchors_xyxy, boxes_xyxy) # [A, B]
        closest_box_indices = torch.argmax(iou_dist, dim=1) # [A,]
        target_boxes = boxes[closest_box_indices] # [A, 4]

        # Both [A, 2]
        xy_targets = (
                (target_boxes[..., :2] - self.anchors[..., :2]) /
                self.anchors[..., 2:])
        wh_targets = torch.log(target_boxes[..., 2:] / self.anchors[..., 2:])
        reg_target = torch.hstack((xy_targets, wh_targets)) # [A, 4]

        pos_selector = torch.any(iou_dist > self.pos_thresh, dim=1) # [A,]
        neg_selector = torch.all(iou_dist < self.neg_thresh, dim=1) # [A,]

        valid_pos_selector = pos_selector & self.valid_anchors_selector # [A,]
        valid_neg_selector = neg_selector & self.valid_anchors_selector # [A,]

        cls_target = torch.full(
            (len(self.anchors),), INVALID_ANCHOR_LABEL,
            device=boxes.device) # [A,]
        cls_target[valid_pos_selector] = POS_ANCHOR_LABEL # [A,]
        cls_target[valid_neg_selector] = NEG_ANCHOR_LABEL # [A,]
        
        return reg_target, cls_target
Example #17
 def proj(self, x, v):
     assert v.ndim == 3
     b = torch.hstack((torch.sum(v, axis=2), torch.sum(v, axis=1)))
     alpha, beta = self._lsolve(x, b)
     result = v - (torch.einsum('bn,m->bnm', alpha, self._e2) +
                   torch.einsum('n,bm->bnm', self._e1, beta)) * x
     return result
Example #18
def QuasiNewton(func, initial, method='sr1', max_iter=16, criteria=1e-4):
    x = Var(initial, requires_grad=True).reshape(-1, 1)  # build as a column vector
    pos = [x.data.clone().numpy()]
    # Computing the second derivative this way is admittedly brute force
    y = func(x)
    g = grad(y, x, create_graph=True)[0]
    H = torch.Tensor([])
    for gval in g:  # assemble the full Hessian once (acceptable for small problems)
        g2 = grad(gval, x, retain_graph=True)[0]
        H = torch.hstack((H, g2))
    print(H)
    g = g.detach()
    x_old = x.data.clone()
    g_old = g.data.clone()
    for i in range(max_iter):
        d = -H @ g
        t = Armijo(func, x, d)
        x.data += t * d / d.norm()
        pos.append(x.data.clone().numpy())
        s = x.data - x_old
        y = func(x)
        g = grad(y, x)[0]
        # print(g)
        if g.norm() < criteria:
            print("Convergence. Exiting...")
            break
        p = g - g_old
        x_old = x.data.clone()
        g_old = g.clone()
        temp = s - H @ p
        H += temp @ temp.T / (temp.T @ p)
        print("Iter %d, y = %f" % (i, y))
    return x.data.numpy(), pos
Example #19
    def increase_hidden_size(self, h_layer):
        # Assert that we have a valid hidden layer
        assert(h_layer < len(self.hidden_size))
        
        # Get the relevant weight matrices.
        w1 = self.layers[h_layer].weight
        w2 = self.layers[h_layer + 1].weight
        bias = self.layers[h_layer].bias
        
        # add a row/column of 0's to the weight matrices
        with torch.no_grad():
            
            # Construct the new weight matrices (simply append a 0 row/column)
            z1 = torch.Tensor(np.random.random((1, w1.shape[1]))-0.5).to(self.device)
            z2 = torch.Tensor(np.random.random((w2.shape[0], 1))-0.5).to(self.device)
            z3 = torch.Tensor([np.random.random() -0.5]).to(self.device)

            new_mat_1 = nn.Parameter(torch.vstack((w1, z1)))
            new_mat_2 = nn.Parameter(torch.hstack((w2, z2)))
            
            # Set the appropriate weights/biases of our network
            
            self.layers[h_layer].bias = nn.Parameter(torch.cat((bias, z3)))
            self.layers[h_layer].weight = new_mat_1
            self.layers[h_layer + 1].weight = new_mat_2
            self.hidden_size[h_layer] += 1
Example #20
def predict(model,
            dataset,
            indices,
            batch_size=10,
            num_workers=4,
            transform=None):
    dataset = DatasetFromSubset(Subset(dataset, indices=indices),
                                transform=transform)

    loader = DataLoader(dataset,
                        batch_size=batch_size,
                        num_workers=num_workers,
                        shuffle=False,
                        drop_last=False,
                        pin_memory=True)

    predictions = []
    probas = []
    model.eval()
    if torch.cuda.is_available():
        model = model.cuda()
    with torch.no_grad():
        for images, labels in tqdm(loader):
            if torch.cuda.is_available():
                images = images.cuda()
            batch_probas = model.predict_proba(images)
            batch_preds = torch.max(batch_probas, 1)[1]
            predictions.append(batch_preds)
            probas.append(batch_probas)

    predictions = torch.hstack(predictions).flatten().tolist()
    probas = torch.vstack(probas).tolist()

    return predictions, probas
Example #21
def calculate_actdiff_loss(regular_activations,
                           masked_activations,
                           similarity_metric="l2"):
    """
    regular_activations: list of activations produced by the original image in the model
    masked_activations: list of activations produced by the masked image in the model
    """

    assert len(regular_activations) == len(masked_activations)

    if similarity_metric == "l2":
        metric = torch.nn.modules.distance.PairwiseDistance(p=2)
        all_dists = []
        for reg_act, masked_act in zip(regular_activations,
                                       masked_activations):
            all_dists.append(
                metric(reg_act.flatten().unsqueeze(0),
                       masked_act.flatten().unsqueeze(0)))

    elif similarity_metric == "cosine":
        metric = torch.nn.CosineSimilarity(dim=0)
        all_dists = []
        for reg_act, masked_act in zip(regular_activations,
                                       masked_activations):
            all_dists.append(metric(reg_act.flatten(), masked_act.flatten()))

    else:
        raise ValueError(f"Unknown similarity_metric: {similarity_metric}")
    #print(torch.hstack(all_dists))
    actdiff_loss = torch.sum(torch.hstack(all_dists)) / len(all_dists)

    return actdiff_loss
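An illustrative call with two layers of fake activations (shapes are arbitrary):

regular = [torch.randn(1, 16, 8, 8), torch.randn(1, 32, 4, 4)]
masked = [torch.randn(1, 16, 8, 8), torch.randn(1, 32, 4, 4)]
loss = calculate_actdiff_loss(regular, masked, similarity_metric="l2")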
Example #22
    def _optimize_policy(self, start_states, actions, end_states, rewards, dones):
        # Deterministic policy-gradient step: ascend Q(s, policy(s))
        loss = -self.q(torch.hstack([start_states, self.policy(start_states)])).mean()

        self.policy_optimizer.zero_grad()
        loss.backward()
        self.policy_optimizer.step()

        return loss.item()
Example #23
def classify(classifier, open_type, img):
    if open_type != 0:
        return classifier(img)
    else:
        neural_net, multinomial_regression = classifier
        av2 = neural_net(img)
        av1 = multinomial_regression(av2)
        return torch.hstack([av1, av2])
Example #24
def blackharr(n, l=None, mod=True, device="cpu"):
    # 4-term Blackman-Harris window of length n, zero-padded to l and circularly shifted by n//2
    if l is None:
        l = n
    nn = (n // 2) * 2
    k = torch.arange(n, device=torch.device(device))
    if not mod:
        bh = 0.35875 - 0.48829 * torch.cos(
            k * (2 * pi / nn)) + 0.14128 * torch.cos(
                k * (4 * pi / nn)) - 0.01168 * torch.cos(k * (6 * pi / nn))
    else:
        bh = 0.35872 - 0.48832 * torch.cos(
            k * (2 * pi / nn)) + 0.14128 * torch.cos(
                k * (4 * pi / nn)) - 0.01168 * torch.cos(k * (6 * pi / nn))
    bh = torch.hstack(
        (bh, torch.zeros(l - n, dtype=bh.dtype, device=torch.device(device))))
    bh = torch.hstack((bh[-n // 2:], bh[:-n // 2]))
    return bh
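A short usage sketch (blackharr expects a module-level pi, e.g. from math import pi):

win = blackharr(64, l=128)  # length-128 tensor: 64-sample window, zero-padded, rotated by 32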
Example #25
def evaluate_task(data_columns, datasets_config, fine_tune_task, model,
                  orig_stdout, tasks_config, val_results):
    with torch.no_grad():
        val_bar = tqdm(tasks_config[fine_tune_task]['val_loader'],
                       file=orig_stdout,
                       position=0,
                       leave=True)
        task_predicted_labels = torch.empty(0, device=device)
        task_labels = torch.empty(0, device=device)
        for val_data in val_bar:
            val_bar.set_description(fine_tune_task.name)

            input_data = list(zip(*(val_data[col] for col in data_columns)))
            label = val_data["label"].to(device)

            if len(data_columns) == 1:
                input_data = list(map(operator.itemgetter(0), input_data))

            model_output = model(input_data, fine_tune_task)

            if fine_tune_task == Task.QNLI:
                predicted_label = torch.round(model_output)
            elif fine_tune_task.num_classes() > 1:
                predicted_label = torch.argmax(model_output, -1)
            else:
                predicted_label = model_output

            if fine_tune_task == Task.STS_B:
                predicted_label = torch.clamp(predicted_label, 0, 5).to(device)

            task_predicted_labels = torch.hstack(
                (task_predicted_labels, predicted_label.view(-1)))
            task_labels = torch.hstack((task_labels, label))

        metrics = datasets_config[fine_tune_task].metrics
        for metric in metrics:
            metric_result = metric(task_labels.cpu(),
                                   task_predicted_labels.cpu())
            if isinstance(metric_result, (tuple, scipy.stats.stats.SpearmanrResult)):
                metric_result = metric_result[0]
            val_results[fine_tune_task.name, metric.__name__] = metric_result
            print(
                f"val_results[{fine_tune_task.name}, {metric.__name__}] = {val_results[fine_tune_task.name, metric.__name__]}"
            )
Example #26
def load_ogb_graph(dataset_name):
    if not os.path.isfile('torch_geometric_data/dgl_' + dataset_name):
        dataset = PygNodePropPredDataset(name="ogbn-" + dataset_name,
                                         root='torch_geometric_data/')
        split_idx = dataset.get_idx_split()
        train_idx, valid_idx, test_idx = split_idx["train"], split_idx[
            "valid"], split_idx["test"]
        edge = dataset[0].edge_index
        num_classes = len(np.unique(dataset[0].y))
        print("Nodes: %d, edges: %d, features: %d, classes: %d. \n" %
              (dataset[0].y.shape[0], len(edge[0]) / 2, len(
                  dataset[0].x[0]), num_classes))
        graph = dgl.DGLGraph((edge[0], edge[1]))
        graph.ndata['features'] = dataset[0].x
        graph.ndata['labels'] = dataset[0].y
        dgl.data.utils.save_graphs('torch_geometric_data/dgl_' + dataset_name,
                                   graph)
        torch.save(
            train_idx, 'torch_geometric_data/ogbn_' + dataset_name +
            '/train_' + dataset_name + '.pt')
        torch.save(
            valid_idx, 'torch_geometric_data/ogbn_' + dataset_name +
            '/valid_' + dataset_name + '.pt')
        torch.save(
            test_idx, 'torch_geometric_data/ogbn_' + dataset_name + '/test_' +
            dataset_name + '.pt')
        labels = graph.ndata.pop('labels')
        features = graph.ndata.pop('features')
        features = torch.hstack([features, torch.ones([features.shape[0], 1])])
        #print(features)
    else:
        graph = dgl.data.utils.load_graphs('torch_geometric_data/dgl_' +
                                           dataset_name)[0][0]
        labels = graph.ndata.pop('labels')
        features = graph.ndata.pop('features')
        features = torch.hstack([features, torch.ones([features.shape[0], 1])])
        train_idx = torch.load('torch_geometric_data/ogbn_' + dataset_name +
                               '/train_' + dataset_name + '.pt')
        valid_idx = torch.load('torch_geometric_data/ogbn_' + dataset_name +
                               '/valid_' + dataset_name + '.pt')
        test_idx = torch.load('torch_geometric_data/ogbn_' + dataset_name +
                              '/test_' + dataset_name + '.pt')
        num_classes = len(torch.unique(labels))

    return graph, features, labels, num_classes, train_idx, valid_idx, test_idx
Example #27
def main():
    dset = torch.load("TR1.pt")
    dset = [tup[-1] for tup in dset]
    dists = torch.hstack([dist.flatten() for dist in dset])
    dists = dists[dists > 0].numpy()
    hist, bin_edges = np.histogram(dists, bins=100)
    idxs = np.digitize([4, 100], bin_edges)
    print(idxs)
    print(bin_edges[idxs])
Example #28
def get_z(xyz, network_size):
    """
    Inputs: xyz in some type, will be converted to tensor & Network Size
    Output: padded tensor of the form z + [0]*n such that the total tensor has length network size
    """
    xyz = torch.tensor(xyz)
    pad = network_size - xyz.shape[0]
    z = torch.hstack((xyz[:, 2], torch.zeros(pad)))
    return z
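A quick usage sketch:

# Three points, network size 5: z components padded with two zeros.
z = get_z([[0.0, 0.0, 1.5], [1.0, 2.0, 3.0], [0.0, 1.0, 2.5]], network_size=5)
# -> tensor([1.5000, 3.0000, 2.5000, 0.0000, 0.0000])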
Example #29
 def tensor_indexing_ops(self):
     x = torch.randn(2, 4)
     y = torch.randn(4, 4)
     t = torch.tensor([[0, 0], [1, 0]])
     mask = x.ge(0.5)
     i = [0, 1]
     return len((
         torch.cat((x, x, x), 0),
         torch.concat((x, x, x), 0),
         torch.conj(x),
         torch.chunk(x, 2),
         torch.dsplit(torch.randn(2, 2, 4), i),
         torch.column_stack((x, x)),
         torch.dstack((x, x)),
         torch.gather(x, 0, t),
         torch.hsplit(x, i),
         torch.hstack((x, x)),
         torch.index_select(x, 0, torch.tensor([0, 1])),
         x.index(t),
         torch.masked_select(x, mask),
         torch.movedim(x, 1, 0),
         torch.moveaxis(x, 1, 0),
         torch.narrow(x, 0, 0, 2),
         torch.nonzero(x),
         torch.permute(x, (0, 1)),
         torch.reshape(x, (-1, )),
         torch.row_stack((x, x)),
         torch.select(x, 0, 0),
         torch.scatter(x, 0, t, x),
         x.scatter(0, t, x.clone()),
         torch.diagonal_scatter(y, torch.ones(4)),
         torch.select_scatter(y, torch.ones(4), 0, 0),
         torch.slice_scatter(x, x),
         torch.scatter_add(x, 0, t, x),
         x.scatter_(0, t, y),
         x.scatter_add_(0, t, y),
         # torch.scatter_reduce(x, 0, t, reduce="sum"),
         torch.split(x, 1),
         torch.squeeze(x, 0),
         torch.stack([x, x]),
         torch.swapaxes(x, 0, 1),
         torch.swapdims(x, 0, 1),
         torch.t(x),
         torch.take(x, t),
         torch.take_along_dim(x, torch.argmax(x)),
         torch.tensor_split(x, 1),
         torch.tensor_split(x, [0, 1]),
         torch.tile(x, (2, 2)),
         torch.transpose(x, 0, 1),
         torch.unbind(x),
         torch.unsqueeze(x, -1),
         torch.vsplit(x, i),
         torch.vstack((x, x)),
         torch.where(x),
         torch.where(t > 0, t, 0),
         torch.where(t > 0, t, t),
     ))
Example #30
        def get_samples(labels):
            sampled = []
            for i in range(self.n_labels):
                indices = torch.nonzero(labels == i).view(-1)
                random_indices = torch.randperm(
                    indices.shape[0])[:self.samples_per_class]
                sampled.append(indices[random_indices])

            return torch.hstack(sampled)