Example #1
 def forward(self, pre_state, a_in_t, a_out_t, weight_h):
     """
     :param pre_state: (n_node, state_dim)
     :param a_in_t: (n_node, state_dim)
     :param a_out_t: (n_node, state_dim)
     :param weight_h: (state_dim, state_dim)
     :return: ht (n_node, state_dim)
     """
     # cache inputs for later use (e.g. backpropagation)
     self.pre_state = pre_state
     self.a_in_t = a_in_t
     self.a_out_t = a_out_t
     # the passed-in weight_h overwrites the update gate's recurrent slot
     self.weight_z[2] = weight_h
     # update gate
     self.z_t = sigmoid(
         np.matmul(a_in_t, self.weight_z[0]) +
         np.matmul(a_out_t, self.weight_z[1]) +
         np.matmul(pre_state, self.weight_z[2]) + self.weight_z_bias)
     # reset gate
     self.r_t = sigmoid(
         np.matmul(a_in_t, self.weight_r[0]) +
         np.matmul(a_out_t, self.weight_r[1]) +
         np.matmul(pre_state, self.weight_r[2]) + self.weight_r_bias)
     # candidate state: previous state masked by the reset gate
     self.h_zt = np.tanh(
         np.matmul(a_in_t, self.weight_h[0]) +
         np.matmul(a_out_t, self.weight_h[1]) +
         np.matmul(pre_state * self.r_t, self.weight_h[2]) +
         self.weight_h_bias)
     # interpolate between previous and candidate state (GRU update)
     self.h_t = (1 - self.z_t) * pre_state + self.z_t * self.h_zt
     return self.h_t
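
These examples rely on a sigmoid helper (called bare here, and as utils.sigmoid below) that is not shown. A minimal, numerically stable sketch in plain NumPy, offered as an assumption about what it does:

import numpy as np

def sigmoid(x):
    # numerically stable logistic; avoids overflow in np.exp for large |x|
    x = np.asarray(x, dtype=np.float64)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    ex = np.exp(x[~pos])
    out[~pos] = ex / (1.0 + ex)
    return out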
Example #2
    def _train_one_sample(self, w, c, learning_rate=0.001):
        neg = self.sample_contexts()

        # Forward propagation
        e = self.E[w]
        labels = [c] + neg  # positive context first, then negatives
        # copy so the E-gradient below uses pre-update context vectors
        f_labels = self.F[labels].copy()
        a = np.dot(e, f_labels.T).reshape(-1, 1)
        p = utils.sigmoid(a)

        # debug guard: a saturated sigmoid (p[0] == 0 or a negative's p == 1)
        # would make the log-loss below diverge
        if p[0] == 0:
            print(w, c, a, utils.sigmoid(np.dot(e, self.F[labels].T)))
        for i in p[1:]:
            if i == 1:
                print(w, c, a, utils.sigmoid(np.dot(e, self.F[labels].T)))

        # negative-sampling loss: -log p(pos) - sum log(1 - p(neg))
        loss = -(np.log(p[0]) + np.sum(np.log(1 - p[1:])))

        # Back propagation
        p[0] = p[0] - 1  # gradient of the positive term: sigmoid(a) - 1
        self.F[labels] -= learning_rate * p * np.tile(e, (len(p), 1))
        self.E[w] -= learning_rate * np.sum(p * f_labels, axis=0)

        return loss
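
A self-contained sketch of the same forward pass and loss, assuming E and F are (vocab_size, dim) embedding matrices and drawing negatives uniformly as a stand-in for the unshown sample_contexts():

rng = np.random.default_rng(0)
vocab_size, dim = 1000, 50
E = rng.normal(scale=0.1, size=(vocab_size, dim))  # input (word) embeddings
F = rng.normal(scale=0.1, size=(vocab_size, dim))  # output (context) embeddings

w, c = 3, 7                                      # center word and its true context
neg = list(rng.integers(0, vocab_size, size=5))  # uniform stand-in for sample_contexts()
labels = [c] + neg

p = 1.0 / (1.0 + np.exp(-np.dot(E[w], F[labels].T)))  # forward pass
loss = -(np.log(p[0]) + np.sum(np.log(1 - p[1:])))    # same loss as above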
Example #3
    def get_prob(self, word, context):
        # forward propagation
        e = self.E[word]
        a = np.dot(e, self.F[context].T)
        p = utils.sigmoid(a)

        return p
Example #4
    def get_context_dis(self, word):
        # forward propagation
        e = self.E[word]
        a = np.dot(e, self.F.T).reshape(-1)
        p = utils.sigmoid(a)
        p = p / np.sum(p)  # normalize the scores into a distribution over contexts

        return p
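
Because the scores are normalized to sum to one, the result can be used directly as a sampling distribution. A usage sketch (model and w are hypothetical):

dist = model.get_context_dis(w)            # distribution over all context words
ctx = np.random.choice(len(dist), p=dist)  # sample one context id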
Example #5
    def _train_pos(self, w, labels, pos_sample_index, epochs):
        for _ in range(epochs):
            # Forward propagation
            e = self.E[w].copy()  # snapshot so the F-update uses pre-update values
            a = np.dot(e, self.F[labels].T).reshape(-1, 1)
            p = utils.sigmoid(a)

            # Back propagation
            p[pos_sample_index] = p[pos_sample_index] - 1  # positive-sample gradient
            self.E[w] -= self.lr * np.sum(p * self.F[labels], axis=0)
            self.F[labels] -= self.lr * p * np.tile(e, (len(p), 1))

        # final forward pass with the updated weights
        a = np.dot(self.E[w], self.F[labels].T)
        p = utils.sigmoid(a)

        # compute joint probability
        prob = p[pos_sample_index] * np.prod(1 - np.delete(p, pos_sample_index))

        return prob
Example #6
    def _train_noise(self, e, F, pos_sample_index, epochs):
        for _ in range(epochs):
            # Forward propagation
            a = np.dot(e, F.T).reshape(-1, 1)
            p = utils.sigmoid(a)

            # Back propagation
            p[pos_sample_index] = p[pos_sample_index] - 1  # positive-sample gradient
            e_ = e.copy()  # snapshot so the F-update uses the pre-update e
            e -= self.lr * np.sum(p * F, axis=0)
            F -= self.lr * p * np.tile(e_, (len(p), 1))

        # final forward pass with the updated weights
        a = np.dot(e, F.T)
        p = utils.sigmoid(a)

        # compute joint probability
        prob = p[pos_sample_index] * np.prod(1 - np.delete(p, pos_sample_index))

        return prob
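
_train_pos and _train_noise run the same few gradient steps; the first reads and writes the model's own E and F rows, while the second updates arrays passed in by reference (hence the e_ snapshot). The joint probability both return treats the labels as independent Bernoulli outcomes, e.g. with hypothetical scores:

p = np.array([0.9, 0.2, 0.1])               # pos_sample_index = 0
prob = p[0] * np.prod(1 - np.delete(p, 0))  # 0.9 * 0.8 * 0.9 = 0.648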
Example #7
    def validation_loss(self, word, context):
        neg_samples = self.sample_contexts()

        # forward propagation
        e = self.E[word]
        a = np.dot(e, self.F[[context] + neg_samples].T)
        p = utils.sigmoid(a)

        # negative-sampling loss on a fresh draw of negatives
        loss = -np.log(p[0]) - np.sum(np.log(1 - p[1:]))

        return loss
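
sample_contexts (used here and in Example #2) is not shown. word2vec-style implementations typically draw negatives from the unigram distribution raised to the 3/4 power; a minimal sketch under that assumption, with a hypothetical counts array of word frequencies:

def sample_contexts(counts, k=5):
    # draw k negative context ids with probability proportional to count^0.75
    probs = counts ** 0.75
    probs = probs / probs.sum()
    return list(np.random.choice(len(counts), size=k, p=probs))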
Example #8
def segmentStudy(model, scanname, config, OrderByFileName=True):
    """ 
    segmentation = segmentStudy(model, scanname, config)
    
    Performs inference on the slices of a test study and returns the reconstructed volume of the predicted segmentation
    """
    testSet = datafunctions.testSlices(scanname, config)
    testLoader = torch.utils.data.DataLoader(testSet,
                                             batch_size=config["batchSize"],
                                             shuffle=False,
                                             num_workers=4)
    logits_slice_list = []
    file_name_list = []
    model.eval()
    with tqdm(total=len(testLoader), position=0, leave=True) as t:
        t.set_description(' AIMOS prediction:')
        with torch.no_grad():
            for imgs, file_names in testLoader:
                imgs = imgs.cuda(non_blocking=True)
                logits = model(imgs)
                logits_of_batch = logits.detach().cpu().numpy()
                for b in range(logits_of_batch.shape[0]):
                    logits_slice = logits_of_batch[b, :, :, :]  # n_classes, height, width
                    logits_slice_list += [logits_slice]
                    file_name = file_names[b]
                    file_name_list += [file_name]
                t.update()
    # Re-order by filenames
    if OrderByFileName:
        logits_slice_list = tools.sortAbyB(logits_slice_list, file_name_list)
    # Turn into segmentation volume
    logits_vol = np.asarray(
        logits_slice_list)  # z-slices, n_classes, height, width
    logits_vol = np.moveaxis(logits_vol, 0,
                             -1)  # n_classes, height, width, z-slices
    probs_vol = tools.sigmoid(logits_vol)
    segmentation_vol = np.argmax(probs_vol, axis=0)  # height, width, z-slices
    # Resample segmentation volume to original dimensions
    zoomFactors = np.asarray(testSet.original_shape) / np.asarray(
        segmentation_vol.shape)
    segmentation_resampled = scipy.ndimage.zoom(segmentation_vol,
                                                zoomFactors,
                                                order=0)
    return segmentation_resampled
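
A hedged usage sketch; the model, scan name, and any config keys beyond "batchSize" are placeholders for whatever the surrounding AIMOS pipeline provides:

config = {"batchSize": 16}  # plus whatever datafunctions.testSlices expects
model = model.cuda()        # a trained segmentation network
segmentation = segmentStudy(model, "scan_001", config)
print(segmentation.shape)   # height, width, z-slices at the original resolution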
Example #9
# aggregate per-edge-type messages; adj packs in-edge blocks first, then out-edge blocks
a_in_t = np.zeros((n_node, state_dim))
a_out_t = np.zeros((n_node, state_dim))
for i in range(n_edge_types):
    a_in_t += np.matmul(adj[:, i * n_node:(i + 1) * n_node], in_states[i])
    a_out_t += np.matmul(
        adj[:, (i + n_edge_types) * n_node:(i + 1 + n_edge_types) * n_node],
        out_states[i])

# ggsnn (gated graph sequence neural network)
# 1. propagator model
# each weight packs three (state_dim, state_dim) matrices: in-edge, out-edge, recurrent
weight_z = np.zeros((3, state_dim, state_dim))
weight_r = np.zeros((3, state_dim, state_dim))
weight_h = np.zeros((3, state_dim, state_dim))

z_t = sigmoid(
    np.matmul(a_in_t, weight_z[0]) + np.matmul(a_out_t, weight_z[1]) +
    np.matmul(pre_state, weight_z[2]))
r_t = sigmoid(
    np.matmul(a_in_t, weight_r[0]) + np.matmul(a_out_t, weight_r[1]) +
    np.matmul(pre_state, weight_r[2]))
h_zt = np.tanh(
    np.matmul(a_in_t, weight_h[0]) + np.matmul(a_out_t, weight_h[1]) +
    np.matmul(pre_state * r_t, weight_h[2]))  # apply the reset gate, as in Example #1
h_t = (1 - z_t) * pre_state + z_t * h_zt

# 2. output model
weight_ho = np.zeros((state_dim, state_dim))
weight_xo = np.zeros((annotation_dim, state_dim))
weight_o = np.zeros((state_dim, 1))

z_1 = np.tanh(np.matmul(h_t, weight_ho) + np.matmul(annotation, weight_xo))
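
weight_o is defined above but never applied; presumably the per-node output closes the model along these lines (an assumption, not part of the original snippet):

output = np.matmul(z_1, weight_o)  # (n_node, 1) per-node score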