Example No. 1
    def __iter__(self):
        batch = []

        for idx in range(len(self.arrays[0])):
            batch.append(idx)

            if len(batch) == self.batch_size:
                # full batch: index every parallel array with the same ids
                yield [tensor(arr[batch].copy()) for arr in self.arrays]
                batch = []

        if batch:
            # emit the trailing partial batch as well
            yield [tensor(arr[batch].copy()) for arr in self.arrays]
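Batchers like this normally live in a small loader class over parallel NumPy arrays, with `tensor` being `torch.tensor`; a minimal self-contained sketch (the `ArrayLoader` name and setup are illustrative, not from the original repo):

import numpy as np
from torch import tensor

class ArrayLoader:
    def __init__(self, arrays, batch_size):
        self.arrays = arrays          # parallel arrays, same length on axis 0
        self.batch_size = batch_size

    def __iter__(self):
        batch = []
        for idx in range(len(self.arrays[0])):
            batch.append(idx)
            if len(batch) == self.batch_size:
                yield [tensor(arr[batch].copy()) for arr in self.arrays]
                batch = []
        if batch:
            yield [tensor(arr[batch].copy()) for arr in self.arrays]

x = np.arange(10.0).reshape(5, 2)
y = np.arange(5.0)
for bx, by in ArrayLoader([x, y], batch_size=2):
    print(bx.shape, by.shape)   # two full batches of 2, then a final batch of 1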
Example No. 2
def spike(x):
    time_range = 2247
    truth, shape = loader.loader(
        'D:\\document\\体外网络发放数据\\a195e390b707a65cf3f319dbadbbc75f_6b245c0909b1a21072dd559d4203e15b_8.txt')

    # the first 200 rows seed the simulation; the rest is the target to match
    start = tf.constant(truth[0:200, :], dtype=tf.float32)
    truth_rest = tf.constant(truth[200:, :], dtype=tf.float32)

    spike = tf.zeros((shape[0] - 200, shape[1]), dtype=tf.float32)
    spike = tf.concat([start, spike], axis=0)
    print(spike.shape)

    def condition(time):
        return time < time_range
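The `condition` at the end suggests the zero-initialized `spike` buffer feeds a `tf.while_loop` that fills in one time step per iteration; a minimal sketch of that pattern (the loop body is illustrative, and `time_range` is shrunk from the original 2247):

import tensorflow as tf

time_range = 10   # the original uses 2247

def condition(time, spike):
    return time < time_range

def body(time, spike):
    # a real model would compute the next row from the network state
    row = tf.zeros((1, tf.shape(spike)[1]), dtype=tf.float32)
    return time + 1, tf.concat([spike, row], axis=0)

spike = tf.zeros((200, 4), dtype=tf.float32)   # seeded start segment
_, spike = tf.while_loop(
    condition, body, [tf.constant(0), spike],
    shape_invariants=[tf.TensorShape([]), tf.TensorShape([None, 4])])
print(spike.shape)   # (210, 4)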
Example No. 3
    def __iter__(self):
        for idxs in self.sampler(self.val):
            hvg_input = tensor(self.hvg_input[idxs].copy())
            hvg_target = tensor(self.hvg_target[idxs].copy())
            p_target = tensor(self.p_target[idxs].copy())

            # the low-variance-gene arrays are optional; fall through to None
            if (self.lvg_input is not None) and (self.lvg_target is not None):
                lvg_input = tensor(self.lvg_input[idxs].copy())
                lvg_target = tensor(self.lvg_target[idxs].copy())
            else:
                lvg_input = None
                lvg_target = None

            yield [hvg_input, lvg_input], hvg_target, lvg_target, p_target
Example No. 4
def encode(sm_list, pad_size=50):
    """
    Encode a list of SMILES strings into a padded tensor of token ids.
    """
    res = []
    lens = []
    for s in sm_list:
        # id 1 marks start-of-sequence; truncate so at least one pad slot remains
        tokens = ([1] + [__t2i[tok]
                         for tok in smiles_tokenizer(s)])[:pad_size - 1]
        lens.append(len(tokens))
        tokens += (pad_size - len(tokens)) * [2]  # id 2 is the padding token
        res.append(tokens)

    return tf.cast(tf.constant(res), tf.int64), lens
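Per the code above, token id 1 is a start-of-sequence marker and id 2 is padding; a small usage sketch with a stand-in vocabulary and tokenizer (both hypothetical, just to make the shapes visible):

import tensorflow as tf

__t2i = {'C': 3, 'O': 4, '=': 5}   # stand-in vocabulary

def smiles_tokenizer(s):
    return list(s)                 # stand-in: one token per character

def encode(sm_list, pad_size=50):
    res, lens = [], []
    for s in sm_list:
        tokens = ([1] + [__t2i[tok] for tok in smiles_tokenizer(s)])[:pad_size - 1]
        lens.append(len(tokens))
        tokens += (pad_size - len(tokens)) * [2]
        res.append(tokens)
    return tf.cast(tf.constant(res), tf.int64), lens

batch, lens = encode(['C=O', 'CC'], pad_size=8)
print(batch.shape, lens)   # (2, 8) [4, 3]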
Example No. 5
 def gen_context_from_nrp(self, prev_context, nrp_sym, position):
     if nrp_sym in self.sym2char:
         rapper = self.sym2char[nrp_sym]
         rap_vec = self.rapper_vectors[rapper]
         prev_context["rapper" + str(position)] = tf.cast(
             tf.convert_to_tensor(rap_vec), tf.int32)
     return prev_context
Example No. 6
 def __init__(
         self,
         initial_conditions=[0, 2, 20],
         model_parameters=[28., 10., 8. / 3.],
         final_time=50,
         time_steps=5000):
     self.initial_conditions = np.array(initial_conditions)
     self.model_parameters = model_parameters
     self.final_time = final_time
     self.time_steps = time_steps
     self.state = tf.constant([], dtype=tf.float32)  # empty until the solver runs
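The defaults look like the classic Lorenz-63 system (rho=28, sigma=10, beta=8/3 in the order given), so `state` presumably ends up holding the integrated trajectory; a minimal sketch of producing one with SciPy (the solver choice is an assumption, not the original's):

import numpy as np
import tensorflow as tf
from scipy.integrate import solve_ivp

def lorenz(t, xyz, rho=28.0, sigma=10.0, beta=8.0 / 3.0):
    x, y, z = xyz
    return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]

t_eval = np.linspace(0.0, 50.0, 5000)            # final_time, time_steps above
sol = solve_ivp(lorenz, (0.0, 50.0), [0, 2, 20], t_eval=t_eval)
state = tf.constant(sol.y.T, dtype=tf.float32)   # trajectory, shape (5000, 3)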
Example No. 7
 def pad_neighbors(self, mesh, size):
     """ extracts one-ring neighbors (4x) -> mesh.edge_to_neighbors
     which is of size #edges x 4
     add the edge_id itself to make #edges x 5
     then pad to desired size e.g., size x 5
     """
     padded_neighbors = tf.cast(tf.constant(mesh.edge_to_neighbors), tf.float32)
     edge_ids = tf.expand_dims(tf.range(len(mesh.edges), dtype=tf.float32), 1)
     padded_neighbors = tf.concat((edge_ids, padded_neighbors), axis=1)
     # tf.pad wants [rank, 2] paddings: pad rows at the bottom, columns untouched
     padded_neighbors = tf.pad(padded_neighbors, [[0, size - len(mesh.edges)], [0, 0]], 'CONSTANT')
     padded_neighbors = tf.expand_dims(padded_neighbors, 0)
     return padded_neighbors
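Note that `tf.pad` takes a `[rank, 2]` paddings matrix (rows of before/after counts per axis), unlike `torch.nn.functional.pad`'s flat list, which is probably where the original flat-list call came from; a quick check of the row padding used above:

import tensorflow as tf

x = tf.ones((3, 5))                    # e.g. 3 edges, 5 ids per edge
padded = tf.pad(x, [[0, 4], [0, 0]])   # append 4 zero rows at the bottom
print(padded.shape)                    # (7, 5)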
Example No. 8
    def __iter__(self):
        batch = []

        for idx in range(len(self.array)):
            batch.append(idx)

            if len(batch) == self.batch_size:
                yield tensor(self.array[batch].copy())
                batch = []

        if batch:
            # wrap the trailing partial batch just like the full ones
            yield tensor(self.array[batch].copy())
Example No. 9
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()  # re-evaluate the model to obtain a fresh loss

        # first pass: adaptive gradient clipping (AGC) on every parameter
        for group in self.param_groups:
            for p in group['params']:
                param_norm = tf.maximum(unitwise_norm(p),
                                        tf.constant(group['eps'], dtype=p.dtype))
                grad_norm = unitwise_norm(p.grad)
                max_norm = param_norm * group['clipping']

                trigger = grad_norm > max_norm

                clipped_grad = p.grad * \
                    (max_norm / tf.maximum(grad_norm,
                                           tf.constant(1e-6, dtype=grad_norm.dtype)))
                p.grad.data.copy_(tf.where(trigger, clipped_grad, p.grad))

        # second pass: SGD with momentum over the clipped gradients
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                if weight_decay != 0:
                    d_p = d_p.add(p, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = tf.identity(d_p)  # clone, not a view
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf

                p.add_(d_p, alpha=-group['lr'])

        return loss
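`step` relies on a `unitwise_norm` helper that isn't shown. In adaptive-gradient-clipping implementations this is typically the L2 norm taken per output unit rather than over the whole tensor; a plausible sketch, with the axis convention being an assumption:

import tensorflow as tf

def unitwise_norm(x):
    # scalars and vectors: one global norm; matrices and conv kernels:
    # one norm per unit along the leading (output) axis
    if x.shape.rank <= 1:
        axes, keepdims = None, False
    else:
        axes, keepdims = list(range(1, x.shape.rank)), True
    return tf.sqrt(tf.reduce_sum(tf.square(x), axis=axes, keepdims=keepdims))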
Example No. 10

    def loss(self, f, y):
        """Computes the hinge loss for one batch.

        Args:
            f: Tensor containing the output of the forward operation.
            y (tf.Tensor): Tensor containing the ground truth label.
        Returns:
            The hinge loss tensor, max(0, -y·f).
        """
        return tf.maximum(-tf.matmul(y, f), tf.constant(0.0))
Example No. 11
 def _intr_reward(self, seq):
     inputs = seq['feat']
     if self.config.disag_action_cond:
         action = tf.cast(seq['action'], inputs.dtype)
         inputs = tf.concat([inputs, action], -1)
     preds = [head(inputs).mode() for head in self._networks]
     # disagreement: per-feature std across the ensemble, averaged over features
     disag = tf.reduce_mean(tf.math.reduce_std(tf.stack(preds), axis=0), axis=-1)
     if self.config.disag_log:
         disag = tf.math.log(disag)
     reward = self.config.expl_intr_scale * self.intr_rewnorm(disag)[0]
     if self.config.expl_extr_scale:
         reward += self.config.expl_extr_scale * self.extr_rewnorm(
             self.reward(seq))[0]
     return reward
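`disag` here is ensemble disagreement: the per-feature standard deviation across the `self._networks` heads, averaged over features. In plain TensorFlow the stacked form reads:

import tensorflow as tf

preds = [tf.random.normal((16, 8)) for _ in range(5)]   # 5 ensemble heads
stacked = tf.stack(preds)                               # (5, 16, 8)
disag = tf.reduce_mean(tf.math.reduce_std(stacked, axis=0), axis=-1)
print(disag.shape)                                      # (16,)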
Example No. 12
 def __iter__(self):
     for idxs in self.sampler(self.val):
         yield [tensor(arr[idxs].copy()) for arr in self.arrays]
Example No. 13
 def __iter__(self):
     for idxs in self.sampler(self.val):
         yield ((tensor(self.embedding[idxs].copy()),
                 tensor(self.sizefactor[idxs].copy())),
                tensor(self.target[idxs].copy()))
Example No. 14
def get_press_time_array(press_time_file):
    # load the saved NumPy array and hand it to TensorFlow
    time_press_array = np.load(press_time_file)
    return tf.convert_to_tensor(time_press_array)
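`tf.tensor` is not in TensorFlow's public API; the NumPy-to-tensor hop is `tf.convert_to_tensor` (or `tf.constant`). A round-trip sketch with a throwaway file:

import numpy as np
import tensorflow as tf

np.save('/tmp/press_times.npy', np.array([0.1, 0.35, 0.8]))
arr = np.load('/tmp/press_times.npy')
t = tf.convert_to_tensor(arr)    # dtype float64, inferred from the array
print(t.dtype, t.shape)          # <dtype: 'float64'> (3,)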