def forward(self, src, dst=None):
    """Score edges (src, dst) by a distance-weighted average of path logits.

    If ``dst`` is None, ``src`` is treated as a 2-row/2-column edge tensor
    and split into endpoint index vectors via its transpose.

    Returns a per-edge scalar score: candidate logits soft-averaged with
    weights ``softmax(1 - distance)`` over the intermediate nodes.
    """
    if dst is None:
        src, dst = src.T
    n = src.shape[0]
    if self.nearest and not self.training:
        # Eval mode: use each endpoint's `self.nearest` closest nodes in
        # position space as intermediates; index 0 of topk(largest=False)
        # is the node itself, hence the [:, 1:] slice.
        # NOTE(review): `self.position.unsqueeze(2)` yields (N, F, 1) which
        # broadcasts against (n, 1, F) only when n == N — looks like
        # unsqueeze(0) may have been intended; confirm against callers.
        mid0 = ((self.position[src].unsqueeze(1) -
                 self.position.unsqueeze(2)).norm(dim=2).topk(
                     1 + self.nearest, largest=False).indices[:, 1:])
        mid1 = ((self.position[dst].unsqueeze(1) -
                 self.position.unsqueeze(2)).norm(dim=2).topk(
                     1 + self.nearest, largest=False).indices[:, 1:])
    else:
        # Training (or nearest disabled): sample intermediates uniformly.
        mid0 = torch.randint(0, self.n_nodes, (n, self.n_samples))
        mid1 = torch.randint(0, self.n_nodes, (n, self.n_samples))
    # src -> mid0 displacement, projected onto dst's field, plus the
    # adjacency prior (mid0, dst) scaled by the learned uncertainty.
    srcdiff = self.position[src].unsqueeze(1) - self.position[mid0]
    logits1 = ((srcdiff.unsqueeze(2) @ self.dst_field[dst].unsqueeze(
        1).unsqueeze(3)).squeeze(2).squeeze(2) +
               self.uncertainty * self.edge[mid0, dst.unsqueeze(1)])
    # Symmetric term: dst -> mid1 displacement against src's field.
    dstdiff = self.position[dst].unsqueeze(1) - self.position[mid1]
    logits2 = ((dstdiff.unsqueeze(2) @ self.src_field[src].unsqueeze(
        1).unsqueeze(3)).squeeze(2).squeeze(2) +
               self.uncertainty * self.edge[src.unsqueeze(1), mid1])
    logits = torch.cat((logits1, logits2), dim=1)
    dist = torch.cat((srcdiff, dstdiff), dim=1).norm(dim=2)
    if self.sentinel:
        # Append `sentinel` dummy candidates: zero logit at unit distance,
        # which damps the softmax weights of far-away real candidates.
        logits = torch.cat((logits, gpu(torch.zeros(n, self.sentinel))),
                           dim=1)
        dist = torch.cat((dist, gpu(torch.ones(n, self.sentinel))), dim=1)
    # Soft average of logits, weighting nearer intermediates more heavily.
    return (logits.unsqueeze(1) @ torch.softmax(
        1 - dist, dim=1).unsqueeze(2)).squeeze()
def forward(self, out: Dict, data: Dict) -> Dict:
    """Compute the combined prediction loss.

    Delegates to ``self.pred_loss`` and adds a ``"loss"`` entry that is the
    sum of the mean classification and mean regression losses (counts are
    epsilon-padded to avoid division by zero).
    """
    losses = self.pred_loss(out, gpu(data["gt_preds"]),
                            gpu(data["has_preds"]))
    cls_term = losses["cls_loss"] / (losses["num_cls"] + 1e-10)
    reg_term = losses["reg_loss"] / (losses["num_reg"] + 1e-10)
    losses["loss"] = cls_term + reg_term
    return losses
def __init__(
    self,
    n_nodes,
    node_feats,
    src,
    dst,
    edge_feats=1,
    n_samples=8,
    directed=True,
    nearest=0,
    sentinel=0,
):
    """Learned node-position/field model over a fixed graph.

    Args:
        n_nodes (int): Number of nodes in the graph.
        node_feats (int): Dimensionality of the learned node embeddings.
        src, dst: Index arrays of the known edges' endpoints.
        edge_feats (int): Edge feature dimensionality (stored only).
        n_samples (int): Intermediate nodes sampled per edge in training.
        directed (bool): If False, src/dst fields are shared and the
            adjacency is symmetrized.
        nearest (int): If > 0, use k-nearest intermediates at eval time.
        sentinel (int): Number of dummy candidates appended in forward().
    """
    # BUGFIX: `super(self.__class__, self).__init__()` recurses infinitely
    # when this class is subclassed; zero-argument super() is the correct
    # Python 3 form.
    super().__init__()
    self.n_nodes = n_nodes
    self.node_feats = node_feats
    self.edge_feats = edge_feats
    self.n_samples = n_samples
    self.nearest = nearest
    self.sentinel = sentinel
    self.position = nn.Parameter(gpu(torch.rand((n_nodes, node_feats))))
    self.src_field = nn.Parameter(gpu(torch.rand((n_nodes, node_feats))))
    # Undirected graphs share one field tensor for both edge directions.
    self.dst_field = (self.src_field if not directed else nn.Parameter(
        gpu(torch.rand((n_nodes, node_feats)))))
    self.uncertainty = nn.Parameter(gpu(torch.ones(1, 1) * 5))
    # Dense adjacency prior: +1 for observed edges, -1 otherwise.
    # (Removed dead `self.edge = None` that was immediately overwritten.)
    edge = -1 * torch.ones(n_nodes, n_nodes)
    edge[src, dst] = 1
    if not directed:
        edge[dst, src] = 1
    self.edge = gpu(edge)
def forward(self, data: Dict) -> Dict[str, List[Tensor]]:
    """Run the full actor/map fusion pipeline and predict trajectories.

    Builds actor and map features, runs the four fusion stages
    (a2m -> m2m -> m2a -> a2a), predicts, and maps the regression output
    back into world coordinates using each sample's rotation and origin.
    """
    # Actor features.
    actor_feats, actor_idcs = actor_gather(gpu(data["feats"]))
    actor_ctrs = gpu(data["ctrs"])
    actor_feats = self.actor_net(actor_feats)

    # Map features.
    graph = graph_gather(to_long(gpu(data["graph"])))
    node_feats, node_idcs, node_ctrs = self.map_net(graph)

    # Actor-map fusion cycle.
    node_feats = self.a2m(node_feats, graph, actor_feats, actor_idcs,
                          actor_ctrs)
    node_feats = self.m2m(node_feats, graph)
    actor_feats = self.m2a(actor_feats, actor_idcs, actor_ctrs,
                           node_feats, node_idcs, node_ctrs)
    actor_feats = self.a2a(actor_feats, actor_idcs, actor_ctrs)

    # Prediction head.
    out = self.pred_net(actor_feats, actor_idcs, actor_ctrs)

    # Transform predictions from local frames to world coordinates.
    rot, orig = gpu(data["rot"]), gpu(data["orig"])
    for i, reg in enumerate(out["reg"]):
        out["reg"][i] = torch.matmul(reg, rot[i]) + orig[i].view(
            1, 1, 1, -1)
    return out
def _tensor_or_none(arg, use_cuda):
    """Convert a numpy array (or tuple of arrays) to device tensor(s).

    ``None`` passes through unchanged; a tuple is converted element-wise.
    """
    if arg is None:
        return None
    if isinstance(arg, tuple):
        return tuple(gpu(torch.from_numpy(item), use_cuda) for item in arg)
    return gpu(torch.from_numpy(arg), use_cuda)
def visualize_samples(n_samps, freq, amp, phase, stop, model):
    """Visualize model performance on trajectories of various sample sizes.

    Processing is sequential due to irregularly sampled time series. This
    however makes for very slow runs.

    Args:
        n_samps (list of int): All numbers of trajectory samples to evaluate.
        freq (float): Frequency of sine waves.
        amp (float): Amplitude of sine waves.
        phase (float): Phase of sine waves.
        stop (float): End timepoint of sine waves.
        model (nn.Module): PyTorch model to evaluate.
    """
    data, ts, pred = [], [], []
    # TODO: Optimize by using a padding scheme.
    for n_samp in n_samps:
        t, d = generate_sine_linear(n_samp, freq, amp, phase, stop)
        out = asnp(model.get_prediction(gpu(d).reshape(1, -1, 1), gpu(t)))
        data.append(d)
        ts.append(t)
        pred.append(out.flatten())

    titles = ["# Samps = {}".format(n_samp) for n_samp in n_samps]
    # Near-square subplot grid sized to the number of trajectories.
    n_row = int(np.ceil(np.sqrt(len(n_samps))))
    n_col = int(np.ceil(len(n_samps) / n_row))
    fig, ax = plt.subplots(n_row, n_col, sharex=True, sharey=True)
    fig.set_size_inches((n_col * 3, n_row * 2))
    for i in range(len(n_samps)):
        r, c = divmod(i, n_col)
        ax[r, c].plot(ts[i], data[i].flatten())
        ax[r, c].plot(ts[i], pred[i])
        ax[r, c].title.set_text(titles[i])
        ax[r, c].minorticks_on()
        ax[r, c].grid(which='major')
        ax[r, c].grid(which='minor', linestyle='--')
    plt.show()
def get_dataloaders(dataset_type, batch_size):
    """Build train/validation DataLoaders for the requested dataset.

    Args:
        dataset_type (str): One of "sine" or "aussign".
        batch_size (int): Training batch size (validation uses one batch).

    Raises:
        ValueError: For an unrecognized ``dataset_type``.
    """
    data_path = DATA_PATH_DICT[dataset_type]
    if dataset_type == "sine":
        generator = torch.load(data_path)['generator']
        train_time, train_data = generator.get_train_set()
        val_time, val_data = generator.get_val_set()
        # Reshape trajectories to (B, L, 1) before wrapping.
        train_set = SineSet(
            gpu(train_data.reshape(len(train_data), -1, 1)), gpu(train_time))
        val_set = SineSet(
            gpu(val_data.reshape(len(val_data), -1, 1)), gpu(val_time))
    elif dataset_type == "aussign":
        blob = torch.load(data_path)
        train_data = blob["train_dataset"]
        val_data = blob["val_dataset"]
        # Regularly sampled: timepoints are simply the integer grid.
        train_set = GenericSet(gpu(train_data),
                               gpu(list(range(train_data.shape[1]))))
        val_set = GenericSet(gpu(val_data),
                             gpu(list(range(val_data.shape[1]))))
    else:
        raise ValueError("Unknown dataset type.")
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_set, batch_size=len(val_set))
    return train_loader, val_loader
def visualize_segmentation(tp, data, true_cp, pred_cp, model):
    """Visualize segmentation results.

    Plots reconstructed trajectory against input data points. True
    changepoints are visualized against changepoints predicted via PELT.

    TODO: Add feature to show more than one reconstructed trajectory.

    Args:
        tp (np.ndarray): Observation timepoints of data.
        data (np.ndarray): Input data.
        true_cp (np.ndarray): Location of true changepoints.
        pred_cp (np.ndarray): Location of predicted changepoints.
        model (nn.Module): PyTorch Latent NODE model.
    """
    # Pad predicted changepoints with the series endpoints to get segment
    # boundaries, then reconstruct each segment independently.
    bounds = np.concatenate(([0], pred_cp, [len(data)])).astype(int)
    pieces = []
    for lo, hi in zip(bounds[:-1], bounds[1:]):
        seg_data = gpu(data[lo:hi]).reshape(1, -1, 1)
        seg_tp = gpu(tp[lo:hi])
        pieces.append(asnp(model.get_prediction(seg_data, seg_tp)).flatten())
    traj_x = np.concatenate(pieces, 0)

    plt.scatter(tp, data)
    plt.plot(tp, traj_x)
    for cp in true_cp:
        plt.axvline(x=tp[cp], c='royalblue', lw='4')
    for cp in bounds[1:-1]:
        plt.axvline(x=tp[cp], c='orangered', ls='--', lw='2')
    plt.legend([
        plt.Line2D([0], [0], c='royalblue', lw=4),
        plt.Line2D([0], [0], c='orangered', ls='--', lw=2)
    ], ['True CP', 'Predicted CP'])
    plt.show()
def _get_predictions(model):
    """Score every item for every user and keep the top-1000 item ids.

    Returns:
        np.ndarray of shape (num_users, 1000) with ranked item indices.
    """
    model._net.eval()
    seqs = model.test_sequence.sequences
    users = np.arange(model._num_users)
    row_ids = np.arange(seqs.shape[0])

    # One row holding every item id, later tiled per batch.
    item_ids = np.arange(model._num_items).reshape(1, -1)
    item_ids = torch.from_numpy(item_ids.astype(np.int64)).clone()
    all_item_var = Variable(gpu(item_ids, model._use_cuda))

    seq_tensor = gpu(torch.from_numpy(seqs), model._use_cuda)
    user_tensor = gpu(torch.from_numpy(users), model._use_cuda)

    top = 1000
    all_predictions = np.zeros((model._num_users, top), dtype=np.int64)

    for batch_ids, batch_seq, batch_user in minibatch(row_ids,
                                                      seq_tensor,
                                                      user_tensor,
                                                      batch_size=256):
        n_in_batch = batch_user.shape[0]
        scores = model._net(Variable(batch_seq),
                            Variable(batch_user),
                            all_item_var.repeat(n_in_batch, 1))
        _, ranked = scores.topk(top, dim=1)
        all_predictions[batch_ids, :] = cpu(ranked.data).numpy()

    return all_predictions
def visualize_time(n_samp, freq, amp, phase, stops, model):
    """Visualize model performance on trajectories of various lengths.

    Args:
        n_samp (int): Number of samples per trajectory.
        freq (float): Frequency of sine waves.
        amp (float): Amplitude of sine waves.
        phase (float): Phase of sine waves.
        stops (list of float): All end timepoints of sine waves to evaluate.
        model (nn.Module): PyTorch model to evaluate.
    """
    # (Docstring fixed: the parameter is `stops`, not `stop`.)
    data = np.zeros((len(stops), n_samp))
    for i, stop in enumerate(stops):
        t, d = generate_sine_linear(n_samp, freq, amp, phase, stop)
        data[i] = d
    # NOTE(review): all rows are evaluated against the timepoints `t` of
    # the LAST generated trajectory — confirm the generator returns the
    # same grid regardless of `stop`.
    out = asnp(model.get_prediction(gpu(data).unsqueeze(2), gpu(t)))
    titles = ["Stop = {}".format(stop) for stop in stops]
    visualize_grid(data, out, t, titles)
def visualize_frequency(n_samp, freqs, amp, phase, stop, model):
    """Visualize model performance on different sine wave frequency settings.

    Args:
        n_samp (int): Number of samples per trajectory.
        freqs (list of float): Frequencies of sine waves to evaluate.
        amp (float): Amplitude of sine waves.
        phase (float): Phase of sine waves.
        stop (float): End timepoint of sine waves.
        model (nn.Module): PyTorch model to evaluate.
    """
    data = np.zeros((len(freqs), n_samp))
    for row, freq in enumerate(freqs):
        t, traj = generate_sine_linear(n_samp, freq, amp, phase, stop)
        data[row] = traj
    out = asnp(model.get_prediction(gpu(data).unsqueeze(2), gpu(t)))
    titles = ["Frequency = {}".format(freq) for freq in freqs]
    visualize_grid(data, out, t, titles)
def visualize_phase(n_samp, freq, amp, phases, stop, model):
    """Visualize model performance on trajectories of various phases.

    Args:
        n_samp (int): Number of samples per trajectory.
        freq (float): Frequency of sine waves.
        amp (float): Amplitude of sine waves.
        phases (list of float): All phases of sine waves to evaluate.
        stop (float): End timepoint of sine waves.
        model (nn.Module): PyTorch model to evaluate.
    """
    # (Docstring fixed: it previously documented `n_samps (list of int)`
    # and a scalar `phase`, which do not match this signature.)
    data = np.zeros((len(phases), n_samp))
    for i, phase in enumerate(phases):
        t, d = generate_sine_linear(n_samp, freq, amp, phase, stop)
        data[i] = d
    out = asnp(model.get_prediction(gpu(data).unsqueeze(2), gpu(t)))
    titles = ["Phase = {}".format(phase) for phase in phases]
    visualize_grid(data, out, t, titles)
def visualize_trajectory(data, ts, model, ax=None):
    """Visualize trajectory reconstructions by Latent NODE model.

    Given an input of ground truth data, visualizes reconstructed
    trajectory made by latent model. Ground truth trajectories are solid,
    while predictions are dashed. Data should be in shape of BxLxD where:
        B = number of trajectories
        L = length of time series
        D = input features

    Args:
        data (np.ndarray): Input data to visualize.
        ts (np.ndarray): Timepoints of observation for data points.
        model (nn.Module): PyTorch model to evaluate.
        ax (matplotlib.axes.Axes, optional): Axes to plot on; defaults to
            the current axes at call time.
    """
    # BUGFIX: the old default `ax=plt.gca()` was evaluated once at import
    # time, permanently binding whatever axes existed then (and creating a
    # figure as a side effect of importing this module). Resolve per call.
    if ax is None:
        ax = plt.gca()
    out = asnp(model.get_prediction(gpu(data), gpu(ts)))
    for i in range(len(data)):
        ax.plot(ts, data[i], c='red', alpha=0.8)
        ax.plot(ts, out[i].squeeze(), c='orange', alpha=0.9, linestyle='--')
def _get_negative_prediction(self, minibatch):
    """Score randomly sampled negative items for the given minibatch.

    Samples one negative item per interaction and runs the network on them
    with the minibatch's user/context/item features.
    """
    negative_items = sample_items(
        self._num_items,
        len(minibatch),
        random_state=self._random_state)
    negative_var = gpu(torch.from_numpy(negative_items), self._use_cuda)
    # (Removed a stray debug `print()` left in the hot training path.)
    negative_prediction = self._net(minibatch.user_ids,
                                    negative_var,
                                    minibatch.user_features,
                                    minibatch.context_features,
                                    minibatch.item_features)
    return negative_prediction
def modify(config, data_loader, save):
    """Precompute left/right cross-edges for every graph and pickle the store.

    Iterates the loader, runs ``preprocess`` on each sample, writes the
    resulting 'left'/'right' graph entries back into the dataset split,
    and dumps the updated split to ``preprocess/<save>``.
    """
    t = time.time()
    store = data_loader.dataset.split
    for i, data in enumerate(data_loader):
        data = [dict(x) for x in data]
        out = [
            preprocess(to_long(gpu(sample)), config['cross_dist'])
            for sample in data
        ]
        for graph in out:
            idx = graph['idx']
            store[idx]['graph']['left'] = graph['left']
            store[idx]['graph']['right'] = graph['right']
        if (i + 1) % 100 == 0:
            # Periodic progress: samples processed and seconds per chunk.
            print((i + 1) * config['batch_size'], time.time() - t)
            t = time.time()
    # BUGFIX: use a context manager so the file handle is closed even if
    # pickling raises (the original leaked the handle on error).
    with open(os.path.join(root_path, 'preprocess', save), 'wb') as f:
        pickle.dump(store, f, protocol=pickle.HIGHEST_PROTOCOL)
# Reshape y into a column matrix.
X, y = X[:100], y[:100, np.newaxis]
# Split the dataset.
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.3,
                                                    random_state=5)
# Build the model with an OrderedDict.
model = nn.Sequential(
    OrderedDict([('linear1', nn.Linear(4, 1)), ('activation',
                                                nn.Sigmoid())]))
# GPU support.
model1, train_X1, train_y1 = gpu(model, train_X, train_y)
_, test_X1, test_y1 = gpu(model, test_X, test_y)
# Loss function and optimizer.
criterion = nn.BCELoss()
optimizer = optim.ASGD(model1.parameters(), lr=0.01)
# Train without mini-batches (full-batch updates).
for epoch in range(500):
    optimizer.zero_grad()
    y_prediction = model1(train_X1)
    loss = criterion(y_prediction, train_y1)
    loss.backward()
    optimizer.step()
    # BUGFIX: `loss.data[0]` raises IndexError on 0-dim tensors in
    # PyTorch >= 0.4; `.item()` is the supported scalar accessor.
    print('train epoch {} loss {}'.format(epoch + 1, loss.item()))
def _initialize(self, interactions):
    """Build the network, optimizer and loss from the stored configuration.

    The latent representation is chosen by ``self._representation``;
    optional feature towers are created only for feature groups that the
    interactions actually carry.
    """
    self._num_users = interactions.num_users
    self._num_items = interactions.num_items

    if self._representation == 'mixture':
        latent_net = MixtureNet(self._num_users, self._num_items,
                                self._embedding_dim,
                                num_components=self._num_components)
    elif self._representation == 'nonlinear_mixture':
        latent_net = NonlinearMixtureNet(self._num_users, self._num_items,
                                         self._embedding_dim)
    elif self._representation == 'embedding_mixture':
        latent_net = EmbeddingMixtureNet(self._num_users, self._num_items,
                                         self._embedding_dim)
    else:
        latent_net = BilinearNet(self._num_users, self._num_items,
                                 self._embedding_dim, sparse=self._sparse)

    def build_feature_net(n_features):
        # One embedding tower per feature group; None when the group is empty.
        return (FeatureNet(n_features, self._embedding_dim)
                if n_features else None)

    user_net = build_feature_net(interactions.num_user_features())
    context_net = build_feature_net(interactions.num_context_features())
    item_net = build_feature_net(interactions.num_item_features())

    self._net = gpu(
        HybridContainer(latent_net, user_net, context_net, item_net),
        self._use_cuda)

    if self._optimizer_func is None:
        self._optimizer = optim.Adam(self._net.parameters(),
                                     weight_decay=self._l2,
                                     lr=self._learning_rate)
    else:
        self._optimizer = self._optimizer_func(self._net.parameters())

    # Loss dispatch; anything unrecognized falls back to adaptive hinge.
    self._loss_func = {
        'pointwise': pointwise_loss,
        'bpr': bpr_loss,
        'hinge': hinge_loss,
    }.get(self._loss, adaptive_hinge_loss)