def data_build(self):
    # Convert the raw features to a tensor, accepting either a DataFrame or a Tensor.
    if isinstance(self.x_raw, pd.DataFrame):
        self.x_tensor = utils.get_tensor(self.x_raw)
    elif isinstance(self.x_raw, torch.Tensor):
        self.x_tensor = self.x_raw
    else:
        self.logger.dbg('type x_raw', type(self.x_raw))
    self.x = Variable(self.x_tensor, requires_grad=False)
    self.D_in = len(self.x_tensor[0])
    # Targets are cast to long for classification losses and squeezed to 1-D.
    self.y_tensor = utils.get_tensor(self.y_raw, 'long')
    self.y = Variable(self.y_tensor, requires_grad=False).squeeze(1)
    self.D_out = len(self.y_tensor[0])
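# The snippets in this section rely on a project helper, utils.get_tensor, that is not
# shown here. Below is a minimal sketch of what such a helper might look like, assuming
# it maps a pandas DataFrame (or any numpy-convertible object) to a torch tensor of the
# requested dtype ('float' by default, 'long' for class labels); the real implementation
# may differ.
import numpy as np
import pandas as pd
import torch


def get_tensor_sketch(data, dtype='float'):
    # Go through numpy so DataFrames and array-likes are handled uniformly.
    array = data.values if isinstance(data, pd.DataFrame) else np.asarray(data)
    tensor = torch.from_numpy(array)
    # Cast to the dtype the callers expect: long for targets, float for features.
    return tensor.long() if dtype == 'long' else tensor.float()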
def processImage(infile, args):
    n_classes = 12
    model = LinkNet(n_classes)
    model.load_state_dict(torch.load(args.model_path))
    if torch.cuda.is_available():
        model = model.cuda(0)
    model.eval()
    gif = cv2.VideoCapture(infile)
    cv2.namedWindow('camvid')
    while gif.isOpened():
        ret, frame = gif.read()
        if not ret:
            # Stop once the stream runs out of frames.
            break
        frame = cv2.resize(frame, (768, 576))
        images = get_tensor(frame)
        if torch.cuda.is_available():
            images = Variable(images.cuda(0))
        else:
            images = Variable(images)
        outputs = model(images)
        # Per-pixel argmax over the class dimension, mapped back to the image grid.
        pred = outputs.data.max(1)[1].cpu().numpy().reshape(576, 768)
        pred = decode_segmap(pred)
        # Show the input frame and the colour-coded prediction side by side.
        vis = np.zeros((576, 1536, 3), np.uint8)
        vis[:576, :768, :3] = frame
        vis[:576, 768:1536, :3] = pred
        cv2.imshow('camvid', vis)
        cv2.waitKey(10)
    gif.release()
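# A hypothetical way to drive processImage from the command line. The argument names
# (the positional input path and --model_path) are assumptions based on how the function
# reads args.model_path; adapt them to the actual script's interface.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run LinkNet segmentation on a video file')
    parser.add_argument('infile', help='Path to the input video (e.g. a CamVid clip)')
    parser.add_argument('--model_path', required=True, help='Path to the trained LinkNet weights (.pth)')
    args = parser.parse_args()
    processImage(args.infile, args)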
def _pred(self, x_df, y_df=None, data='confusion_matrix', format='tensor'):
    assert isinstance(x_df, pd.DataFrame)
    if y_df is not None:
        assert isinstance(y_df, pd.DataFrame)
    x_tensor = utils.get_tensor(x_df)
    y_tensor = self.df_to_tensor(y_df)
    y_pred_prob = self.model(Variable(x_tensor))
    # y_pred = x_tensor.mm(w1).clamp(min=0).mm(w2)
    if data == 'loss':
        loss = self.loss_fn(y_pred_prob, Variable(y_tensor))
        result = loss.data[0]
        return result
    elif data == 'pred':
        data_tensor = y_pred_prob.data
    else:  # data == 'confusion_matrix'
        y_pred, total_type_num = utils.max_ix(y_pred_prob.data)
        # total_type_num = len(self.ix_to_label)
        data_tensor = utils.confusion_matrix(y_pred, y_tensor, total_type_num)
    if format == 'df':
        result = pd.DataFrame(data_tensor.numpy())
    elif format == 'np':
        result = data_tensor.numpy()
    else:  # format == 'tensor'
        result = data_tensor
    return result
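# utils.max_ix and utils.confusion_matrix are project helpers that are not shown here.
# A minimal sketch, assuming max_ix returns the argmax class per row plus the number of
# classes, and confusion_matrix returns a (num_classes x num_classes) count tensor indexed
# as [true_label, predicted_label]; the actual helpers may behave differently.
import torch


def max_ix_sketch(prob_tensor):
    # Argmax over the class dimension; the column count gives the number of classes.
    return prob_tensor.max(1)[1], prob_tensor.size(1)


def confusion_matrix_sketch(y_pred, y_true, num_classes):
    matrix = torch.zeros(num_classes, num_classes)
    for true_label, pred_label in zip(y_true.tolist(), y_pred.tolist()):
        matrix[int(true_label), int(pred_label)] += 1
    return matrix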
def model_build(self):
    if isinstance(self.x_raw, pd.DataFrame):
        self.x_tensor = utils.get_tensor(self.x_raw)
        self.logger.dbg('type xtensor', type(self.x_tensor))
    elif isinstance(self.x_raw, torch.Tensor):
        self.x_tensor = self.x_raw
    self.x = Variable(self.x_tensor, requires_grad=False)
    self.D_in = len(self.x_tensor[0])
    self.y_tensor = utils.get_tensor(self.y_raw, 'long')
    self.y = Variable(self.y_tensor, requires_grad=False).squeeze(1)
    self.D_out = len(self.y_tensor[0])
    # The distinct target values determine the number of output classes.
    self.T_out, _ = np.unique(self.y_tensor.numpy(), return_inverse=True)
    self.model = OneTargetClassifier(self.D_in, len(self.T_out))
    self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr)
    self.loss_fn = torch.nn.NLLLoss()
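# OneTargetClassifier is defined elsewhere in the project. Below is a minimal sketch of a
# classifier that would be consistent with the NLLLoss used above (i.e. one that emits
# log-probabilities); the real class may use a different architecture.
import torch
import torch.nn as nn


class OneTargetClassifierSketch(nn.Module):
    def __init__(self, d_in, n_classes):
        super(OneTargetClassifierSketch, self).__init__()
        self.linear = nn.Linear(d_in, n_classes)

    def forward(self, x):
        # NLLLoss expects log-probabilities, hence the log-softmax output.
        return nn.functional.log_softmax(self.linear(x), dim=1)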
def model_build(self):
    if isinstance(self.x_raw, pd.DataFrame):
        self.x_tensor = utils.get_tensor(self.x_raw)
        self.logger.dbg('type xtensor', type(self.x_tensor))
    elif isinstance(self.x_raw, torch.Tensor):
        self.x_tensor = self.x_raw
    self.x = Variable(self.x_tensor, requires_grad=False)
    self.D_in = len(self.x_tensor[0])
    self.y_tensor = utils.get_tensor(self.y_raw)
    self.y = Variable(self.y_tensor, requires_grad=False)
    self.D_out = len(self.y_tensor[0])
    # Single hidden-layer regression network trained with summed squared error.
    self.model = torch.nn.Sequential(
        torch.nn.Linear(self.D_in, self.H),
        torch.nn.ReLU(),
        torch.nn.Linear(self.H, self.D_out),
    )
    self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
    self.loss_fn = torch.nn.MSELoss(size_average=False)
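# A hypothetical training step for the model built above, assuming a train() method that
# iterates for a fixed number of epochs over the full batch held in self.x / self.y; the
# project's actual training loop is not shown and may differ.
def train_sketch(self, epochs=100):
    for epoch in range(epochs):
        y_pred = self.model(self.x)
        loss = self.loss_fn(y_pred, self.y)
        # Standard backward pass: clear gradients, backpropagate, update weights.
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()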
def df_to_tensor(self, df):
    return utils.get_tensor(df, 'long').squeeze(1)
)  # Block 4 weights

# Show learnt Conv2D kernels
# utils.plot_kernels(weights_l1, 4, 2, 'Encoder Block 1 Kernels')
# utils.plot_kernels(weights_l2, 4, 4, 'Encoder Block 2 Kernels')
# utils.plot_kernels(weights_l2, 6, 5, 'Encoder Block 3 Kernels')
# utils.plot_kernels(weights_l2, 8, 8, 'Encoder Block 4 Kernels')

# %% View the reconstruction performance with a random test sample
# Select one sample at random
idx = test_part[np.random.randint(len(test_part))]

# Extract data from the sample and transform it into tensors
waterfalls = utils.get_tensor(dataset[idx]['Waterfalls'], float_cast=True, unsqueeze=2).cpu()
signals = utils.get_tensor(dataset[idx]['SignalWaterfalls'], float_cast=True, unsqueeze=2).cpu()

# Extract parameters from the sample
parameters = dataset[idx]['Parameters']

# Use the waterfalls sample to evaluate the model
net.eval()
with torch.no_grad():
    output = net.forward(waterfalls, depth=6)

# Plot the waterfalls, the output of the network and the waterfalls without the noise
utils.plot_reconstruction(waterfalls,
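# utils.plot_kernels is a project helper that is not included here. A minimal sketch,
# assuming it arranges a first-layer Conv2D weight tensor of shape
# (out_channels, in_channels, kH, kW) on a rows x cols grid of grayscale images with
# matplotlib; the real helper may differ.
import matplotlib.pyplot as plt


def plot_kernels_sketch(weights, rows, cols, title):
    fig, axes = plt.subplots(rows, cols, figsize=(cols, rows))
    fig.suptitle(title)
    for i, ax in enumerate(axes.flat):
        ax.axis('off')
        if i < weights.shape[0]:
            # Show the first input channel of each learnt kernel as a small image.
            ax.imshow(weights[i, 0].detach().cpu().numpy(), cmap='gray')
    plt.show()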
def df_to_tensor(self, df):
    return utils.get_tensor(df)