def init_weights(self, pretrained=''):
    if os.path.isfile(pretrained):
        logger.info('=> init deconv weights from normal distribution')
        for name, m in self.deconv_layers.named_modules():
            if isinstance(m, nn.ConvTranspose2d):
                logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
                logger.info('=> init {}.bias as 0'.format(name))
                nn.init.normal_(m.weight, std=0.001)
                if self.deconv_with_bias:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                logger.info('=> init {}.weight as 1'.format(name))
                logger.info('=> init {}.bias as 0'.format(name))
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        logger.info('=> init final conv weights from normal distribution')
        # Use named_modules() here as well so the logged name refers to the
        # current final-layer conv instead of the leftover deconv layer name.
        for name, m in self.final_layer.named_modules():
            if isinstance(m, nn.Conv2d):
                # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
                logger.info('=> init {}.bias as 0'.format(name))
                nn.init.normal_(m.weight, std=0.001)
                nn.init.constant_(m.bias, 0)
        pretrained_state_dict = torch.load(pretrained)
        logger.info('=> loading pretrained model {}'.format(pretrained))
        self.load_state_dict(pretrained_state_dict, strict=False)
    else:
        logger.error('=> imagenet pretrained model does not exist')
        logger.error('=> please download it first')
        raise ValueError('imagenet pretrained model does not exist')
def test_by_seq(self, model, test_loader: DataLoader):
    model.eval()
    corrects = []
    sequence_labels: Dict[int, int] = {}
    sequence_results: Dict[int, List[int]] = {}
    label_count: Dict[int, int] = {}
    for i, data in enumerate(test_loader):
        x = Variable(data["x"]).to(device)
        y = data["y"].numpy()[0]
        seq = data["seq"].numpy()[0]
        outputs = model(x).data.cpu().numpy()[0]
        predictions = np.argmax(outputs)
        if seq not in sequence_results:
            sequence_results[seq] = []
            sequence_labels[seq] = y
            if y not in label_count:
                label_count[y] = 0
            label_count[y] += 1
        sequence_results[seq].append(predictions)
    # Majority vote over all window-level predictions of each sequence
    corrects_per_label: Dict[int, List[int]] = {}
    for sequence_id, predictions in sequence_results.items():
        prediction = max(set(predictions), key=predictions.count)
        label = sequence_labels[sequence_id]
        correct = prediction == label
        if label not in corrects_per_label:
            corrects_per_label[label] = []
        corrects_per_label[label].append(correct)
        corrects.append(int(correct))
    accuracy = float(sum(corrects)) / float(len(sequence_labels))
    logger.error("Test set accuracy: {} [Num Test Sequences: {}]".format(
        accuracy, len(sequence_labels)))
    # for label, corrects in corrects_per_label.items():
    #     accuracy = sum(corrects) / label_count[label]
    #     logger.error("Label accuracy: {} [Label: {}, Num_Tests: {}]".format(accuracy, label, label_count[label]))
    return accuracy
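# Hedged sketch (not part of the original file): illustrates the per-sequence
# majority vote used in test_by_seq() above. max(set(preds), key=preds.count)
# picks the most frequent window-level prediction; ties are broken arbitrarily.
# The window predictions and label below are made-up values for illustration.
def _majority_vote_example():
    window_predictions = [2, 2, 1, 2, 0]   # hypothetical per-window argmax results
    sequence_label = 2                     # hypothetical ground-truth label
    voted = max(set(window_predictions), key=window_predictions.count)
    return int(voted == sequence_label)    # 1 -> the sequence counts as correct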
def test(self, test_loader: DataLoader, weights_path: str, model):
    logger.error("Test model: {}".format(weights_path))
    state_dict = torch.load(weights_path)
    model.load_state_dict(state_dict)
    model.to(device)
    ehpi_results = self.test_ehpis(model, test_loader)
    sequence_results = self.test_sequences(model, test_loader)
    return ehpi_results, sequence_results
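# Hedged usage sketch (assumption, not from the original file): how the test()
# entry point above might be called. The dataset class, dataset path and
# weights path are placeholders for whatever the surrounding project provides.
# test_dataset = EhpiDataset("data/test/")                      # hypothetical dataset
# test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
# ehpi_results, sequence_results = tester.test(test_loader, "weights/model.pth", model)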
def test(self, model, test_loader: DataLoader):
    model.eval()
    corrects = []
    for i, data in enumerate(test_loader):
        x = Variable(data[0]).to(device)
        y = Variable(torch.tensor(data[1], dtype=torch.long)).to(device)
        outputs = model(x).data.cpu().numpy()[0]
        predictions = np.argmax(outputs)
        correct = predictions == y
        corrects.append(int(correct))
    # Assumes a batch size of 1, so len(test_loader) equals the number of test samples
    accuracy = float(sum(corrects)) / float(len(test_loader))
    logger.error("Test set accuracy: {}".format(accuracy))
    return accuracy
def __getitem__(self, index):
    sample = {
        "x": self.x[index].copy(),
        "y": self.y_labels[index],
        "seq": self.y_sequence_ids[index]
    }
    if self.transform:
        try:
            sample = self.transform(sample)
        except Exception as err:
            logger.error(
                "Error transform. Dataset: {}, index: {}, x_min_max: {}/{}".format(
                    self.dataset_path, index, self.x[index].min(), self.x[index].max()))
            logger.error(err)
            raise err
    return sample
def __getitem__(self, index):
    x = self.x[index].copy()
    sample = {"x": x, "y": self.y[index][0], "seq": self.y[index][1]}
    if self.transform:
        try:
            sample = self.transform(sample)
            x = sample["x"]
            # Reorder from (C, H, W) to (H, W, C), keep only the first two
            # channels and flatten them into the last axis: (H, W, C) -> (H, W * (C - 1))
            x = np.transpose(x, (1, 2, 0))
            x = np.reshape(x[:, :, 0:2],
                           (x.shape[0], x.shape[1] * (x.shape[2] - 1)))
            sample["x"] = x
        except Exception as err:
            logger.error(
                "Error transform. Dataset: {}, index: {}, x_min_max: {}/{}".format(
                    self.dataset_path, index, self.x[index].min(), self.x[index].max()))
            logger.error(err)
            raise err
    return sample
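# Hedged sketch (not part of the original file): shows what the transpose/reshape
# in __getitem__ above does to a single sample. The shape (3, 32, 15) — channels,
# frames, joints — is an assumption for illustration; only the first two channels
# are kept and flattened into the joint axis.
def _reshape_example():
    import numpy as np
    ehpi = np.zeros((3, 32, 15), dtype=np.float32)   # (C, H, W): channels, frames, joints (assumed)
    x = np.transpose(ehpi, (1, 2, 0))                # -> (32, 15, 3)
    x = np.reshape(x[:, :, 0:2], (x.shape[0], x.shape[1] * (x.shape[2] - 1)))
    assert x.shape == (32, 30)                       # frames x (joints * 2 channels)
    return x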
def test_ehpis(self, model, test_loader: DataLoader) -> Tuple[List[int], List[int]]:
    """
    Tests every single test set entry, e.g. every 32-frame EHPI.
    """
    print("Test all...")
    model.eval()
    all_ys = []
    all_predictions = []
    corrects = []
    for i, data in enumerate(test_loader):
        x = Variable(data["x"]).to(device)
        y = data["y"].numpy()[0]
        outputs = model(x).data.cpu().numpy()[0]
        predictions = np.argmax(outputs)
        all_ys.append(y)
        all_predictions.append(predictions)
        correct = predictions == y
        corrects.append(int(correct))
    # Assumes a batch size of 1, so len(test_loader) equals the number of test samples
    accuracy = float(sum(corrects)) / float(len(test_loader))
    logger.error("Test set accuracy: {}".format(accuracy))
    # Return a (labels, predictions) pair, matching the declared return type
    return all_ys, list(all_predictions)
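# Hedged sketch (not part of the original file): one way the (labels, predictions)
# pair returned by test_ehpis() could be turned into a per-class accuracy.
# The helper name is hypothetical and not part of the original project.
def _per_class_accuracy_example(all_ys, all_predictions):
    from collections import defaultdict
    hits, totals = defaultdict(int), defaultdict(int)
    for y, pred in zip(all_ys, all_predictions):
        totals[y] += 1
        hits[y] += int(pred == y)
    return {label: hits[label] / totals[label] for label in totals}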