best_accuracy = 0.0
best_loss = 100
loss_cls = nn.CrossEntropyLoss()
loss_offset = nn.MSELoss()
loss_landmark = nn.MSELoss()

num_epochs = 16
for epoch in range(num_epochs):
    print('Epoch {}/{}'.format(epoch, num_epochs - 1))
    print('-' * 10)

    # Each epoch has a training and validation phase
    for phase in ['train', 'val']:
        if phase == 'train':
            model.train()  # set model to training mode
        else:
            model.eval()   # set model to evaluate mode

        running_loss, running_loss_cls, running_loss_offset, running_loss_landmark = 0.0, 0.0, 0.0, 0.0
        running_correct = 0.0
        running_gt = 0.0

        # iterate over data
        for i_batch, sample_batched in enumerate(tqdm(dataloaders[phase])):
            input_images, gt_label, gt_offset, landmark_offset = (
                sample_batched['input_img'], sample_batched['label'],
                sample_batched['bbox_target'], sample_batched['landmark'])
            input_images = input_images.to(device)
            gt_label = gt_label.to(device)
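            # --- hypothetical continuation of the batch loop (not from the source) ---
            # The head names (pred_label, pred_offset, pred_landmark), the optimizer,
            # and the loss weights are illustrative assumptions; only the three loss
            # functions defined above come from the original script. Real MTCNN training
            # also masks samples by label type before applying each loss.
            gt_offset = gt_offset.to(device).float()
            landmark_offset = landmark_offset.to(device).float()

            optimizer.zero_grad()
            with torch.set_grad_enabled(phase == 'train'):
                pred_label, pred_offset, pred_landmark = model(input_images)

                cls_loss = loss_cls(pred_label, gt_label)                       # face / non-face classification
                offset_loss = loss_offset(pred_offset, gt_offset)               # bounding-box regression
                landmark_loss = loss_landmark(pred_landmark, landmark_offset)   # facial-landmark regression

                # weighted multi-task objective; the weights differ between PNet, RNet and ONet
                total_loss = cls_loss + 0.5 * offset_loss + 0.5 * landmark_loss

                if phase == 'train':
                    total_loss.backward()
                    optimizer.step()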
    # swap in the rebuilt layers that consume the pruned feature map
    del model.conv4_2
    model.conv4_1 = new_conv4_1
    model.conv4_2 = new_conv4_2

    return model


if __name__ == '__main__':
    import sys
    sys.path.append("../Base_Model")
    from MTCNN_nets import PNet, RNet, ONet

    model = ONet()
    model.train()

    # prune filters 2 and 4 from the convolution at layer index 9
    layer_index = 9
    filter_index = (2, 4)
    model = prune_mtcnn(model, layer_index, *filter_index, use_cuda=False)
    print(model)
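    # --- optional sanity check (not in the source) ---
    # A dummy forward pass confirms the rebuilt conv4_1 / conv4_2 layers are
    # shape-consistent with the rest of ONet. The 48x48 input size is the
    # conventional ONet crop size, and the tuple output structure is an
    # assumption about this particular MTCNN_nets implementation.
    import torch
    dummy = torch.randn(2, 3, 48, 48)
    with torch.no_grad():
        outputs = model(dummy)  # raises a RuntimeError if pruning broke a shape
    if isinstance(outputs, (tuple, list)):
        print([tuple(o.shape) for o in outputs])
    else:
        print(tuple(outputs.shape))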