import copy

import torch
import torch.nn as nn

# BUG FIX: `torch` and `ListDataset` were used below but never imported,
# so this script raised NameError at runtime.
from Data_Loading import ListDataset


def weights_init(m):
    """Xavier-initialize conv/linear weights and set biases to 0.1.

    Intended for use with `model.apply(weights_init)`; layers other than
    Conv2d/Linear are left untouched.
    """
    if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight.data)
        nn.init.constant_(m.bias, 0.1)


# Annotation lists for the 24x24 (RNet-sized) crops.
train_path = '../data_preprocessing/anno_store/imglist_anno_24.txt'
val_path = '../data_preprocessing/anno_store/imglist_anno_24_val.txt'
batch_size = 32

# Build each dataset exactly once and reuse it for both the loader and its
# length (the original re-parsed every annotation file a second time just
# to compute len()).
train_dataset = ListDataset(train_path)
val_dataset = ListDataset(val_path)

dataloaders = {
    'train': torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True),
    'val': torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True),
}
dataset_sizes = {
    'train': len(train_dataset),
    'val': len(val_dataset),
}

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# load the model and weights for initialization
import copy
import time

import torch
import torch.nn as nn
from tqdm import tqdm

from Data_Loading import ListDataset
from MTCNN_nets import RNet


def weights_init(m):
    """Xavier-initialize conv/linear weights and set biases to 0.1.

    Intended for use with `model.apply(weights_init)`; layers other than
    Conv2d/Linear are left untouched.
    """
    if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight.data)
        nn.init.constant_(m.bias, 0.1)


train_path = '../data_preprocessing/anno_store/imglist_anno_24.txt'
# BUG FIX: val_path previously pointed at the *training* annotation file
# (imglist_anno_24.txt), so "validation" metrics were computed on training
# data. Use the held-out _val list instead.
val_path = '../data_preprocessing/anno_store/imglist_anno_24_val.txt'
batch_size = 128

# Build each dataset once and reuse it for both the loader and its length
# (the original constructed every ListDataset twice).
train_dataset = ListDataset(train_path)
val_dataset = ListDataset(val_path)

dataloaders = {
    'train': torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True),
    'val': torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True),
}
dataset_sizes = {
    'train': len(train_dataset),
    'val': len(val_dataset),
}

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# load the model and weights for initialization
model = RNet(is_train=True).to(device)
model.apply(weights_init)
optimizer = torch.optim.Adam(model.parameters())

# Bookkeeping for the training loop: wall-clock start, best weights seen
# so far, and sentinel best metrics (loss sentinel is a large value so the
# first epoch always improves on it).
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_accuracy = 0.0
best_loss = 100
import copy
import time

import torch
import torch.nn as nn
# NOTE(review): `Dataset` is not referenced in this chunk; kept because it is
# a file-level import and later (unseen) code may rely on it.
from torch.utils.data import Dataset

from Data_Loading import ListDataset
from model.MTCNN_nets import PNet


def weights_init(m):
    """Xavier-initialize conv/linear weights and set biases to 0.1.

    Intended for use with `model.apply(weights_init)`; layers other than
    Conv2d/Linear are left untouched.
    """
    if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight.data)
        nn.init.constant_(m.bias, 0.1)


# Annotation lists for the 12x12 (PNet-sized) crops.
train_path = '../data_preprocessing/anno_store/imglist_anno_12_train.txt'
val_path = '../data_preprocessing/anno_store/imglist_anno_12_val.txt'
batch_size = 64

# Build each dataset exactly once and reuse it everywhere below (the
# original instantiated ListDataset four times — for the loaders, the
# sizes dict, and the log prints — re-parsing the annotation files each time).
train_dataset = ListDataset(train_path)
val_dataset = ListDataset(val_path)

dataloaders = {
    'train': torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True),
    'val': torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True),
}
dataset_sizes = {
    'train': len(train_dataset),
    'val': len(val_dataset),
}

print('training dataset loaded with length : {}'.format(len(train_dataset)))
print('validation dataset loaded with length : {}'.format(len(val_dataset)))

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)

# load the model and weights for initialization
model = PNet(is_train=True).to(device)
model.apply(weights_init)
print("Pnet loaded")

train_logging_file = 'Pnet_train_logging.txt'
optimizer = torch.optim.Adam(model.parameters())