def get_parameters(self):
    """Interactively configure and return a model (reads choices from stdin).

    Prompts for a model, loss function, optimizer and learning rate, wires
    the chosen loss and a decayed-learning-rate optimizer onto the model,
    and returns it.

    Returns:
        The selected model with its ``loss`` and ``optimizer`` attributes set.

    Raises:
        ValueError: if a menu choice is not in the corresponding table, or
            the learning rate is not strictly between 0 and 1.
            (``ValueError`` is a subclass of ``Exception``, so existing
            callers catching ``Exception`` still work.)
    """
    models = {1: Unet()}
    losses = {
        1: Pixelwise_weighted_loss().compute_loss,
        2: Dice_loss().compute_loss,
    }
    optimizers = {1: tf.train.GradientDescentOptimizer}
    prompt_msg = {
        "model": "Enter the model to execute. Enter 1 for Unet",
        "loss": "Enter the loss function. Enter 1 for Weighted loss; 2 for Dice Loss",
        "optimizer": "Enter the optimizer. Enter 1 for SGD Optimizer",
        "learning_rate": "Enter the learning rate between 0 and 1",
        "weights_initializer": "Enter the weight initialization method. Enter 1 for Guassian-paper; 2 for xavier"
    }

    def _choose(key, table, error_msg):
        # Shared prompt/validate/lookup step; raises immediately on a bad
        # choice, matching the original behavior.
        opt = int(input(prompt_msg[key]))
        if opt not in table:
            raise ValueError(error_msg)
        return table[opt]

    model = _choose('model', models, "Please enter valid model name")
    model.loss = _choose('loss', losses, "Please enter the valid loss function")
    optimizer = _choose('optimizer', optimizers, "Please enter the valid optimizer")

    learning_rate_opt = float(input(prompt_msg['learning_rate']))
    if not 0.0 < learning_rate_opt < 1.0:
        raise ValueError("Please enter valid learning rate")
    # TF1-style schedule: multiply the rate by 0.8 every 10 steps.
    lr = tf.train.exponential_decay(
        learning_rate_opt, tf.Variable(0, trainable=False), 10, 0.8,
        staircase=True)
    model.optimizer = optimizer(lr)
    # NOTE(review): the "weights_initializer" prompt is defined but never
    # shown; the dead commented-out input() call was removed.
    return model
def build_model(size=256, net='U2netS'):
    """Build and return the segmentation network named by *net*.

    Args:
        size: square input resolution; input shape becomes (size, size, 3).
        net: one of 'U2netS', 'U2netM', 'U2net', 'Unet3', 'Unet',
            'UEfficientNetB4'.

    Any other name prints a message and terminates the process
    (``sys.exit``), exactly as before.
    """
    builders = {
        'U2netS': lambda: U2netS(input_shape=(size, size, 3), drop_rate=0.0),
        'U2netM': lambda: U2netM(input_shape=(size, size, 3)),
        'U2net': lambda: U2net(input_shape=(size, size, 3)),
        'Unet3': lambda: Unet3(input_shape=(size, size, 3)),
        'Unet': lambda: Unet(input_shape=(size, size, 3)),
        'UEfficientNetB4': lambda: UEfficientNetB4(
            input_shape=(size, size, 3),
            imagenet_weights='saved/efficientnet-b4_imagenet_1000_notop.h5'),
    }
    if net not in builders:
        print(' not support your net .')
        sys.exit()
    return builders[net]()
from tensorflow.keras.callbacks import TensorBoard

if __name__ == "__main__":
    # Hyperparameters
    log_dir = "logs/"
    inputs_size = [512, 512, 3]  # assumes (H, W, C) expected by Unet — TODO confirm
    lr = 1e-3
    Init_Epoch = 0
    Freeze_Epoch = 500
    Batch_size = 4
    num_classes = 2
    dice_loss = True  # presumably toggles dice loss further down — verify against the rest of the script

    # Model
    model = Unet(inputs_size, num_classes)
    model.summary()

    # Data: one training sample per line of the split file.
    with open(r"./Medical_Datasets/ImageSets/Segmentation/train.txt", "r") as f:
        train_lines = f.readlines()
    print('Train on {} samples, with batch size {}.'.format(
        len(train_lines), Batch_size))

    # Callbacks: checkpoint filename embeds epoch, loss and metric values.
    # NOTE(review): this ModelCheckpoint call is truncated in this chunk —
    # its remaining arguments continue beyond the visible source.
    checkpoint_period = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-f{_f_score:.3f}-IOU{_Iou_score:.3f}.h5',
        monitor='loss',
        save_weights_only=False,
def train(epo_num=50, show_vgg_params=False):
    """Train a binary-segmentation Unet with BCE loss and SGD.

    Relies on module-level ``train_dataloader`` and ``test_dataloader``.
    Prints per-epoch train/test losses with wall-clock timing, and saves
    the full model to ``checkpoints/model_u.pth`` after the final epoch.

    Args:
        epo_num: number of training epochs.
        show_vgg_params: unused here; kept for interface compatibility
            (it belonged to a previous VGG/FCN variant of this script).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = Unet(n_class=2)
    model = model.to(device)
    # Sigmoid is applied manually below, so plain BCELoss is used.
    criterion = nn.BCELoss().to(device)
    optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.7)

    all_train_iter_loss = []
    all_test_iter_loss = []

    # start timing
    prev_time = datetime.now()
    for epoch in range(1, epo_num + 1):
        train_loss = 0
        model.train()
        for index, (bag, bag_msk) in enumerate(train_dataloader):
            bag = bag.to(device)
            bag_msk = bag_msk.to(device)

            optimizer.zero_grad()
            output = model(bag)
            # output.shape is torch.Size([4, 2, 160, 160]) — per the
            # original author's note; depends on the dataloader, confirm.
            output = torch.sigmoid(output)
            loss = criterion(output, bag_msk)
            loss.backward()
            iter_loss = loss.item()
            all_train_iter_loss.append(iter_loss)
            train_loss += iter_loss
            optimizer.step()

        test_loss = 0
        model.eval()
        with torch.no_grad():
            for index, (bag, bag_msk) in enumerate(test_dataloader):
                bag = bag.to(device)
                bag_msk = bag_msk.to(device)
                # FIX: removed the pointless optimizer.zero_grad() here —
                # no gradients exist inside torch.no_grad().
                output = model(bag)
                output = torch.sigmoid(output)
                loss = criterion(output, bag_msk)
                iter_loss = loss.item()
                all_test_iter_loss.append(iter_loss)
                test_loss += iter_loss

        cur_time = datetime.now()
        # FIX: timedelta.seconds silently drops whole days; use
        # total_seconds() so long epochs are reported correctly.
        h, remainder = divmod(int((cur_time - prev_time).total_seconds()), 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        prev_time = cur_time

        print('epoch:{} train loss = {}, epoch test loss = {}, {}'.format(
            epoch, train_loss / len(train_dataloader),
            test_loss / len(test_dataloader), time_str))

        if epoch == epo_num:
            # Saves the whole pickled model (not just state_dict), as before.
            torch.save(model, 'checkpoints/model_u.pth')
            print('saving checkpoints/model_u.pth')  # FIX: "saveing" typo
input = torch.randn(*input_size).cuda() # warming for _ in range(10): out = model(input) num_iters = 10 ts = [] torch.cuda.synchronize() for _ in range(num_iters): start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) start.record() out = model(input) end.record() torch.cuda.synchronize() t = start.elapsed_time(end) ts.append(t) print('{:.1f} ms'.format(np.mean(ts))) if __name__ == '__main__': net = Unet(65) input_size = (1, 3, 1024, 2048) measure_inference(net, input_size)
# NOTE(review): this chunk opens mid-expression — the lines below are the
# trailing keyword arguments of a data-generator constructor (assigned to
# dataGen) whose opening call is not visible here.
    masks_data_dir=masks_data_dir,
    train_batch_size=16,
    val_batch_size=16,
    crop_size=(224, 224),  # matches the Unet input_size below
    nClasses=12,
    train_val_split_ratio=0.85)

# Steps per epoch derived from dataset sizes and batch sizes.
steps_per_epoch = dataGen.n_train_file // dataGen.train_batch_size
validation_steps = dataGen.n_val_file // dataGen.val_batch_size
train_generator = dataGen.train_generator_data()
validation_generator = dataGen.val_generator_data()

# Timestamped TensorBoard run directory so runs don't overwrite each other.
tensorboard = TensorBoard(
    log_dir='./logs/dataset1/Unet-dataset1-original-{}'.format(
        time.strftime('%Y-%m-%d_%H_%M_%S', time.localtime())))

model = Unet(input_size=(224, 224, 3), num_class=12)
model.summary()

# NOTE(review): decay=5**(-4) evaluates to 0.0016; if the conventional
# 5e-4 (0.0005) was intended, this is a bug — confirm with the author.
sgd = optimizers.SGD(lr=1E-2, decay=5**(-4), momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

# Early stopping and best-checkpoint saving, both watching val_loss.
best_weights_filepath = './models/Unet-best_weights.hdf5'
earlyStopping = EarlyStopping(monitor='val_loss', patience=15, verbose=1,
                              mode='auto')
# NOTE(review): this ModelCheckpoint call is truncated in this chunk —
# its remaining arguments continue beyond the visible source.
saveBestModel = ModelCheckpoint(best_weights_filepath, monitor='val_loss',
                                verbose=1, save_best_only=True,
from train import train
from validate import validate

# Input geometry: height, width, channel depth fed to the network.
H = 224
W = 224
D = 3

## Local Variables
epochs = 5
batch_size = 2
lr = 0.01
# Epochs at which to save snapshots: every 5th epoch starting from 0.
# FIX: was list(range(epochs)[::5]) — same result, but the slice-of-range
# form obscured the intent; step the range directly instead.
epoch_save = list(range(0, epochs, 5))
transform = [transforms.Resize((H, W)), transforms.ToTensor()]

## Model Loading
model = Unet((3, H, W))  # assumes (C, H, W) input spec — TODO confirm against Unet
net = model.cuda()
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9,
                      weight_decay=0.0005)
criterion = nn.BCEWithLogitsLoss().cuda()

## Creating Save Directories
# FIX: build paths with os.path.join instead of string concatenation, and
# collapse the five copy-pasted makedirs calls into one loop.
curr_dir = os.getcwd()
out_dir = os.path.join(os.getcwd(), 'results')
os.makedirs(out_dir, exist_ok=True)
for subdir in ('backup', 'checkpoint', 'snap', 'train', 'valid'):
    os.makedirs(os.path.join(out_dir, subdir), exist_ok=True)