def train():
    for epoch in range(epochs):
        running_loss = 0
        for ind in tqdm.tqdm(range(steps_per_epoch)):
            batch_input_path_list = train_input_path_list[batch_size * ind:batch_size * (ind + 1)]
            batch_label_path_list = train_label_path_list[batch_size * ind:batch_size * (ind + 1)]
            # batch_input = tensorize_image(batch_input_path_list, input_shape, cuda, epoch % 2 == 0 if augmentation else False)
            batch_input = tensorize_image(batch_input_path_list, input_shape, cuda, augmentation)
            batch_label = tensorize_mask(batch_label_path_list, input_shape, n_classes, cuda)

            optimizer.zero_grad()           # clear gradients left over from the previous step
            outputs = model(batch_input)    # forward pass
            loss = criterion(outputs, batch_label)
            loss.backward()                 # backpropagate
            optimizer.step()                # update the weights
            running_loss += loss.item()

            if ind == steps_per_epoch - 1:
                run_loss_list.append(running_loss)
                str1 = 'training loss on epoch {}: {}'.format(epoch, running_loss)

                # Validation: forward passes only, no backward pass or optimizer step
                val_loss = 0
                with torch.no_grad():
                    for valid_input_path, valid_label_path in zip(valid_input_path_list, valid_label_path_list):
                        batch_input = tensorize_image([valid_input_path], input_shape, cuda)
                        batch_label = tensorize_mask([valid_label_path], input_shape, n_classes, cuda)
                        outputs = model(batch_input)
                        loss = criterion(outputs, batch_label)
                        val_loss += loss.item()
                val_loss_list.append(val_loss)
                str2 = 'validation loss on epoch {}: {}'.format(epoch, val_loss)

                print(str1)
                print(str2)

    norm_run_loss_list = norm(run_loss_list)
    norm_val_loss_list = norm(val_loss_list)
    graph_name = predict_save_file_name
    draw_loss_graph(epochs, norm_run_loss_list, norm_val_loss_list, graph_name)
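# norm() is a project helper whose definition is not shown in this section.
# A minimal sketch of what it plausibly does (assumption: simple min-max
# scaling of the per-epoch losses to [0, 1] before plotting):
def norm(values):
    lo, hi = min(values), max(values)
    if hi == lo:
        return [0.0 for _ in values]
    return [(v - lo) / (hi - lo) for v in values]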
def test(model, images):
    predict_mask_list = list()
    for image in tqdm.tqdm(images):
        batch_input = tensorize_image([image], input_shape, cuda)
        output = model(batch_input)
        label = output > 0.5                               # threshold the model output
        decoded_list = decode_and_convert_image(label, n_class=2)
        mask = decoded_list[0]
        predict_mask_list.append(mask)
    write_mask_on_image2(predict_mask_list, images, input_shape, predict_save_file_name)
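# decode_and_convert_image() is another project helper not shown here. A rough
# sketch under the assumption that it collapses the thresholded one-hot output
# of shape (B, n_class, H, W) into a list of (H, W) masks (illustration only,
# not the project's actual implementation; n_class is kept for signature
# compatibility with the call above):
def decode_and_convert_image(batch_output, n_class):
    masks = torch.argmax(batch_output.int(), dim=1).cpu().numpy()
    return [masks[i] for i in range(masks.shape[0])]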
def test(_model: UNet):
    if test_predict:
        torch.cuda.empty_cache()
        predict_mask_list = []
        for test_input_path, test_label_path in tqdm.tqdm(
                zip(test_input_path_list, test_label_path_list)):
            batch_input = tensorize_image([test_input_path], input_shape, cuda)
            # batch_label = tensorize_mask([test_label_path], input_shape, n_classes, cuda)
            outputs = _model(batch_input)
            label = outputs > 0.5
            decoded_list = decode_and_convert_image(label, n_class=2)
            mask = decoded_list[0]
            predict_mask_list.append(mask)
        write_mask_on_image2(predict_mask_list, test_input_path_list, input_shape, predict_save_file_name)
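# Both test helpers above only run forward passes, so wrapping the loop in
# torch.no_grad() and switching the model to eval mode saves memory and keeps
# layers such as dropout/batch-norm in inference behaviour. A minimal sketch
# (run_inference is a hypothetical wrapper, not part of the project code):
def run_inference(_model, image_paths):
    _model.eval()
    with torch.no_grad():
        for path in image_paths:
            batch_input = tensorize_image([path], input_shape, cuda)
            yield _model(batch_input) > 0.5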
def predict(test_input_path_list):
    for i in tqdm.tqdm(range(len(test_input_path_list))):
        batch_test = test_input_path_list[i:i + 1]
        test_input = tensorize_image(batch_test, input_shape, cuda)
        outs = model(test_input)
        out = torch.argmax(outs, axis=1)            # class index per pixel
        out_cpu = out.cpu()
        outputs_list = out_cpu.detach().numpy()
        mask = np.squeeze(outputs_list, axis=0)     # (H, W) mask for the single image

        img = cv2.imread(batch_test[0])
        mg = cv2.resize(img, (224, 224))
        cpy_img = mg.copy()
        mg[mask == 1, :] = (255, 0, 125)            # paint the predicted pixels
        opac_image = (mg / 2 + cpy_img / 2).astype(np.uint8)   # 50% opacity overlay

        predict_name = batch_test[0]
        predict_path = predict_name.replace('image', 'predict')
        cv2.imwrite(predict_path, opac_image)
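# The inline blend (mg / 2 + cpy_img / 2) mixes the painted frame and the
# original frame at 50% opacity. The same overlay can also be written with
# cv2.addWeighted; overlay_mask below is an illustrative helper, not part of
# the project code:
def overlay_mask(frame, mask, color=(255, 0, 125), alpha=0.5):
    painted = frame.copy()
    painted[mask == 1] = color
    return cv2.addWeighted(painted, alpha, frame, 1 - alpha, 0)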
# IF CUDA IS USED, MOVE THE MODEL TO THE GPU
if cuda:
    model = model.cuda()

# TRAINING THE NEURAL NETWORK
for epoch in range(epochs):
    running_loss = 0
    for ind in range(steps_per_epoch):
        batch_input_path_list = train_input_path_list[batch_size * ind:batch_size * (ind + 1)]
        batch_label_path_list = train_label_path_list[batch_size * ind:batch_size * (ind + 1)]
        batch_input = tensorize_image(batch_input_path_list, input_shape, cuda)
        batch_label = tensorize_mask(batch_label_path_list, input_shape, n_classes, cuda)

        optimizer.zero_grad()
        outputs = model(batch_input)
        loss = criterion(outputs, batch_label)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        print(ind)

        if ind == steps_per_epoch - 1:
            print('training loss on epoch {}: {}'.format(epoch, running_loss))
            val_loss = 0
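            # The snippet above ends right after val_loss = 0. Based on the
            # validation loop in train(), the continuation most likely looks
            # like the sketch below (assumption; no backward pass is needed
            # during validation):
            with torch.no_grad():
                for valid_input_path, valid_label_path in zip(valid_input_path_list, valid_label_path_list):
                    batch_input = tensorize_image([valid_input_path], input_shape, cuda)
                    batch_label = tensorize_mask([valid_label_path], input_shape, n_classes, cuda)
                    outputs = model(batch_input)
                    val_loss += criterion(outputs, batch_label).item()
            print('validation loss on epoch {}: {}'.format(epoch, val_loss))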
zipped_list = list(unzipped_object)
train_input_path_list = list(zipped_list[0])
train_label_path_list = list(zipped_list[1])

for ind in range(steps_per_epoch):
    # On the first pass train_input_path_list[0:4] takes the first 4 elements,
    # on the second pass train_input_path_list[4:8] takes the next 4, and so on:
    # the slice advances by batch_size elements on every iteration.
    batch_input_path_list = train_input_path_list[batch_size * ind:batch_size * (ind + 1)]
    batch_label_path_list = train_label_path_list[batch_size * ind:batch_size * (ind + 1)]

    # Turn the image and label paths into tensors ready to be fed to the model,
    # using the helpers prepared in the preprocess module.
    batch_input = tensorize_image(batch_input_path_list, input_shape, cuda)
    batch_label = tensorize_mask(batch_label_path_list, input_shape, n_classes, cuda)

    # Reset the gradients; otherwise they accumulate across iterations,
    # so they must be zeroed manually after each weight update.
    optimizer.zero_grad()

    # Forward pass: feed batch_input through the model and keep the output.
    outputs = model(batch_input)
    loss = criterion(outputs, batch_label)