Code Example #1
File: utils_data.py  Project: youngminpark2559/kaggle
def get_batch_vali_imgs(img_paths):
    all_images = []
    for one_protein_vali in img_paths:

        loaded_b_img_vali = utils_image.load_img(one_protein_vali[0])
        loaded_g_img_vali = utils_image.load_img(one_protein_vali[1])
        loaded_r_img_vali = utils_image.load_img(one_protein_vali[2])
        loaded_y_img_vali = utils_image.load_img(one_protein_vali[3])
        # print("loaded_b_img_vali",loaded_b_img_vali.shape)
        # (512, 512)

        loaded_b_img_vali = utils_image.resize_img(loaded_b_img_vali, 224, 224)
        loaded_g_img_vali = utils_image.resize_img(loaded_g_img_vali, 224, 224)
        loaded_r_img_vali = utils_image.resize_img(loaded_r_img_vali, 224, 224)
        loaded_y_img_vali = utils_image.resize_img(loaded_y_img_vali, 224, 224)
        # print("loaded_b_img_vali",loaded_b_img_vali.shape)
        # (224, 224)

        all_images.append([
            loaded_b_img_vali, loaded_g_img_vali, loaded_r_img_vali,
            loaded_y_img_vali
        ])

    all_images_vali_stacked = np.stack(all_images, axis=0)
    # print("all_images_vali_stacked",all_images_vali_stacked.shape)
    # (2, 4, 224, 224)

    return all_images_vali_stacked
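The snippets in this collection lean on a project-local utils_image module that is not shown. Below is a minimal sketch of the two helpers used above, assuming OpenCV-backed loading (grayscale by default) and (height, width) resizing; the real implementations in youngminpark2559/kaggle may differ:

# Hypothetical sketch of the assumed utils_image helpers
import cv2
import numpy as np

def load_img(path, gray=True):
    # Grayscale by default; otherwise load color and convert BGR -> RGB
    if gray:
        return cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

def resize_img(img, height, width):
    # Note: cv2.resize takes its size argument in (width, height) order
    return cv2.resize(img, (width, height), interpolation=cv2.INTER_LINEAR)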
Code Example #2
def generate_dense_dataset(dense_d_bs_pa, img_h, img_w):
    dense_ori_ph = []
    dense_ref_ph = []

    for one_img in range(len(dense_d_bs_pa)):
        # c o_dense_ori_img_p: one dense train image path
        o_dense_ori_img_p = dense_d_bs_pa[one_img][0].strip()
        # print("o_dense_ori_img_p",o_dense_ori_img_p)
        # c o_dense_r_gt_img_p: one dense gt image path
        o_dense_r_gt_img_p = dense_d_bs_pa[one_img][1].strip()
        # print("o_dense_r_gt_img_p",o_dense_r_gt_img_p)

        # --------------------------------------------------------------------------------
        # c ol_dense_tr_i: one loaded dense train image
        ol_dense_tr_i = utils_image.load_img(o_dense_ori_img_p) / 255.0
        # ol_dense_tr_i=(ol_dense_tr_i-np.mean(ol_dense_tr_i))/(np.std(ol_dense_tr_i)+0.000001)
        # ol_dense_tr_i=(ol_dense_tr_i-np.min(ol_dense_tr_i))/np.ptp(ol_dense_tr_i)
        # ol_dense_tr_i[ol_dense_tr_i==0.0]=0.0001
        # print("ol_dense_tr_i",ol_dense_tr_i)
        # plt.imshow(ol_dense_tr_i)
        # plt.show()

        # c ol_dense_R_gt_i: one loaded dense reflectance (R) gt image
        ol_dense_R_gt_i = utils_image.load_img(o_dense_r_gt_img_p) / 255.0

        # --------------------------------------------------------------------------------
        # c ol_r_dense_tr_i: one loaded resized dense train image
        ol_r_dense_tr_i = utils_image.resize_img(ol_dense_tr_i, img_h, img_w)
        ol_r_dense_R_gt_i = utils_image.resize_img(ol_dense_R_gt_i, img_h,
                                                   img_w)

        dense_ori_ph.append(ol_r_dense_tr_i)
        dense_ref_ph.append(ol_r_dense_R_gt_i)

    dense_ori_imgs = np.stack(dense_ori_ph, axis=0).transpose(0, 3, 1, 2)
    dense_ref_imgs = np.stack(dense_ref_ph, axis=0).transpose(0, 3, 1, 2)

    return dense_ori_imgs, dense_ref_imgs
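A quick check of what the final transpose does: np.stack produces NHWC batches, and .transpose(0, 3, 1, 2) reorders them to the NCHW layout PyTorch expects. A toy example:

import numpy as np

batch = np.zeros((2, 224, 224, 3))   # (N, H, W, C) after np.stack
nchw = batch.transpose(0, 3, 1, 2)   # (N, C, H, W), the PyTorch layout
assert nchw.shape == (2, 3, 224, 224)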
Code Example #3
def initialize_grad_cam(model, list_of_img_paths, args):

    # @ Get your trained network
    # print("model",model)

    # ================================================================================
    # @ Deep copy your trained network having FC
    model_with_fc = copy.deepcopy(model)

    # model = models.resnet50(pretrained=True)
    # del model.fc
    # print(model)

    # del model_with_fc.fc
    # del list(model_with_fc.layer4.children())[-1].cbam

    # ================================================================================
    # @ Delete last fully connected layer
    del model.fc
    # del list(model.layer4.children())[-1].cbam
    # print("model",list(model.layer4.children())[-1])
    # print("model",dir(model))
    # print("model",dir(model.layer4))
    # print("model",model)

    # ================================================================================
    grad_cam = GradCam(model, model_with_fc, target_layer_names=["layer1"])

    # ================================================================================
    for img_idx, img_p in enumerate(list_of_img_paths):
        loaded_img = utils_image.load_img(img_p) / 255.0
        # Use the project's resize_img helper, as in the other snippets
        loaded_img = utils_image.resize_img(loaded_img, 224, 224)
        loaded_img = loaded_img.astype("float32")

        input_tensor = preprocess_image(loaded_img)
        # <class 'torch.Tensor'>

        # target_index=None means "explain the top-scoring class"
        target_index = None

        mask = grad_cam(input_tensor, target_index)
        # scipy.misc.imsave('./mask.png',mask)

        show_cam_on_image(loaded_img, mask, img_idx)
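preprocess_image is not shown in this collection; a common Grad-CAM preprocessing helper looks like the sketch below (hypothetical here): ImageNet-normalize the HWC float image and wrap it as an NCHW tensor that requires gradients.

import numpy as np
import torch

def preprocess_image(img):
    # img: HWC float32 array in [0, 1]
    means = np.array([0.485, 0.456, 0.406], dtype="float32")
    stds = np.array([0.229, 0.224, 0.225], dtype="float32")
    img = (img - means) / stds                  # per-channel ImageNet normalize
    img = img.transpose(2, 0, 1)[np.newaxis]    # HWC -> NCHW
    tensor = torch.from_numpy(np.ascontiguousarray(img, dtype="float32"))
    tensor.requires_grad_(True)                 # Grad-CAM backpropagates to it
    return tensor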
Code Example #4
def load_skin_cancer_dense(bs_paths_dense,args):
  # print("bs_paths_dense",bs_paths_dense)
  # [['/mnt/1T-5e7/Code_projects/Bio_related/Skin_related/kaggle.com_kmader_skin-cancer-mnist-ham10000/kaggle.com_saketc_skin-lesion-analyser-sla/base_dir/train_dir/bcc/_36_6146895.jpg', 'bcc'], ['/mnt/1T-5e7/Code_projects/Bio_related/Skin_related/kaggle.com_kmader_skin-cancer-mnist-ham10000/kaggle.com_saketc_skin-lesion-analyser-sla/base_dir/train_dir/nv/ISIC_0024755.jpg', 'nv'], ['/mnt/1T-5e7/Code_projects/Bio_related/Skin_related/kaggle.com_kmader_skin-cancer-mnist-ham10000/kaggle.com_saketc_skin-lesion-analyser-sla/base_dir/train_dir/bcc/_4_7667536.jpg', 'bcc']]

  # ================================================================================
  lo_img_and_class_data=[]
  for one_img_and_class_path in bs_paths_dense:
    img_path=one_img_and_class_path[0]
    class_name=one_img_and_class_path[1]

    loaded_img=utils_image.load_img(img_path,gray=False)
    print("loaded_img",loaded_img.shape)
    # (450, 600, 3)

    # ================================================================================
    lo_img_and_class_data.append([loaded_img,class_name])
  print("lo_img_and_class_data",lo_img_and_class_data)
  afaf
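A hypothetical follow-up that splits the returned [image, class] pairs into model-ready arrays (assuming every loaded image shares one shape; the HAM10000 images printed above are (450, 600, 3)):

import numpy as np

pairs = load_skin_cancer_dense(bs_paths_dense, args)
imgs = np.stack([p[0] for p in pairs], axis=0)   # (N, 450, 600, 3)
labels = [p[1] for p in pairs]                   # e.g. ['bcc', 'nv', 'bcc']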
Code Example #5
def detect_img_less_than_256(txt_file_containing_paths_of_img):
    """
  Act
    * 
  
  Params
    * txt_file_containing_paths_of_img
    "/mnt/1T-5e7/image/whole_dataset/text_for_colab/real/temp/bigtime_trn.txt"
  
  Return
    * have_small_img
    If 0, you have no small image
    * small_lens
    [199,200] means you have images which has 199 or 200 lengths
  """
    p_l_full, num_img = utils_common.return_path_list_from_txt(
        txt_file_containing_paths_of_img)
    # print("num_img",num_img)
    # 8400

    # c col_i_szs: collection of image sizes
    col_i_szs = []
    # c col_i_pa_szs: collection of image paths and sizes
    col_i_pa_szs = []
    for one_path in p_l_full:
        one_path = one_path.replace("\n", "")
        one_lo_img = utils_image.load_img(one_path)

        col_i_szs.append(one_lo_img.shape[:2])
        col_i_pa_szs.append([one_path, one_lo_img.shape[:2]])

    col_i_szs = list(set(col_i_szs))

    col_i_szs_np = np.array(col_i_szs)

    # c have_small_img: number of side lengths smaller than 256 (0 means none)
    have_small_img = np.sum(col_i_szs_np < 256)
    # print("have_small_img",have_small_img)

    # c small_lens: the side lengths that are smaller than 256
    small_lens = col_i_szs_np[col_i_szs_np < 256]

    return have_small_img, small_lens
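Example usage, with the text file from the docstring:

have_small_img, small_lens = detect_img_less_than_256(
    "/mnt/1T-5e7/image/whole_dataset/text_for_colab/real/temp/bigtime_trn.txt")
if have_small_img:
    print("side lengths below 256:", small_lens)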
Code Example #6
def generate_cgmit_dataset(cgmit_d_bs_pa, img_h, img_w):
    cgmit_tr_3c_img = []
    cgmit_gt_R_3c_img = []
    cgmit_gt_S_1c_img = []
    cgmit_mask_3c_img = []

    for one_img in range(len(cgmit_d_bs_pa)):
        # c one_cgmit_tr_img_p: one cgmit train image path
        one_cgmit_tr_img_p = cgmit_d_bs_pa[one_img][0].strip()
        # print("one_cgmit_tr_img_p",one_cgmit_tr_img_p)

        # c one_cgmit_r_gt_p: one cgmit reflectance (R) gt image path
        one_cgmit_r_gt_p = cgmit_d_bs_pa[one_img][1].strip()
        # print("one_cgmit_r_gt_p",one_cgmit_r_gt_p)

        # --------------------------------------------------------------------------------
        ol_cgmit_tr_i = utils_image.load_img(one_cgmit_tr_img_p) / 255.0
        # ol_iiw_tr_i=np.where(ol_iiw_tr_i==0.0,0.0001,ol_dense_tr_i)
        # ol_iiw_tr_i[ol_iiw_tr_i==0.0]=0.0001

        ol_cgmit_r_gt_i = utils_image.load_img(one_cgmit_r_gt_p) / 255.0

        # --------------------------------------------------------------------------------
        ol_cgmit_tr_i = utils_image.resize_img(ol_cgmit_tr_i, img_h, img_w)

        ol_cgmit_r_gt_i = utils_image.resize_img(ol_cgmit_r_gt_i, img_h, img_w)

        # --------------------------------------------------------------------------------
        # c srgb_img: the original image (actually RGB rather than sRGB)
        srgb_img = ol_cgmit_tr_i

        gt_R = ol_cgmit_r_gt_i

        mask = np.ones((srgb_img.shape[0], srgb_img.shape[1]))
        # print("mask",mask.shape)
        # mask (341, 512)

        # c gt_R_gray: mean of R gt image
        gt_R_gray = np.mean(gt_R, 2)
        mask[gt_R_gray < 1e-6] = 0
        # mean of original image
        mask[np.mean(srgb_img, 2) < 1e-6] = 0

        # plt.imshow(mask,cmap="gray")
        # plt.show()
        mask = skimage.morphology.binary_erosion(mask, square(11))
        mask = np.expand_dims(mask, axis=2)
        mask = np.repeat(mask, 3, axis=2)
        gt_R[gt_R < 1e-6] = 1e-6

        rgb_img = srgb_img
        gt_S = rgb_img / gt_R
        # plt.imshow(gt_S[:,:,0],cmap='gray')
        # plt.show()

        # search_name=path[:-4]+".rgbe"
        # irridiance=self.stat_dict[search_name]

        # if irridiance<0.25:
        #     srgb_img=denoise_tv_chambolle(srgb_img,weight=0.05,multichannel=True)
        #     gt_S=denoise_tv_chambolle(gt_S,weight=0.1,multichannel=True)

        mask[gt_S > 10] = 0
        gt_S[gt_S > 20] = 20
        mask[gt_S < 1e-4] = 0
        gt_S[gt_S < 1e-4] = 1e-4

        if np.sum(mask) < 10:
            max_S = 1.0
        else:
            max_S = np.percentile(gt_S[mask > 0.5], 90)

        gt_S = gt_S / max_S

        gt_S = np.mean(gt_S, 2)
        gt_S = np.expand_dims(gt_S, axis=2)

        # gt_R stays 3-channel here
        # (enabling the next two lines would instead reduce it to 1 channel)
        # gt_R=np.mean(gt_R,2)
        # gt_R=np.expand_dims(gt_R,axis=2)

        tr_3c_img = srgb_img.squeeze()
        gt_R_3c_img = gt_R.squeeze()
        gt_S_1c_img = gt_S.squeeze()[:, :, np.newaxis]
        mask_3c_img = mask.squeeze()
        # print("tr_3c_img",tr_3c_img.shape)
        # print("gt_R_3c_img",gt_R_3c_img.shape)
        # print("gt_S_1c_img",gt_S_1c_img.shape)
        # print("mask_3c_img",mask_3c_img.shape)
        # tr_3c_img (1024, 1024, 3)
        # gt_R_3c_img (1024, 1024, 3)
        # gt_S_1c_img (1024, 1024, 1)
        # mask_3c_img (1024, 1024, 3)

        # --------------------------------------------------------------------------------
        cgmit_tr_3c_img.append(tr_3c_img)
        cgmit_gt_R_3c_img.append(gt_R_3c_img)
        cgmit_gt_S_1c_img.append(gt_S_1c_img)
        cgmit_mask_3c_img.append(mask_3c_img.astype("float16"))

    cgmit_tr_3c_imgs = np.stack(cgmit_tr_3c_img, axis=0).transpose(0, 3, 1, 2)
    cgmit_gt_R_3c_imgs = np.stack(cgmit_gt_R_3c_img,
                                  axis=0).transpose(0, 3, 1, 2)
    cgmit_gt_S_1c_imgs = np.stack(cgmit_gt_S_1c_img,
                                  axis=0).transpose(0, 3, 1, 2)
    cgmit_mask_3c_imgs = np.stack(cgmit_mask_3c_img,
                                  axis=0).transpose(0, 3, 1, 2)
    # print("cgmit_tr_3c_imgs",cgmit_tr_3c_imgs.shape)
    # print("cgmit_gt_R_3c_imgs",cgmit_gt_R_3c_imgs.shape)
    # print("cgmit_gt_S_1c_imgs",cgmit_gt_S_1c_imgs.shape)
    # print("cgmit_mask_3c_imgs",cgmit_mask_3c_imgs.shape)
    # cgmit_tr_3c_imgs (4, 3, 1024, 1024)
    # cgmit_gt_R_3c_imgs (4, 3, 1024, 1024)
    # cgmit_gt_S_1c_imgs (4, 1, 1024, 1024)
    # cgmit_mask_3c_imgs (4, 3, 1024, 1024)

    return cgmit_tr_3c_imgs, cgmit_gt_R_3c_imgs, cgmit_gt_S_1c_imgs, cgmit_mask_3c_imgs
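The gt_S computation above follows the intrinsic image model I = R * S (image = reflectance x shading), so shading is recovered pixelwise as I / R once R is clamped away from zero. A toy check:

import numpy as np

img = np.random.rand(4, 4, 3)                         # toy "original" image
gt_R = np.clip(np.random.rand(4, 4, 3), 1e-6, None)   # toy reflectance, clamped
gt_S = img / gt_R                                     # shading, as computed above
assert np.allclose(gt_S * gt_R, img)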
Code Example #7
def generate_sparse_dataset(iiw_d_bs_pa, img_h, img_w):
    iiw_tr_ph = []
    iiw_ori_ph = []
    iiw_json_ph = []
    applied_DAs = []
    iiw_img_after_DA_before_rs_ph = []
    for one_img in range(len(iiw_d_bs_pa)):
        # c one_iiw_tr_img_p: one iiw train image path
        one_iiw_tr_img_p = iiw_d_bs_pa[one_img][0].strip()
        # print("one_iiw_tr_img_p",one_iiw_tr_img_p)

        # c one_iiw_json_gt_p: one iiw json gt path
        one_iiw_json_gt_p = iiw_d_bs_pa[one_img][1].strip()
        # print("one_iiw_json_gt_p",one_iiw_json_gt_p)

        # --------------------------------------------------------------------------------
        ol_iiw_tr_i = utils_image.load_img(one_iiw_tr_img_p) / 255.0
        # ol_iiw_tr_i=np.where(ol_iiw_tr_i==0.0,0.0001,ol_dense_tr_i)
        # ol_iiw_tr_i[ol_iiw_tr_i==0.0]=0.0001

        # --------------------------------------------------------------------------------
        # c kind_of_DA: list of the available kinds of data augmentation
        kind_of_DA = ["no_DA", "ud", "lr", "p3", "p6", "p9", "n3", "n6", "n9"]
        # c chosen_DA: the randomly chosen kind of data augmentation
        chosen_DA = np.random.choice(kind_of_DA, 1, replace=False)[0]
        # print("chosen_DA",chosen_DA)
        # chosen_DA n9

        # Map each rotation code to its angle in degrees
        rot_angles = {"p3": 3, "p6": 6, "p9": 9, "n3": -3, "n6": -6, "n9": -9}
        if chosen_DA == "ud":
            iiw_img_after_DA = np.flipud(ol_iiw_tr_i)
        elif chosen_DA == "lr":
            iiw_img_after_DA = np.fliplr(ol_iiw_tr_i)
        elif chosen_DA in rot_angles:
            iiw_img_after_DA = scipy.ndimage.rotate(
                ol_iiw_tr_i, angle=rot_angles[chosen_DA], reshape=True,
                mode="reflect")
        else:  # "no_DA"
            iiw_img_after_DA = ol_iiw_tr_i

        # plt.imshow(iiw_img_after_DA)
        # plt.show()

        iiw_img_after_DA = cv2.normalize(iiw_img_after_DA,
                                         None,
                                         alpha=0,
                                         beta=1,
                                         norm_type=cv2.NORM_MINMAX,
                                         dtype=cv2.CV_32F)
        iiw_img_after_DA[iiw_img_after_DA < 1e-6] = 1e-6

        iiw_img_after_DA_before_rs = iiw_img_after_DA

        # --------------------------------------------------------------------------------
        iiw_img_after_DA_after_rs = utils_image.resize_img(
            iiw_img_after_DA, img_h, img_w)
        # plt.imshow(iiw_img_after_DA_after_rs)
        # plt.show()

        # --------------------------------------------------------------------------------
        # iiw_ori_ph: original size
        iiw_ori_ph.append(ol_iiw_tr_i)
        # iiw_tr_ph: resized size
        iiw_tr_ph.append(iiw_img_after_DA_after_rs)
        iiw_json_ph.append(one_iiw_json_gt_p)
        applied_DAs.append(chosen_DA)
        iiw_img_after_DA_before_rs_ph.append(iiw_img_after_DA_before_rs)

    iiw_tr_imgs = np.stack(iiw_tr_ph, axis=0).transpose(0, 3, 1, 2)

    return iiw_tr_imgs, iiw_ori_ph, iiw_json_ph, applied_DAs, iiw_img_after_DA_before_rs_ph
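The cv2.normalize call above, with NORM_MINMAX and alpha=0, beta=1, is a min-max rescale to [0, 1]; an equivalent plain-NumPy sketch (assuming a non-constant image, since a constant one would divide by zero):

import numpy as np

def minmax01(img):
    # Map img.min() -> 0 and img.max() -> 1, linearly in between
    img = img.astype("float32")
    return (img - img.min()) / (img.max() - img.min())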
def train(args):
  k_fold=3
  epoch=int(args.epoch)
  batch_size=int(args.batch_size)
  # print("epoch",epoch)
  # print("batch_size",batch_size)
  # 9
  # 2

  # ================================================================================
  text_file_instance=text_file_path_api_module.Path_Of_Text_Files(args)

  txt_of_train_data=text_file_instance.train_data
  # print("txt_of_train_data",txt_of_train_data)
  # /mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train_csv_path.txt

  # ================================================================================
  contents_of_txt,num_line=utils_common.return_path_list_from_txt(txt_of_train_data)
  # print("contents_of_txt",contents_of_txt)
  # ['/mnt/1T-5e7/mycodehtml/prac_data_science/kaggle/hivprogression/My_code/Data/training_data.csv']
  
  # ================================================================================
  train_data_df=pd.read_csv(contents_of_txt[0],encoding='utf8')

  train_data_df=train_data_df.dropna()
  # print("train_data_df",train_data_df.shape)
  # (920, 6)

  train_data_wo_id_df=train_data_df.iloc[:,1:]
  # print("train_data_wo_id_df",train_data_wo_id_df.shape)
  # (920, 5)

  # ================================================================================
  train_k,vali_k=utils_data.get_k_folds(train_data_wo_id_df)

  # ================================================================================
  # c loss_list: list which stores loss values for plotting the loss
  loss_list=[]
  f1_score_list=[]

  # ================================================================================
  # c model_api_instance: instance of model API
  model_api_instance=model_api_module.Model_API_class(args)
  # print("model_api_instance",model_api_instance)
  # <src.api_model.model_api_module.Model_API_class object at 0x7fb305557b00>

  # ================================================================================
  # # @ Test Grad CAM
  # imgs=["/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train_split/33/82d4d190d2fed1be255fc3bac36a37c860bb31c0.tif",
  #       "/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train_split/33/82a5300cd61628fb9bae332cdb7d5e7e37b1fb36.tif"]
  # grad_cam.initialize_grad_cam(model=model_api_instance.gen_net,list_of_img_paths=imgs,args=args)

  # ================================================================================
  if args.task_mode=="train": # If you're in train mode
    
    # ================================================================================
    # @ Configure learning rate scheduler

    # Update the learning rate 4 times over the entire run
    # For example, with 10 epochs, step_size=int(10/4)=2: epochs 0-1 / 2-3 / 4-5 / 6-7 / 8-9
    # 0-1 epochs: 0.001 -> 2-3 epochs: 0.0001 -> 4-5 epochs: 0.00001 -> 6-7 epochs: 0.000001 -> ...

    scheduler=StepLR(model_api_instance.optimizer,step_size=int(epoch/4),gamma=0.1)

    # ================================================================================
    for one_k in range(k_fold):
      single_train_k=train_k[one_k]
      single_vali_k=vali_k[one_k]
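      # NOTE: train_lbl_k / vali_lbl_k are not defined anywhere in this
      # snippet; they are assumed to be label folds produced by the same
      # k-fold split as train_k / vali_k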
      single_train_lbl_k=train_lbl_k[one_k]
      single_vali_lbl_k=vali_lbl_k[one_k]

      # ================================================================================
      # @ Validation dataset
      dataset_inst_vali=custom_ds.Custom_DS_vali(single_vali_k,single_vali_lbl_k,args=args)

      dataloader_vali=torch.utils.data.DataLoader(
          dataset=dataset_inst_vali,batch_size=batch_size,shuffle=False,num_workers=3)

      for one_ep in range(epoch): # @ Iterates all epochs
        # print("single_train_k",len(single_train_k))
        # 20714
        # print("single_vali_k",len(single_vali_k))
        # 10358
        # print("single_train_lbl_k",len(single_train_lbl_k))
        # 20714
        # print("single_vali_lbl_k",len(single_vali_lbl_k))
        # 10358

        # ================================================================================
        # c dataset_inst_trn: dataset instance of tumor
        dataset_inst_trn=custom_ds.Custom_DS(single_train_k,single_train_lbl_k,args=args)
        
        # Test iterator
        # iter_dataset_inst_trn=iter(dataset_inst_trn)
        # trn=next(iter_dataset_inst_trn)
        # print("trn",trn)

        # ================================================================================
        # c dataloader_trn: create dataloader
        dataloader_trn=torch.utils.data.DataLoader(
          dataset=dataset_inst_trn,batch_size=batch_size,shuffle=False,num_workers=3)
        
        # # c dataloader_trn_iter: iterator of dataloader
        # dataloader_trn_iter=iter(dataloader_trn)
        # # Test dataloader
        # pairs=next(dataloader_trn_iter)
        # # print("pairs",pairs)

        # ================================================================================
        # c num_imgs_trn: number of train images
        num_imgs_trn=len(dataset_inst_trn)
        # print("num_imgs_trn",num_imgs_trn)
        # 20714

        args.__setattr__("num_imgs_trn",num_imgs_trn)
        # print("args",args)
        
        # ================================================================================
        # print("Current batch size:",batch_size)
        # print("Possible batch size:",list(utils_common.divisorGenerator(num_imgs_trn)))
        # assert str(num_imgs_trn/batch_size).split(".")[-1]==str(0),"Check batch size, currently it's incorrect"

        # ================================================================================
        # @ If you don't use Augmentor
        if args.use_augmentor=="False":
          pass      

        else: # @ If you use Augmentor

          # @ Iterate all images in dataset during single epoch
          for idx,data in enumerate(dataloader_trn):
           
            bs_pa_tumor_d=utils_data.create_batch_pair_of_paths(data,args)
            # print("bs_pa_tumor_d",bs_pa_tumor_d)
            # [[('/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/292d9824-bba1-11e8-b2b9-ac1f6b6435d0_blue.png',
            #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/292d9824-bba1-11e8-b2b9-ac1f6b6435d0_green.png',
            #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/292d9824-bba1-11e8-b2b9-ac1f6b6435d0_red.png',
            #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/292d9824-bba1-11e8-b2b9-ac1f6b6435d0_yellow.png'),
            #   ('/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/7f1e4598-bbc0-11e8-b2bb-ac1f6b6435d0_blue.png',
            #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/7f1e4598-bbc0-11e8-b2bb-ac1f6b6435d0_green.png',
            #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/7f1e4598-bbc0-11e8-b2bb-ac1f6b6435d0_red.png',
            #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/7f1e4598-bbc0-11e8-b2bb-ac1f6b6435d0_yellow.png')],
            #  array(['3','23'],dtype='<U2')]

            # ================================================================================
            # @ Perform data augmentation

            sampled_trn_imgs,label_values=utils_data.use_augmetor_for_data(bs_pa_tumor_d,args)
            
            # print("sampled_trn_imgs",sampled_trn_imgs.shape)
            # (2, 4, 224, 224)
            
            # print("label_values",label_values)
            # [[4], [14]]

            # print("label_values",np.array(label_values).shape)
            # (2, 2)

            # ================================================================================
            oh_label_arr=utils_common.one_hot_label(batch_size,label_values)
            # print("oh_label_arr",oh_label_arr)
            # [[0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
            #  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]

            # ================================================================================
            trn_imgs_tcv=utils_pytorch.get_Variable(sampled_trn_imgs)
            # print("trn_imgs_tcv",trn_imgs_tcv.shape)
            # torch.Size([2, 4, 224, 224])

            # ================================================================================
            # @ Remove existing gradients
            model_api_instance.remove_existing_gradients_before_starting_new_training()
            
            # ================================================================================
            # @ c predicted_labels: pass input images and get predictions
            predicted_labels=model_api_instance.gen_net(trn_imgs_tcv)
            # print("predicted_labels",predicted_labels)
            # tensor([[-0.2858, -0.7700, -0.0600,  0.3553,  0.0367, -0.4130,  0.3102, -0.2443,
            #          -0.1775, -0.1839,  0.0499, -0.1489, -0.9805,  0.1817, -0.0504,  0.8930,
            #          -0.4017, -0.1899,  0.0937, -0.3465,  0.2830, -0.2755,  0.4233, -0.1301,
            #           1.1688,  0.2110,  0.1423, -0.3933],
            #         [-0.2858, -0.7700, -0.0600,  0.3553,  0.0367, -0.4130,  0.3102, -0.2443,
            #          -0.1775, -0.1839,  0.0499, -0.1489, -0.9805,  0.1817, -0.0504,  0.8930,
            #          -0.4017, -0.1899,  0.0937, -0.3465,  0.2830, -0.2755,  0.4233, -0.1301,
            #           1.1688,  0.2110,  0.1423, -0.3933]], device='cuda:0',grad_fn=<AddmmBackward>)

            label_tc=Variable(torch.tensor(oh_label_arr,device=predicted_labels.device).float())
            # print("label_tc",label_tc)
            # tensor([[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
            #         [0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]], 
            #         device='cuda:0',dtype=torch.float16)

            # ================================================================================
            # @ Calculate loss values

            loss_val=loss_functions_module.FocalLoss(predicted_labels,label_tc)
            # print("loss_val",loss_val)
            # tensor(6.5374, device='cuda:0', grad_fn=<MeanBackward1>)
                      
            # ================================================================================
            # @ Calculate gradient values through backpropagation
            loss_val.backward()

            # ================================================================================
            # @ Update parameters of the network based on gradients
            model_api_instance.optimizer.step()

            # ================================================================================
            # @ If you want to print loss
            if args.use_loss_display=="True":
              if idx%int(args.leapping_term_when_displaying_loss)==0:
                print("Epoch:",one_ep,", Batch:",idx)
                print("loss_from_one_batch",loss_val.item())
            
            loss_list.append(loss_val.item())

            # ================================================================================
            # @ Save model after every batch you configure 
            # by using args.leapping_term_when_saving_model_after_batch
            if idx%int(args.leapping_term_when_saving_model_after_batch)==0:
              num_batch="batch_"+str(idx)
              model_api_instance.save_model_after_epoch(num_batch)

            # ================================================================================
            # print("end of single batch")

          # ================================================================================
          # print("end of all batches")

        # ================================================================================
        # @ Save model after epoch
        num_epoch="epoch_"+str(one_ep)
        model_api_instance.save_model_after_epoch(num_epoch)

        # ================================================================================
        # @ Update learning rate

        scheduler.step()
        # print("scheduler.base_lrs",scheduler.base_lrs)

        # ================================================================================
        # print("End of single epoch")

      # ================================================================================
      # print("end of all epochs")
      
      # ================================================================================
      with torch.no_grad():
        n=28
        TP=torch.tensor(np.zeros(n)).float().cuda()
        FP=torch.tensor(np.zeros(n)).float().cuda()
        FN=torch.tensor(np.zeros(n)).float().cuda()

        for idx_vali,data_vali in enumerate(dataloader_vali):
          bs_pa_tumor_d_vali=utils_data.create_batch_pair_of_paths(data_vali,args)
          # print("bs_pa_tumor_d_vali",bs_pa_tumor_d_vali)
          # [[('/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/0020af02-bbba-11e8-b2ba-ac1f6b6435d0_blue.png',
          #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/0020af02-bbba-11e8-b2ba-ac1f6b6435d0_green.png',
          #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/0020af02-bbba-11e8-b2ba-ac1f6b6435d0_red.png',
          #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/0020af02-bbba-11e8-b2ba-ac1f6b6435d0_yellow.png'),
          #   ('/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_blue.png',
          #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_green.png',
          #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_red.png',
          #    '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_yellow.png')],
          #  array(['25 2','16 0'],dtype='<U4')]

          img_paths=bs_pa_tumor_d_vali[0]
          labels=bs_pa_tumor_d_vali[1]
          # print("labels",labels)
          # labels ['2 0' '1']
          # print("labels",labels.shape)

          labels=[one_protein_lbl.strip().split(" ") for one_protein_lbl in labels]
          # [['5'], ['0'], ['25'], ['2'], ['23'], ['25', '4'], ['12'], ['22', '2'], ['3'], ['0', '21'], ['2'], ['25', '18', '3', '0'], ['5'], ['2', '0', '21'], ['0', '21'], ['25'], ['25'], ['23'], ['23', '0'], ['25', '2', '0']]
          # print("labels",labels)

          labels_oh=utils_common.one_hot_label_vali(batch_size,labels)
          # print("labels_oh",labels_oh)
          # [[1. 1. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
          #  [0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
          labels_oh_np=np.array(labels_oh)
          labels_oh_tc=torch.tensor(labels_oh_np).cuda()

          all_images_vali_stacked=utils_data.get_batch_vali_imgs(img_paths)
          # print("all_images_vali_stacked",all_images_vali_stacked.shape)
          # (2, 4, 224, 224)

          all_images_vali_stacked_tc=utils_pytorch.get_Variable(all_images_vali_stacked)
          # print("all_images_vali_stacked_tc",all_images_vali_stacked_tc.shape)
          # torch.Size([2, 4, 224, 224])

          model_eval=model_api_instance.gen_net.eval()
          pred_vali=model_eval(all_images_vali_stacked_tc)
          # print("pred_vali",pred_vali)
          # print("pred_vali",pred_vali.shape)
          # torch.Size([2, 28])
      
          # ================================================================================
          single_TP,single_FP,single_FN=metrics_module.calculate_f1_score(pred_vali,labels_oh_tc)
          TP+=single_TP
          FP+=single_FP
          FN+=single_FN

        score=(2.0*TP/(2.0*TP+FP+FN+1e-6)).mean()
        print("score",score)
        f1_score_list.append(score.item())
        # tensor(0.0238, device='cuda:0')
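        # calculate_f1_score is assumed to threshold the sigmoid outputs and
        # return per-class TP/FP/FN counts over the 28 protein classes; the
        # score above is then the macro F1: the mean over classes of
        # 2*TP / (2*TP + FP + FN)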

    # ================================================================================
    # @ Plot loss value
    plt.plot(loss_list)
    plt.title("Loss value: 1st fold, 2nd fold, 3rd fold, continuously")
    plt.savefig("loss.png")
    plt.show()

    plt.plot(f1_score_list)
    plt.title("F1 score: 1st fold, 2nd fold, 3rd fold, continuously")
    plt.savefig("f1_score.png")
    plt.show()
  
  # ================================================================================
  elif args.task_mode=="validation":
    with torch.no_grad(): # @ Use network without calculating gradients
      tumor_trn=args.dir_where_text_file_for_image_paths_is_in+"/tumor_trn.txt"
      tumor_lbl=args.dir_where_text_file_for_image_paths_is_in+"/train_labels.csv"
      # print("tumor_trn",tumor_trn)
      # print("tumor_lbl",tumor_lbl)
      # /mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/tumor_trn.txt
      # /mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train_labels.csv

      # ================================================================================
      # @ Dataset and Dataloader

      # @ c dataset_inst_test_tumor: dataset instance of tumor dataset
      dataset_inst_test_tumor=custom_ds.custom_ds(
        txt_containing_paths=tumor_trn,txt_containing_labels=tumor_lbl,is_train=False,args=args)

      # @ c dataloader_tumor_test: dataloader instance of tumor dataset
      dataloader_tumor_test=torch.utils.data.DataLoader(
        dataset=dataset_inst_test_tumor,batch_size=batch_size,shuffle=False,num_workers=3)
      
      # ================================================================================
      # @ c num_imgs_test: number of entire test images
      num_imgs_test=len(dataset_inst_test_tumor)

      # ================================================================================
      # @ Create network and optimizer
      if args.train_method=="train_by_transfer_learning_using_resnet":
        model_api_instance=model_api_module.Model_API_class(args)
      
      # ================================================================================
      predicted_values=[]
      true_values=[]
      img_paths=[]

      # ================================================================================
      # @ Iterate all batches (batch1+batch2+...+batchn=entire images)
      for idx,data in enumerate(dataloader_tumor_test):
        # print("idx",idx)
        # print("data",data)
        # [('/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/e693f9ac4097289c317831960514b78701999cd9.tif\n',
        #   '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/e6941f6c6825e7c409b9364e2fb6c2d629df8a76.tif\n',),
        #  [('e693f9ac4097289c317831960514b78701999cd9','e6941f6c6825e7c409b9364e2fb6c2d629df8a76'),tensor([1,0])]]
        
        # ================================================================================
        # @ c imgs: paths of validation images
        imgs=data[0]

        img_paths.extend(imgs)

        # @ c lbls: labels of validation images
        lbls=data[1][1].numpy()

        # @ c num_imgs: number of validation images in one batch
        num_imgs=lbls.shape[0]
        # print("num_imgs",num_imgs)
        # 11
        # @ Load images from paths

        # ================================================================================
        test_imgs_list=[]
        for one_img_path in imgs:
          one_loaded_img=utils_image.load_img(one_img_path)
          # print("one_loaded_img",one_loaded_img.shape)
          # (96, 96, 3)

          one_loaded_img=resize(one_loaded_img,(224,224))

          test_imgs_list.append(one_loaded_img)

        # ================================================================================
        test_imgs_np=np.array(test_imgs_list).transpose(0,3,1,2)
        
        # @ If you want to use center (48,48) image from (96,96) image
        # test_imgs_np=test_imgs_np[:,:,24:72,24:72]
        # print("test_imgs_np",test_imgs_np.shape)
        # (11, 3, 48, 48)

        test_imgs_tc=Variable(torch.Tensor(test_imgs_np).cuda())

        # ================================================================================
        # @ Make predictions

        prediction=model_api_instance.gen_net(test_imgs_tc)
        # print("prediction",prediction)
        # tensor([[-2.0675],
        #         [-2.9296],

        sigmoid=torch.nn.Sigmoid()

        prediction_np=sigmoid(prediction).cpu().numpy()

        # ================================================================================
        # @ Make predicted labels

        prediction_np=np.where(prediction_np>0.5,1,0).squeeze()
        # print("prediction_np",prediction_np)
        # [0 0 1 0 0 0 0 0 1 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 1 0 0]
        # print("lbls",lbls)
        # [1 0 0 0 0 1 0 0 1 0 0 1 1 0 1 0 1 0 0 0 0 0 0 0 1 0 1 1 1 0]

        predicted_values.extend(prediction_np)
        
        true_values.extend(lbls)
      
      # ================================================================================
      y_true=true_values
      y_pred=predicted_values

      # ================================================================================
      # @ Binary Confusion Matrix

      b_c_mat=confusion_matrix(true_values,predicted_values,labels=[0,1])
      # print("b_c_mat",b_c_mat)
      # [[30  2]
      #  [ 0 68]]

      # With labels=[0,1], the layout is:
      # [[TN (non-tumor predicted as non-tumor), FP (non-tumor predicted as tumor)],
      #  [FN (tumor predicted as non-tumor),     TP (tumor predicted as tumor)]]
      
      # ================================================================================
      # @ metric report
      
      report=classification_report(y_true,y_pred,target_names=['class Non tumor (neg)', 'class Tumor (pos)'])
      # print(report)
      #                        precision    recall  f1-score   support

      # class Non tumor (neg)       0.97      1.00      0.99        68
      #     class Tumor (pos)       1.00      0.94      0.97        32

      #             micro avg       0.98      0.98      0.98       100
      #             macro avg       0.99      0.97      0.98       100
      #          weighted avg       0.98      0.98      0.98       100

      # ================================================================================
      print("accuracy_score",accuracy_score(y_true,y_pred))
      # 0.98

      print("precision_score",precision_score(y_true,y_pred))
      # 1.0

      print("recall_score",recall_score(y_true,y_pred))
      # 0.9375

      # print("fbeta_score",fbeta_score(y_true, y_pred, beta))
      
      print("f1_score",fbeta_score(y_true,y_pred,beta=1))
      # 0.967741935483871

      # ================================================================================
      # @ ROC curve
      fpr,tpr,thresholds=roc_curve(y_true,y_pred)
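      # Note: y_pred is already thresholded to {0,1}, so this curve has only
      # three points; pass the sigmoid probabilities instead to trace the
      # full ROC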
      plt.plot(fpr,tpr,'o-',label="tumor classifier")
      plt.title('Receiver operating characteristic')
      plt.show()

  elif args.task_mode=="submission":
    with torch.no_grad(): # @ Use network without calculating gradients
      
      sub_ds=custom_ds_test.custom_ds_Submission()
      print("sub_ds",sub_ds)

      sub_dl=torch.utils.data.DataLoader(
        dataset=sub_ds,batch_size=batch_size,shuffle=False,num_workers=3)
      print("sub_dl",sub_dl)

      # ================================================================================
      # @ c num_imgs_test: number of entire test images

      num_imgs_test=len(sub_ds)

      # ================================================================================
      # @ Create network and optimizer

      if args.train_method=="train_by_transfer_learning_using_resnet":
        model_api_instance=model_api_module.Model_API_class(args)

      # ================================================================================
      label_submission=pd.read_csv("/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/sample_submission.csv",encoding='utf8')
      base_names=label_submission.iloc[:,0].tolist()
      # print("base_names",base_names)

      # ================================================================================
      predicted_values=[]
      # @ Iterate all batches (batch1+batch2+...+batchn=entire images)
      for idx,data in enumerate(sub_dl):
        # print("idx",idx)
        # print("data",data)
        # 0
        # ['/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/test/0b2ea2a822ad23fdb1b5dd26653da899fbd2c0d5.tif',
        
        imgs=data

        # ================================================================================
        test_imgs_list=[]
        for one_img_path in imgs:
          one_loaded_img=utils_image.load_img(one_img_path)
          # print("one_loaded_img",one_loaded_img.shape)
          # (96, 96, 3)

          one_loaded_img=resize(one_loaded_img,(224,224))

          test_imgs_list.append(one_loaded_img)

        # ================================================================================
        test_imgs_np=np.array(test_imgs_list).transpose(0,3,1,2)
        
        # @ If you want to use center (48,48) image from (96,96) image
        # test_imgs_np=test_imgs_np[:,:,24:72,24:72]
        # print("test_imgs_np",test_imgs_np.shape)
        # (11, 3, 48, 48)

        test_imgs_tc=Variable(torch.Tensor(test_imgs_np).cuda())
        # print("test_imgs_tc",test_imgs_tc.shape)
        # torch.Size([30, 3, 224, 224])

        # ================================================================================
        # @ Make predictions
        prediction=model_api_instance.gen_net(test_imgs_tc)
        # print("prediction",prediction)
        # tensor([[-2.0675],
        # ...
        #         [-1.2222]], device='cuda:0')

        sigmoid=torch.nn.Sigmoid()

        prediction_np=sigmoid(prediction).cpu().numpy()

        # ================================================================================
        # @ Make predicted labels

        prediction_np=np.where(prediction_np>0.5,1,0).squeeze()
        # print("prediction_np",prediction_np)
        # [0 0 1 0 0 0 0 0 1 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 1 0 0]
        # print("lbls",lbls)
        # [1 0 0 0 0 1 0 0 1 0 0 1 1 0 1 0 1 0 0 0 0 0 0 0 1 0 1 1 1 0]

        predicted_values.extend(prediction_np)
     
      my_submission=pd.DataFrame({'id': base_names,'label': predicted_values})
      my_submission.to_csv('youngminpark2559_submission.csv',index=False)
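loss_functions_module.FocalLoss, used in the training loop above, is project code that is not shown. A hedged sketch of a multi-label focal loss on logits, following Lin et al. 2017 with gamma=2 (the project's version may differ, e.g. in class weighting):

import torch
import torch.nn.functional as F

def focal_loss(logits, targets, gamma=2.0):
    # targets: multi-hot float tensor with the same shape as logits
    bce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p = torch.sigmoid(logits)
    p_t = targets * p + (1 - targets) * (1 - p)   # prob assigned to the truth
    return ((1 - p_t) ** gamma * bce).mean()      # down-weight easy examples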
def train(args):

    epoch = int(args.epoch)
    batch_size = int(args.batch_size)
    # print("epoch",epoch)
    # print("batch_size",batch_size)
    # 200
    # 22

    # ================================================================================
    text_file_instance = text_file_path_api_module.Path_Of_Text_Files(args)
    tumor_trn = text_file_instance.tumor_trn
    tumor_lbl = text_file_instance.tumor_lbl
    # print("tumor_trn",tumor_trn)
    # print("tumor_lbl",tumor_lbl)
    # /mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/trn_txt_file_processed.txt
    # /mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train_labels.csv

    # ================================================================================
    # c tumor_loss_temp: list which stores loss values for plotting the loss
    tumor_loss_temp = []

    # ================================================================================
    # c model_api_instance: instance of model API
    model_api_instance = model_api_module.Model_API_class(args)

    # ================================================================================
    # @ Test Grad CAM
    imgs = [
        "/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train_split/00/02dcc00fde8ae55761b3d8e70d34b9153ea81bd9.tif",
        "/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/test/ffcc29cf0e363737b577d1db470df0bb1adf7957.tif"
    ]
    grad_cam.initialize_grad_cam(model=model_api_instance.gen_net,
                                 list_of_img_paths=imgs,
                                 args=args)

    # ================================================================================
    if args.task_mode == "train":  # If you're in train mode

        # ================================================================================
        # @ Configure learning rate scheduler

        # Update the learning rate 4 times over the entire run
        # For example, with 10 epochs, step_size=int(10/4)=2: epochs 0-1 / 2-3 / 4-5 / 6-7 / 8-9
        # 0-1 epochs: 0.001 -> 2-3 epochs: 0.0001 -> 4-5 epochs: 0.00001 -> 6-7 epochs: 0.000001 -> ...

        scheduler = StepLR(model_api_instance.optimizer,
                           step_size=int(epoch / 4),
                           gamma=0.1)

        # ================================================================================
        for one_ep in range(epoch):  # @ Iterates all epochs

            # c dataset_inst_trn_tumor: dataset instance of tumor
            dataset_inst_trn_tumor = dataset_tumor.Dataset_Tumor(
                txt_containing_paths=tumor_trn,
                txt_containing_labels=tumor_lbl,
                is_train=True,
                args=args)

            # Test iterator
            # iter_dataset_inst_trn_tumor=iter(dataset_inst_trn_tumor)
            # trn=next(iter_dataset_inst_trn_tumor)
            # print("trn",trn)
            # ('/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/4036b2e1e551e14b88f7f9ada275935ec4b5bdcc.tif\n',
            #  ['4036b2e1e551e14b88f7f9ada275935ec4b5bdcc', 0])

            # ================================================================================
            # c dataloader_tumor_trn: create dataloader
            dataloader_tumor_trn = torch.utils.data.DataLoader(
                dataset=dataset_inst_trn_tumor,
                batch_size=batch_size,
                shuffle=False,
                num_workers=3)

            # c dataloader_tumor_trn_iter: iterator of dataloader
            # dataloader_tumor_trn_iter=iter(dataloader_tumor_trn)

            # Test dataloader
            # pairs=next(dataloader_tumor_trn_iter)
            # print("pairs",pairs)
            # [('/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/45da9ea0958e83c3a5d6efe1e9912e80ab204b59.tif\n',
            #   '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/9f49570e69b43a10e58566c5207f87c4a96f04d2.tif\n'),
            #  [('45da9ea0958e83c3a5d6efe1e9912e80ab204b59',
            #    '9f49570e69b43a10e58566c5207f87c4a96f04d2'),
            #   tensor([1, 0])]]

            # ================================================================================
            # c num_imgs_trn: number of train images
            num_imgs_trn = len(dataset_inst_trn_tumor)
            # print("num_imgs_trn",num_imgs_trn)
            # 198022

            args.__setattr__("num_imgs_trn", num_imgs_trn)
            # print("args",args)
            # print("Current batch size:",batch_size)
            # print("Possible batch size:",list(utils_common.divisorGenerator(num_imgs_trn)))

            # assert str(num_imgs_trn/batch_size).split(".")[-1]==str(0),"Check batch size, currently it's incorrect"

            # ================================================================================
            # @ If you don't use Augmentor
            if args.use_augmentor == "False":
                pass

            else:  # @ If you use Augmentor

                # @ Iterate all images in dataset during single epoch
                for idx, data in enumerate(dataloader_tumor_trn):
                    # c paths_of_imgs: paths of images
                    paths_of_imgs = data[0]
                    # print("paths_of_imgs",paths_of_imgs)

                    # c labels_in_scalar: label values of images
                    labels_in_scalar = np.array(data[1][1])
                    # print("labels_in_scalar",labels_in_scalar)
                    # labels_in_scalar [1 0 1 1 0 0 0 1 0 1 0 1 0 0 0 1 0 1 0 1 0 1 0 1 0 0 1 0 1 0 0 1 1 0 0 0 1

                    # c bs_pa_tumor_d: batchsized paths of tumor dataset
                    bs_pa_tumor_d = [paths_of_imgs, labels_in_scalar]

                    # ================================================================================
                    # @ Perform data augmentation

                    # c sampled_trn_imgs_tc: sampled train images in torch tensor
                    # c label_values: corresponding label values
                    sampled_trn_imgs_tc, label_values = utils_data.use_augmetor_for_tumor_data(
                        bs_pa_tumor_d, args)
                    # print("sampled_trn_imgs_tc",sampled_trn_imgs_tc.shape)
                    # (10, 3, 48, 48)
                    # print("label_values",label_values)
                    # [1, 1, 0, 0, 0, 1, 0, 0, 0, 0]

                    # ================================================================================
                    trn_imgs_tcv = utils_pytorch.get_dense_data_Variable(
                        sampled_trn_imgs_tc)
                    # print("trn_imgs_tcv",trn_imgs_tcv.shape)
                    # torch.Size([10, 3, 48, 48])

                    # ================================================================================
                    # @ Remove existing gradients
                    model_api_instance.remove_existing_gradients_before_starting_new_training(
                    )

                    # ================================================================================
                    # @ c predicted_labels: pass input images and get predictions
                    predicted_labels = model_api_instance.gen_net(trn_imgs_tcv)
                    # print("predicted_labels",predicted_labels)
                    # tensor([[-0.0724],
                    #         [-0.0299],
                    #         [-0.1650],
                    #         [-0.2458],
                    #         [-0.3437],
                    #         [-0.1207],
                    #         [-0.3087],
                    #         [-0.2381],
                    #         [ 0.0811],
                    #         [-0.2436]], device='cuda:0', grad_fn=<AddmmBackward>)

                    # BCEWithLogitsLoss expects float targets
                    label_tc = Variable(
                        torch.tensor(label_values,
                                     device=predicted_labels.device).float())

                    # ================================================================================
                    # @ Calculate loss values

                    criterion = nn.BCEWithLogitsLoss()

                    loss_val = criterion(predicted_labels.squeeze(), label_tc)
                    # print("loss_val",loss_val)

                    loss_val = 10.0 * loss_val

                    # ================================================================================
                    # @ When you use 2 feature output and Cross Entropy loss function

                    # # c m: LogSoftmax layer
                    # m=nn.LogSoftmax()
                    # # c loss: NLLLoss layer
                    # loss=nn.NLLLoss()
                    # # c loss_val: calculated loss value
                    # loss_val=loss(m(predicted_labels),Variable(torch.Tensor(label_values).long().cuda()))

                    # ================================================================================
                    # @ Calculate gradient values through backpropagation
                    loss_val.backward()

                    # ================================================================================
                    # @ Update parameters of the network based on gradients
                    model_api_instance.optimizer.step()

                    # ================================================================================
                    # @ If you want to print loss
                    if args.use_loss_display == "True":
                        if idx % int(
                                args.leapping_term_when_displaying_loss) == 0:
                            print("Epoch:", one_ep, ", Batch:", idx)
                            print("loss_from_one_batch", loss_val.item())

                    tumor_loss_temp.append(loss_val.item())

                    # ================================================================================
                    # @ Save model after every batch you configure
                    # by using args.leapping_term_when_saving_model_after_batch
                    if idx % int(
                            args.leapping_term_when_saving_model_after_batch
                    ) == 0:
                        num_batch = "batch_" + str(idx)
                        model_api_instance.save_model_after_epoch(num_batch)

            # ================================================================================
            # @ Save model after epoch
            num_epoch = "epoch_" + str(one_ep)
            model_api_instance.save_model_after_epoch(num_epoch)

            # ================================================================================
            # @ Update learning rate

            scheduler.step()
            # print("scheduler.base_lrs",scheduler.base_lrs)

        # ================================================================================
        # @ Plot loss value
        plt.plot(tumor_loss_temp)
        plt.savefig("loss.png")
        plt.show()

    # ================================================================================
    elif args.task_mode == "validation":
        with torch.no_grad():  # @ Use network without calculating gradients
            # tumor_trn=args.dir_where_text_file_for_image_paths_is_in+"/tumor_trn.txt"
            # tumor_lbl=args.dir_where_text_file_for_image_paths_is_in+"/train_labels.csv"
            # print("tumor_trn",tumor_trn)
            # print("tumor_lbl",tumor_lbl)
            # /mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/tumor_trn.txt
            # /mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train_labels.csv

            # ================================================================================
            # @ Dataset and Dataloader

            # @ c dataset_inst_test_tumor: dataset instance of tumor dataset
            dataset_inst_test_tumor = dataset_tumor.Dataset_Tumor(
                txt_containing_paths=tumor_trn,
                txt_containing_labels=tumor_lbl,
                is_train=False,
                args=args)

            # @ c dataloader_tumor_test: dataloader instance of tumor dataset
            dataloader_tumor_test = torch.utils.data.DataLoader(
                dataset=dataset_inst_test_tumor,
                batch_size=batch_size,
                shuffle=False,
                num_workers=3)

            # ================================================================================
            # @ c num_imgs_test: number of entire test images
            num_imgs_test = len(dataset_inst_test_tumor)

            # ================================================================================
            # @ Create network and optimizer
            if args.train_method == "train_by_transfer_learning_using_resnet":
                model_api_instance = model_api_module.Model_API_class(args)

            # ================================================================================
            predicted_values = []
            true_values = []
            img_paths = []

            # ================================================================================
            # @ Iterate all batches (batch1+batch2+...+batchn=entire images)
            for idx, data in enumerate(dataloader_tumor_test):
                # print("idx",idx)
                # print("data",data)
                # [('/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/e693f9ac4097289c317831960514b78701999cd9.tif\n',
                #   '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/e6941f6c6825e7c409b9364e2fb6c2d629df8a76.tif\n',),
                #  [('e693f9ac4097289c317831960514b78701999cd9','e6941f6c6825e7c409b9364e2fb6c2d629df8a76'),tensor([1,0])]]

                # ================================================================================
                # @ c imgs: paths of validation images
                imgs = data[0]

                img_paths.extend(imgs)

                # @ c lbls: labels of the validation images
                lbls = data[1][1].numpy()

                # @ c num_imgs: number of validation images in one batch
                num_imgs = lbls.shape[0]
                # print("num_imgs",num_imgs)
                # 11
                # @ Load images from paths

                # ================================================================================
                test_imgs_list = []
                for one_img_path in imgs:
                    one_loaded_img = utils_image.load_img(one_img_path)
                    # print("one_loaded_img",one_loaded_img.shape)
                    # (96, 96, 3)

                    one_loaded_img = resize(one_loaded_img, (224, 224))

                    test_imgs_list.append(one_loaded_img)

                # ================================================================================
                test_imgs_np = np.array(test_imgs_list).transpose(0, 3, 1, 2)

                # @ If you want to use center (48,48) image from (96,96) image
                # test_imgs_np=test_imgs_np[:,:,24:72,24:72]
                # print("test_imgs_np",test_imgs_np.shape)
                # (11, 3, 48, 48)

                test_imgs_tc = Variable(torch.Tensor(test_imgs_np).cuda())

                # ================================================================================
                # @ Make predictions

                prediction = model_api_instance.gen_net(test_imgs_tc)
                # print("prediction",prediction)
                # tensor([[-2.0675],
                #         [-2.9296],

                sigmoid = torch.nn.Sigmoid()

                prediction_np = sigmoid(prediction).cpu().numpy()

                # ================================================================================
                # @ Make predicted labels

                prediction_np = np.where(prediction_np > 0.5, 1, 0).squeeze()
                # print("prediction_np",prediction_np)
                # [0 0 1 0 0 0 0 0 1 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 1 0 0]
                # print("lbls",lbls)
                # [1 0 0 0 0 1 0 0 1 0 0 1 1 0 1 0 1 0 0 0 0 0 0 0 1 0 1 1 1 0]
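                # Equivalent without the explicit sigmoid, since sigmoid(x)>0.5
                # exactly when x>0 (sketch):
                # prediction_np=(prediction.cpu().numpy()>0.0).astype(np.int64).squeeze()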

                predicted_values.extend(prediction_np)

                true_values.extend(lbls)

            # ================================================================================
            y_true = true_values
            y_pred = predicted_values

            # ================================================================================
            # @ Binary Confusion Matrix

            b_c_mat = confusion_matrix(true_values,
                                       predicted_values,
                                       labels=[0, 1])
            # print("b_c_mat",b_c_mat)
            # [[30  2]
            #  [ 0 68]]

            # With labels=[0,1], rows are true classes and columns are predictions:
            # True Negative (non-tumor predicted as non-tumor)   False Positive (non-tumor predicted as tumor)
            # False Negative (tumor predicted as non-tumor)      True Positive (tumor predicted as tumor)
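            # Unpacking the binary confusion matrix into scalars (sketch):
            # tn,fp,fn,tp=b_c_mat.ravel()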

            # ================================================================================
            # @ metric report

            report = classification_report(
                y_true,
                y_pred,
                target_names=['class Non tumor (neg)', 'class Tumor (pos)'])
            # print(report)
            #                        precision    recall  f1-score   support

            # class Non tumor (neg)       0.97      1.00      0.99        68
            #     class Tumor (pos)       1.00      0.94      0.97        32

            #             micro avg       0.98      0.98      0.98       100
            #             macro avg       0.99      0.97      0.98       100
            #          weighted avg       0.98      0.98      0.98       100

            # ================================================================================
            print("accuracy_score", accuracy_score(y_true, y_pred))
            # 0.98

            print("precision_score", precision_score(y_true, y_pred))
            # 1.0

            print("recall_score", recall_score(y_true, y_pred))
            # 0.9375

            # print("fbeta_score",fbeta_score(y_true, y_pred, beta))

            print("f1_score", fbeta_score(y_true, y_pred, beta=1))
            # 0.967741935483871

            # ================================================================================
            # @ ROC curve
            # Note: hard 0/1 predictions give only one operating point on the curve;
            # pass scores (sigmoid outputs) for a full curve, as in the sketch below
            fpr, tpr, thresholds = roc_curve(y_true, y_pred)
            plt.plot(fpr, tpr, 'o-', label="ResNet classifier")
            plt.title('Receiver operating characteristic')
            plt.show()
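            # ================================================================================
            # @ Sketch: ROC from scores instead of hard labels
            # (prob_values is a hypothetical list filled per batch with
            #  sigmoid(prediction).cpu().numpy().squeeze() before thresholding)
            # fpr,tpr,thresholds=roc_curve(y_true,prob_values)
            # plt.plot(fpr,tpr,'-',label="ResNet sigmoid scores")
            # plt.show()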

    elif args.task_mode == "submission":
        with torch.no_grad():  # @ Use network without calculating gradients

            sub_ds = dataset_tumor_test.Dataset_Tumor_Submission()
            print("sub_ds", sub_ds)

            sub_dl = torch.utils.data.DataLoader(dataset=sub_ds,
                                                 batch_size=batch_size,
                                                 shuffle=False,
                                                 num_workers=3)
            print("sub_dl", sub_dl)

            # ================================================================================
            # @ c num_imgs_test: number of entire test images

            num_imgs_test = len(sub_ds)

            # ================================================================================
            # @ Create network and optimizer

            if args.train_method == "train_by_transfer_learning_using_resnet":
                model_api_instance = model_api_module.Model_API_class(args)

            # ================================================================================
            label_submission = pd.read_csv(
                "/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/sample_submission.csv",
                encoding='utf8')
            base_names = label_submission.iloc[:, 0].tolist()
            # print("base_names",base_names)

            # ================================================================================
            predicted_values = []
            # @ Iterate all batches (batch1+batch2+...+batchn=entire images)
            for idx, data in enumerate(sub_dl):
                # print("idx",idx)
                # print("data",data)
                # 0
                # ['/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/test/0b2ea2a822ad23fdb1b5dd26653da899fbd2c0d5.tif',

                imgs = data

                # ================================================================================
                test_imgs_list = []
                for one_img_path in imgs:
                    one_loaded_img = utils_image.load_img(one_img_path)
                    # print("one_loaded_img",one_loaded_img.shape)
                    # (96, 96, 3)

                    one_loaded_img = resize(one_loaded_img, (224, 224))

                    test_imgs_list.append(one_loaded_img)

                # ================================================================================
                test_imgs_np = np.array(test_imgs_list).transpose(0, 3, 1, 2)

                # @ If you want to use center (48,48) image from (96,96) image
                # test_imgs_np=test_imgs_np[:,:,24:72,24:72]
                # print("test_imgs_np",test_imgs_np.shape)
                # (11, 3, 48, 48)

                test_imgs_tc = Variable(torch.Tensor(test_imgs_np).cuda())
                # print("test_imgs_tc",test_imgs_tc.shape)
                # torch.Size([30, 3, 224, 224])

                # ================================================================================
                # @ Make predictions
                prediction = model_api_instance.gen_net(test_imgs_tc)
                # print("prediction",prediction)
                # tensor([[-2.0675],
                # ...
                #         [-1.2222]], device='cuda:0')

                sigmoid = torch.nn.Sigmoid()

                prediction_np = sigmoid(prediction).cpu().numpy()

                # ================================================================================
                # @ Make predicted labels

                prediction_np = np.where(prediction_np > 0.5, 1, 0).squeeze()
                # print("prediction_np",prediction_np)
                # [0 0 1 0 0 0 0 0 1 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 1 0 0]
                # print("lbls",lbls)
                # [1 0 0 0 0 1 0 0 1 0 0 1 1 0 1 0 1 0 0 0 0 0 0 0 1 0 1 1 1 0]

                predicted_values.extend(prediction_np)

            my_submission = pd.DataFrame({
                'id': base_names,
                'label': predicted_values
            })
            my_submission.to_csv('youngminpar2559_submission.csv', index=False)
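            # If the leaderboard metric is AUC, submitting the raw sigmoid
            # scores usually beats thresholded 0/1 labels (sketch; prob_values
            # is a hypothetical list collected before thresholding):
            # my_submission=pd.DataFrame({'id':base_names,'label':prob_values})
            # my_submission.to_csv('submission_probs.csv',index=False)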
Code example #10
  # print(model)
  grad_cam = GradCam(model, target_layer_names = ["layer4"], use_cuda=args.use_cuda)
  # x=os.walk(args.image_path)  
  # for root,dirs,filename in x:
  #   # print(type(grad_cam))
  #   print(filename)
  
  # for s in filename:
  #     image.append(cv2.imread(args.image_path+s,1))
    #img = cv2.imread(filename, 1)

  image=[
    "/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/test/ffcaef8b9006b4d0b128328e6df6e4d139d3c40a.tif",
    "/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/test/ffcc29cf0e363737b577d1db470df0bb1adf7957.tif"]  
  i=0  # counter used to index the saved CAM overlays
  for img in image:
    img=utils_image.load_img(img)/255.0
    img=img.astype("float32")
    # img = np.float32(cv2.resize(img, (224, 224))) / 255
    input = preprocess_image(img)
    # print("input",input.grad_fn)
    # None

    print('input.size()=',input.size())
    # If None, returns the map for the highest scoring category.
    # Otherwise, targets the requested index.
    target_index=None

    mask = grad_cam(input, target_index)
    # print(type(mask))
    i=i+1 
    show_cam_on_image(img, mask,i)
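# A minimal sketch of the preprocess_image helper used above, following the
# common Grad-CAM convention (ImageNet normalization, NCHW float tensor that
# requires grad); the project's actual helper may differ:
import numpy as np
import torch

def preprocess_image(img):
  # img: HxWx3 float array already scaled to [0,1] (as done above)
  means=np.array([0.485,0.456,0.406])
  stds=np.array([0.229,0.224,0.225])
  preprocessed=(img-means)/stds
  preprocessed=preprocessed.transpose(2,0,1)  # HWC -> CHW
  tensor=torch.from_numpy(preprocessed).float().unsqueeze(0)  # add batch dim
  tensor.requires_grad_(True)  # Grad-CAM backpropagates to this input
  return tensor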
Code example #11
def visualize_images(args):

    # ================================================================================
    loaded_path, num_imgs = utils_common.return_path_list_from_txt(train_imgs)
    # print("loaded_path",loaded_path)
    # ['/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_blue.png\n',
    #  '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_green.png\n',

    # print("num_imgs",num_imgs)
    # 124288

    loaded_path_chunked = []
    for i in range(0, num_imgs, 4):
        one_protein = loaded_path[i:i + 4]
        # print("one_protein",one_protein)
        # ['/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_blue.png\n',
        #  '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_green.png\n',
        #  '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_red.png\n',
        #  '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_yellow.png\n']

        loaded_path_chunked.append(one_protein)

    # print("loaded_path_chunked",loaded_path_chunked)
    # [['/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_blue.png\n',
    #   '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_green.png\n',
    #   '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_red.png\n',
    #   '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle/human-protein-atlas-image-classification/Data/train/00070df0-bbc3-11e8-b2bc-ac1f6b6435d0_yellow.png\n'

    path_3_proteins = loaded_path_chunked[:3]

    # ================================================================================
    images_of_3_proteins = []
    for one_protein in path_3_proteins:
        b_img = one_protein[0].replace("\n", "")
        g_img = one_protein[1].replace("\n", "")
        r_img = one_protein[2].replace("\n", "")
        y_img = one_protein[3].replace("\n", "")

        b_img = utils_image.load_img(b_img)
        g_img = utils_image.load_img(g_img)
        r_img = utils_image.load_img(r_img)
        y_img = utils_image.load_img(y_img)

        # print("b_img",b_img.shape)
        # (512, 512)
        # print("g_img",g_img.shape)
        # (512, 512)
        # print("r_img",r_img.shape)
        # (512, 512)
        # print("y_img",y_img.shape)
        # (512, 512)

        images_of_3_proteins.append([b_img, g_img, r_img, y_img])

    i = 0
    for one_protein_img in images_of_3_proteins:
        bg_img = np.zeros(
            (one_protein_img[0].shape[0], one_protein_img[0].shape[1], 3))
        # print("bg_img",bg_img.shape)
        # (512, 512, 3)

        bg_img_flat_for_b = bg_img.reshape(-1, 3).copy()
        bg_img_flat_for_g = bg_img.reshape(-1, 3).copy()
        bg_img_flat_for_r = bg_img.reshape(-1, 3).copy()
        bg_img_flat_for_y = bg_img.reshape(-1, 3).copy()

        # print("one_protein_img[0]",one_protein_img[0].shape)
        # (512, 512)
        # print("one_protein_img[1]",one_protein_img[1].shape)
        # (512, 512)
        # print("one_protein_img[2]",one_protein_img[2].shape)
        # (512, 512)
        # print("one_protein_img[3]",one_protein_img[3].shape)
        # (512, 512)

        b_img = one_protein_img[0].reshape(-1)
        g_img = one_protein_img[1].reshape(-1)
        r_img = one_protein_img[2].reshape(-1)
        y_img = one_protein_img[3].reshape(-1)

        rgb_img = np.stack((one_protein_img[0], one_protein_img[1],
                            one_protein_img[2])).transpose(1, 2, 0)
        # print("rgb_img",rgb_img.shape)

        # ================================================================================
        # NOTE: scipy.misc.imsave was removed in SciPy 1.2;
        # an imageio-based replacement sketch follows this example
        import scipy.misc
        scipy.misc.imsave('./img_out/rgb_img_' + str(i) + '.png', rgb_img)

        bg_img_flat_for_b[:, 2] = b_img
        scipy.misc.imsave('./img_out/b_img_' + str(i) + '.png',
                          bg_img_flat_for_b.reshape(512, 512, 3))

        bg_img_flat_for_g[:, 1] = g_img
        scipy.misc.imsave('./img_out/g_img_' + str(i) + '.png',
                          bg_img_flat_for_g.reshape(512, 512, 3))

        bg_img_flat_for_r[:, 0] = r_img
        scipy.misc.imsave('./img_out/r_img_' + str(i) + '.png',
                          bg_img_flat_for_r.reshape(512, 512, 3))

        # NOTE: the yellow channel is painted into the red column only here;
        # writing it into columns 0 and 1 would render as true yellow
        bg_img_flat_for_y[:, 0] = y_img
        scipy.misc.imsave('./img_out/y_img_' + str(i) + '.png',
                          bg_img_flat_for_y.reshape(512, 512, 3))

        i = i + 1
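# scipy.misc.imsave was removed in SciPy 1.2. A replacement sketch using
# imageio; unlike scipy.misc.imsave it does not rescale automatically, so
# convert to uint8 explicitly (assumes 0-255 pixel values, as loaded above):
import imageio
import numpy as np

def save_img_uint8(path, img):
    # Clip to the displayable range and cast before writing
    imageio.imwrite(path, np.clip(img, 0, 255).astype(np.uint8))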
Code example #12
def train(args):
    epoch = int(args.epoch)
    batch_size = int(args.batch_size)
    # print("epoch",epoch)
    # print("batch_size",batch_size)
    # 9
    # 2

    # ================================================================================
    text_file_instance = text_file_path_api_module.Path_Of_Text_Files(args)

    txt_of_image_data = text_file_instance.train_data
    # print("txt_of_image_data",txt_of_image_data)
    # /mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/trn_imgs_paths.txt

    txt_of_label_data = text_file_instance.label_data
    # print("txt_of_label_data",txt_of_label_data)
    # /mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/trn_imgs_labels.csv

    # ================================================================================
    model_api_instance = model_api_module.Model_API_class(args)

    path_of_imgs_tumor = []
    with open(txt_of_image_data) as f:
        lines = f.readlines()
        path_of_imgs_tumor.extend(lines)
    # print("path_of_imgs_tumor",path_of_imgs_tumor)
    # ['/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/0004.png\n', '/mnt/1T-5e7/mycodehtml/

    pre_sale = pd.read_csv(txt_of_label_data, encoding='utf8')
    # print("pre_sale",pre_sale)
    #      Image ID          Class
    # 0    4         ecoli
    # 1    5         salmonella

    bacteria_label_np = np.array(pre_sale.iloc[:, 1])
    # print("bacteria_label_np",bacteria_label_np)
    # ['ecoli' 'salmonella' 'staphylococus' 'listeria' 'ecoli' 'ecoli'

    zipped = list(zip(path_of_imgs_tumor, bacteria_label_np))
    # print("zipped",zipped)
    # zipped [('/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/0004.png\n', array([4, 'ecoli'], dtype=object)), ('/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/0005.png\n', array([5,
    # [('/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/0004.png\n', 'ecoli'), ('/mnt/1T-5e7/

    shuffle(zipped)

    zipped_np = np.array(zipped)
    X = zipped_np[:, 0]
    y = zipped_np[:, 1]

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.10,
                                                        random_state=42)
    # print("X_train",np.array(X_train).shape)
    # # (545,)
    # print("y_train",np.array(y_train).shape)
    # # (545,)
    # print("X_test",np.array(X_test).shape)
    # # (61,)
    # print("y_test",np.array(y_test).shape)
    # # (61,)
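    # With imbalanced classes, a stratified split preserves per-class ratios
    # (sketch using the same call with one extra argument):
    # X_train,X_test,y_train,y_test=train_test_split(
    #     X,y,test_size=0.10,random_state=42,stratify=y)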

    # ================================================================================
    # @ Test Grad CAM
    # imgs=["/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train_split/33/82d4d190d2fed1be255fc3bac36a37c860bb31c0.tif",
    #       "/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train_split/33/82a5300cd61628fb9bae332cdb7d5e7e37b1fb36.tif"]
    # grad_cam.initialize_grad_cam(model=model_api_instance.gen_net,list_of_img_paths=imgs,args=args)
    # afaf 1: grad_cam.initialize_grad_cam(model=model_api_instance.gen_net,list_of_img_paths=imgs,args=args)

    tumor_loss_temp = []
    # ================================================================================
    if args.task_mode == "train":  # If you're in train mode

        # ================================================================================
        # @ Configure learning rate scheduler

        # Decay the learning rate 4 times over the entire run
        # For example, with 10 epochs, step_size=int(10/4)=2 groups epochs as 0-1 / 2-3 / 4-5 / 6-7 / 8-9
        # lr: epochs 0-1: 0.001 -> 2-3: 0.0001 -> 4-5: 0.00001 -> 6-7: 0.000001 -> 8-9: 0.0000001

        # scheduler=StepLR(model_api_instance.optimizer,step_size=int(epoch/4),gamma=0.1)
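        # A quick standalone check of this decay schedule (illustrative sketch,
        # not part of the training loop):
        # opt=torch.optim.SGD([torch.zeros(1,requires_grad=True)],lr=0.001)
        # sch=torch.optim.lr_scheduler.StepLR(opt,step_size=2,gamma=0.1)
        # for ep in range(10):
        #     opt.step();sch.step()
        #     print(ep,sch.get_last_lr())  # lr shrinks x0.1 every 2 epochs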

        # ================================================================================
        for one_ep in range(epoch):  # @ Iterates all epochs

            # c dataset_inst_trn_tumor: dataset instance of tumor
            dataset_inst_trn_tumor = custom_ds.Custom_DS(
                single_train_k=X_train, single_train_lbl_k=y_train, args=args)

            # Test iterator
            # iter_dataset_inst_trn_tumor=iter(dataset_inst_trn_tumor)
            # trn=next(iter_dataset_inst_trn_tumor)
            # print("trn",trn)
            # ('/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/4036b2e1e551e14b88f7f9ada275935ec4b5bdcc.tif\n',
            #  ['4036b2e1e551e14b88f7f9ada275935ec4b5bdcc', 0])

            # ================================================================================
            # c dataloader_tumor_trn: create dataloader
            dataloader_tumor_trn = torch.utils.data.DataLoader(
                dataset=dataset_inst_trn_tumor,
                batch_size=batch_size,
                shuffle=False,
                num_workers=3)

            # c dataloader_tumor_trn_iter: iterator of dataloader
            # dataloader_tumor_trn_iter=iter(dataloader_tumor_trn)

            # Test dataloader
            # pairs=next(dataloader_tumor_trn_iter)
            # print("pairs",pairs)
            # [('/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/45da9ea0958e83c3a5d6efe1e9912e80ab204b59.tif\n',
            #   '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/9f49570e69b43a10e58566c5207f87c4a96f04d2.tif\n'),
            #  [('45da9ea0958e83c3a5d6efe1e9912e80ab204b59',
            #    '9f49570e69b43a10e58566c5207f87c4a96f04d2'),
            #   tensor([1, 0])]]

            # ================================================================================
            # c num_imgs_trn: number of train image
            num_imgs_trn = len(dataset_inst_trn_tumor)
            # print("num_imgs_trn",num_imgs_trn)
            # 198022

            args.__setattr__("num_imgs_trn", num_imgs_trn)
            # print("args",args)
            # print("Current batch size:",batch_size)
            # print("Possible batch size:",list(utils_common.divisorGenerator(num_imgs_trn)))

            # assert str(num_imgs_trn/batch_size).split(".")[-1]==str(0),"Check batch size, currently it's incorrect"

            # ================================================================================
            # @ If you don't use Augmentor
            if args.use_augmentor == "False":
                pass

            else:  # @ If you use Augmentor

                # @ Iterate all images in dataset during single epoch
                for idx, data in enumerate(dataloader_tumor_trn):
                    # print("data",data)
                    # [('/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/1611.png\n',
                    #   '/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/1475.png\n'),
                    #  ('staphylococus', 'staphylococus')]

                    # c paths_of_imgs: paths of images
                    paths_of_imgs = data[0]
                    # print("paths_of_imgs",paths_of_imgs)
                    # ('/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/0849.png\n', '/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/0969.png\n')

                    # c labels_in_scalar: label values of images
                    labels_in_scalar = np.array(data[1])
                    # print("labels_in_scalar",labels_in_scalar)
                    # ['ecoli' 'salmonella']

                    # c bs_pa_tumor_d: batchsized paths of tumor dataset
                    bs_pa_tumor_d = [paths_of_imgs, labels_in_scalar]

                    # ================================================================================
                    # @ Perform data augmentation

                    # c sampled_trn_imgs_tc: sampled train images in torch tensor
                    # c label_values: corresponding label values
                    sampled_trn_imgs_tc, label_values = utils_data.use_augmetor_for_tumor_data(
                        bs_pa_tumor_d, args)
                    # print("sampled_trn_imgs_tc",sampled_trn_imgs_tc.shape)
                    # (2, 1, 224, 224)

                    # print("label_values",label_values)
                    # ['ecoli', 'staphylococus']

                    # c I_4x4: 4x4 identity, kept for reference
                    # (the dict below hardcodes the same one-hot rows)
                    I_4x4 = np.eye(4)
                    bac_name_dict = {
                        'ecoli': [1., 0., 0., 0.],
                        'salmonella': [0., 1., 0., 0.],
                        'staphylococus': [0., 0., 1., 0.],
                        'listeria': [0., 0., 0., 1.]
                    }

                    one_hot_label = []
                    for one_bac_name in label_values:
                        # print("one_bac_name",one_bac_name)
                        # listeria

                        one_hot = bac_name_dict[one_bac_name]
                        # print("one_hot",one_hot)
                        # [0.0, 0.0, 0.0, 1.0]

                        one_hot_label.append(one_hot)

                    # print("one_hot_label",one_hot_label)
                    # [[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]
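                    # The same one-hot rows can come straight from I_4x4 above
                    # (sketch; bac_idx is a hypothetical name->index map):
                    # bac_idx={'ecoli':0,'salmonella':1,'staphylococus':2,'listeria':3}
                    # one_hot_label=[list(I_4x4[bac_idx[n]]) for n in label_values]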

                    # bac_name_dict={'ecoli':1,'salmonella':2,'staphylococus':3,'listeria':4}

                    # one_hot_label=[]
                    # for one_bac_name in label_values:
                    #   # print("one_bac_name",one_bac_name)
                    #   # listeria

                    #   one_hot=bac_name_dict[one_bac_name]
                    #   # print("one_hot",one_hot)
                    #   # [0.0, 0.0, 0.0, 1.0]

                    #   one_hot_label.append(one_hot)

                    # # print("one_hot_label",one_hot_label)
                    # # [3, 3]

                    # CrossEntropyLoss expects class indices, so recover them
                    # from the one-hot rows
                    y1 = torch.FloatTensor(one_hot_label)
                    _, targets = y1.max(dim=1)
                    # print("targets",targets)

                    # ================================================================================
                    trn_imgs_tcv = utils_pytorch.get_Variable(
                        sampled_trn_imgs_tc)
                    # print("trn_imgs_tcv",trn_imgs_tcv.shape)
                    # torch.Size([2, 1, 224, 224])

                    # ================================================================================
                    # @ Remove existing gradients
                    model_api_instance.remove_existing_gradients_before_starting_new_training(
                    )

                    # ================================================================================
                    # @ c predicted_labels: pass input images and get predictions
                    predicted_labels = model_api_instance.gen_net(trn_imgs_tcv)
                    # print("predicted_labels",predicted_labels)
                    # tensor([[-0.1724, -0.1855, -0.1478,  0.1190],
                    #         [-0.1837, -0.1834, -0.1849,  0.0015]], device='cuda:0',
                    #       grad_fn=<AddmmBackward>)

                    # max_indics=torch.argmax(predicted_labels,dim=1)
                    # # print("max_indics",max_indics)
                    # # tensor([3, 3], device='cuda:0')
                    # print("max_indics",max_indics.shape)
                    # afaf

                    label_tc = targets.to(predicted_labels.device)
                    # print("label_tc",label_tc)

                    # ================================================================================
                    # @ Calculate loss values

                    criterion = nn.CrossEntropyLoss()

                    # print("max_indics",max_indics)
                    # tensor([2, 2], device='cuda:0')

                    # print("label_tc",label_tc)
                    # tensor([2, 1], device='cuda:0')

                    print("predicted_labels", predicted_labels.shape)
                    # print("label_tc",label_tc.shape)
                    # predicted_labels torch.Size([2, 4])
                    # label_tc torch.Size([2])

                    loss_val = criterion(predicted_labels, label_tc.long())
                    # print("loss_val",loss_val)
                    # tensor(1.7040, device='cuda:0', grad_fn=<NllLossBackward>)

                    # Scale the loss; with a single loss term this mainly acts
                    # like a 10x gradient (learning-rate) multiplier
                    loss_val = 10.0 * loss_val

                    # ================================================================================
                    # @ Alternative: LogSoftmax + NLLLoss (equivalent to CrossEntropyLoss)

                    # # c m: LogSoftmax layer
                    # m=nn.LogSoftmax()
                    # # c loss: NLLLoss layer
                    # loss=nn.NLLLoss()
                    # # c loss_val: calculated loss value
                    # loss_val=loss(m(predicted_labels),Variable(torch.Tensor(label_values).long().cuda()))
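                    # Equivalence sketch: for logits x and class indices t,
                    # CrossEntropyLoss(x,t) equals NLLLoss applied to LogSoftmax(x):
                    # x=torch.randn(2,4);t=torch.tensor([1,3])
                    # assert torch.allclose(nn.CrossEntropyLoss()(x,t),
                    #                       nn.NLLLoss()(nn.LogSoftmax(dim=1)(x),t))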

                    # ================================================================================
                    # @ Calculate gradient values through backpropagation
                    loss_val.backward()

                    # ================================================================================
                    # @ Update parameters of the network based on gradients
                    model_api_instance.optimizer.step()

                    # ================================================================================
                    # @ If you want to print loss
                    if args.use_loss_display == "True":
                        if idx % int(
                                args.leapping_term_when_displaying_loss) == 0:
                            print("Epoch:", one_ep, ", Batch:", idx)
                            print("loss_from_one_batch", loss_val.item())

                    tumor_loss_temp.append(loss_val.item())

                    # ================================================================================
                    # @ Save the model every N batches, as configured by
                    # args.leapping_term_when_saving_model_after_batch
                    if idx % int(
                            args.leapping_term_when_saving_model_after_batch
                    ) == 0:
                        num_batch = "batch_" + str(idx)
                        model_api_instance.save_model_after_epoch(num_batch)

            # ================================================================================
            # @ Save model after epoch
            num_epoch = "epoch_" + str(one_ep)
            model_api_instance.save_model_after_epoch(num_epoch)

            # ================================================================================
            # @ Update learning rate

            # scheduler.step()
            # print("scheduler.base_lrs",scheduler.base_lrs)

        # ================================================================================
        # @ Plot loss value
        plt.plot(tumor_loss_temp)
        plt.savefig("loss.png")
        plt.show()

        with torch.no_grad():

            dataset_inst_vali_tumor = custom_ds.Custom_DS_vali(
                single_vali_k=X_test, single_vali_lbl_k=y_test, args=args)

            dataloader_tumor_vali = torch.utils.data.DataLoader(
                dataset=dataset_inst_vali_tumor,
                batch_size=5,
                shuffle=False,
                num_workers=3)

            # ================================================================================
            num_imgs_vali = len(dataset_inst_vali_tumor)
            # print("num_imgs_vali",num_imgs_vali)

            pred_np_li = []
            label_np_li = []

            # @ Iterate all images in dataset during single epoch
            for idx, data in enumerate(dataloader_tumor_vali):
                # print("data",data)
                # [('/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/1611.png\n',
                #   '/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/1475.png\n'),
                #  ('staphylococus', 'staphylococus')]

                # c paths_of_imgs: paths of images
                paths_of_imgs = data[0]
                # print("paths_of_imgs",paths_of_imgs)
                # ('/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/0849.png\n', '/mnt/1T-5e7/mycodehtml/bio_health/Bacteria/bacteria-classification-at-the-genus-level/Data/bacteria-classification-at-the-genus-level/Train_Images/0969.png\n')

                input_imgs = []
                for one_path in paths_of_imgs:
                    loaded_img = utils_image.load_img(
                        one_path.replace("\n", ""))
                    # print("loaded_img",loaded_img.shape)
                    # (1024, 1280)

                    loaded_img = utils_image.resize_img(loaded_img, 224, 224)
                    input_imgs.append(loaded_img)

                input_imgs_np = np.array(input_imgs)
                # print("input_imgs_np",input_imgs_np.shape)
                # (2, 224, 224)

                # Add a channel axis: (B,H,W) -> (B,1,H,W) for the 1-channel network
                input_imgs_np = input_imgs_np[:, np.newaxis, :, :]
                input_imgs_tc = torch.tensor(input_imgs_np).float()

                trn_imgs_tcv = utils_pytorch.get_Variable(input_imgs_tc)
                # print("trn_imgs_tcv",trn_imgs_tcv.shape)
                # torch.Size([2, 1, 224, 224])

                # c labels_in_scalar: label values of images
                labels_in_scalar = np.array(data[1])
                # print("labels_in_scalar",labels_in_scalar)
                # ['salmonella' 'listeria' 'salmonella' 'salmonella' 'salmonella']

                I_4x4 = np.eye(4)
                # print("I_4x4",I_4x4)
                # [[1. 0. 0. 0.]
                #  [0. 1. 0. 0.]
                #  [0. 0. 1. 0.]
                #  [0. 0. 0. 1.]]
                bac_name_dict = {
                    'ecoli': [1., 0., 0., 0.],
                    'salmonella': [0., 1., 0., 0.],
                    'staphylococus': [0., 0., 1., 0.],
                    'listeria': [0., 0., 0., 1.]
                }

                one_hot_label = []
                for one_bac_name in labels_in_scalar:
                    # print("one_bac_name",one_bac_name)
                    # salmonella

                    one_hot = bac_name_dict[one_bac_name]
                    # print("one_hot",one_hot)
                    # [1.0, 0.0, 0.0, 0.0]

                    one_hot_label.append(one_hot)

                # print("one_hot_label",one_hot_label)
                # [[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]

                y1 = torch.FloatTensor(one_hot_label)
                _, targets = y1.max(dim=1)
                # print("targets",targets)
                # tensor([1, 0, 1, 1, 3])

                # ================================================================================
                # @ Remove existing gradients
                # model_api_instance.remove_existing_gradients_before_starting_new_training()

                # ================================================================================
                # @ c predicted_labels: pass input images and get predictions
                predicted_labels = model_api_instance.gen_net(trn_imgs_tcv)
                # print("predicted_labels",predicted_labels)
                # tensor([[ 0.6513, -0.1330, -0.5001, -0.0210],
                #         [ 0.1990,  0.1838, -0.1278, -0.1546]], device='cuda:0')

                max_indics = torch.argmax(predicted_labels, dim=1)
                # print("max_indics",max_indics)
                # tensor([0, 0], device='cuda:0')

                # print("targets",targets)
                # tensor([0, 1])

                label_tc = targets.to(predicted_labels.device)
                # print("label_tc",label_tc)
                # tensor([1, 3, 2, 0, 2], device='cuda:0')

                pred_np = max_indics.detach().cpu().numpy()
                label_np = label_tc.detach().cpu().numpy()

                pred_np_li.append(pred_np)
                label_np_li.append(label_np)

            # pred_np_li [array([0, 1])]
            # label_np_li [array([3, 1])]

            label_np_li_len = len(label_np_li)

            # Batches can be ragged (the last one is smaller), so concatenate
            # into flat 1-D arrays instead of building an array of arrays
            y_true = np.concatenate(label_np_li)
            y_pred = np.concatenate(pred_np_li)
            print("y_true", y_true)
            # y_true [2 1 2 2 1 0 2 2 3 2 1 1 0 0 2 1 0 1 3 2 2 3 3 0 0 0 3 3 3 1
            #         3 2 3 2 1 1 2 3 0 2 1 1 2 3 3 1 3 3 0 3 0 1 0 3 2 3 3 3 3 1 1]

            print("y_pred", y_pred)
            # y_pred [2 0 2 2 0 0 2 2 3 2 1 1 0 0 2 1 0 1 3 2 2 3 3 0 0 0 3 3 3 1
            #         3 2 3 2 1 1 2 3 0 2 1 1 2 3 3 1 3 2 0 3 0 1 0 3 2 3 2 3 3 1 0]

            # Accuracy via explicit counting over (true, pred) pairs
            # (an equivalent one-liner sketch follows this loop)
            num_entire_sam = 0
            num_matched = 0
            for one_pair in list(zip(y_true, y_pred)):
                y_one_true = one_pair[0]
                y_one_pred = one_pair[1]

                num_sample = y_one_true.reshape(-1).shape[0]
                num_entire_sam = num_sample + num_entire_sam

                y_one_true = np.array(y_one_true)
                y_one_pred = np.array(y_one_pred)

                matched = (y_one_true == y_one_pred)

                matched_num = matched.sum()
                num_matched = num_matched + matched_num
            acc = num_matched / num_entire_sam
            print("acc", acc)
            # afaf

            # ================================================================================
            # An earlier variant of the same accuracy computation, kept for reference:
            # y_true_rs=np.reshape(y_true,(-1))
            # num_sam=y_true_rs.shape[0]
            # matched=np.array(y_true==y_pred)
            # matched_num=matched.sum()
            # acc=matched_num/num_sam
            # print("acc",acc)

            # zpped=list(zip(y_true,y_pred))
            # n_classes = 4
            # y_true_oh=[]
            # y_pred_oh=[]
            # for o_true,o_pred in zpped:
            #   aa=np.eye(n_classes)[o_true]
            #   bb=np.eye(n_classes)[o_pred]
            #   y_true_oh.append(list(aa))
            #   y_pred_oh.append(list(bb))

            # # print("y_true_oh",y_true_oh)
            # # print("y_pred_oh",y_pred_oh)
            # # [array([[0., 0., 0., 1.],
            # #         [1., 0., 0., 0.]])]
            # # [array([[0., 0., 1., 0.],
            # #         [0., 0., 1., 0.]])]

            # y_true=np.array(y_true_oh)
            # y_pred=np.array(y_pred_oh)
            # # print("y_true",y_true)
            # # print("y_pred",y_pred)
            # # print("y_true",type(y_true))
            # # print("y_pred",type(y_pred))

            # ================================================================================
            # @ Confusion matrix (4 bacteria classes)

            # c_mat=confusion_matrix(y_true,y_pred,labels=[0,1,2,3])
            # print("c_mat",c_mat)
            # rows are true classes, columns are predictions
            # (index order: ecoli, salmonella, staphylococus, listeria)

            # ================================================================================
            # @ metric report

            # report=classification_report(y_true,y_pred,
            #     target_names=['ecoli','salmonella','staphylococus','listeria'])
            # print(report)

            # ================================================================================
            print("accuracy_score", accuracy_score(y_true, y_pred))

            # With 4 classes, precision/recall/F-beta need an explicit average mode
            print("precision_score",
                  precision_score(y_true, y_pred, average='macro'))

            print("recall_score", recall_score(y_true, y_pred, average='macro'))

            print("f1_score",
                  fbeta_score(y_true, y_pred, beta=1, average='macro'))

            # ================================================================================
            # @ ROC curve
            # roc_curve expects binary labels or one-vs-rest scores, so the
            # binary tumor version cannot be applied to 4 classes as-is
            # fpr, tpr, thresholds = roc_curve(y_true, y_pred)
            # plt.plot(fpr, tpr, 'o-')
            # plt.title('Receiver operating characteristic')
            # plt.show()

    # ================================================================================
    elif args.task_mode == "validation":
        with torch.no_grad():  # @ Use network without calculating gradients
            tumor_trn=args.dir_where_text_file_for_image_paths_is_in+"/tumor_trn.txt"
            tumor_lbl=args.dir_where_text_file_for_image_paths_is_in+"/train_labels.csv"
            # print("tumor_trn",tumor_trn)
            # print("tumor_lbl",tumor_lbl)
            # /mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/tumor_trn.txt
            # /mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train_labels.csv

            # ================================================================================
            # @ Dataset and Dataloader

            # @ c dataset_inst_test_tumor: dataset instance of tumor dataset
            dataset_inst_test_tumor = dataset_tumor.Dataset_Tumor(
                txt_containing_paths=tumor_trn,
                txt_containing_labels=tumor_lbl,
                is_train=False,
                args=args)

            # @ c dataloader_tumor_test: dataloader instance of tumor dataset
            dataloader_tumor_test = torch.utils.data.DataLoader(
                dataset=dataset_inst_test_tumor,
                batch_size=batch_size,
                shuffle=False,
                num_workers=3)

            # ================================================================================
            # @ c num_imgs_test: number of entire test images
            num_imgs_test = len(dataset_inst_test_tumor)

            # ================================================================================
            # @ Create network and optimizer
            if args.train_method == "train_by_transfer_learning_using_resnet":
                model_api_instance = model_api_module.Model_API_class(args)

            # ================================================================================
            predicted_values = []
            true_values = []
            img_paths = []

            # ================================================================================
            # @ Iterate all batches (batch1+batch2+...+batchn=entire images)
            for idx, data in enumerate(dataloader_tumor_test):
                # print("idx",idx)
                # print("data",data)
                # [('/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/e693f9ac4097289c317831960514b78701999cd9.tif\n',
                #   '/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/train/e6941f6c6825e7c409b9364e2fb6c2d629df8a76.tif\n',),
                #  [('e693f9ac4097289c317831960514b78701999cd9','e6941f6c6825e7c409b9364e2fb6c2d629df8a76'),tensor([1,0])]]

                # ================================================================================
                # @ c imgs: paths of validation images
                imgs = data[0]

                img_paths.extend(imgs)

                # @ c lbls: labels of the validation images
                lbls = data[1][1].numpy()

                # @ c num_imgs: number of validation images in one batch
                num_imgs = lbls.shape[0]
                # print("num_imgs",num_imgs)
                # 11
                # @ Load images from paths

                # ================================================================================
                test_imgs_list = []
                for one_img_path in imgs:
                    one_loaded_img = utils_image.load_img(one_img_path)
                    # print("one_loaded_img",one_loaded_img.shape)
                    # (96, 96, 3)

                    one_loaded_img = resize(one_loaded_img, (224, 224))

                    test_imgs_list.append(one_loaded_img)

                # ================================================================================
                test_imgs_np = np.array(test_imgs_list).transpose(0, 3, 1, 2)

                # @ If you want to use center (48,48) image from (96,96) image
                # test_imgs_np=test_imgs_np[:,:,24:72,24:72]
                # print("test_imgs_np",test_imgs_np.shape)
                # (11, 3, 48, 48)

                test_imgs_tc = Variable(torch.Tensor(test_imgs_np).cuda())

                # ================================================================================
                # @ Make predictions

                prediction = model_api_instance.gen_net(test_imgs_tc)
                # print("prediction",prediction)
                # tensor([[-2.0675],
                #         [-2.9296],

                sigmoid = torch.nn.Sigmoid()

                prediction_np = sigmoid(prediction).cpu().numpy()

                # ================================================================================
                # @ Make predicted labels

                prediction_np = np.where(prediction_np > 0.5, 1, 0).squeeze()
                # print("prediction_np",prediction_np)
                # [0 0 1 0 0 0 0 0 1 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 1 0 0]
                # print("lbls",lbls)
                # [1 0 0 0 0 1 0 0 1 0 0 1 1 0 1 0 1 0 0 0 0 0 0 0 1 0 1 1 1 0]

                predicted_values.extend(prediction_np)

                true_values.extend(lbls)

            # ================================================================================
            y_true = true_values
            y_pred = predicted_values

            # ================================================================================
            # @ Binary Confusion Matrix

            b_c_mat = confusion_matrix(true_values,
                                       predicted_values,
                                       labels=[0, 1])
            # print("b_c_mat",b_c_mat)
            # [[30  2]
            #  [ 0 68]]

            # With labels=[0,1], rows are true classes and columns are predictions:
            # True Negative (non-tumor predicted as non-tumor)   False Positive (non-tumor predicted as tumor)
            # False Negative (tumor predicted as non-tumor)      True Positive (tumor predicted as tumor)

            # ================================================================================
            # @ metric report

            report = classification_report(
                y_true,
                y_pred,
                target_names=['class Non tumor (neg)', 'class Tumor (pos)'])
            # print(report)
            #                        precision    recall  f1-score   support

            # class Non tumor (neg)       0.97      1.00      0.99        68
            #     class Tumor (pos)       1.00      0.94      0.97        32

            #             micro avg       0.98      0.98      0.98       100
            #             macro avg       0.99      0.97      0.98       100
            #          weighted avg       0.98      0.98      0.98       100

            # ================================================================================
            print("accuracy_score", accuracy_score(y_true, y_pred))
            # 0.98

            print("precision_score", precision_score(y_true, y_pred))
            # 1.0

            print("recall_score", recall_score(y_true, y_pred))
            # 0.9375

            # print("fbeta_score",fbeta_score(y_true, y_pred, beta))

            print("f1_score", fbeta_score(y_true, y_pred, beta=1))
            # 0.967741935483871

            # ================================================================================
            # @ ROC curve
            # Note: hard 0/1 predictions give only one operating point;
            # pass the sigmoid scores for a full curve
            fpr, tpr, thresholds = roc_curve(y_true, y_pred)
            plt.plot(fpr, tpr, 'o-', label="ResNet classifier")
            plt.title('Receiver operating characteristic')
            plt.show()

    elif args.task_mode == "submission":
        with torch.no_grad():  # @ Use network without calculating gradients

            sub_ds = dataset_tumor_test.Dataset_Tumor_Submission()
            print("sub_ds", sub_ds)

            sub_dl = torch.utils.data.DataLoader(dataset=sub_ds,
                                                 batch_size=batch_size,
                                                 shuffle=False,
                                                 num_workers=3)
            print("sub_dl", sub_dl)

            # ================================================================================
            # @ c num_imgs_test: number of entire test images

            num_imgs_test = len(sub_ds)

            # ================================================================================
            # @ Create network and optimizer

            if args.train_method == "train_by_transfer_learning_using_resnet":
                model_api_instance = model_api_module.Model_API_class(args)

            # ================================================================================
            label_submission = pd.read_csv(
                "/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/sample_submission.csv",
                encoding='utf8')
            base_names = label_submission.iloc[:, 0].tolist()
            # print("base_names",base_names)

            # ================================================================================
            predicted_values = []
            # @ Iterate all batches (batch1+batch2+...+batchn=entire images)
            for idx, data in enumerate(sub_dl):
                # print("idx",idx)
                # print("data",data)
                # 0
                # ['/mnt/1T-5e7/mycodehtml/bio_health/Kaggle_histopathologic-cancer-detection/Data/test/0b2ea2a822ad23fdb1b5dd26653da899fbd2c0d5.tif',

                imgs = data

                # ================================================================================
                test_imgs_list = []
                for one_img_path in imgs:
                    one_loaded_img = utils_image.load_img(one_img_path)
                    # print("one_loaded_img",one_loaded_img.shape)
                    # (96, 96, 3)

                    one_loaded_img = resize(one_loaded_img, (224, 224))

                    test_imgs_list.append(one_loaded_img)

                # ================================================================================
                test_imgs_np = np.array(test_imgs_list).transpose(0, 3, 1, 2)

                # @ If you want to use center (48,48) image from (96,96) image
                # test_imgs_np=test_imgs_np[:,:,24:72,24:72]
                # print("test_imgs_np",test_imgs_np.shape)
                # (11, 3, 48, 48)

                test_imgs_tc = Variable(torch.Tensor(test_imgs_np).cuda())
                # print("test_imgs_tc",test_imgs_tc.shape)
                # torch.Size([30, 3, 224, 224])

                # ================================================================================
                # @ Make predictions
                prediction = model_api_instance.gen_net(test_imgs_tc)
                # print("prediction",prediction)
                # tensor([[-2.0675],
                # ...
                #         [-1.2222]], device='cuda:0')

                sigmoid = torch.nn.Sigmoid()

                prediction_np = sigmoid(prediction).cpu().numpy()

                # ================================================================================
                # @ Make predicted labels

                prediction_np = np.where(prediction_np > 0.5, 1, 0).squeeze()
                # print("prediction_np",prediction_np)
                # [0 0 1 0 0 0 0 0 1 0 0 1 0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 1 0 0]
                # print("lbls",lbls)
                # [1 0 0 0 0 1 0 0 1 0 0 1 1 0 1 0 1 0 0 0 0 0 0 0 1 0 1 1 1 0]

                predicted_values.extend(prediction_np)

            my_submission = pd.DataFrame({
                'id': base_names,
                'label': predicted_values
            })
            my_submission.to_csv('youngminpar2559_submission.csv', index=False)
Code example #13
def train(args):
  epoch=int(args.epoch)
  batch_size=int(args.batch_size)

  # ================================================================================
  if args.use_visdom=="True":
    global plotter
    plotter=visdom_module.VisdomLinePlotter(env_name='IID network plot')
    visdom_i=1
    # vis=visdom.Visdom(port=8097,server="http://localhost/")
    vis=visdom.Visdom(env="IID network plot")
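    # Minimal raw-visdom usage sketch with the vis handle above; loss_value is
    # a hypothetical running loss and visdom_i the step counter:
    # vis.line(X=np.array([visdom_i]),Y=np.array([loss_value]),
    #          win='loss',update='append')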

  # ================================================================================
  # @ Create network and optimizer
  
  model_api_inst=model_api_module.Model_API_class(args)

  # ================================================================================
  # @ c hyper_param_inst: instance of hyper parameter class
  
  hyper_param_inst=set_hyper_parameters.Set_h_p(args)

  # ================================================================================
  # @ For graph illustration of loss values

  lists_for_visualizing_losses=visualize_loss_values.Visualize_Loss_Values()

  # ================================================================================
  # @ c path_of_text_files
  path_of_text_files=text_file_api_module.Path_Of_Text_Files(args)

  # c txt_paths_in_dict: path of text files in dictionary
  txt_paths_in_dict=path_of_text_files.__dict__
  # print("txt_paths_in_dict",txt_paths_in_dict)

  # ================================================================================
  # # @ Check exception for batch size
  # (Commented out because PyTorch's Dataset and DataLoader can handle this potential issue)

  # cgmit=len(dataset_dense_inst)

  # assert cgmit==len(dataset_iiw_inst),"The number of 'cgmit images' and 'iiw images' is different"

  # print("Current number of 'cgmit images' and 'iiw images':",cgmit)
  # print("Current cgmit/iiw batch size:",batch_size)
  # print("Possible cgmit/iiw batch size:",list(utils_common.divisorGenerator(cgmit)))
  
  # assert str(cgmit/batch_size).split(".")[-1]==str(0),"Check batch size"
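  # --------------------------------------------------------------------------------
  # Hedged sketch (added): one way DataLoader absorbs a non-divisible batch size,
  # which is why the assert above stays commented out. some_dataset stands for any
  # torch Dataset instance; drop_last=True silently discards the final short batch.
  # from torch.utils.data import DataLoader
  # loader=DataLoader(some_dataset,batch_size=batch_size,shuffle=True,drop_last=True)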
  
  # ================================================================================
  # @ Grad-CAM (tested gradient visualization on the trained network)

  # cam=utils_visualize_gradients.CAM(gen_decoder_R)
  # gen_decoder_S.right_conv_6.register_forward_hook(utils_hook_functions.printnorm)
  # gen_decoder_S.right_conv_6.register_backward_hook(utils_hook_functions.printgradnorm)

  # ================================================================================
  # @ Measure training time

  if args.measure_train_time=="True":
    start=timeit.default_timer()
  else:
    pass

  # ================================================================================
  if args.train_mode=="True":
    # @ Iterate over all epochs
    for one_ep in range(epoch):

      # ================================================================================
      # c dataset_dense_inst: dataset instance of dense dataset
      dataset_dense_inst=dataset_returning_one_pair_of_path.Dataset_Returning_One_Pair_Of_Path(
        txt_paths_in_dict["cgintrinsic_trn"],txt_paths_in_dict["cgintrinsic_rgt"],args)
      
      dataset_dense_iter=iter(dataset_dense_inst)

      # ================================================================================
      dataset_tinghuiz_inst=dataset_returning_one_pair_of_path.Dataset_Returning_One_Pair_Of_Path(
        txt_paths_in_dict["tinghuiz_trn"],txt_paths_in_dict["tinghuiz_rgt"],args)

      dataset_tinghuiz_iter=iter(dataset_tinghuiz_inst)
      
      # ================================================================================
      # In my tests, joint training on CGINTRINSIC + Tinghuiz + IIW did not work well
      # I think that's because the CGINTRINSIC/Tinghuiz data and the IIW data differ too much in dataset format
      # This joint training was inspired by the CGIntrinsics project
      # Original code: https://github.com/lixx2938/CGIntrinsics

      # c dataset_iiw_inst: instance of dataset iiw
      # dataset_iiw_inst=dataset_returning_one_pair_of_path.Dataset_Returning_One_Pair_Of_Path(
      #   txt_paths_in_dict["iiw_trn"],txt_paths_in_dict["iiw_json"],args)

      # dataset_iiw_iter=iter(dataset_iiw_inst)

      # ================================================================================
      # I also tested joint training with the SAW dataset, but it didn't produce good results
      # Original code: https://github.com/lixx2938/CGIntrinsics

      # dataset_saw_inst=dataset_returning_one_pair_of_path.Dataset_Returning_One_Pair_Of_Path(
      #   txt_paths_in_dict["saw_trn"],txt_paths_in_dict["saw_npy"],args)
      
      # dataset_saw_iter=iter(dataset_saw_inst)

      # ================================================================================
      # The BigTime dataset is designed to train the IID network in an unsupervised manner
      # Original code: https://github.com/lixx2938/unsupervised-learning-intrinsic-images

      # dataset_bigtime_inst=dataset_returning_one_pair_of_path.Dataset_Returning_One_Pair_Of_Path_non_shuffle(
      #   txt_paths_in_dict["bigtime_trn"],txt_paths_in_dict["bigtime_mask"],args)

      # dataset_bigtime_iter=iter(dataset_bigtime_inst)

      # ================================================================================
      # num_entire_imgs=len(dataset_dense_inst)+len(dataset_tinghuiz_inst)+len(dataset_iiw_inst)
      num_entire_imgs=len(dataset_dense_inst)+len(dataset_tinghuiz_inst)
      # print("num_entire_imgs",num_entire_imgs)
      # 16

      args.__setattr__("num_entire_imgs",num_entire_imgs)
      
      # ================================================================================
      num_iteration_for_iter=int(len(dataset_dense_inst)/int(args.batch_size))

      print("args.leaping_batchsize_for_saving_model)",args.leaping_batchsize_for_saving_model)
      print("num_iteration_for_iter",num_iteration_for_iter)

      if int(args.leaping_batchsize_for_saving_model)>=num_iteration_for_iter:
        raise Exception("args.leaping_batchsize_for_saving_model must be smaller than num_iteration_for_iter")
      else:
        pass

      # ================================================================================
      # If you don't use Augmentor
      if args.use_augmentor=="False":
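        # NOTE (added): dense_d_list, cgmit_d_list, and bs are not defined in this
        # function; this branch survives from an older code path and needs those
        # lists and the batch offset prepared before it can run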

        # c dense_d_bs_pa: dense dataset batch size paths
        dense_d_bs_pa=dense_d_list[bs:bs+batch_size]
        cgmit_d_bs_pa=cgmit_d_list[bs:bs+batch_size]

        # ================================================================================
        dense_trn_imgs,dense_rgt_imgs=utils_common.generate_dense_dataset(
          dense_d_bs_pa,img_h=512,img_w=512)

        trn_imgs_dense,rgt_imgs_dense,sgt_imgs_dense,mask_imgs_dense=\
          utils_common.generate_cgmit_dataset(cgmit_d_bs_pa,img_h=512,img_w=512)

      else: # If you use Augmentor
        # @ Iterate over all images in a single epoch
        for one_iter in range(num_iteration_for_iter):
          # @ Remove gradients
          model_api_inst.remove_existing_gradients_before_starting_new_training()

          # ================================================================================
          # @ Train over dense dataset

          loss_r_data_dense,loss_s_data_dense,loss_o_data_dense,loss_o_grad_dense,loss_s_grad_dense,loss_r_grad_dense,pred_r_color_dense,pred_s_dense=\
            train_over_dense_dataset.train(
              dataset_dense_iter,batch_size,model_api_inst,hyper_param_inst,lists_for_visualizing_losses,args)

          # ================================================================================
          # @ Train over tinghuiz dataset

          loss_r_data_tinghuiz,loss_s_data_tinghuiz,loss_o_data_tinghuiz,loss_o_grad_tinghuiz,loss_s_grad_tinghuiz,loss_r_grad_tinghuiz,tinghuiz_r,tinghuiz_s=\
            train_over_tinghuiz_dataset.train(
              dataset_tinghuiz_iter,batch_size,model_api_inst,hyper_param_inst,lists_for_visualizing_losses,args)

          # ================================================================================
          # @ Train over IIW dataset

          # iiw_I_loss
          # loss_iiw=train_over_iiw_dataset.train(
          #   dataset_iiw_iter,batch_size,model_api_inst,hyper_param_inst,lists_for_visualizing_losses,args)
          # print("loss_iiw",loss_iiw)
          # tensor([0.0001], device='cuda:0', grad_fn=<MulBackward0>)
          # 3.633527994155884

          # Took a long time
          # 0:02:10.749899

          # After removing the for loop in data augmentation, much less time
          # 0:00:00.720973

          # After training one image at a time, even less time
          # 0:00:00.405261

          # ================================================================================
          # @ Train over SAW dataset
          
          # saw_I_loss
          # saw_loss,saw_I_loss=train_over_saw_dataset.train(
          #   dataset_saw_iter,batch_size,model_api_inst,hyper_param_inst,lists_for_visualizing_losses,args)
          # print("saw_loss",saw_loss)
          # 1.520530343055725

          # ================================================================================
          # Update based on bigtime dataset (BT)

          # bigtime_loss_I
          # bigtime_loss_r,bigtime_loss_I=train_over_bigtime_dataset.train(
          #   dataset_bigtime_iter,batch_size,model_api_inst,hyper_param_inst,lists_for_visualizing_losses,args)
          # print("bigtime_loss_I",bigtime_loss_I)
          # print("bigtime_loss_r",bigtime_loss_r)

          # ================================================================================
          entire_loss=loss_r_data_dense+loss_s_data_dense+loss_o_data_dense+loss_o_grad_dense+loss_s_grad_dense+loss_r_grad_dense+\
                      loss_r_data_tinghuiz+loss_s_data_tinghuiz+loss_o_data_tinghuiz+loss_o_grad_tinghuiz+loss_s_grad_tinghuiz+loss_r_grad_tinghuiz

          entire_loss.backward()
          model_api_inst.optimizer.step()
          model_api_inst.empty_cache_of_gpu_after_training_batch()
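          # (added note) Standard PyTorch update: the twelve per-dataset losses are
          # summed into one scalar so a single backward() call populates gradients
          # for both dataset branches before optimizer.step() applies them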

          # ================================================================================
          # region @ Save loss into tensorboard 
          # (I tested TensorBoard, but it is commented out because losses and predicted images can be plotted with visdom)

          # --------------------------------------------------------------------------------
          # conda activate py36gputorch100 &&
          # cd /mnt/1T-5e7/papers/cv/IID/Deep_Adversial_Residual_Network_for_IID/prj_root/src/tensorboard &&
          # tensorboard --logdir='./logs' --port=6006

          # --------------------------------------------------------------------------------
          # tb_logger=logger.Logger('./src/tensorboard/logs')

          # --------------------------------------------------------------------------------
          # print(lists_for_visualizing_losses.lo_list)
          # [('loss_r_data_dense',[75.04694366455078]),
          #  ('loss_r_grad_dense',[1.0475735664367676]),
          #  ('loss_s_data_dense',[48.632972717285156]),
          #  ('loss_s_grad_dense',[0.5619246363639832]),
          #  ('loss_o_data_dense',[46.09053039550781]),
          #  ('loss_o_grad_dense',[1.4934182167053223]),
          #  ('iiw_loss_temp',[2.321777105331421]),
          #  ('saw_loss_temp',[0.8101050853729248]),
          #  ('bigtime_loss_temp',[0.346628874540329])]

          # --------------------------------------------------------------------------------
          # aaa,bbb,ccc,ddd,eee,fff,ggg,hhh,iii=lists_for_visualizing_losses.lo_list
          # print("a",a)
          # a ('loss_r_data_dense', [82.376953125])

          # --------------------------------------------------------------------------------
          # info={aaa[0]:aaa[1][0],bbb[0]:bbb[1][0],ccc[0]:ccc[1][0],ddd[0]:ddd[1][0],eee[0]:eee[1][0],
          #       fff[0]:fff[1][0],ggg[0]:ggg[1][0],hhh[0]:hhh[1][0],iii[0]:iii[1][0]}

          # --------------------------------------------------------------------------------
          # for tag,value in info.items():
          #   tb_logger.scalar_summary(tag,value,int(one_iter)+1)
          # endregion

          # ================================================================================
          # region @ Use visdom
          if args.use_visdom=="True":
            # pip install visdom
            # conda install -c conda-forge visdom
            # python -m visdom.server
            # http://localhost:8097
            # <your_remote_server_ip>:<visdom port (default 8097)>
            plotter.plot("loss_r_data_dense","train","loss_r_data_dense",visdom_i,loss_r_data_dense.item())
            plotter.plot("loss_s_data_dense","train","loss_s_data_dense",visdom_i,loss_s_data_dense.item())
            plotter.plot("loss_o_data_dense","train","loss_o_data_dense",visdom_i,loss_o_data_dense.item())
            plotter.plot("loss_o_grad_dense","train","loss_o_grad_dense",visdom_i,loss_o_grad_dense.item())
            plotter.plot("loss_s_grad_dense","train","loss_s_grad_dense",visdom_i,loss_s_grad_dense.item())
            plotter.plot("loss_r_grad_dense","train","loss_r_grad_dense",visdom_i,loss_r_grad_dense.item())
            plotter.plot("loss_r_data_tinghuiz","train","loss_r_data_tinghuiz",visdom_i,loss_r_data_tinghuiz.item())
            plotter.plot("loss_s_data_tinghuiz","train","loss_s_data_tinghuiz",visdom_i,loss_s_data_tinghuiz.item())
            plotter.plot("loss_o_data_tinghuiz","train","loss_o_data_tinghuiz",visdom_i,loss_o_data_tinghuiz.item())
            plotter.plot("loss_o_grad_tinghuiz","train","loss_o_grad_tinghuiz",visdom_i,loss_o_grad_tinghuiz.item())
            plotter.plot("loss_s_grad_tinghuiz","train","loss_s_grad_tinghuiz",visdom_i,loss_s_grad_tinghuiz.item())
            plotter.plot("loss_r_grad_tinghuiz","train","loss_r_grad_tinghuiz",visdom_i,loss_r_grad_tinghuiz.item())
            # plotter.plot("loss_iiw","train","loss_iiw",visdom_i,loss_iiw.item())
            
            # images
            vis.images(Variable(torch.tensor(pred_r_color_dense[:2,:,:,:])),opts=dict(title='Reflectance', caption='Reflectance.'),win='wazzup')
            vis.images(Variable(torch.tensor(pred_s_dense[:2,:,:,:])),opts=dict(title='Shading', caption='Shading.'),win='wazzup2')

            visdom_i=visdom_i+1
          # endregion 
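          # --------------------------------------------------------------------------------
          # Hedged sketch (added): VisdomLinePlotter is a project-local wrapper; with
          # the plain visdom API the same append-style line plot would look roughly like:
          # vis.line(X=np.array([visdom_i]),Y=np.array([loss_r_data_dense.item()]),
          #          win="loss_r_data_dense",update="append",
          #          opts=dict(title="loss_r_data_dense"))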

          # ================================================================================
          del loss_r_data_dense,loss_s_data_dense,loss_o_data_dense,loss_o_grad_dense,loss_s_grad_dense,loss_r_grad_dense,\
              loss_r_data_tinghuiz,loss_s_data_tinghuiz,loss_o_data_tinghuiz,loss_o_grad_tinghuiz,loss_s_grad_tinghuiz,loss_r_grad_tinghuiz

          # ================================================================================
          # @ Save model after batch

          if int(one_iter)==0:
            pass
          elif one_iter%int(args.leaping_batchsize_for_saving_model)==0:
            model_api_inst.save_model(is_batch=True,iter_num=one_iter)
          else:
            pass
          
      # ================================================================================
      # @ Save model after epoch

      model_api_inst.save_model(is_batch=False,iter_num=one_ep)

      # ================================================================================
      # @ Time for single epoch

      if args.measure_train_time=="True":
        stop=timeit.default_timer()
        took_time_sec=int(stop-start)
        took_time_min=str(datetime.timedelta(seconds=took_time_sec))
        print("Took minutes until finishing epoch of "+str(one_ep)+\
              " with "+str(args.scheduler)+" scheduler:",took_time_min)
      else:
        pass
    
    # ================================================================================
    # @ Visualize loss at the end of training

    if args.visualize_loss_values=="True":
      loss_list=lists_for_visualizing_losses.lo_list
      for i,tup in enumerate(loss_list):
        plt.subplot(int(np.ceil(len(loss_list)/3)),3,i+1)
        plt.title(tup[0])
        plt.plot(tup[1])
      plt.savefig("loss.png")
      plt.show()
    else:
      pass
  
  # ================================================================================
  else: # @ Test trained model
    with torch.no_grad():

      # @ Set test directories
      dense_trn_img_p="./Data/Images_for_inference/*.png"
      # dense_trn_img_p="/mnt/1T-5e7/mycodehtml/data_col/cv/IID_f_w_w/iiw-dataset/test/*.png"
      # dense_trn_img_p="/mnt/1T-5e7/mycodehtml/data_col/cv/IID_f_w_w/iiw-dataset/office/*.png"
      # dense_trn_img_p="/home/young/Downloads/images/training/temp/*.jpg"
      # dense_trn_img_p="/home/young/Downloads/Sticheo_example_video/0_No_Movement/sample/FPS15/*.png"
      # dense_trn_img_p="/home/young/Downloads/Sticheo_example_video/1_Small_Movement/sample/original_frame_FPS15/*.png"
      # dense_trn_img_p="/home/young/Downloads/Sticheo_example_video/2_Medium_Movement/sample/original_frame_FPS15/*.png"

      # @ For video frames
      # r_images="/home/young/Downloads/Sticheo_example_video/0_No_Movement/sample/FPS15/Decomposed/R/*.png"
      # s_images="/home/young/Downloads/Sticheo_example_video/0_No_Movement/sample/FPS15/Decomposed/S/*.png"
      # r_images="/home/young/Downloads/Sticheo_example_video/1_Small_Movement/sample/original_frame_FPS15/R/*.png"
      # s_images="/home/young/Downloads/Sticheo_example_video/1_Small_Movement/sample/original_frame_FPS15/S/*.png"
      # r_images="/home/young/Downloads/Sticheo_example_video/2_Medium_Movement/sample/R/*.png"
      # s_images="/home/young/Downloads/Sticheo_example_video/2_Medium_Movement/sample/S/*.png"

      # @ Recovered directories
      # recovered_imgs="/home/young/Downloads/Sticheo_example_video/0_No_Movement/sample/FPS15/Decomposed/Recovered/*.png"
      # recovered_imgs="/home/young/Downloads/Sticheo_example_video/1_Small_Movement/sample/original_frame_FPS15/Recovered/*.png"
      # recovered_imgs="/home/young/Downloads/Sticheo_example_video/2_Medium_Movement/sample/Recovered/*.png"

      # ================================================================================
      # @ The following commented-out code recovers images from their R and S decompositions (I = R * S)

      # r_img_li=utils_common.get_file_list(r_images)
      # s_img_li=utils_common.get_file_list(s_images)
      
      # for one_pair in list(zip(r_img_li,s_img_li)):
      #   # print("one_pair[0]",one_pair[0])
      #   # /home/young/Downloads/Sticheo_example_video/0_No_Movement/sample/FPS15/Decomposed/R/dinner_0001_predref.png

      #   recovered_fn=one_pair[0].replace("_predref","").replace("/R/","/Recovered/")

      #   one_r_img=utils_image.load_img(one_pair[0])/255.0
      #   one_s_img=utils_image.load_img(one_pair[1])/255.0
      #   one_s_img=one_s_img[:,:,np.newaxis]
      #   recovered_img=one_r_img*one_s_img

      #   # recovered_img=cv2.normalize(recovered_img,None,alpha=0,beta=1,norm_type=cv2.NORM_MINMAX,dtype=cv2.CV_32F)
      #   # print("np.min(recovered_img)",np.min(recovered_img))
      #   # print("np.max(recovered_img)",np.max(recovered_img))

      #   # kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
      #   # kernel = np.array([[-0.1,-0.1,-0.1], [-0.1,0.9,-0.1], [-0.1,-0.1,-0.1]])
      #   # recovered_img = cv2.filter2D(recovered_img, -1, kernel)

      #   scipy.misc.imsave(recovered_fn,recovered_img)

      # ================================================================================
      # @ The following commented-out code sharpens the recovered images and adjusts brightness and contrast

      # recovered_img_li=utils_common.get_file_list(recovered_imgs)

      # for one_img in recovered_img_li:
      #   fn=one_img.replace("/Recovered/","/Recovered_Sharpened_Edited_brightness_and_constrast/")
      #   img = cv2.imread(one_img, 1)[:,:,::-1]
      #   # kernel = np.array([[-1,-1,-1], [-1,5,-1], [-1,-1,-1]])
      #   kernel = np.array([[-1,-1,-1,-1,-1],
      #                      [-1,2,2,2,-1],
      #                      [-1,2,8,2,-1],
      #                      [-1,2,2,2,-1],
      #                      [-1,-1,-1,-1,-1]])/8.0
      #   im = cv2.filter2D(img, -1, kernel)

      #   brightness = 50
      #   contrast = 30
      #   img = np.int16(im)
      #   img = img * (contrast/127+1) - contrast + brightness
      #   img = np.clip(img, 0, 255)
      #   im = np.uint8(img)

      #   # img = cv2.imread('your path',0)
      #   # brt = 40
      #   # img[img < 255-brt] += brt  # raise any value in the 2D array below the max limit
      #   # cv2.imshow('img', img)

      #   scipy.misc.imsave(fn,im)
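      # --------------------------------------------------------------------------------
      # Worked example (added, my own check): with contrast=30 and brightness=50,
      # a pixel of value 100 maps to 100*(30/127+1)-30+50, i.e. about 143.6, under
      # the formula above, and np.clip() keeps every result inside [0,255]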

      # ================================================================================
      dense_tr_img_li=utils_common.get_file_list(dense_trn_img_p)
      iteration=int(len(dense_tr_img_li))
      num_imgs=len(dense_tr_img_li)

      # Iterate over all input images
      for itr in range(iteration):
        fn=dense_tr_img_li[itr].split("/")[-1].split(".")[0]

        ol_dense_tr_i=utils_image.load_img(dense_tr_img_li[itr])/255.0
        ori_h=ol_dense_tr_i.shape[0]
        ori_w=ol_dense_tr_i.shape[1]

        # Test various input resolutions
        ol_dense_tr_i=utils_image.resize_img(ol_dense_tr_i,img_h=512,img_w=512)
        # ol_dense_tr_i=utils_image.resize_img(ol_dense_tr_i,img_h=1280,img_w=1280)
        # ol_dense_tr_i=utils_image.resize_img(ol_dense_tr_i,img_h=1536,img_w=1536)
        # ol_dense_tr_i=utils_image.resize_img(ol_dense_tr_i,img_h=1792,img_w=1792)
        # ol_dense_tr_i=utils_image.resize_img(ol_dense_tr_i,img_h=1920,img_w=1920)
        # ol_dense_tr_i=utils_image.resize_img(ol_dense_tr_i,img_h=2048,img_w=2048)

        # ================================================================================
        ol_dense_tr_i=ol_dense_tr_i.transpose(2,0,1)
        ol_dense_tr_i=ol_dense_tr_i[np.newaxis,:,:,:]
        con_O_img_arr=Variable(torch.Tensor(ol_dense_tr_i).cuda())
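        # (added note) HWC -> CHW transpose plus a leading batch axis: PyTorch
        # convolution layers expect NCHW float tensors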

        # ================================================================================
        if args.use_integrated_decoders=="True":
          
          dense_pred_S_imgs_direct=model_api_inst.gen_net(con_O_img_arr)

          for one_pred_img in range(dense_pred_S_imgs_direct.shape[0]):
            # --------------------------------------------------------------------------------
            pred_one_S_img=dense_pred_S_imgs_direct[one_pred_img,:,:,:].unsqueeze(0)
            pred_one_S_img=torch.nn.functional.interpolate(
              pred_one_S_img,size=(ori_h,ori_w),scale_factor=None,mode='bilinear',align_corners=True)
            
            one_tr_img=con_O_img_arr[one_pred_img,:,:,:].unsqueeze(0)
            one_tr_img=torch.nn.functional.interpolate(
              one_tr_img,size=(ori_h,ori_w),scale_factor=None,mode='bilinear',align_corners=True)
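            # (added note) interpolate() upsamples the 512x512 network output back
            # to the source resolution; align_corners=True keeps the bilinear
            # sampling consistent with the other resize calls in this file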

            # --------------------------------------------------------------------------------
            one_tr_img=one_tr_img.detach().cpu().numpy().squeeze().transpose(1,2,0)
            pred_one_S_img=pred_one_S_img.detach().cpu().numpy().squeeze()
            # pred_one_S_img=np.clip(pred_one_S_img,0.0,1.3)
            # pred_one_S_img=(pred_one_S_img-np.min(pred_one_S_img))/np.ptp(pred_one_S_img)
            pred_one_S_img[pred_one_S_img==0.0]=0.001
            pred_one_S_img=pred_one_S_img[:,:,np.newaxis]

            pred_one_R_img=one_tr_img/pred_one_S_img
            print("pred_one_R_img",np.min(pred_one_R_img))
            print("pred_one_R_img",np.max(pred_one_R_img))

            pred_one_R_img=np.clip(pred_one_R_img,0.0,1.2)
            pred_one_R_img=(pred_one_R_img-np.min(pred_one_R_img))/np.ptp(pred_one_R_img)
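            # (added note) This follows the intrinsic image model I = R * S, so
            # reflectance is recovered as R = I / S; the 0.001 floor set on the
            # shading above guards the division, and np.ptp() rescales the result
            # into [0,1] for saving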

            # Tested various combinations, for example,
            # option1: no normalize + colorize
            # option2: normalize + colorize + clip
            # option3: no normalize + colorize + clip
            # ...

            # pred_one_S_img=np.clip(pred_one_S_img,0.0,1.3)
            # pred_one_S_img=(pred_one_S_img-np.min(pred_one_S_img))/np.ptp(pred_one_S_img)

            # img=np.clip(pred_one_R_img,0.0,1.2)
            # img=(img-np.mean(img))/np.std(img)
            # pred_one_R_img=(img-np.min(img))/np.ptp(img)
            # pred_one_R_img=utils_image.bilateral_f(one_tr_img,pred_one_R_img)

            # pred_one_R_img=utils_image.colorize_tc(pred_one_R_img,one_tr_img)
            # pred_one_R_img=pred_one_R_img.detach().cpu().numpy().squeeze().transpose(1,2,0)
            # pred_one_S_img=pred_one_S_img.detach().cpu().numpy().squeeze()

            # print("pred_one_R_img",np.min(pred_one_R_img))
            # print("pred_one_R_img",np.max(pred_one_R_img))
            # print("pred_one_S_img",np.min(pred_one_S_img))
            # print("pred_one_S_img",np.max(pred_one_S_img))

            scipy.misc.imsave('./result/'+fn+'_predsha.png',pred_one_S_img.squeeze())
            scipy.misc.imsave('./result/'+fn+'_predref.png',pred_one_R_img)

            if args.measure_train_time=="True":
              stop=timeit.default_timer()
              time_for_inference=stop-start
            else:
              pass
        else:
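          # NOTE (added): gen_encoder, gen_decoder_R, and gen_decoder_S come from an
          # older non-integrated-decoders code path and are not defined in this scope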
          o_conv_8_avgpool,o_conv_7_avgpool,o_conv_6_avgpool,o_conv_5_avgpool,\
          o_conv_4_avgpool,o_conv_3_avgpool,o_conv_2_avgpool,o_conv_1_avgpool=gen_encoder(con_O_img_arr)

          dense_pred_R_imgs_direct=gen_decoder_R(
            o_conv_8_avgpool,o_conv_7_avgpool,o_conv_6_avgpool,o_conv_5_avgpool,\
            o_conv_4_avgpool,o_conv_3_avgpool,o_conv_2_avgpool,o_conv_1_avgpool)
          dense_pred_S_imgs_direct=gen_decoder_S(
            o_conv_8_avgpool,o_conv_7_avgpool,o_conv_6_avgpool,o_conv_5_avgpool,\
            o_conv_4_avgpool,o_conv_3_avgpool,o_conv_2_avgpool,o_conv_1_avgpool)
          
          # out=cam.get_cam(0)
          
          for one_pred_img in range(dense_pred_S_imgs_direct.shape[0]):
            # --------------------------------------------------------------------------------
            # Resize by torch.nn.functional.interpolate()
            pred_one_R_img=dense_pred_R_imgs_direct[one_pred_img,:,:,:].unsqueeze(0)
            pred_one_R_img=torch.nn.functional.interpolate(
              pred_one_R_img,size=(ori_h,ori_w),scale_factor=None,mode='bilinear',align_corners=True)
            
            pred_one_S_img=dense_pred_S_imgs_direct[one_pred_img,:,:,:].unsqueeze(0)
            pred_one_S_img=torch.nn.functional.interpolate(
              pred_one_S_img,size=(ori_h,ori_w),scale_factor=None,mode='bilinear',align_corners=True)
            
            one_tr_img=con_O_img_arr[one_pred_img,:,:,:].unsqueeze(0)
            one_tr_img=torch.nn.functional.interpolate(
              one_tr_img,size=(ori_h,ori_w),scale_factor=None,mode='bilinear',align_corners=True)

            # --------------------------------------------------------------------------------
            pred_one_R_img=pred_one_R_img.detach().cpu().numpy().squeeze()
            one_tr_img=one_tr_img.detach().cpu().numpy().squeeze().transpose(1,2,0)

            # pred_one_R_img,sha=utils_image.colorize(pred_one_R_img,one_tr_img)
            # img=np.clip(pred_one_R_img,0.0,1.2)
            # img=(img-np.mean(img))/np.std(img)
            # pred_one_R_img=(img-np.min(img))/np.ptp(img)
            # pred_one_R_img=utils_image.bilateral_f(one_tr_img,pred_one_R_img)

            pred_one_S_img=pred_one_S_img.detach().cpu().numpy().squeeze()

            # pred_one_R_img=utils_image.colorize_tc(pred_one_R_img,one_tr_img)
            # pred_one_R_img=pred_one_R_img.detach().cpu().numpy().squeeze().transpose(1,2,0)
            # pred_one_S_img=pred_one_S_img.detach().cpu().numpy().squeeze()

            # print("pred_one_R_img",np.min(pred_one_R_img))
            # print("pred_one_R_img",np.max(pred_one_R_img))
            # print("pred_one_S_img",np.min(pred_one_S_img))
            # print("pred_one_S_img",np.max(pred_one_S_img))

            scipy.misc.imsave('./result/'+fn+'_predsha.png',pred_one_S_img)
            scipy.misc.imsave('./result/'+fn+'_predref.png',pred_one_R_img)

      print("input_size",args.input_size)      
      print("args.batchsize",args.batch_size)
      print("num_imgs",num_imgs)
      print("time_for_inference",time_for_inference)
Code example #14
0
def visualize(args):
    img_crop_path = "/mnt/1T-5e7/mycodehtml/bio_health/SPIE-AAPM-NCI_BreastPathQ_Cancer_Cellularity_Challenge_2019/Data/breastpathq/datasets/cells/1_Region 1_crop.tif"
    loaded_img_crop = utils_image.load_img(img_crop_path)
    # print("loaded_img",loaded_img.shape)
    # (451, 521, 3)

    xml_key_path = "/mnt/1T-5e7/mycodehtml/bio_health/SPIE-AAPM-NCI_BreastPathQ_Cancer_Cellularity_Challenge_2019/Data/breastpathq/datasets/cells/1_Region 1_key.xml"
    whole_xml_key_data = utils_data.load_xml_file(xml_key_path)

    img_mask_path = "/mnt/1T-5e7/mycodehtml/bio_health/SPIE-AAPM-NCI_BreastPathQ_Cancer_Cellularity_Challenge_2019/Data/breastpathq/datasets/cells/1_Region 1_mask.tif"
    loaded_img_mask = utils_image.load_img(img_mask_path)

    xml_crop_path = "/mnt/1T-5e7/mycodehtml/bio_health/SPIE-AAPM-NCI_BreastPathQ_Cancer_Cellularity_Challenge_2019/Data/breastpathq/datasets/cells/Sedeen/1_Region 1_crop.session.xml"
    whole_xml_crop_data = utils_data.load_xml_file(xml_crop_path)

    # ================================================================================
    # print("whole_xml_key_data",whole_xml_key_data)

    data_name = whole_xml_key_data[0][0]
    data_region = whole_xml_key_data[0][1]
    data_color = whole_xml_key_data[0][2]
    data_points = whole_xml_key_data[0][3]
    # print("data_points",dir(data_points))

    data_points_li = str(data_points).replace("\n", "").replace(
        "<point-list>", "").replace("<point>",
                                    "").replace("</point-list>",
                                                "").split("</point>")[:-1]
    # print("data_points_li",data_points_li)
    # ['32,18', '51,13',

    x_vals = []
    y_vals = []
    for one_coord in data_points_li:
        x_val = one_coord.split(",")[0]
        y_val = one_coord.split(",")[1]
        x_vals.append(x_val)
        y_vals.append(y_val)

    # print("x_vals",x_vals)
    # ['32', '51',
    # print("y_vals",y_vals)
    # ['18', '13',

    x_vals = list(map(int, x_vals))
    y_vals = list(map(int, y_vals))
    # print("x_vals",x_vals)
    # [32, 51,
    # print("y_vals",y_vals)
    # [18, 13,

    # ================================================================================
    data_name_1 = whole_xml_key_data[1][0]
    data_region_1 = whole_xml_key_data[1][1]
    data_color_1 = whole_xml_key_data[1][2]
    data_points_1 = whole_xml_key_data[1][3]
    # print("data_points_1",data_points_1)

    data_points_1_li = str(data_points_1).replace("\n", "").replace(
        "<point-list>", "").replace("<point>",
                                    "").replace("</point-list>",
                                                "").split("</point>")[:-1]
    # print("data_points_1_li",data_points_1_li)
    # ['32,18', '51,13',

    x_vals_1 = []
    y_vals_1 = []
    for one_coord in data_points_1_li:
        x_val_1 = one_coord.split(",")[0]
        y_val_1 = one_coord.split(",")[1]
        x_vals_1.append(x_val_1)
        y_vals_1.append(y_val_1)

    # print("x_vals_1",x_vals_1)
    # ['32', '51',
    # print("y_vals_1",y_vals_1)
    # ['18', '13',

    x_vals_1 = list(map(int, x_vals_1))
    y_vals_1 = list(map(int, y_vals_1))
    # print("x_vals_1",x_vals_1)
    # [32, 51,
    # print("y_vals_1",y_vals_1)
    # [18, 13,

    # ================================================================================
    data_name_2 = whole_xml_key_data[2][0]
    data_region_2 = whole_xml_key_data[2][1]
    data_color_2 = whole_xml_key_data[2][2]
    data_points_2 = whole_xml_key_data[2][3]
    # print("data_points_2",dir(data_points_2))

    data_points_2_li = str(data_points_2).replace("\n", "").replace(
        "<point-list>", "").replace("<point>",
                                    "").replace("</point-list>",
                                                "").split("</point>")[:-1]
    # print("data_points_2_li",data_points_2_li)
    # ['32,18', '51,13',

    x_vals_2 = []
    y_vals_2 = []
    for one_coord in data_points_2_li:
        x_val_2 = one_coord.split(",")[0]
        y_val_2 = one_coord.split(",")[1]
        x_vals_2.append(x_val_2)
        y_vals_2.append(y_val_2)

    # print("x_vals_2",x_vals_2)
    # ['32', '51',
    # print("y_vals_2",y_vals_2)
    # ['18', '13',

    x_vals_2 = list(map(int, x_vals_2))
    y_vals_2 = list(map(int, y_vals_2))
    # print("x_vals_2",x_vals_2)
    # [32, 51,
    # print("y_vals_2",y_vals_2)
    # [18, 13,

    # ================================================================================
    utils_image.scatter_points_onto_img(
        loaded_img_crop,
        x_vals,
        y_vals,
        color="b",
        title="Image: 1_Region 1_crop.tif, XML: 1_Region 1_key.xml Region 2")
    # /home/young/Pictures/2019_04_14_01:14:03.png
    utils_image.scatter_points_onto_img(
        loaded_img_crop,
        x_vals_1,
        y_vals_1,
        color="g",
        title="Image: 1_Region 1_crop.tif, XML: 1_Region 1_key.xml Region 3")
    # /home/young/Pictures/2019_04_14_01:14:35.png
    utils_image.scatter_points_onto_img(
        loaded_img_crop,
        x_vals_2,
        y_vals_2,
        color="r",
        title="Image: 1_Region 1_crop.tif, XML: 1_Region 1_key.xml Region 4")
    # /home/young/Pictures/2019_04_14_01:14:58.png

    # ================================================================================
    # print(np.unique(loaded_img_mask))
    # [  0  85 127 255]
    # print("loaded_img_mask",loaded_img_mask[:,:,:3])
    unique_colors = list(
        set(tuple(v) for m2d in loaded_img_mask[:, :, :3] for v in m2d))

    # print("unique_colors",unique_colors)
    # [(0, 255, 127), (0, 0, 0), (255, 255, 255), (0, 85, 0)]

    def use_mask_tif():
        for one_color in unique_colors:
            # print("one_color",one_color)
            # (0, 255, 127)
            # new_img=np.where(loaded_img_mask[:,:,:3]==np.array(one_color),1.0,0.0)

            loaded_img_mask_proc = loaded_img_mask[:, :, :3]

            # Locations matching one_color are True
            new_img = np.all(loaded_img_mask_proc == np.array(one_color),
                             axis=2)
            # print("new_img",new_img.shape)
            # print("new_img",new_img)

            new_img = skimage.morphology.binary_dilation(new_img, square(3))
            new_img = new_img[:, :, np.newaxis].astype("uint8")
            # Locations not matching one_color become 1
            new_img = np.where(new_img == 0, 1, 0)
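            # (added note) Net effect: pixels matching one_color (after dilation)
            # are 0 in new_img, so the multiplication below blanks them out of the
            # crop; those blanked pixels are re-painted green further down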
            # print("new_img",new_img)

            masked_img = loaded_img_crop * new_img
            stacked = np.vstack(masked_img)
            # # print("stacked",stacked.shape)
            # # (234971, 3)

            # # stacked=loaded_img_crop_mask.reshape((-1,3))
            # # print("stacked",stacked.shape)
            # # (234971, 3)

            idx = np.all(stacked == [0, 0, 0], 1)
            # # print("idx",idxs.shape)
            # # idx (234971,)
            # # print("idx",idx)

            # a1=np.vstack(loaded_img_crop)

            stacked[idx] = [0, 255, 0]
            # # print("a1",a1.shape)
            # # a1 (234971, 3)

            loaded_img_crop_new = stacked.reshape(loaded_img_crop.shape[0],
                                                  loaded_img_crop.shape[1], 3)
            # print("loaded_img_crop",loaded_img_crop.shape)

            # loaded_img_crop_masked=loaded_img_crop[loaded_img_crop_mask]
            # print("loaded_img_crop_masked",loaded_img_crop_masked)
            # # loaded_img_crop[loaded_img_crop_mask] (18, 3)
            # # loaded_img_crop[loaded_img_crop_mask] (387, 3)
            # # loaded_img_crop[loaded_img_crop_mask] (234971, 3)
            # # loaded_img_crop[loaded_img_crop_mask] (1629, 3)
            # loaded_img_crop[loaded_img_crop_mask]=[0,250,0]

            # # print("loaded_img_crop",loaded_img_crop)

            # plt.imshow(loaded_img_crop*new_img)
            plt.imshow(loaded_img_crop_new)
            plt.title("File: 1_Region 1_mask.tif, " + str(one_color))
            # /home/young/Pictures/2019_04_14_10:01:03.png
            # /home/young/Pictures/2019_04_14_10:01:18.png
            # /home/young/Pictures/2019_04_14_10:01:34.png

            plt.show()

    use_mask_tif()

    # ================================================================================
    # print("whole_xml_crop_data",whole_xml_crop_data)

    data_name = whole_xml_crop_data[0][0]
    data_region = whole_xml_crop_data[0][1]
    data_color = whole_xml_crop_data[0][2]
    data_points = whole_xml_crop_data[0][3]
    # print("data_points",dir(data_points))

    data_points_li = str(data_points).replace("\n", "").replace(
        "<point-list>", "").replace("<point>",
                                    "").replace("</point-list>",
                                                "").split("</point>")[:-1]
    # print("data_points_li",data_points_li)
    # ['32,18', '51,13',

    x_vals = []
    y_vals = []
    for one_coord in data_points_li:
        x_val = one_coord.split(",")[0]
        y_val = one_coord.split(",")[1]
        x_vals.append(x_val)
        y_vals.append(y_val)

    # print("x_vals",x_vals)
    # ['32', '51',
    # print("y_vals",y_vals)
    # ['18', '13',

    x_vals = list(map(int, x_vals))
    y_vals = list(map(int, y_vals))
    # print("x_vals",x_vals)
    # [32, 51,
    # print("y_vals",y_vals)
    # [18, 13,

    # ================================================================================
    data_name_1 = whole_xml_crop_data[1][0]
    data_region_1 = whole_xml_crop_data[1][1]
    data_color_1 = whole_xml_crop_data[1][2]
    data_points_1 = whole_xml_crop_data[1][3]
    # print("data_points_1",data_points_1)

    data_points_1_li = str(data_points_1).replace("\n", "").replace(
        "<point-list>", "").replace("<point>",
                                    "").replace("</point-list>",
                                                "").split("</point>")[:-1]
    # print("data_points_1_li",data_points_1_li)
    # ['32,18', '51,13',

    x_vals_1 = []
    y_vals_1 = []
    for one_coord in data_points_1_li:
        x_val_1 = one_coord.split(",")[0]
        y_val_1 = one_coord.split(",")[1]
        x_vals_1.append(x_val_1)
        y_vals_1.append(y_val_1)

    # print("x_vals_1",x_vals_1)
    # ['32', '51',
    # print("y_vals_1",y_vals_1)
    # ['18', '13',

    x_vals_1 = list(map(int, x_vals_1))
    y_vals_1 = list(map(int, y_vals_1))
    # print("x_vals_1",x_vals_1)
    # [32, 51,
    # print("y_vals_1",y_vals_1)
    # [18, 13,

    # ================================================================================
    data_name_2 = whole_xml_crop_data[2][0]
    data_region_2 = whole_xml_crop_data[2][1]
    data_color_2 = whole_xml_crop_data[2][2]
    data_points_2 = whole_xml_crop_data[2][3]
    # print("data_points_2",dir(data_points_2))

    data_points_2_li = str(data_points_2).replace("\n", "").replace(
        "<point-list>", "").replace("<point>",
                                    "").replace("</point-list>",
                                                "").split("</point>")[:-1]
    # print("data_points_2_li",data_points_2_li)
    # ['32,18', '51,13',

    x_vals_2 = []
    y_vals_2 = []
    for one_coord in data_points_2_li:
        x_val_2 = one_coord.split(",")[0]
        y_val_2 = one_coord.split(",")[1]
        x_vals_2.append(x_val_2)
        y_vals_2.append(y_val_2)

    # print("x_vals_2",x_vals_2)
    # ['32', '51',
    # print("y_vals_2",y_vals_2)
    # ['18', '13',

    x_vals_2 = list(map(int, x_vals_2))
    y_vals_2 = list(map(int, y_vals_2))
    # print("x_vals_2",x_vals_2)
    # [32, 51,
    # print("y_vals_2",y_vals_2)
    # [18, 13,

    # ================================================================================
    utils_image.scatter_points_onto_img(
        loaded_img_crop,
        x_vals,
        y_vals,
        color="b",
        title=
        "Image: 1_Region 1_crop.tif, XML: 1_Region 1_crop.session.xml Region 2"
    )
    # /home/young/Pictures/2019_04_14_10:02:20.png
    utils_image.scatter_points_onto_img(
        loaded_img_crop,
        x_vals_1,
        y_vals_1,
        color="g",
        title=
        "Image: 1_Region 1_crop.tif, XML: 1_Region 1_crop.session.xml Region 3"
    )
    # /home/young/Pictures/2019_04_14_10:02:44.png
    utils_image.scatter_points_onto_img(
        loaded_img_crop,
        x_vals_2,
        y_vals_2,
        color="r",
        title=
        "Image: 1_Region 1_crop.tif, XML: 1_Region 1_crop.session.xml Region 4"
    )
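
# ================================================================================
# Hedged sketch (added for illustration, not part of the original code): the
# point-list parsing above is repeated six times and could be factored into a
# single helper; parse_point_list and its argument name are assumptions.
def parse_point_list(data_points):
    # Strip the <point-list>/<point> XML wrappers, then split the "x,y" pairs,
    # mirroring the replace()/split() chain used in visualize()
    raw = str(data_points).replace("\n", "").replace("<point-list>", "").replace(
        "<point>", "").replace("</point-list>", "").split("</point>")[:-1]
    x_vals = [int(one_coord.split(",")[0]) for one_coord in raw]
    y_vals = [int(one_coord.split(",")[1]) for one_coord in raw]
    return x_vals, y_vals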