def detect_faces(image):
    """Detect faces in *image* via the Cognitive Services Face API.

    Requests age/gender/smile/emotion attributes, persists the raw
    detection payload to disk, and returns the detected face ids.

    :param image: image path/URL/stream accepted by ``CF.face.detect``.
    :return: list of ``faceId`` strings, one per detected face.
    """
    detected_results = CF.face.detect(
        image, attributes="age,gender,smile,emotion")
    print("Detected Faces", detected_results)

    face_ids = [result['faceId'] for result in detected_results]

    # Persist the raw payload once; the original rewrote both files on
    # every loop iteration with identical content (redundant I/O).  As
    # before, nothing is written when no faces were detected.
    if detected_results:
        save_dict_to_file('attributes.json', detected_results)
        save_dict_to_file('json.txt', detected_results)

    return face_ids
    def identify_faces(face_ids,
                       large_person_group,
                       person_group_id=None,
                       max_candidates_return=1,
                       threshold=None):
        """Identify known persons for the given face ids.

        Wraps ``CF.face.identify`` and collects the ``personId`` of every
        candidate across all identify results.

        :param face_ids: face ids returned by a previous detect call.
        :param large_person_group: large person group to search.
        :param person_group_id: optional (small) person group to search.
        :param max_candidates_return: max candidates per face.
        :param threshold: optional confidence threshold.
        :return: list of candidate ``personId`` strings.
        """
        identify_results = CF.face.identify(
            face_ids,
            large_person_group_id=large_person_group,
            person_group_id=person_group_id,
            max_candidates_return=max_candidates_return,
            threshold=threshold)

        person_ids = [candidate['personId']
                      for identify_result in identify_results
                      for candidate in identify_result['candidates']]

        # Save once after the scan; the original rewrote 'faceId.json'
        # for every single candidate.  As before, nothing is written
        # when there are no candidates at all.
        if person_ids:
            save_dict_to_file('faceId.json', person_ids)

        return person_ids
def main():
    """
    Process data source files.

    Generates a discrete distribution of probabilities for each state.

    """
    def _bump(counts, key, amount):
        # Accumulate *amount* under *key*, creating the entry when missing.
        counts[key] = counts.get(key, 0) + amount

    def _to_probabilities(counts, total):
        # Convert raw frequencies into probabilities, in place.
        for key in counts:
            counts[key] = float(counts[key]) / float(total)

    # Load csv files into dataframes.
    first_name = pandas.read_csv(config.staging_dir + config.fn_file,
                                 sep=',',
                                 encoding=config.encoding).dropna(axis='rows')

    last_name = pandas.read_csv(config.staging_dir + config.ln_file,
                                sep=',',
                                encoding=config.encoding).dropna(axis='rows')

    # Generate probability distributions for tokens and particles
    # A) first names and particles of first name
    first_name_tokens = {}
    part_first_name_tokens = {}
    first_name_total = 0
    part_first_name_total = 0
    for _, row in first_name.iterrows():
        text = utils.normalize(row['nombre'], config.text_case)
        freq = row['frec']
        for word in re.findall(config.word_pattern, text):
            if word in config.particles:
                _bump(part_first_name_tokens, word, freq)
                part_first_name_total += freq
            else:
                _bump(first_name_tokens,
                      utils.to_token(word, config.token_length), freq)
                first_name_total += freq

    # Calculate probabilities
    _to_probabilities(part_first_name_tokens, part_first_name_total)
    _to_probabilities(first_name_tokens, first_name_total)

    # Save probability distributions to file
    utils.save_dict_to_file(
        part_first_name_tokens,
        config.input_dir + config.token_files['part_first_name'])
    utils.save_dict_to_file(
        first_name_tokens, config.input_dir + config.token_files['first_name'])

    # B) last names and particles of last name
    last_name1_tokens = {}
    part_last_name1_tokens = {}
    last_name2_tokens = {}
    part_last_name2_tokens = {}
    last_name1_total = 0
    part_last_name1_total = 0
    last_name2_total = 0
    part_last_name2_total = 0
    for _, row in last_name.iterrows():
        text = utils.normalize(row['apellido'], config.text_case)
        freq1 = row['frec_pri']
        freq2 = row['frec_seg']
        freqr = row['freq_rep']
        for word in re.findall(config.word_pattern, text):
            if word in config.particles:
                _bump(part_last_name1_tokens, word, freq1 + freqr)
                part_last_name1_total += freq1 + freqr
                _bump(part_last_name2_tokens, word, freq2 + freqr)
                part_last_name2_total += freq2 + freqr
            else:
                token = utils.to_token(word, config.token_length)
                _bump(last_name1_tokens, token, freq1 + freqr)
                last_name1_total += freq1 + freqr
                _bump(last_name2_tokens, token, freq2 + freqr)
                last_name2_total += freq2 + freqr

    # Calculate probabilities
    # Last name 1
    _to_probabilities(part_last_name1_tokens, part_last_name1_total)
    _to_probabilities(last_name1_tokens, last_name1_total)
    # Save probability distributions to file
    utils.save_dict_to_file(
        part_last_name1_tokens,
        config.input_dir + config.token_files['part_last_name1'])
    utils.save_dict_to_file(
        last_name1_tokens, config.input_dir + config.token_files['last_name1'])
    # Last name 2
    _to_probabilities(part_last_name2_tokens, part_last_name2_total)
    _to_probabilities(last_name2_tokens, last_name2_total)
    # Save probability distributions to file
    utils.save_dict_to_file(
        part_last_name2_tokens,
        config.input_dir + config.token_files['part_last_name2'])
    utils.save_dict_to_file(
        last_name2_tokens, config.input_dir + config.token_files['last_name2'])
# Beispiel #4
# 0
def run(args: argparse.Namespace) -> None:
    """Train a segmentation network with source and consistency losses.

    Runs ``args.n_epoch`` train/val epochs, appends per-epoch metrics to
    two CSV files (source domain and target domain), keeps a copy of the
    best-3D-dice epoch's images on disk, and saves the corresponding
    network checkpoints under ``args.workdir``.

    :param args: parsed command-line namespace (see the argument parser).
    """
    # Save the run configuration next to the results for reproducibility.
    d = vars(args)
    d['time'] = str(datetime.datetime.now())
    save_dict_to_file(d, args.workdir)

    n_class: int = args.n_class
    metric_axis: List = args.metric_axis
    # NOTE(security): eval() on a CLI string is dangerous with untrusted
    # input; kept because the surrounding code relies on it to map e.g.
    # "torch.float32" to the actual dtype object.
    dtype = eval(args.dtype)

    # Proper params
    savedir: str = args.workdir
    n_epoch: int = args.n_epoch

    net, optimizer, device, loss_fns, loss_weights, loss_fns_source, loss_weights_source, scheduler = setup(args, n_class, dtype)
    print(f'> Loss weights cons: {loss_weights}, Loss weights source:{loss_weights_source}')
    shuffle = False
    loader, loader_val = get_loaders(args, args.dataset, args.source_folders,
                                     args.batch_size, n_class,
                                     args.debug, args.in_memory, dtype, False, fix_size=[0, 0])

    target_loader, target_loader_val = get_loaders(args, args.target_dataset, args.target_folders,
                                                   args.batch_size, n_class,
                                                   args.debug, args.in_memory, dtype, shuffle, fix_size=[0, 0])

    num_steps = n_epoch * len(loader)
    print("metric axis", metric_axis)
    # Best-so-far trackers; plain scalars compare cleanly against the
    # per-epoch dice values (the original used 1-element numpy arrays).
    best_3d_dice = 0.0
    best_3d_dice_source = 0.0

    print("Results saved in ", savedir)
    print(">>> Starting the training")
    for i in range(n_epoch):
        tra_losses_vec, tra_target_vec, tra_source_vec = do_epoch(
            args, "train", net, device,
            loader, i, loss_fns,
            loss_weights,
            loss_fns_source,
            loss_weights_source,
            args.resize,
            num_steps, n_class, metric_axis,
            savedir="",
            optimizer=optimizer,
            target_loader=target_loader)

        with torch.no_grad():
            val_losses_vec, val_target_vec, val_source_vec = do_epoch(
                args, "val", net, device,
                loader_val, i, loss_fns,
                loss_weights,
                loss_fns_source,
                loss_weights_source,
                args.resize,
                num_steps, n_class, metric_axis,
                savedir=savedir,
                target_loader=target_loader_val)

        # Per-epoch source-domain metrics.
        df_s_tmp = pd.DataFrame({
            "tra_dice_3d": [tra_source_vec[0]],
            "tra_dice_3d_sd": [tra_source_vec[1]],
            "val_dice_3d": [val_source_vec[0]],
            "val_dice_3d_sd": [val_source_vec[1]]})

        # pd.concat replaces DataFrame.append (removed in pandas 2.0).
        if i == 0:
            df_s = df_s_tmp
        else:
            df_s = pd.concat([df_s, df_s_tmp])

        df_s.to_csv(Path(savedir, "_".join((args.source_folders.split("'")[1], "source", args.csv))), float_format="%.4f", index_label="epoch")

        # Per-epoch target-domain losses and metrics.
        df_t_tmp = pd.DataFrame({
            "tra_loss_inf": [tra_losses_vec[0]],
            "tra_loss_cons": [tra_losses_vec[1]],
            "tra_loss_fs": [tra_losses_vec[2]],
            "val_loss_inf": [val_losses_vec[0]],
            "val_loss_cons": [val_losses_vec[1]],
            "val_loss_fs": [val_losses_vec[2]],
            "tra_dice_3d": [tra_target_vec[0]],
            "tra_dice_3d_sd": [tra_target_vec[1]],
            "tra_dice": [tra_target_vec[2]],
            "val_dice_3d": [val_target_vec[0]],
            "val_dice_3d_sd": [val_target_vec[1]],
            'val_dice': [val_target_vec[2]]})

        if i == 0:
            df_t = df_t_tmp
        else:
            df_t = pd.concat([df_t, df_t_tmp])

        df_t.to_csv(Path(savedir, "_".join((args.target_folders.split("'")[1], "target", args.csv))), float_format="%.4f", index_label="epoch")

        # Save model if the target 3D dice improved.
        current_val_target_3d_dice = val_target_vec[0]
        if current_val_target_3d_dice > best_3d_dice:
            best_3d_dice = current_val_target_3d_dice
            with open(Path(savedir, "best_epoch_3d.txt"), 'w') as f:
                f.write(str(i))
            best_folder_3d = Path(savedir, "best_epoch_3d")
            if best_folder_3d.exists():
                rmtree(best_folder_3d)
            copytree(Path(savedir, f"iter{i:03d}"), Path(best_folder_3d))
            torch.save(net, Path(savedir, "best_3d.pkl"))

        # Save source model if the source 3D dice improved.
        current_val_source_3d_dice = val_source_vec[0]
        if current_val_source_3d_dice > best_3d_dice_source:
            # BUG FIX: the original assigned to 'best_3d_dice_s', so the
            # tracker never advanced and this checkpoint was rewritten on
            # every epoch.
            best_3d_dice_source = current_val_source_3d_dice
            with open(Path(savedir, "best_epoch_3d_source.txt"), 'w') as f:
                f.write(str(i))
            torch.save(net, Path(savedir, "best_3d_source.pkl"))

        if i == n_epoch - 1:
            with open(Path(savedir, "last_epoch.txt"), 'w') as f:
                f.write(str(i))
            last_folder = Path(savedir, "last_epoch")
            if last_folder.exists():
                rmtree(last_folder)
            copytree(Path(savedir, f"iter{i:03d}"), Path(last_folder))
            torch.save(net, Path(savedir, "last.pkl"))

        # remove images from iteration
        rmtree(Path(savedir, f"iter{i:03d}"))

        if not args.flr:
            exp_lr_scheduler(optimizer, i, args.lr_decay)
    print("Results saved in ", savedir)
def execute_experiment(dataset_name, encoders_list, validation_type,
                       file_name_apex, experiment_description):
    """Benchmark every encoder tuple in *encoders_list* on one dataset.

    Loads the processed dataset, makes a fixed (unshuffled) train/test
    split, fits one model per encoder tuple, scores it with ROC AUC and
    dumps the collected results (formatted and raw) under ``./results``.
    """
    dataset_pth = f"./data/{dataset_name}/{dataset_name}.gz"
    results = {}

    # training params
    N_SPLITS = 5
    model_validation = StratifiedKFold(n_splits=N_SPLITS, shuffle=True,
                                       random_state=42)
    encoder_validation = StratifiedKFold(n_splits=N_SPLITS, shuffle=True,
                                         random_state=2019)

    # load processed dataset
    data = pd.read_csv(dataset_pth)

    # make train-test split
    cat_cols = [col for col in data.columns if col.startswith("cat")]
    features = data.drop("target", axis=1)
    X_train, X_test, y_train, y_test = train_test_split(features,
                                                        data["target"],
                                                        test_size=0.4,
                                                        shuffle=False)
    X_train = X_train.reset_index(drop=False)
    X_test = X_test.reset_index(drop=False)
    y_train = np.array(y_train)
    y_test = np.array(y_test)

    results[dataset_name] = {}
    results[dataset_name]["info"] = {
        "experiment_description": experiment_description,
        "train_shape": X_train.shape,
        "test_shape": X_test.shape,
        "mean_target_train": np.mean(y_train),
        "mean_target_test": np.mean(y_test),
        "num_cat_cols": len(cat_cols),
        "cat_cols_info": cat_cols_info(X_train, X_test, cat_cols),
    }

    for encoders_tuple in encoders_list:
        print(f"\n\nCurrent itteration : {encoders_tuple}, {dataset_name}\n\n")

        time_start = time.time()

        # train models
        lgb_model = Model(cat_validation=validation_type,
                          encoders_names=encoders_tuple,
                          cat_cols=cat_cols)
        train_score, val_score, avg_num_trees = lgb_model.fit(X_train, y_train)
        y_hat, test_features = lgb_model.predict(X_test)

        # check score
        test_score = roc_auc_score(y_test, y_hat)
        elapsed = time.time() - time_start

        # write and save results
        results[dataset_name][str(encoders_tuple)] = {
            "train_score": train_score,
            "val_score": val_score,
            "test_score": test_score,
            "time": elapsed,
            "features_before_encoding": X_train.shape[1],
            "features_after_encoding": test_features,
            "avg_tress_number": avg_num_trees
        }

    for key, value in results[dataset_name].items():
        print(key, value, "\n\n")

    save_dict_to_file(dic=results[dataset_name],
                      path=f"./results/{file_name_apex}{dataset_name}.txt",
                      save_raw=False)
    save_dict_to_file(dic=results[dataset_name],
                      path=f"./results/{file_name_apex}{dataset_name}_r.txt",
                      save_raw=True)
# Beispiel #6
# 0
def main(args: argparse.Namespace) -> None:
    """Train a size-regression network and track per-epoch metrics.

    Sets up the network (loaded weights, pre-trained ResNet-101, or a
    fresh Enet), the optimizer and the train/val loaders from *args*,
    then runs ``args.epc`` epochs.  Per-epoch train/val metrics are
    accumulated in ``metrics``; the epoch with the best mean validation
    size difference (restricted to slices whose ground-truth size is
    non-zero) is checkpointed together with a CSV of per-slice metrics.

    :param args: parsed command-line namespace (see the argument parser).
    """
    print("\n>>> Setting up")
    d = vars(args)
    d['time'] = str(datetime.datetime.now())
    cpu: bool = args.cpu or not torch.cuda.is_available()
    device = torch.device("cpu") if cpu else torch.device("cuda")

    cudnn.benchmark = True

    # Network: explicit weights > pre-trained ResNet > fresh Enet.
    if args.weights:
        print(f">> Loading weights from {args.weights}")
        net = torch.load(args.weights)
    elif args.pretrained:
        print(">> Starting from pre-trained network")
        net = models.resnet101(pretrained=True)
        print("> Recreating its last FC layer")
        in_, out_ = net.fc.in_features, net.fc.out_features
        print(f"> Going from shape {(in_, out_)} to {(8192, args.n_class)}")
        net.fc = nn.Linear(8192, args.n_class)  # Change only the last layer
    else:
        net_class = getattr(__import__('networks'), 'Enet')
        net = net_class(1, args.n_class)
        net.apply(weights_init)
    net.to(device)

    lr: float = args.lr
    criterion = torch.nn.MSELoss(reduction="sum")
    if not args.adam:
        optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=args.momentum, weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.Adam(net.parameters(), lr=lr, betas=(0.9, 0.999))

    # Dataloaders and per-slice transforms.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    png_transform = transforms.Compose([
        lambda img: img.convert('L'),
        ImageOps.equalize if args.equalize else id_,
        lambda img: np.array(img)[np.newaxis, ...],
        lambda nd: nd / 255,  # max <= 1
        lambda nd: torch.tensor(nd, dtype=torch.float32),
        normalize if args.pretrained else id_
    ])
    gt_transform = transforms.Compose([
        lambda img: img.convert('L'),
        lambda img: np.array(img)[np.newaxis, ...],
        lambda nd: torch.tensor(nd, dtype=torch.int64),
        partial(class2one_hot, C=args.n_class),
        itemgetter(0)
    ])

    gen_dataset = partial(SliceDataset,
                          transforms=[png_transform, gt_transform],
                          are_hots=[False, True],
                          debug=args.debug,
                          C=args.n_class,
                          in_memory=args.in_memory,
                          bounds_generators=[])

    data_loader = partial(DataLoader,
                          num_workers=4,
                          pin_memory=True)

    # Ground-truth sub-folders default to "GT" when not given.
    gt = args.GT if args.GT else "GT"
    val_gt = args.val_GT if args.val_GT else "GT"

    train_filenames: List[str] = map_(lambda p: str(p.name), Path(args.data_root, args.train_subfolder, args.modality).glob("*"))
    train_folders: List[Path] = [Path(args.data_root, args.train_subfolder, f) for f in [args.modality, gt]]

    val_filenames: List[str] = map_(lambda p: str(p.name), Path(args.data_root, args.val_subfolder, args.val_modality).glob("*"))
    val_folders: List[Path] = [Path(args.data_root, args.val_subfolder, f) for f in [args.val_modality, val_gt]]

    train_set: Dataset = gen_dataset(train_filenames, train_folders, augment=args.augment)
    train_set = Concat([train_set, train_set])
    val_set: Dataset = gen_dataset(val_filenames, val_folders)
    val_set = Concat([val_set, val_set, val_set, val_set, val_set])

    train_loader: DataLoader = data_loader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=False)
    val_loader: DataLoader = data_loader(val_set, batch_size=args.batch_size, shuffle=False, drop_last=False)

    print()

    best_perf: float = -1
    best_epc: int = -1

    # Create the destination once, up front (the original re-created it on
    # every epoch; mkdir(parents=True) also covers the parent directory).
    savepath = Path(args.save_dest)
    savepath.mkdir(parents=True, exist_ok=True)

    metrics: Dict[str, Tensor] = {"tra_loss": torch.zeros((args.epc, len(train_loader)),
                                                          dtype=torch.float32, device=device),
                                  "tra_diff": torch.zeros((args.epc, len(train_set), args.n_class),
                                                          dtype=torch.float32, device=device),
                                  "tra_gt_size": torch.zeros((args.epc, len(train_set), args.n_class),
                                                             dtype=torch.float32, device=device),
                                  "tra_pred_size": torch.zeros((args.epc, len(train_set), args.n_class),
                                                               dtype=torch.float32, device=device),
                                  "val_loss": torch.zeros((args.epc, len(val_loader)),
                                                          dtype=torch.float32, device=device),
                                  "val_diff": torch.zeros((args.epc, len(val_set), args.n_class),
                                                          dtype=torch.float32, device=device),
                                  "val_gt_size": torch.zeros((args.epc, len(val_set), args.n_class),
                                                             dtype=torch.float32, device=device),
                                  "val_pred_size": torch.zeros((args.epc, len(val_set), args.n_class),
                                                               dtype=torch.float32, device=device)}
    for i in range(args.epc):
        if not args.no_training:
            net, train_metrics, train_ids = do_epc(i, "train", net, train_loader, device, criterion, args, optimizer)
            for k in train_metrics:
                metrics["tra_" + k][i] = train_metrics[k][...]

        with torch.no_grad():
            net, val_metrics, val_ids = do_epc(i, "val", net, val_loader, device, criterion, args)
            for k in val_metrics:
                metrics["val_" + k][i] = val_metrics[k][...]

        diff = metrics["val_diff"][i, ..., args.idc]
        gt_size = metrics["val_gt_size"][i, ..., args.idc]

        # Save the run configuration alongside the results, once.
        if i == 0:
            save_dict_to_file(d, savepath)

        # Mean |predicted - gt| size on slices that actually contain the
        # structure (gt size != 0); lower is better.
        epc_perf = float(diff[gt_size != 0].mean())
        if epc_perf < best_perf or i == 0:
            best_perf = epc_perf
            best_epc = i
            # Use a distinct name instead of reusing 'd' (the args dict)
            # as the original did, which clobbered it after epoch 0.
            best_df = pd.DataFrame(0, index=np.arange(len(val_set)),
                                   columns=["val_ids", "val_diff",
                                            "val_gt_size", "val_pred_size"])
            best_df['val_ids'] = val_ids
            best_df['val_diff'] = metrics["val_diff"].cpu().tolist()[i]
            best_df['val_gt_size'] = metrics["val_gt_size"].cpu().tolist()[i]
            best_df['val_pred_size'] = metrics["val_pred_size"].cpu().tolist()[i]
            best_df.to_csv(Path(args.save_dest, args.val_subfolder + str(i) + 'reg_metrics.csv'), float_format="%.4f")

            print(f"> Best results at epoch {best_epc}: diff: {best_perf:12.2f}")
            print(f"> Saving network weights to {args.save_dest}")
            torch.save(net, str(Path(args.save_dest, 'pred_size.pkl')))

        # Periodic checkpoint every 10 epochs (including epoch 0).
        if not i % 10:
            torch.save(net, str(Path(args.save_dest, 'pred_size' + str(i) + '.pkl')))

        # Step-decay the learning rate at 1/2 and 3/4 of the schedule.
        if i in [args.epc // 2, 3 * args.epc // 4]:
            # BUG FIX: the original halved 'lr' once *per param group*, so
            # with several groups the rate dropped by 0.5 ** n_groups.
            lr *= 0.5
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
            print(f'> New learning Rate: {lr}')
# Beispiel #7
# 0
def run(args: argparse.Namespace) -> None:
    """Train (or, in "makeim" mode, only evaluate) a segmentation network.

    Per epoch: runs a training pass (skipped when ``args.mode == "makeim"``,
    where the validation results stand in for the training ones), then a
    no-grad validation pass; tracks the best validation 3D dice, saves
    periodic and final checkpoints, and appends per-epoch metrics to CSV
    files under ``args.workdir``.

    :param args: parsed command-line namespace (see the argument parser).
    """
    # save args to dict (persisted for reproducibility)
    d = vars(args)
    d['time'] = str(datetime.datetime.now())
    d['server']=platform.node()
    save_dict_to_file(d,args.workdir)

    temperature: float = 0.1
    n_class: int = args.n_class
    metric_axis: List = args.metric_axis
    lr: float = args.l_rate
    # NOTE(review): eval() on a CLI string maps e.g. "torch.float32" to the
    # dtype object but is unsafe on untrusted input.
    dtype = eval(args.dtype)

    # Proper params
    savedir: str = args.workdir
    n_epoch: int = args.n_epoch

    # NOTE: setup() overrides the n_epoch read from args just above.
    net, optimizer, device, loss_fns, loss_weights, scheduler, n_epoch = setup(args, n_class, dtype)
    shuffle = True
    print(args.target_folders)
    target_loader, target_loader_val = get_loaders(args, args.target_dataset,args.target_folders,
                                           args.batch_size, n_class,
                                           args.debug, args.in_memory, dtype, shuffle, "target", args.val_target_folders)

    print("metric axis",metric_axis)
    # Best-so-far trackers for the dice metrics.
    best_dice_pos: Tensor = np.zeros(1)
    best_dice: Tensor = np.zeros(1)
    best_hd3d_dice: Tensor = np.zeros(1)
    best_3d_dice: Tensor = 0
    best_2d_dice: Tensor = 0
    print("Results saved in ", savedir)
    print(">>> Starting the training")
    for i in range(n_epoch):

       # In "makeim" mode only the validation pass runs; its outputs are
       # copied into the tra_* vectors used by the CSV logging below.
       if args.mode =="makeim":
            with torch.no_grad():

                val_losses_vec, val_target_vec,val_source_vec = do_epoch(args, "val", net, device,
                                                                         i, loss_fns,
                                                                         loss_weights,
                                                                         args.resize,
                                                                         n_class,metric_axis,
                                                                         savedir=savedir,
                                                                         target_loader=target_loader_val, best_dice3d_val=best_3d_dice)
                tra_losses_vec = val_losses_vec
                tra_target_vec = val_target_vec
                tra_source_vec = val_source_vec
       else:
            tra_losses_vec, tra_target_vec,tra_source_vec = do_epoch(args, "train", net, device,
                                                                     i, loss_fns,
                                                                     loss_weights,
                                                                     args.resize,
                                                                     n_class, metric_axis,
                                                                     savedir=savedir,
                                                                     optimizer=optimizer,
                                                                     target_loader=target_loader, best_dice3d_val=best_3d_dice)

            with torch.no_grad():
                val_losses_vec, val_target_vec,val_source_vec = do_epoch(args, "val", net, device,
                                                                         i, loss_fns,
                                                                         loss_weights,
                                                                         args.resize,
                                                                         n_class,metric_axis,
                                                                         savedir=savedir,
                                                                         target_loader=target_loader_val, best_dice3d_val=best_3d_dice)

       # Track and checkpoint the best validation 3D dice.
       current_val_target_3d_dice = val_target_vec[0]
       if args.dice_3d:
           if current_val_target_3d_dice > best_3d_dice:
               best_3d_dice = current_val_target_3d_dice
               with open(Path(savedir, "3dbestepoch.txt"), 'w') as f:
                   f.write(str(i)+','+str(best_3d_dice))
               best_folder_3d = Path(savedir, "best_epoch_3d")
               if best_folder_3d.exists():
                    rmtree(best_folder_3d)
               if args.saveim:
                    copytree(Path(savedir, f"iter{i:03d}"), Path(best_folder_3d))
           # NOTE(review): this save sits outside the improvement check
           # above, so "best_3d.pkl" is rewritten every epoch whenever
           # args.dice_3d is set -- confirm whether it was meant to be
           # inside the if-block.
           torch.save(net, Path(savedir, "best_3d.pkl"))

       # Periodic checkpoint every 10 epochs (including epoch 0).
       if not(i % 10) :
            print("epoch",str(i),savedir,'best 3d dice',best_3d_dice)
            torch.save(net, Path(savedir, "epoch_"+str(i)+".pkl"))

       # Final-epoch checkpoint and bookkeeping.
       if i == n_epoch - 1:
            with open(Path(savedir, "last_epoch.txt"), 'w') as f:
                f.write(str(i))
            last_folder = Path(savedir, "last_epoch")
            if last_folder.exists():
                rmtree(last_folder)
            if args.saveim:
                copytree(Path(savedir, f"iter{i:03d}"), Path(last_folder))
            torch.save(net, Path(savedir, "last.pkl"))

        # remove images from iteration
       if args.saveim:
           rmtree(Path(savedir, f"iter{i:03d}"))

       # Optional source-domain metrics CSV.
       if args.source_metrics:
            df_s_tmp = pd.DataFrame({
            "val_dice_3d": [val_source_vec[0]],
            "val_dice_3d_sd": [val_source_vec[1]],
            "val_dice_2d": [val_source_vec[2]]})
            if i == 0:
               df_s = df_s_tmp
            else:
                # NOTE(review): DataFrame.append is deprecated (removed in
                # pandas 2.0); pd.concat is the replacement.
                df_s = df_s.append(df_s_tmp)
            df_s.to_csv(Path(savedir, "_".join((args.source_folders.split("'")[1],"source", args.csv))), float_format="%.4f", index_label="epoch")
       # Target-domain losses and metrics, appended every epoch.
       df_t_tmp = pd.DataFrame({
            "epoch":i,
            "tra_loss_s":[tra_losses_vec[0]],
            "tra_loss_cons":[tra_losses_vec[1]],
            "tra_loss_tot":[tra_losses_vec[2]],
            "tra_size_mean":[tra_losses_vec[3]],
            "tra_size_mean_pos":[tra_losses_vec[4]],
            "val_loss_s":[val_losses_vec[0]],
            "val_loss_cons":[val_losses_vec[1]],
            "val_loss_tot":[val_losses_vec[2]],
            "val_size_mean":[val_losses_vec[3]],
            "val_size_mean_pos":[val_losses_vec[4]],
            "val_gt_size_mean":[val_losses_vec[5]],
            "val_gt_size_mean_pos":[val_losses_vec[6]],
            'tra_dice': [tra_target_vec[4]],
            'val_asd': [val_target_vec[2]],
            'val_asd_sd': [val_target_vec[3]],
            'val_hd': [val_target_vec[4]],
            'val_hd_sd': [val_target_vec[5]],
            'val_dice': [val_target_vec[6]],
            "val_dice_3d_sd": [val_target_vec[1]],
            "val_dice_3d": [val_target_vec[0]]})

       if i == 0:
            df_t = df_t_tmp
       else:
            df_t = df_t.append(df_t_tmp)

       df_t.to_csv(Path(savedir, "_".join((args.target_folders.split("'")[1],"target", args.csv))), float_format="%.4f", index=False)

       # Step the LR scheduler unless a fixed learning rate was requested.
       if args.flr==False:
            exp_lr_scheduler(optimizer, i, args.lr_decay,args.lr_decay_epoch)
    print("Results saved in ", savedir, "best 3d dice",best_3d_dice)
# Beispiel #8
# 0
def run(args: argparse.Namespace) -> None:
    """Train a segmentation network with adversarial domain adaptation.

    Builds source/target data loaders, runs ``n_epoch`` train/val epochs via
    ``do_epoch``, logs per-epoch source and target metrics to CSV files in
    ``args.workdir``, and checkpoints the model whenever the target-domain
    validation 3D Dice improves.

    NOTE(review): the function appears to continue past this excerpt (the
    sibling loop above ends with a summary print); documented as visible.
    """

    # Persist the CLI arguments (plus a timestamp) alongside the results so
    # the run is reproducible from its output directory.
    d = vars(args)
    d['time'] = str(datetime.datetime.now())
    save_dict_to_file(d,args.workdir)

    n_class: int = args.n_class
    lr: float = args.l_rate
    # NOTE(review): eval() on a CLI-supplied string — presumably resolves a
    # dtype name such as "torch.float32"; unsafe if args come from an
    # untrusted source. Confirm intended inputs before hardening.
    dtype = eval(args.dtype)

    # Proper params
    savedir: str = args.workdir
    n_epoch: int = args.n_epoch

    net, optimizer, device, loss_fns, loss_weights, loss_fns_source, loss_weights_source, scheduler = setup(args, n_class, dtype)
    print(f'> Loss weights cons: {loss_weights}, Loss weights source:{loss_weights_source}, Loss weights adv: {args.lambda_adv_target}')
    # Only the target loader is shuffled, and only when mixing is enabled.
    shuffle = False
    if args.mix:
        shuffle = True
    loader, loader_val = get_loaders(args, args.dataset,args.folders,
                                           args.batch_size, n_class,
                                           args.debug, args.in_memory, dtype, False)

    target_loader, target_loader_val = get_loaders(args, args.target_dataset,args.target_folders,
                                           args.batch_size, n_class,
                                           args.debug, args.in_memory, dtype, shuffle)

    n_tra: int = len(loader.dataset)  # Number of images in dataset
    l_tra: int = len(loader)  # Number of iteration per epoch: different if batch_size > 1
    n_val: int = len(loader_val.dataset)
    l_val: int = len(loader_val)

    # Total optimizer steps over the whole run (used by do_epoch, e.g. for
    # schedule ramps).
    num_steps = n_epoch * len(loader)

    # Best-metric trackers. NOTE(review): created with np.zeros, so the
    # original "Tensor" annotations were inaccurate — they are numpy arrays.
    best_dice_pos: np.ndarray = np.zeros(1)
    best_dice: np.ndarray = np.zeros(1)
    best_3d_dice: np.ndarray = np.zeros(1)

    print(">>> Starting the training")
    for i in range(n_epoch):
        # Do training and validation loops

        tra_losses_vec, tra_source_vec, tra_target_vec, tra_baseline_target_vec = do_epoch(args, "train", net, device, loader, i, loss_fns, loss_weights, loss_fns_source, loss_weights_source, args.resize,
                                                                            num_steps, n_class, savedir=savedir, optimizer=optimizer, target_loader=target_loader,  lambda_adv_target = args.lambda_adv_target)

        with torch.no_grad():
            val_losses_vec, val_source_vec, val_target_vec, val_baseline_target_vec = do_epoch(args, "val", net, device, loader_val, i, loss_fns, loss_weights,loss_fns_source, loss_weights_source, args.resize,
                                                                            num_steps, n_class, savedir=savedir, target_loader=target_loader_val, lambda_adv_target=args.lambda_adv_target )

        # Freeze the epoch-0 target metrics as the "baseline" columns that
        # every later epoch's rows are compared against.
        if i == 0:
            keep_tra_baseline_target_vec = tra_baseline_target_vec
            keep_val_baseline_target_vec = val_baseline_target_vec

        # One row of source-domain metrics for this epoch (index = epoch).
        df_s_tmp = pd.DataFrame({"tra_dice": tra_source_vec[0],
                                 "tra_dice_pos": tra_source_vec[1],
                                 "tra_dice_neg": tra_source_vec[2],
                                 "tra_dice_3d": tra_source_vec[3],
                                 "tra_dice_3d_sd": tra_source_vec[4],
                                 "tra_haussdorf": tra_source_vec[5],
                                 "tra_loss_seg": tra_losses_vec[0],
                                 "tra_loss_adv": tra_losses_vec[1],
                                 "tra_loss_inf": tra_losses_vec[2],
                                 "tra_loss_cons": tra_losses_vec[3],
                                 "tra_loss_D": tra_losses_vec[4],
                                 "val_dice": val_source_vec[0],
                                 "val_dice_pos": val_source_vec[1],
                                 "val_dice_neg": val_source_vec[2],
                                 "val_dice_3d": val_source_vec[3],
                                 "val_dice_3d_sd": val_source_vec[4],
                                 "val_haussdorf": val_source_vec[5],
                                 "val_loss_seg": val_losses_vec[0]}, index=[i])

        # One row of target-domain metrics, including the epoch-0 baselines.
        df_t_tmp = pd.DataFrame({
                                "tra_dice": tra_target_vec[0],
                                "tra_dice_pos": tra_target_vec[1],
                                "tra_dice_neg": tra_target_vec[2],
                                "tra_dice_3d": tra_target_vec[3],
                                "tra_dice_3d_sd": tra_target_vec[4],
                                "tra_haussdorf": tra_target_vec[5],
                                "tra_dice_3d_baseline": keep_tra_baseline_target_vec[0],
                                "tra_dice_3d_sd_baseline": keep_tra_baseline_target_vec[1],
                                "val_dice": val_target_vec[0],
                                "val_dice_pos": val_target_vec[1],
                                "val_dice_neg": val_target_vec[2],
                                "val_dice_3d": val_target_vec[3],
                                "val_dice_3d_sd": val_target_vec[4],
                                "val_haussdorf": val_target_vec[5],
                                "val_dice_3d_baseline": keep_val_baseline_target_vec[0],
                                "val_dice_3d_sd_baseline": keep_val_baseline_target_vec[1]}, index=[i])

        if i == 0:
            df_s = df_s_tmp
            df_t = df_t_tmp
        else:
            # NOTE(review): DataFrame.append is deprecated and removed in
            # pandas 2.0 — pd.concat([...]) is the replacement.
            df_s = df_s.append(df_s_tmp)
            df_t = df_t.append(df_t_tmp)

        # Rewrite both CSVs every epoch so interrupted runs keep their logs.
        df_s.to_csv(Path(savedir, args.csv), float_format="%.4f", index_label="epoch")
        df_t.to_csv(Path(savedir, "_".join(("target", args.csv))), float_format="%.4f", index_label="epoch")

        # Save model if better
        # Model selection criterion: target-domain validation 3D Dice
        # (position 3 of val_target_vec, per the "val_dice_3d" column above).
        current_val_target_3d_dice = val_target_vec[3]

        if current_val_target_3d_dice > best_3d_dice:
            best_epoch = i
            best_3d_dice = current_val_target_3d_dice
            # Record the winning epoch, snapshot its prediction images, and
            # checkpoint the full model object.
            with open(Path(savedir, "best_epoch_3d.txt"), 'w') as f:
                f.write(str(i))
            best_folder_3d = Path(savedir, "best_epoch_3d")
            if best_folder_3d.exists():
                rmtree(best_folder_3d)
            copytree(Path(savedir, f"iter{i:03d}"), Path(best_folder_3d))
            torch.save(net, Path(savedir, "best_3d.pkl"))

        # remove images from iteration
        rmtree(Path(savedir, f"iter{i:03d}"))

        if args.scheduler:
            optimizer, loss_fns, loss_weights = scheduler(i, optimizer, loss_fns, loss_weights)
            # Halve the LR when no improvement for (best_epoch + 20) epochs.
            # NOTE(review): best_epoch is only assigned inside the improvement
            # branch above — if epoch 0 never improves on the zero init this
            # line raises NameError; confirm intended.
            if (i % (best_epoch + 20) == 0) and i > 0 :
                for param_group in optimizer.param_groups:
                    # NOTE(review): lr is halved once per param group — with
                    # several groups it shrinks by more than 0.5 per trigger
                    # and later groups get progressively smaller LRs; verify.
                    lr *= 0.5
                    param_group['lr'] = lr
                    print(f'> New learning Rate: {lr}')