# Imports assumed by the examples in this snippet (not shown in the original):
import glob
import math
import os

import numpy as np
import pandas as pd
import tensorflow as tf

# load_img, standardized_params, load_and_convert_onehot and
# load_and_data_augment are project-specific helpers defined elsewhere.


def predict_regression(config):
    split_folder = config["split_folder"]
    split_idx = config["split_idx"]
    img_folder = config["img_folder"]
    file_col = config["file_col"]
    par_col = config["par_col"]
    tag_col = config["tag_col"]
    tags_to_label = config["tags_to_label"]
    size = config["size"]
    ch = config["ch"]
    par_mean = config["par_mean"]
    par_std = config["par_std"]
    n_classes = config["n_classes"]
    BATCH_SIZE = config["BATCH_SIZE"]
    AUTOTUNE = config["AUTOTUNE"]
    model_folder = config["model_folder"]
    model = config["model"]
    csv_folpath = config["csv_folpath"]

    # Used to invert the standardized predictions back to the original scale
    train_df = pd.read_csv(os.path.join(split_folder,
                                        "train_{}.csv".format(split_idx)),
                           encoding="shift-jis")
    tag_mu = train_df[tag_col].mean()
    tag_sigma = train_df[tag_col].std()
    par_mu = train_df[par_col].mean()
    par_sigma = train_df[par_col].std()

    test_df = pd.read_csv(os.path.join(split_folder,
                                       "test_{}.csv".format(split_idx)),
                          encoding="shift-jis")
    test_paths = [os.path.join(img_folder, f) for f in list(test_df[file_col])]
    test_pars = list(test_df[par_col])
    test_tags = list(test_df[tag_col])
    testset = tf.data.Dataset.from_tensor_slices(
        (test_paths, test_pars, test_tags))
    testset = testset.map(lambda path, par, tag: (
        (load_img(path, size, ch), standardized_params(par, par_mu, par_sigma)
         ), standardized_params(tag, tag_mu, tag_sigma))).batch(
             BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)

    last_models = glob.glob(
        os.path.join(model_folder, "models_{}*".format(split_idx)))
    # Return True when no model checkpoint was saved for this split
    if not last_models:
        return True

    # Load the checkpoint from the highest epoch (save_best_only keeps the best)
    last_model_path = sorted(last_models)[-1]
    model.load_weights(last_model_path)
    predict_result = model.predict(testset)
    predict_result = [(i[0] * tag_sigma + tag_mu) for i in predict_result]
    result_arr = np.array(
        [list(test_df[file_col]), test_pars, test_tags, predict_result]).T
    df_result = pd.DataFrame(data=result_arr,
                             columns=[file_col, par_col, "true", "predict"])
    df_result.to_csv(os.path.join(csv_folpath,
                                  "miss_{}.csv".format(split_idx)),
                     index=False)
    return False
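

# --- Hypothetical sketch (not part of the original example) ------------------
# standardized_params() is defined elsewhere; the inverse transform above
# (pred * tag_sigma + tag_mu) implies it is a plain z-score helper, roughly:
def standardized_params_sketch(x, mu, sigma):
    # Cast to float32 so the op composes with the image pipeline, then z-score
    return (tf.cast(x, tf.float32) - mu) / sigma
# ------------------------------------------------------------------------------

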
def non_par_predict_classification(config):
    split_folder = config["split_folder"]
    split_idx = config["split_idx"]
    img_folder = config["img_folder"]
    file_col = config["file_col"]
    par_col = config["par_col"]
    tag_col = config["tag_col"]
    tags_to_label = config["tags_to_label"]
    size = config["size"]
    ch = config["ch"]
    par_mean = config["par_mean"]
    par_std = config["par_std"]
    n_classes = config["n_classes"]
    BATCH_SIZE = config["BATCH_SIZE"]
    AUTOTUNE = config["AUTOTUNE"]
    model_folder = config["model_folder"]
    model = config["model"]
    csv_folpath = config["csv_folpath"]

    # tag_mu / tag_sigma mirror the regression version but are not used in this
    # classification path (predictions are taken via argmax, not de-standardized)
    train_df = pd.read_csv(os.path.join(split_folder,
                                        "train_{}.csv".format(split_idx)),
                           encoding="shift-jis")
    tag_mu = train_df[tag_col].mean()
    tag_sigma = train_df[tag_col].std()

    test_df = pd.read_csv(os.path.join(split_folder,
                                       "test_{}.csv".format(split_idx)),
                          encoding="shift-jis")
    test_paths = [os.path.join(img_folder, f) for f in list(test_df[file_col])]
    test_labels = [tags_to_label[tag] for tag in list(test_df[tag_col])]
    testset = tf.data.Dataset.from_tensor_slices((test_paths, test_labels))
    testset = testset.map(lambda path, label: ((load_img(
        path, size, ch)), load_and_convert_onehot(label, n_classes))).batch(
            BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)

    last_models = glob.glob(
        os.path.join(model_folder, "models_{}*".format(split_idx)))
    # Return True when no model checkpoint was saved for this split
    if not last_models:
        return True

    # Load the checkpoint from the highest epoch (save_best_only keeps the best)
    last_model_path = sorted(last_models)[-1]
    model.load_weights(last_model_path)
    y_pred = model.predict(testset)
    predict_result = np.argmax(y_pred, axis=1)
    result_arr = np.array(
        [list(test_df[file_col]), test_labels, predict_result]).T
    result_arr = np.concatenate([result_arr, y_pred], axis=1)
    # The last two columns (0 and 1) hold the raw per-class scores; the header
    # is hardcoded for the binary case even though n_classes is configurable
    df_result = pd.DataFrame(data=result_arr,
                             columns=[file_col, "true", "predict", 0, 1])
    df_result.to_csv(os.path.join(csv_folpath,
                                  "miss_{}.csv".format(split_idx)),
                     index=False)
    return False
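

# --- Hypothetical sketch (not part of the original example) ------------------
# load_and_convert_onehot() is defined elsewhere; it presumably maps an integer
# label to a one-hot vector of length n_classes, e.g.:
def load_and_convert_onehot_sketch(label, n_classes):
    # 2 -> [0., 0., 1., ...]; depth must match the model's output layer
    return tf.one_hot(tf.cast(label, tf.int32), depth=n_classes)
# ------------------------------------------------------------------------------

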
def train_regression(config):
    split_folder = config["split_folder"]
    split_idx = config["split_idx"]
    img_folder = config["img_folder"]
    file_col = config["file_col"]
    par_col = config["par_col"]
    tag_col = config["tag_col"]
    tags_to_label = config["tags_to_label"]
    size = config["size"]
    ch = config["ch"]
    basic_process = config["basic_process"]
    extra_process = config["extra_process"]
    par_mean = config["par_mean"]  # unused
    par_std = config["par_std"]  # unused
    n_classes = config["n_classes"]
    epochs = config["epochs"]
    BATCH_SIZE = config["BATCH_SIZE"]
    AUTOTUNE = config["AUTOTUNE"]
    optimizer = config["optimizer"]
    model_folder = config["model_folder"]
    model = config["model"]
    loss = config["loss"]
    metrics = config["metrics"]

    train_df = pd.read_csv(os.path.join(split_folder, "train_{}.csv".format(split_idx)), encoding="shift-jis")
    tag_mu = train_df[tag_col].mean()
    tag_sigma = train_df[tag_col].std()

    par_mu = train_df[par_col].mean()
    par_sigma = train_df[par_col].std()


    train_paths = [os.path.join(img_folder, f) for f in list(train_df[file_col])]
    train_pars = list(train_df[par_col])
    train_tags = list(train_df[tag_col])
    trainset = tf.data.Dataset.from_tensor_slices((train_paths, train_pars, train_tags))
    trainset = trainset.map(
        lambda path, par, tag: ((load_and_data_augment(path, size, ch, basic_process, extra_process),
                                   standardized_params(par, par_mu, par_sigma)),
                                  standardized_params(tag, tag_mu, tag_sigma)),
        num_parallel_calls=AUTOTUNE
    ).cache()
    trainset = trainset.shuffle(buffer_size=len(train_paths)).repeat(epochs*2).batch(BATCH_SIZE).prefetch(
        buffer_size=AUTOTUNE)
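    # repeat(epochs*2) keeps the cached dataset from being exhausted during
    # fit(); steps_per_epoch below limits each epoch to one pass over the files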

    # Validation set preprocessing
    val_df = pd.read_csv(os.path.join(split_folder, "test_{}.csv".format(split_idx)), encoding="shift-jis")
    val_paths = [os.path.join(img_folder, f) for f in list(val_df[file_col])]
    val_pars = list(val_df[par_col])
    val_tags = list(val_df[tag_col])
    valset = tf.data.Dataset.from_tensor_slices((val_paths, val_pars, val_tags))
    valset = valset.map(
        lambda path, par, tag: ((load_img(path, size, ch),
                                   standardized_params(par, par_mu, par_sigma)),
                                  standardized_params(tag, tag_mu, tag_sigma)),
        num_parallel_calls=AUTOTUNE
    ).cache()
    valset = valset.repeat(epochs*2).batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)

    model_path = os.path.join(model_folder, "models_" + str(split_idx) + "_epoch{epoch:02d}.h5")
    mc_cb = tf.keras.callbacks.ModelCheckpoint(model_path, monitor='val_loss', mode='min', verbose=1,
                                               save_weights_only=True, save_best_only=True)
    rl_cb = tf.keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.2, patience=3, verbose=1, mode='auto',
                                                 min_delta=0.0001, cooldown=0, min_lr=0)
    es_cb = tf.keras.callbacks.EarlyStopping(monitor='loss', min_delta=0, patience=5, verbose=1, mode='auto')
    steps_per_epoch = math.ceil(len(train_paths) / BATCH_SIZE)
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    hist = model.fit(trainset, validation_data=valset, epochs=epochs, steps_per_epoch=steps_per_epoch,
                     callbacks=[mc_cb, rl_cb, es_cb])

    return hist
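

# --- Hypothetical sketch (not part of the original examples) -----------------
# load_img(path, size, ch) used in the tf.data pipelines above is assumed to
# read, decode, resize and rescale an image file, roughly like this (the real
# helper is defined elsewhere; load_and_data_augment presumably adds the
# basic_process / extra_process augmentations on top of it):
def load_img_sketch(path, size, ch):
    raw = tf.io.read_file(path)
    img = tf.image.decode_image(raw, channels=ch, expand_animations=False)
    img = tf.image.resize(img, [size, size])   # assumes a square target size
    return tf.cast(img, tf.float32) / 255.0    # scale pixels to [0, 1]
# ------------------------------------------------------------------------------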
Example #4
# Imports assumed by this example (not shown in the original): os, torch,
# torchvision.transforms as transforms, torch.autograd.Variable, plus the
# project's pix2pix module and its load_img / save_img / is_image_file helpers.
# `parser` is an argparse.ArgumentParser defined earlier and elided here.
opt = parser.parse_args()

netG = pix2pix.define_G(3, 3, 64, 'batch', False, [])
netG.load_state_dict(torch.load(opt.model))

image_dir = "facades/test/a/"
image_filenames = [x for x in os.listdir(image_dir) if is_image_file(x)]

transform_list = [
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]

transform = transforms.Compose(transform_list)

for image_name in image_filenames:
    img = load_img(image_dir + image_name)
    img = transform(img)
    input = Variable(img, volatile=True).view(1, -1, 256, 256)

    if opt.cuda:
        netG = netG.cuda()
        input = input.cuda()

    out = netG(input)
    out = out.cpu()
    out_img = out.data[0]
    if not os.path.exists("facades/result"):
        os.mkdir("facades/result")
    save_img(out_img, "facades/result/{}".format(image_name))
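
# --- Hypothetical sketch (not part of the original example) ------------------
# Variable(..., volatile=True) is the pre-0.4 PyTorch idiom for disabling
# autograd during inference; on current PyTorch the same loop body would read
# roughly as:
#
#     with torch.no_grad():
#         inp = transform(load_img(image_dir + image_name)).view(1, -1, 256, 256)
#         if opt.cuda:
#             netG, inp = netG.cuda(), inp.cuda()
#         out_img = netG(inp).cpu()[0]
# ------------------------------------------------------------------------------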
Example #5
def network_trainer(file_name,
                    data_dir,
                    template_dir,
                    test,
                    loss,
                    epochss,
                    shape,
                    data_gen_argss,
                    blacklist,
                    data_type,
                    slice_view,
                    visualisation=False,
                    pretrained_model=False,
                    data_sets=[''],
                    excluded_from_training=['']):
    """
    This function loads the data, preprocesses it and trains the network with given parameters.
    It trains the network successively with different data augmentation values.
    If the training is early stopped before 'min_epochs', the training is started again with reduced augmetnation values

    :param test: Bool: If Test is True, every parameter is set to increase learning speed. Used to test if the code runs
    :param remote: Bool: If remote is True, the paths are set for remote computer
    :param loss: string: loss with which the model will be trained
    :param epochss: Array with epochs. Should have the same length than data_gen_argss
    :param shape: Tuple (y,x): Shape of the images that should come out of the preprocessing
    :param data_gen_argss: Array of dicts : arguments for the data augmentations, should have the same length than epochss
    :param min_epochs: int: The minimum amount of epochs the network should be trained on. If this number is not reached, the training will start again with reduced augmentation values
    :param max_tries: int: Integer indicating how many times the training should be started again with reduced augmentation values
    :param visualisation: Bool: if True, all images after preprocessing are saved
    :return: Bool: True if min_epochs is not reached, False otherwise
    """

    seed = random.randint(0, 1000)

    print('Training with seed: ', seed)

    if data_type == 'anat':
        img_data = dl.load_img(data_dir, blacklist, studies=data_sets)
        excluded_img_data = dl.load_img(data_dir,
                                        studies=excluded_from_training)
    elif data_type == 'func':
        img_data = dl.load_func_img(data_dir, blacklist, studies=data_sets)
        excluded_img_data = dl.load_func_img(data_dir,
                                             studies=excluded_from_training)

    if test:
        epochss = np.ones(len(data_gen_argss), dtype=int) * 5
        save_dir = 'test/{loss}_{epochs}_{date}/'.format(
            loss=loss, epochs=np.sum(epochss), date=datetime.date.today())
        import shutil
        if os.path.exists('test/'):
            shutil.rmtree('test/')

    else:
        save_dir = data_dir + 'results/' + file_name + '/{loss}_{epochs}_{date}/'.format(
            loss=loss, epochs=np.sum(epochss), date=datetime.date.today())

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
        print('creating dir: ', save_dir)
    """shape = (z,y,x)"""

    temp = dl.load_mask(template_dir)
    mask_data = []
    excluded_mask_data = []

    for i in range(len(img_data)):
        mask_data.append(copy.deepcopy(temp[0]))

    for i in range(len(excluded_img_data)):
        excluded_mask_data.append(copy.deepcopy(temp[0]))

    # utils.get_image_and_mask(img_data,mask_data, shape,  save_dir, remove_black_labels_and_columns, slice_view)           #with this line all the images with the mask can be saved to create a blacklist

    print('*** Splitting data into Train, Validation and Test set ***')
    if test:
        x_train1_data, x_test_data, y_train1_data, y_test_data = model_selection.train_test_split(
            img_data,
            mask_data,
            random_state=seed,
            test_size=0.9,
            shuffle=True)
    else:
        x_train1_data, x_test_data, y_train1_data, y_test_data = model_selection.train_test_split(
            img_data,
            mask_data,
            random_state=seed,
            test_size=0.1,
            shuffle=True)

    print('*** Preprocessing ***')
    x_test_data.extend(excluded_img_data)
    y_test_data.extend(excluded_mask_data)
    x_test, y_test, x_test_affines, x_test_headers, file_names, y_test_affines, y_test_headers = utils.general.get_image_and_mask(
        x_test_data,
        y_test_data,
        shape,
        save_dir,
        slice_view=slice_view,
        visualisation=visualisation,
        blacklist_bool=blacklist)
    x_train1, y_train1, x_train1_affines, x_train1_headers, x_train1_file_names, = utils.general.get_image_and_mask(
        x_train1_data,
        y_train1_data,
        shape,
        save_dir,
        slice_view=slice_view,
        visualisation=visualisation,
        blacklist_bool=blacklist)[:5]
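    # get_image_and_mask() returns (images, masks, affines, headers, file_names,
    # mask_affines, mask_headers); [:5] keeps only the first five for the
    # training split, since the mask affines/headers are not used there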

    x_train_struct = {
        'x_train': x_train1,
        'x_train_affines': x_train1_affines,
        'x_train_headers': x_train1_headers,
        'file_names': x_train1_file_names,
    }

    y_train_struct = {'y_train': y_train1}

    x_test_struct = {
        'x_test': x_test,
        'x_test_affines': x_test_affines,
        'x_test_headers': x_test_headers,
        'file_names': file_names
    }

    y_test_struct = {
        'y_test': y_test,
        'y_test_affines': y_test_affines,
        'y_test_headers': y_test_headers,
    }

    with open(save_dir + 'x_test_struct.pkl', 'wb') as f:
        pickle.dump(x_test_struct, f)
    with open(save_dir + 'y_test_struct.pkl', 'wb') as f:
        pickle.dump(y_test_struct, f)
    with open(save_dir + 'y_train_struct.pkl', 'wb') as f:
        pickle.dump(y_train_struct, f)
    with open(save_dir + 'x_train_struct.pkl', 'wb') as f:
        pickle.dump(x_train_struct, f)

    x_train1 = np.concatenate(x_train1, axis=0)
    y_train1 = np.concatenate(y_train1, axis=0)
    x_train1 = np.expand_dims(x_train1, -1)
    y_train1 = np.expand_dims(y_train1, -1)
    x_train, x_val, y_train, y_val = model_selection.train_test_split(
        x_train1, y_train1, test_size=0.25, shuffle=True, random_state=seed)
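    # In the non-test branch, test_size=0.1 above and test_size=0.25 here give
    # roughly a 67.5% train / 22.5% validation / 10% test split (plus the
    # excluded studies appended to the test data)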

    print('TRAINING SHAPE: ' + str(x_train.shape[1:4]))
    print('*** Training with {} slices ***'.format(x_train.shape[0]))
    print('*** Validating with {} slices ***'.format(x_val.shape[0]))
    input_shape = (x_train.shape[1:4])
    """
    Callbacks
    """

    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  verbose=1,
                                  patience=5)
    earlystopper = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
    Adam = keras.optimizers.Adam(learning_rate=1e-4,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 amsgrad=True)

    callbacks = [reduce_lr, earlystopper]

    if loss == 'bincross':
        loss = 'binary_crossentropy'

    elif loss == 'dice':
        print('\n*********\n\nTraining with loss: dice-loss\n\n*********\n')
        loss = unet.dice_coef_loss

    elif loss == 'dice_bincross':
        print(
            '\n*********\n\nTraining with loss: dice_bincross\n\n*********\n')
        loss = unet.dice_bincross_loss

    elif loss == 'thr_bincross':
        print('\n*********\n\nTraining with loss: thr_bincross\n\n*********\n')
        loss = unet.thr_dice_coef

    else:
        raise ValueError(
            'Wrong loss function, choose between bincross, dice, dice_bincross or thr_bincross'
        )

    if not pretrained_model:
        if test:
            model = attention_unet.att_unet(input_shape[0], input_shape[1], 1)
            # model = unet.twolayernetwork(input_shape, 3, 0.5)

        else:
            # model = unet.unet(input_shape)
            model = attention_unet.att_r2_unet(input_shape[0], input_shape[1],
                                               1)

    else:
        print(pretrained_model)
        model = keras.models.load_model(pretrained_model,
                                        custom_objects={
                                            'dice_coef_loss':
                                            unet.dice_coef_loss,
                                            'dice_coef': unet.dice_coef
                                        })
    """
    Training
    Two loop variables: 
    - counter counts the number of steps
    - nmbr_tries counts the number of tries per step
    """
    counter = 1

    for data_gen_args, epochs in zip(data_gen_argss, epochss):

        augmentation = data_gen_args is not None  # None means no augmentation for this step

        if counter > 1:
            print(
                '\n\n\n\n********* \nTraining with higher augmentation values! Taking model from try {} \n*********\n\n\n\n'
                .format(best_try + 1))
            model = history.model

        histories = []
        new_save_dir = save_dir + '{counter}_Step/'.format(counter=counter)

        if not os.path.exists(new_save_dir):
            os.makedirs(new_save_dir)
        if data_gen_args == data_gen_argss[-1]:
            last_step = True
            # callbacks[-1] = EarlyStopping(monitor='val_loss', patience=80, verbose=1)
        else:
            last_step = False

        print('Step', counter, 'of', len(epochss))
        temp_history = training(data_dir,
                                data_sets,
                                data_type,
                                data_gen_args,
                                epochs,
                                loss,
                                shape,
                                x_train,
                                y_train,
                                x_val,
                                y_val,
                                x_test,
                                y_test,
                                new_save_dir,
                                x_test_data,
                                model,
                                seed,
                                Adam,
                                callbacks,
                                slice_view,
                                augmentation=augmentation,
                                visualisation=visualisation,
                                last_step=last_step)
        histories.append(temp_history)

        # histories is reset at the start of each step, so best_try is simply
        # the index of the history with the most epochs within this step
        history_epochs = [len(h.epoch) for h in histories]
        best_try = history_epochs.index(max(history_epochs))
        history = histories[best_try]
        counter += 1
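

# --- Hypothetical usage sketch (not part of the original example) ------------
# network_trainer() expects data_gen_argss and epochss of equal length; each
# entry describes one training step with its own augmentation strength. All
# paths and values below are illustrative assumptions only.
if __name__ == '__main__':
    data_gen_argss = [
        None,                                           # step 1: no augmentation
        dict(rotation_range=10, width_shift_range=0.05,
             height_shift_range=0.05, zoom_range=0.1),  # step 2: mild augmentation
    ]
    epochss = [50, 100]
    network_trainer(file_name='unet_run',
                    data_dir='data/',
                    template_dir='templates/',
                    test=False,
                    loss='dice',
                    epochss=epochss,
                    shape=(64, 64),
                    data_gen_argss=data_gen_argss,
                    blacklist=False,
                    data_type='anat',
                    slice_view='coronal')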