Example No. 1
def SVM_test_function(vals):
    s = GpyOptOption.convert_to_objects(*vals[0])
    dbd = GpyOptOption.convert_to_db_description(*vals[0])
    dbr = " ".join(GpyOptOption.tagit(vals[0]))

    print('Started: {} {}'.format(
        datetime.datetime.now().strftime("%D %H:%M:%S"), dbr))

    # algorithm
    version = (1, 2, 0)
    norma_data, norma_feature = s['norma_data'], s['norma_feature']
    norma_feature = norma_feature()

    ds = DatasetGenerator(150000, train_iterations=1, train=True)
    alg = LinearSVC(penalty=s['penalty_type'],
                    C=s['penalty'],
                    dual=s['dual'],
                    loss=s['loss'])

    acc_final, tested = 0., 0.

    for (Xqueue, Yqueue), (Xweek, Yweek) in ds.queue_iterator():
        Xqueue = norma_data(norma_feature.fit_transform(Xqueue))
        Xweek = norma_data(norma_feature.transform(Xweek))

        alg.fit(Xqueue, Yqueue)
        acc = alg.score(Xweek, Yweek)

        acc_final += acc
        tested += 1.
        print("Act: {}".format(acc))

    acc_final /= tested

    write_kwargs_to_db(
        TABLE='SVM',
        VERSION=version,
        RESULT=acc_final,
        **dbd,
    )  # dataset info

    print('Ended with {}: {}'.format(acc_final, dbr))
    return -acc_final
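
The objective above follows GPyOpt's convention: the optimiser hands the function a 2-D array of candidate points (hence the `vals[0]` indexing) and minimises the returned value, so the accuracy is negated. A minimal sketch of wiring it into a Bayesian-optimisation loop is shown below; the `domain` entries are placeholders, since the real search space is built by the project-specific `GpyOptOption` helper.

import GPyOpt

# Hypothetical search space -- the actual bounds come from GpyOptOption.
domain = [
    {'name': 'penalty', 'type': 'continuous', 'domain': (1e-3, 1e3)},
    {'name': 'dual', 'type': 'discrete', 'domain': (0, 1)},
]

opt = GPyOpt.methods.BayesianOptimization(f=SVM_test_function, domain=domain)
opt.run_optimization(max_iter=30)
print(opt.x_opt, -opt.fx_opt)  # best hyper-parameters and best mean accuracy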
Example No. 2
        print("Dice coefficient = {:.4f}, ".format(dice_coef), end="")
        plt.title("Prediction\nDice = {:.4f}".format(dice_coef), fontsize=20)

        save_name = os.path.join(png_directory,
                                 "prediction_tf_{}.png".format(idx))
        print("Saved as: {}".format(save_name))
        plt.savefig(save_name)


if __name__ == "__main__":

    model_filename = os.path.join(args.output_path, args.inference_filename)

    ds_testing = DatasetGenerator(os.path.join(args.data_path,
                                               "testing/*.npz"),
                                  crop_dim=args.crop_dim,
                                  batch_size=128,
                                  augment=False,
                                  seed=args.seed)
    # Load model
    if args.use_pconv:
        from model_pconv import unet
        unet_model = unet(use_pconv=True)
    else:
        from model import unet
        unet_model = unet()

    model = unet_model.load_model(model_filename)

    # Create output directory for images
    png_directory = args.output_pngs
    if not os.path.exists(png_directory):
        os.makedirs(png_directory)
Example No. 3
def NN_test_function(vals):
    s = GpyOptOption.convert_to_objects(*vals[0])
    dbd = GpyOptOption.convert_to_db_description(*vals[0])
    dbr = " ".join(GpyOptOption.tagit(vals[0]))

    print('Started: {} {}'.format(
        datetime.datetime.now().strftime("%D %H:%M:%S"), dbr))
    start_time = datetime.datetime.now()

    # algorithm
    version = (1, 1, 0)
    layers = layersoo.get_layers_from_object_dict(s)
    batch_size = s['batch_size']
    l1 = s['l1_reg']
    l2 = s['l2_reg']
    dropout = s['dropout'] if s['use_dropout'] else 0.
    gaussian_noise_stddev = (s['gaussian_noise']
                             if s['use_gaussian_noise'] else 0.)
    batchnormalization = s['use_batchnorm']

    norma_feature = s['norma_feature']()

    ds = DatasetGenerator(150000, train_iterations=1, train=True)
    network = NeuralNetworkClassifier(
        input_dim=540,
        output_dim=2,
        layers=layers,
        activation=s['activation'],
        batch_size=batch_size,
        learning_rate=s['learning_rate'],
        learning_rate_decay=s['learning_rate_decay'],
        l1=l1,
        l2=l2,
        dropout=dropout,
        gaussian_noise_stddev=gaussian_noise_stddev,
        batchnormalization=batchnormalization,
        use_amsgrad=s['use_amsgrad'],
        alpha=s['alpha'],
        verbose=1)

    acc_final, tested = 0., 0.

    for (Xqueue, Yqueue), (Xweek, Yweek) in ds.queue_iterator():
        Xqueue = norma_feature.fit_transform(Xqueue)
        Xweek = norma_feature.transform(Xweek)

        network.fit(Xqueue, Yqueue)
        acc = network.score(Xweek, Yweek)[1]

        acc_final += acc
        tested += 1.
        print("Act: {}".format(acc))
    acc_final /= tested

    end_time = (datetime.datetime.now() - start_time).seconds
    write_kwargs_to_db(TABLE='NeuralNetwork',
                       VERSION=version,
                       RESULT=acc_final,
                       DURATION=end_time,
                       **dbd)
    print('Ended with {} ({:.5}min): {}'.format(acc_final, end_time / 60, dbr))
    return -acc_final
Example No. 4
def train_and_predict(data_path, crop_dim, batch_size, n_epoch):
    """
    Create a model, load the data, and train it.
    """
    """
    Step 1: Load the data
    """
    print("-" * 30)
    print("Loading the data from the NumPy files to tf.dataset ...")
    print("-" * 30)

    ds_train = DatasetGenerator(os.path.join(data_path, "train/*.npz"),
                                crop_dim=crop_dim,
                                batch_size=batch_size,
                                augment=True,
                                seed=args.seed)

    ds_validation = DatasetGenerator(os.path.join(data_path,
                                                  "validation/*.npz"),
                                     crop_dim=crop_dim,
                                     batch_size=batch_size,
                                     augment=False,
                                     seed=args.seed)

    ds_testing = DatasetGenerator(os.path.join(data_path, "testing/*.npz"),
                                  crop_dim=crop_dim,
                                  batch_size=batch_size,
                                  augment=False,
                                  seed=args.seed)

    print("-" * 30)
    print("Creating and compiling model ...")
    print("-" * 30)
    """
    Step 2: Define the model
    """
    if args.use_pconv:
        from model_pconv import unet
        unet_model = unet(use_pconv=True)
    else:
        from model import unet
        unet_model = unet()

    model = unet_model.create_model(ds_train.get_input_shape(),
                                    ds_train.get_output_shape())

    model_filename, model_callbacks = unet_model.get_callbacks()

    # If there is a current saved file, then load weights and start from
    # there.
    saved_model = os.path.join(args.output_path, args.inference_filename)
    if os.path.isfile(saved_model):
        model.load_weights(saved_model)
    """
    Step 3: Train the model on the data
    """
    print("-" * 30)
    print("Fitting model with training data ...")
    print("-" * 30)

    model.fit(ds_train.get_dataset(),
              epochs=n_epoch,
              validation_data=ds_validation.get_dataset(),
              verbose=1,
              callbacks=model_callbacks)
    """
    Step 4: Evaluate the best model
    """
    print("-" * 30)
    print("Loading the best trained model ...")
    print("-" * 30)

    unet_model.evaluate_model(model_filename, ds_testing.get_dataset())
    """
    Step 5: Print the command to convert the TensorFlow model into OpenVINO format with the Model Optimizer.
    """
    print("-" * 30)
    print("-" * 30)
    unet_model.print_openvino_mo_command(model_filename,
                                         ds_testing.get_input_shape())
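
train_and_predict also reads several values from a module-level `args` object (seed, use_pconv, output_path, inference_filename). A minimal, assumed driver block is sketched below; flag names and defaults other than those visible in the snippet are placeholders.

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Train a 2D U-Net on the Medical Decathlon data")
    parser.add_argument("--data_path", default="data/")
    parser.add_argument("--output_path", default="output/")
    parser.add_argument("--inference_filename", default="unet_model.h5")
    parser.add_argument("--crop_dim", type=int, default=128)
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--epochs", type=int, default=30)
    parser.add_argument("--seed", type=int, default=816)
    parser.add_argument("--use_pconv", action="store_true")
    args = parser.parse_args()

    train_and_predict(args.data_path, args.crop_dim, args.batch_size,
                      args.epochs)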
Example No. 5
    save_name = os.path.join(png_directory,
                             "prediction_tf_{}_{}.png".format(batch_num, idx))
    print("Saved as: {}".format(save_name))
    plt.savefig(save_name)


if __name__ == "__main__":

    model_filename = os.path.join(args.output_path, args.inference_filename)

    trainFiles, validateFiles, testFiles = get_decathlon_filelist(
        data_path=args.data_path, seed=args.seed, split=args.split)

    ds_test = DatasetGenerator(testFiles,
                               batch_size=128,
                               crop_dim=[args.crop_dim, args.crop_dim],
                               augment=False,
                               seed=args.seed)

    # Load model
    if args.use_pconv:
        from model_pconv import unet
        unet_model = unet(use_pconv=True)
    else:
        from model import unet
        unet_model = unet()

    model = unet_model.load_model(model_filename)

    # Create output directory for images
    png_directory = args.output_pngs
Example No. 6
              tf.pywrap_tensorflow.IsMklEnabled())


test_intel_tensorflow()  # Prints if Intel-optimized TensorFlow is used.
"""
crop_dim = Dimensions to crop the input tensor
"""
crop_dim = (args.tile_height, args.tile_width, args.tile_depth,
            args.number_input_channels)
"""
1. Load the dataset
"""
brats_data = DatasetGenerator(crop_dim,
                              data_path=args.data_path,
                              batch_size=args.batch_size,
                              train_test_split=args.train_test_split,
                              validate_test_split=args.validate_test_split,
                              number_output_classes=args.number_output_classes,
                              random_seed=args.random_seed,
                              shard=hvd.rank())

if (hvd.rank() == 0):
    print("{} workers".format(hvd.size()))
    brats_data.print_info()  # Print dataset information
"""
2. Create the TensorFlow model
"""
model = unet_3d(input_dim=crop_dim,
                filters=args.filters,
                number_output_classes=args.number_output_classes,
                use_upsampling=args.use_upsampling,
                concat_axis=-1,
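
This snippet shards the dataset with `hvd.rank()` and gates logging on rank 0, so it assumes Horovod was initialised earlier in the script. A minimal sketch of that setup, assuming the Keras binding `horovod.tensorflow.keras`, is:

import tensorflow as tf
import horovod.tensorflow.keras as hvd

hvd.init()  # one process per worker; rank 0 handles logging and checkpoints

# Pin each worker to a single GPU if any are visible (a no-op on CPU-only nodes).
gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], "GPU")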
Example No. 7
    """
    """
    Step 1: Define a data loader
    """
    print("-" * 30)
    print(
        "Loading the data from the Medical Decathlon directory to a TensorFlow data loader ..."
    )
    print("-" * 30)

    trainFiles, validateFiles, testFiles = get_decathlon_filelist(
        data_path=args.data_path, seed=args.seed, split=args.split)

    ds_train = DatasetGenerator(trainFiles,
                                batch_size=args.batch_size,
                                crop_dim=[args.crop_dim, args.crop_dim],
                                augment=True,
                                seed=args.seed)
    ds_validation = DatasetGenerator(validateFiles,
                                     batch_size=args.batch_size,
                                     crop_dim=[args.crop_dim, args.crop_dim],
                                     augment=False,
                                     seed=args.seed)
    ds_test = DatasetGenerator(testFiles,
                               batch_size=args.batch_size,
                               crop_dim=[args.crop_dim, args.crop_dim],
                               augment=False,
                               seed=args.seed)

    print("-" * 30)
    print("Creating and compiling model ...")
Example No. 8

print(args)
test_intel_tensorflow()  # Prints if Intel-optimized TensorFlow is used.
"""
crop_dim = Dimensions to crop the input tensor
"""
crop_dim = (args.tile_height, args.tile_width, args.tile_depth,
            args.number_input_channels)
"""
1. Load the dataset
"""
brats_data = DatasetGenerator(crop_dim,
                              data_path=args.data_path,
                              batch_size=args.batch_size,
                              train_test_split=args.train_test_split,
                              validate_test_split=args.validate_test_split,
                              number_output_classes=args.number_output_classes,
                              random_seed=args.random_seed)

brats_data.print_info()  # Print dataset information
"""
2. Create the TensorFlow model
"""
model = unet_3d(input_dim=crop_dim,
                filters=args.filters,
                number_output_classes=args.number_output_classes,
                use_upsampling=args.use_upsampling,
                concat_axis=-1,
                model_name=args.saved_model_name)
Example No. 9
def VAE_train_test(idexp):
    last_sample, last_week_nn, last_week_vae, scaler = saver.load_result()

    ds = DatasetGenerator(150000,
                          train_iterations=1,
                          train=False,
                          skip_weeks=idexp)
    debug = True

    if debug:
        maximum_epochs = 1
        generate_number = 150
    else:
        maximum_epochs = 1000
        generate_number = 150000

    i = 1
    for (Xqueue, Yqueue), (Xweek, Yweek) in ds.queue_iterator():
        print('Started {}: {}'.format(
            i,
            datetime.datetime.now().strftime("%D %H:%M:%S")))
        if i <= last_sample:
            print("Skipping iteration {} (already done)".format(i))
            i += 1
            continue
        else:
            print("Started iteration no. {}".format(i))

        if i == 1:
            scaler = MinMaxScaler()
            #scaler = PowerTransformer(standardize=False)
            print('Transform')
            Xqueue = scaler.fit_transform(Xqueue)
            Xweek = scaler.transform(Xweek)

            c = MyChecker.CheckManager(3, MyChecker.NanChecker.NanError)
            while c.not_done():
                with c:
                    print('\tFit NN')
                    last_week_nn = create_new_NN_V2()
                    last_week_nn.fit(Xqueue,
                                     Yqueue,
                                     maximum_epochs=maximum_epochs)
                if c.not_done():
                    last_week_nn.free()
                    last_week_nn = None

            print('\tScore NN')
            labels, probabilities = last_week_nn.get_label_and_probability(
                Xweek)
            print('\tSave result')
            saver.save_result(i, np.mean(np.equal(labels, Yweek)),
                              probabilities)

            c = MyChecker.CheckManager(3, MyChecker.NanChecker.NanError)
            while c.not_done():
                with c:
                    print('\tFit VAE')
                    last_week_vae = create_new_VAE_V2()
                    last_week_vae.fit(Xqueue, maximum_epochs=maximum_epochs)
                if c.not_done():
                    last_week_vae.free()
                    last_week_vae = None

            print('\tSave models')
            saver.save_models(i, last_week_nn, last_week_vae, scaler)
        else:
            print('\tScore with previous NN')
            labels, probabilities = last_week_nn.get_label_and_probability(
                scaler.transform(Xweek))
            print('\tSave result')
            res = np.mean(np.equal(labels, Yweek))
            saver.save_result(i, res, probabilities)
            print('\t\tresult: {}'.format(res))

            c = MyChecker.CheckManager(3, MyChecker.NanChecker.NanError)
            while c.not_done():
                with c:
                    print('\tGenerate')
                    generated_data = last_week_vae.generate(generate_number)

            print('\tPredict')
            generated_prediction = last_week_nn.get_label(generated_data)
            print('\tInverse transform')
            generated_data = scaler.inverse_transform(generated_data)
            print('\tConcatenate old with new')
            Xqueue = np.concatenate([generated_data, Xqueue], axis=0)
            Yqueue = np.concatenate([generated_prediction, Yqueue], axis=0)

            print('\tTransform')
            pred = Xqueue  # keep the pre-scaling features for the debug dump
            Xqueue = scaler.fit_transform(Xqueue)
            # Debug dump of the re-scaled queue alongside the pre-scaling values.
            np.savez('spatne.npz', tet=Xqueue, predtim=pred)

            c = MyChecker.CheckManager(3, MyChecker.NanChecker.NanError)
            while c.not_done():
                with c:
                    print('\tFit new NN')
                    new_week_nn = create_new_NN_V2()
                    new_week_nn.fit(Xqueue,
                                    Yqueue,
                                    maximum_epochs=maximum_epochs)
                if c.not_done():
                    new_week_nn.free()
                    new_week_nn = None

            c = MyChecker.CheckManager(3, MyChecker.NanChecker.NanError)
            while c.not_done():
                with c:
                    print('\tFit VAE')
                    new_week_vae = create_new_VAE_V2()
                    new_week_vae.fit(Xqueue, maximum_epochs=maximum_epochs)
                if c.not_done():
                    new_week_vae.free()
                    new_week_vae = None

            print('\tSave models')
            saver.save_models(i, new_week_nn, new_week_vae, scaler)

            last_week_nn.free()
            last_week_vae.free()
            last_week_nn = new_week_nn
            last_week_vae = new_week_vae

            print('\tClear old models')

        i += 1
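
The function expects the number of already-processed weeks as its only argument (it is forwarded to `DatasetGenerator` as `skip_weeks`), so a minimal, assumed driver is simply:

if __name__ == '__main__':
    # Start from the first week; pass a positive value to skip earlier weeks.
    VAE_train_test(0)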