Example #1
def main(resume):
    train_iterator = utils.SimpleFilesIterator(SAMPLE_TRAIN_PATH, 5,
                                               input_config, output_config)
    validation_iterator = utils.SimpleFilesIterator(SAMPLE_VAL_PATH, 5,
                                                    input_config,
                                                    output_config)

    gen_train = utils.unet_generator(train_iterator)
    gen_val = utils.unet_generator(validation_iterator)

    unet = model.get_unet_model(input_config,
                                output_config,
                                64,
                                opt=Adam(lr=0.0003, beta_1=0.5))

    count = 0

    cb = callbacks.get_callbacks()

    reporter = report.Reporter(UNET_LOGS_PATH, output_config, resume)

    for epoch in range(100000):
        train_iteration(unet, gen_train, cb)

        u_input, u_fake = validation_iterator.next()
        u_output = unet.predict_on_batch(u_input)

        reporter.handle(u_input, u_fake, u_output)

        count = count + 1
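
Note: the `train_iteration` helper called above is not shown on this page. A minimal sketch of what it might do, assuming the generator yields `(input, target)` batches and `cb` is a list of Keras callbacks (the name and step count are hypothetical):

def train_iteration(model, generator, cb, steps=100):
    # Hypothetical sketch: run `steps` mini-batch updates with train_on_batch.
    # Keras callbacks normally run inside fit(); driving them manually would
    # require calling their on_epoch_* hooks explicitly.
    for _ in range(steps):
        x, y = next(generator)
        loss = model.train_on_batch(x, y)
    return loss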
Example #2
def train_flow_branch():
    """
    Function to train a SaliencyBranch model on optical flow.
    """
    experiment_id = 'FLOW_{}'.format(uuid.uuid4())

    model = SaliencyBranch(input_shape=(3, frames_per_seq, h, w),
                           c3d_pretrained=True,
                           branch='flow')
    model.compile(optimizer=opt,
                  loss={
                      'prediction_fine': saliency_loss(name=full_frame_loss),
                      'prediction_crop': saliency_loss(name=crop_loss)
                  },
                  loss_weights={
                      'prediction_fine': w_loss_fine,
                      'prediction_crop': w_loss_cropped
                  })
    model.summary()

    model.fit_generator(
        generator=generate_dreyeve_OF_batch(batchsize=batchsize,
                                            nb_frames=frames_per_seq,
                                            image_size=(h, w),
                                            mode='train'),
        validation_data=generate_dreyeve_OF_batch(batchsize=batchsize,
                                                  nb_frames=frames_per_seq,
                                                  image_size=(h, w),
                                                  mode='val'),
        nb_val_samples=val_samples_per_epoch,
        samples_per_epoch=train_samples_per_epoch,
        nb_epoch=nb_epochs,
        callbacks=get_callbacks(experiment_id=experiment_id))
Example #3
def transfer_and_repeat(model,
                        intermediate,
                        transfer_model,
                        data,
                        validation_split=0.33,
                        epochs=5):
    """
    Trains a new network using second split of data
    given a particular data split, stored in the data directory
    """
    X, Y = data

    # Save model weights to load into intermediate model
    intermediate = load_weights_by_name(model, intermediate)

    # Compute intermediate transformation from previous intermediate model over new data
    preds = intermediate.predict(X, batch_size=64)

    # Fit shallower model using predictions and labels of new data
    cbs = callbacks.get_callbacks(name="transfer_training")
    history = transfer_model.fit(preds,
                                 Y,
                                 validation_split=validation_split,
                                 batch_size=64,
                                 epochs=epochs,
                                 callbacks=cbs)

    return intermediate, transfer_model, history, cbs
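
`load_weights_by_name` is not defined on this page. A plausible sketch (an assumption, not the repository's implementation) copies weights from the trained model into the intermediate model wherever layer names match; Keras can do the same from a saved file via `model.load_weights(path, by_name=True)`:

def load_weights_by_name(src_model, dst_model):
    # Sketch: copy weights layer-by-layer wherever the layer names match.
    src_layers = {layer.name: layer for layer in src_model.layers}
    for layer in dst_model.layers:
        if layer.name in src_layers:
            layer.set_weights(src_layers[layer.name].get_weights())
    return dst_model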
Example #4
def fine_tuning():
    """
    Function to launch training on DreyeveNet. It is called `fine_tuning` since it
    assumes the three branches to be pretrained, but it should also work from scratch.
    """

    experiment_id = 'DREYEVE_{}'.format(uuid.uuid4())

    model = DreyeveNet(frames_per_seq=frames_per_seq, h=h, w=w)
    model.compile(optimizer=opt,
                  loss={
                      'prediction_fine': saliency_loss(name=full_frame_loss),
                      'prediction_crop': saliency_loss(name=crop_loss)
                  },
                  loss_weights={
                      'prediction_fine': w_loss_fine,
                      'prediction_crop': w_loss_cropped
                  })
    model.summary()

    model.fit_generator(
        generator=generate_dreyeve_batch(batchsize=batchsize,
                                         nb_frames=frames_per_seq,
                                         image_size=(h, w),
                                         mode='train'),
        validation_data=generate_dreyeve_batch(batchsize=batchsize,
                                               nb_frames=frames_per_seq,
                                               image_size=(h, w),
                                               mode='val'),
        nb_val_samples=val_samples_per_epoch,
        samples_per_epoch=train_samples_per_epoch,
        nb_epoch=nb_epochs,
        callbacks=get_callbacks(experiment_id=experiment_id))
Example #5
def main(args):
    dataset = get_dataset_name(args)
    X_train, X_val, y_train, y_val = get_data(dataset)
    model, model_dir = get_model(args, dataset['shape'])

    verbosity, callbacks = get_callbacks(model_dir, args, model)

    model.fit(X_train,
              y_train,
              epochs=config.EPOCHS,
              batch_size=config.BATCH_SIZE,
              callbacks=callbacks,
              validation_data=(X_val, y_val),
              verbose=verbosity)
Example #6
def train_and_validate(model, data, validation_split=0.33, epochs=5):
    """
    Trains a model over specified amount of data with specified train/validation split
    """
    X, Y = data
    cbs = callbacks.get_callbacks(name="initial_training")
    history = model.fit(X,
                        Y,
                        validation_split=validation_split,
                        batch_size=64,
                        epochs=epochs,
                        callbacks=cbs)

    return model, history, cbs
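
Most examples on this page call a project-local `get_callbacks` helper with varying signatures. As a point of reference, a typical Keras implementation (a sketch assuming it bundles checkpointing, early stopping, LR scheduling, and logging; the paths and patience values are made up) could look like:

from keras.callbacks import (ModelCheckpoint, EarlyStopping,
                             ReduceLROnPlateau, TensorBoard)

def get_callbacks(name="run"):
    # Checkpoint the best model, stop on plateau, halve LR when stuck, log to TB.
    return [
        ModelCheckpoint("checkpoints/{}.best.h5".format(name),
                        monitor="val_loss", save_best_only=True),
        EarlyStopping(monitor="val_loss", patience=10),
        ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=5),
        TensorBoard(log_dir="logs/{}".format(name)),
    ]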
Example #7
               kernel_initializer="glorot_uniform",
               activation="softmax")(dropped3)

# Construct the model
model = Model(inputs=base_layer.input, outputs=output)

# Load previous best checkpoint
weights_file = "../models/weights.best_denseNet121." + str(width) + "x" + str(
    height) + ".hdf5"
if os.path.exists(weights_file):
    print("load weight file:", weights_file)
    model.load_weights(weights_file)

print("Get callbacks")
# Get callbacks
cb = callbacks.get_callbacks(weights_file)

print("Compile the model")
# Compile the model
model.compile(loss="categorical_crossentropy",
              optimizer=SGD(momentum=0.9),
              metrics=["acc"])

# Get data generator
train_gen, valid_gen = generators.get_generators(width, height)

print("Start fitting")

# execute fitting on main thread
model_output = model.fit_generator(train_gen,
                                   steps_per_epoch=constants.STEPS,
Example #8
        logger.info(f"Fold {i}")
        logger.info("=" * 20)

        trn_df = train_all.loc[trn_idx, :].reset_index(drop=True)
        val_df = train_all.loc[val_idx, :].reset_index(drop=True)

        loaders = {
            phase: datasets.get_train_loader(df_, tp, fp, train_audio, config,
                                             phase)
            for df_, phase in zip([trn_df, val_df], ["train", "valid"])
        }
        model = models.get_model(config, fold=i).to(device)
        criterion = criterions.get_criterion(config)
        optimizer = training.get_optimizer(model, config)
        scheduler = training.get_scheduler(optimizer, config)
        callbacks = clb.get_callbacks(config)

        runner = training.get_runner(config, device)

        runner.train(model=model,
                     criterion=criterion,
                     loaders=loaders,
                     optimizer=optimizer,
                     scheduler=scheduler,
                     num_epochs=global_params["num_epochs"],
                     verbose=True,
                     logdir=logdir / f"fold{i}",
                     callbacks=callbacks,
                     main_metric=global_params["main_metric"],
                     minimize_metric=global_params["minimize_metric"])
Example #9
import uuid

from batch_generators import generate_RMDN_batch

from models import RMDN_train

from config import C, batchsize, T, encoding_dim, lr, h, w, hidden_states, nb_epoch, samples_per_epoch
from keras.optimizers import RMSprop
from objectives import MDN_neg_log_likelyhood

from callbacks import get_callbacks

if __name__ == '__main__':

    # get a new experiment id
    experiment_id = str(uuid.uuid4())

    model = RMDN_train(hidden_states=hidden_states,
                       n_mixtures=C,
                       input_shape=(T, encoding_dim))
    model.compile(optimizer=RMSprop(lr=lr),
                  loss=MDN_neg_log_likelyhood(image_size=(h, w),
                                              B=batchsize,
                                              T=T,
                                              C=C))

    model.fit_generator(generator=generate_RMDN_batch(batchsize, mode='train'),
                        nb_epoch=nb_epoch,
                        samples_per_epoch=samples_per_epoch,
                        callbacks=get_callbacks(experiment_id))
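
`MDN_neg_log_likelyhood` here operates on saliency maps; for reference, a generic 1-D mixture-density negative log-likelihood in Keras backend code (an illustrative sketch, not the repository's image-based version) is:

import numpy as np
import keras.backend as K

def mdn_nll_1d(C):
    # y_pred packs [mixture logits (C), mu (C), log_sigma (C)] per sample;
    # y_true is the scalar target with shape (batch, 1).
    def loss(y_true, y_pred):
        log_pi = K.log(K.softmax(y_pred[:, :C]) + K.epsilon())
        mu = y_pred[:, C:2 * C]
        sigma = K.exp(y_pred[:, 2 * C:]) + K.epsilon()
        # per-component Gaussian log-density log N(y | mu_c, sigma_c)
        z = (y_true - mu) / sigma
        log_comp = -0.5 * K.square(z) - K.log(sigma) - 0.5 * np.log(2.0 * np.pi)
        # NLL = -log sum_c pi_c N(y | mu_c, sigma_c), computed via log-sum-exp
        return -K.logsumexp(log_pi + log_comp, axis=-1)
    return loss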
Example #10
from callbacks import get_callbacks

from keras.optimizers import SGD  # needed for the SGD optimizer below
from model import ml_net_model, loss
from batch_generators import generate_batch

from config import shape_c, shape_r, batchsize
from config import nb_samples_per_epoch, nb_epoch, nb_imgs_val

if __name__ == '__main__':

    model = ml_net_model(img_cols=shape_c,
                         img_rows=shape_r,
                         downsampling_factor_product=10)
    sgd = SGD(lr=1e-3, decay=0.0005, momentum=0.9, nesterov=True)
    print("Compile ML-Net Model")
    model.compile(sgd, loss)
    model.summary()

    print("Training ML-Net")
    model.fit_generator(generator=generate_batch(batchsize=batchsize,
                                                 mode='train',
                                                 gt_type='fix'),
                        validation_data=generate_batch(batchsize=batchsize,
                                                       mode='val',
                                                       gt_type='fix'),
                        nb_val_samples=nb_imgs_val,
                        nb_epoch=nb_epoch,
                        samples_per_epoch=nb_samples_per_epoch,
                        callbacks=get_callbacks())
Example #11
if args.gpus > 1:
    model = multi_gpu_model(model, gpus=args.gpus)

# Optimizer
optimizer = Adam(lr=args.lr, amsgrad=True)

# Compile the model
print(
    '\n\n\n', 'Compiling model..', runID, '\n\n\tGPU ' +
    (str(args.gpus) + ' gpus' if args.gpus > 1 else args.gpuids) +
    '\t\tBatch size [ ' + str(args.bs) + ' ] ' + ' \n\n')
model.compile(loss=depth_loss_function, optimizer=optimizer)

print('Ready for training!\n')

# Callbacks
callbacks = []
if args.data == 'nyu':
    callbacks = get_callbacks(model, basemodel, train_generator,
                              test_generator,
                              load_test_data() if args.full else None, runPath)

# Start training
model.fit_generator(train_generator,
                    callbacks=callbacks,
                    validation_data=test_generator,
                    epochs=args.epochs,
                    shuffle=True)

# Save the final trained model:
basemodel.save(runPath + '/model.h5')
Example #12
def main():
    import os
    os.environ["CUDA_VISIBLE_DEVICES"]="0"
    
    # training data
    data_files = fetch_data_files(r'D:\Projects\QSM\Data', ['rdf_resharp4.nii.gz', 'brainmask.nii.gz', 'mag.nii.gz'])

    print("num of datasets %d" % (len(data_files)))

    # -------------------------------
    # create data generator for training and validatation
    training_list, validation_list = get_validation_split(data_files,
                                                          'training.pkl',
                                                          'val.pkl',
                                                          data_split=1,
                                                          overwrite=True)
    training_generator = DataGenerator(data_files,
                                       training_list,
                                       batch_size=config["batch_size"],
                                       patch_size=config["patch_size"],
                                       voxel_size=config["voxel_size"],
                                       shuffle=True)
    validation_generator = DataGenerator(data_files,
                                         validation_list,
                                         batch_size=config["valid_batch_size"],
                                         patch_size=config["valid_patch_size"],
                                         voxel_size=config["voxel_size"],
                                         shuffle=False)
    
    # -------------------------------
    # create the model
    umodel = unet_model_3d(pool_size=config["pool_size"],
                           deconvolution=config["deconvolution"],
                           depth=config["layer_depth"],
                           n_outputs=1,
                           n_base_filters=config["n_base_filters"],
                           kernel=config["conv_kernel"],
                           batch_normalization=config["batch_normalization"],
                           activation_name=config["activation"])

    print("model summary")
    print(umodel.summary())
    save_model(umodel, config["model_file"])
    
    model = semi_model_t(umodel)
        
    optimizer = optimizers.Adam(lr=config["initial_learning_rate"], beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)

    alpha = K.variable(1.0)
    beta = K.variable(0.06)
    gamma_roi, gamma_out = K.variable(0.0), K.variable(0.0)
    model.compile(loss=['mse', tv_loss, 'mae', 'mae'],
                  loss_weights=[alpha, beta, gamma_roi, gamma_out],
                  optimizer=optimizer)    
    
    callbacks = get_callbacks(model=[model, umodel],
                              model_weights=[alpha, beta, gamma_roi, gamma_out],
                              weight_file='model_weight.h5',
                              initial_learning_rate=config["initial_learning_rate"],
                              learning_rate_drop=0.5,
                              learning_rate_epochs=None,
                              learning_rate_patience=20,
                              early_stopping_patience=None)

    model.fit_generator(generator=training_generator,
                        steps_per_epoch=200,
                        epochs=15,
                        max_queue_size=4,
                        use_multiprocessing=False,
                        workers=2,
                        validation_data=None,  # validation_generator
                        validation_steps=0,  # len(validation_list) // config["batch_size"]
                        callbacks=callbacks)

    umodel.save_weights('model_weight.h5', overwrite=True)
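
Example #12 passes the `K.variable` loss weights into `get_callbacks`, which suggests they are re-weighted during training. A minimal sketch of such a callback (an assumption about the unshown helper; the schedule below is made up) updates them with `K.set_value`:

import keras.backend as K
from keras.callbacks import Callback

class LossWeightScheduler(Callback):
    # Sketch: anneal the weights of a multi-term loss over epochs.
    def __init__(self, alpha, beta, gamma_roi, gamma_out):
        super(LossWeightScheduler, self).__init__()
        self.weights = (alpha, beta, gamma_roi, gamma_out)

    def on_epoch_end(self, epoch, logs=None):
        _, _, gamma_roi, gamma_out = self.weights
        # e.g. phase in the ROI / outside-ROI terms after a warm-up period
        if epoch >= 5:
            K.set_value(gamma_roi, 0.1)
            K.set_value(gamma_out, 0.1)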
Example #13
def main():

    print(results)
    df_train = pd.read_csv(results.train_data)
    df_train = prepare_df(df_train)
    df_train = df_train[:1000]  # note: truncates the training data to the first 1000 rows
    df_target = df_train.copy()

    df_train = processtrain.generate_folds(df_train, folds=results.folds)

    print(results.output_dir)
    model_path = os.path.join(results.output_dir, results.backbone)

    K.clear_session()

    #
    for fold in range(results.folds):
        # fold is just an integer

        train = df_train[df_train["fold"] != fold].copy()
        valid = df_train[df_train["fold"] == fold].copy()
        print(train.shape)
        print(valid.shape)
        #df_train = None

        print("Training on : {} samples".format(len(train)))
        print("validating on: {}  samples".format(len(valid)))

        model = modelgenerator.get_model_unet(backbone=results.backbone,
                                              freeze_encoder=True)

        opt = RMSprop(lr=results.lr)

        model.compile(optimizer=opt,
                      loss=losses.pick_loss(results.loss),
                      metrics=[losses.dice_coef])

        if results.pretrain_weights is None:

            print("training from scratch ")

        else:

            model.load_weights(results.pretrain_weights)

        au = aumentations.get_augmentations("valid")
        generator_train = generator.DataGenerator(
            list_IDs=train.index,
            df=train,
            target_df=df_target,
            batch_size=results.batch_size,
            aumentations=au,
            base_path=results.images_path,
            mode="fit")
        generator_valid = generator.DataGenerator(
            list_IDs=valid.index,
            df=valid,
            target_df=df_target,
            batch_size=results.batch_size,
            aumentations=au,
            base_path=results.images_path,
            mode="fit")

        call_bks = callbacks.get_callbacks(
            results, fold, generator_train.samples)  # get the callbacks

        model.fit_generator(
            generator=generator_train,
            validation_data=generator_valid,
            epochs=results.epochs,
            steps_per_epoch=generator_train.samples // results.batch_size,
            validation_steps=generator_valid.samples // results.batch_size,
            callbacks=call_bks)

        gc.collect()

        ## TODO FROM HERE

    # train the whole thing

    # get the folds
    # get the data
    # create generators
    # build the model
    # train the model
    # save the model

    # log results

    return ""
Example #14
def train(model):
    print("Model done")

    # x_train = []
    # y_train = []

    # preprocessing_function:
    # function that will be applied on each input. The function will run after the image is
    # resized and augmented. The function should take one argument: one image (Numpy tensor
    # with rank 3), and should output a Numpy tensor with the same shape.
    # We create two instances with the same arguments.
    data_gen_args = dict(
        preprocessing_function=random_crop,
        # rescale=1. / 255,
        # featurewise_center=True,
        # featurewise_std_normalization=True,
        horizontal_flip=True,
        vertical_flip=True,
        validation_split=0.1)
    x_image_gen = ImageDataGenerator(**data_gen_args)
    y_image_gen = ImageDataGenerator(**data_gen_args)

    print("Before Img Gen FIT")
    # Provide the same seed and keyword arguments to the fit and flow methods
    seed = 1
    # compute quantities required for featurewise normalization (std, center)
    # x_image_gen.fit(x_train, augment=True, seed=seed)  # TODO: x_train NEED to be 4 dimensional
    # y_image_gen.fit(y_train, augment=True, seed=seed)

    x_gen = x_image_gen.flow_from_directory(
        'pictures/keras_test',
        target_size=(img_width // scale_fact, img_width // scale_fact),
        batch_size=1,
        class_mode=None,  # TODO: could be "input"
        save_to_dir="pictures/keras_test/training/training",
        # save_prefix="t0_",
        subset="training",
        interpolation="lanczos",
        seed=seed)

    y_gen = y_image_gen.flow_from_directory(
        'pictures/keras_test',
        target_size=(img_width, img_width),
        batch_size=1,
        class_mode=None,  # TODO: was None
        save_to_dir="pictures/keras_test/training/validation",
        # save_prefix="t0_",
        subset="training",
        interpolation="lanczos",
        seed=seed)

    print("Before Zip")
    # combine generators into one which yields x and y together
    train_generator = itertools.zip_longest(x_gen, y_gen)

    optimizer = Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    model.compile(optimizer=optimizer, loss='mean_squared_error')

    print("Before fit_generator")
    model.fit_generator(
        train_generator,
        verbose=2,
        steps_per_epoch=12,  # equal to (number of samples in the dataset) // (batch size)
        epochs=6,
        callbacks=get_callbacks())

    run_tests(model)
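
Pairing the two `flow_from_directory` streams with `itertools.zip_longest` works here because both generators are infinite; a more explicit equivalent (a sketch) is a wrapper that yields matching batches, relying on the shared `seed` to keep inputs and targets aligned:

def paired_generator(x_gen, y_gen):
    # Both flows were built with the same seed, so the n-th batch of each
    # corresponds to the same source images (at different resolutions).
    while True:
        yield next(x_gen), next(y_gen)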
Example #15
    print_section("Setting GPU Settings")
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.compat.v1.InteractiveSession(config=config)

    print_section("Loading training, validation and test datasets")
    modal_type = "multi_modal" if multi_modal else "bmode"

    train_dataset = load_dataset("train", args.batch_size, args.num_epochs,
                                 modal_type, augment)
    validation_dataset = load_dataset("validation", args.batch_size,
                                      args.num_epochs, modal_type, augment)
    test_dataset = load_dataset("test", args.batch_size, args.num_epochs,
                                modal_type, augment)

    dt = str(datetime.today().strftime('%Y-%m-%d'))
    path_prefix = generate_path_prefix(args.model, args.batch_size,
                                       args.num_epochs, n_filters, multi_modal,
                                       augment, early_fusion, late_fusion,
                                       pe_block, dt)

    print_section("Creating model callbacks")
    callbacks = get_callbacks(path_prefix)

    print_section("Creating model on multiple GPUs")
    create_model(args.model, train_dataset, validation_dataset, test_dataset,
                 callbacks, args.batch_size, args.num_epochs, multi_modal,
                 augment, test_only, early_fusion, late_fusion, pe_block,
                 path_prefix)
        print("Test model has been loaded:", args.model_dir + '/best.h5 ...')
    else:
        if args.resume is True:
            model = load_model(args.model_dir + '/best.h5')
            print("Last best model has been loaded:",
                  args.model_dir + '/best.h5 ...')
        else:
            model = get_model(args=args)
            print("Model has been initialized ...")

    optimizer = get_optimizer(args=args)
    model.compile(loss=args.use_loss, optimizer=optimizer, metrics=['acc'])
    print(model.summary())
    if args.only_test is not True:
        train_generator, test_generator = get_data_generator(args=args)
        callbacks = get_callbacks(args=args)
        print("Start training process..")
        model.fit_generator(
            train_generator,
            steps_per_epoch=train_generator.samples // args.batch_size,
            validation_data=test_generator,
            validation_steps=test_generator.samples // args.batch_size,
            workers=args.num_workers,
            callbacks=callbacks,
            epochs=args.epochs,
        )
        model.save_weights(args.model_dir + '/final-weights.h5')
        model.save(args.model_dir + '/final.h5')
    else:
        _, test_generator = get_data_generator(args=args)
        pred = model.evaluate_generator(test_generator,
Example #17
def run():
    warnings.filterwarnings("ignore")
    FP16_PARAMS = None  # dict(opt_level="O1") 

    with open('../configs/000_PANNsCNN14.yml', 'r') as yml:
        config = yaml.safe_load(yml)
    # args = utils.get_parser().parse_args()  # specify the yaml config file via a command-line argument
    # config = utils.load_config(args.config)

    global_params = config["globals"]

    # set up the output directory
    output_dir = Path(global_params["output_dir"])
    output_dir.mkdir(exist_ok=True, parents=True)
    logger = utils.get_logger(output_dir / "output.log")  # store the log output

    utils.set_seed(global_params["seed"])  # fix the random seed
    device = C.get_device(global_params["device"])  # CPU or GPU

    # df, datadir = C.get_metadata(config)  # fetch the original metadata
    df = C.get_resampled_metadata(config)  # fetch the resampled metadata
    splitter = C.get_split(config)  # CV splitter

    for i, (trn_idx, val_idx) in enumerate(splitter.split(df, y=df["ebird_code"])):
        if i not in global_params["folds"]:  # skip folds that are not selected (effectively 1-fold CV)
            continue
            continue

        logger.info("=" * 20)
        logger.info(f"Fold {i}")
        logger.info("=" * 20)

        trn_df = df.loc[trn_idx, :].reset_index(drop=True)
        val_df = df.loc[val_idx, :].reset_index(drop=True)
        
        # build the two data loaders (train/valid) as a dict
        loaders = {
            phase: C.get_loader(df_, config, phase)
            for df_, phase in zip([trn_df, val_df], ["train", "valid"])
        }

        # build each training component from the config file
        model = models.get_model_for_train(config).to(device)
        model.att_block = models.AttBlock(2048, 264, activation='sigmoid')
        model.att_block.init_weights()
        criterion = C.get_criterion(config).to(device)
        optimizer = C.get_optimizer(model, config)
        scheduler = C.get_scheduler(optimizer, config)
        callbacks = clb.get_callbacks(config)

        # model, optimizer = amp.initialize(model, optimizer, opt_level='O1')

        # hand everything to the Catalyst runner
        runner = SupervisedRunner(
            device=device,
            input_key=global_params["input_key"],
            input_target_key=global_params["input_target_key"])

        runner.train(
            model=model,
            criterion=criterion,
            loaders=loaders,
            optimizer=optimizer,
            scheduler=scheduler,
            num_epochs=global_params["num_epochs"],
            verbose=True,
            logdir=output_dir / f"fold{i}",
            callbacks=callbacks,
            main_metric=global_params["main_metric"],
            minimize_metric=global_params["minimize_metric"],
            fp16=FP16_PARAMS)