Example #1
0
def train_net():
    """Configure and launch a 2-D ResNet classification training run.

    Reads the global hyper-parameter dict ``H``, snapshots the run
    configuration and source code into ``H['output_dir']``, builds the
    train/validation data iterators, and hands everything to ``train``.
    """
    # Set up the output directory and persist the run configuration.
    #H['output_dir'] = 'output_dir/train_dir/%s' % datetime.now().strftime('%Y_%m_%d_%H.%M')
    out_dir = H['output_dir']
    if not tf.gfile.Exists(out_dir):
        tf.gfile.MakeDirs(out_dir)
    with open(out_dir + '/config.json', 'w') as conf_file:
        json.dump(H, conf_file, indent=4)

    # Snapshot the code that produced this run, for reproducibility.
    shutil.copy('../tf_scripts/architecture/model_def/resnet2D.py',
                out_dir + '/resnet2D.py')
    shutil.copytree('../tf_scripts/architecture/', out_dir + '/architecture')
    shutil.copytree('../tf_scripts/io_modules/', out_dir + '/io_modules')

    initialize_logger(folder=out_dir)

    with tf.Graph().as_default(), tf.device('/cpu:0'):

        # Keyword arguments shared by the train and validation iterators.
        common_kwargs = dict(
            img_shape=H['image_shape'],
            label_shape=H['label_shape'],
            batch_size=H['batch_size'],
            num_preprocess_threads=4,
            shuffle=True,
        )
        train_data_iter = List_Iterator(
            H, img_lst=H['train_lst'], is_training=True, **common_kwargs)
        valid_data_iter = List_Iterator(
            H, img_lst=H['val_lst'], is_training=False, **common_kwargs)

        # Wire model, loss, metrics, iterators and summaries into H,
        # which doubles as the kwargs for train().
        H.update(
            model_graph=resnet2D,
            loss=metrics.logloss,
            metrics=None,  # e.g. metrics.logloss_rank for extra evaluation
            train_scopes=[],  # e.g. append 'logits' to fine-tune the head only
            train_iter=train_data_iter,
            valid_iter=valid_data_iter,
            train_image_summary=image_summaries.classification_image_summary,
            validation_image_summary=image_summaries.classification_image_summary,
            VARIABLES_TO_RESTORE=tf.contrib.slim.get_variables_to_restore(),
            UPDATE_OPS_COLLECTION=tf.GraphKeys.UPDATE_OPS,
        )

        train(**H)
Example #2
0
def train_net():
    """Configure and launch a U-Net segmentation training run.

    Reads the global hyper-parameter dict ``H``, snapshots the run
    configuration and source code into ``H['output_dir']``, builds the
    train/validation NPY-backed iterators, and hands everything to ``train``.
    """
    # Set up the output directory and persist the run configuration.
    #H['output_dir'] = 'output_dir/train_dir/%s' % datetime.now().strftime('%Y_%m_%d_%H.%M')
    out_dir = H['output_dir']
    if not tf.gfile.Exists(out_dir):
        tf.gfile.MakeDirs(out_dir)
    with open(out_dir + '/config.json', 'w') as conf_file:
        json.dump(H, conf_file, indent=4)
    initialize_logger(folder=out_dir)

    # Snapshot the code that produced this run, for reproducibility.
    shutil.copy('../tf_scripts/architecture/model_def/unet.py',
                out_dir + '/unet.py')
    shutil.copytree('../tf_scripts/architecture/', out_dir + '/architecture')
    shutil.copytree('../tf_scripts/io_modules/', out_dir + '/io_modules')

    with tf.Graph().as_default(), tf.device('/cpu:0'):

        # Keyword arguments shared by the train and validation iterators.
        common_kwargs = dict(
            img_shape=H['image_shape'],
            label_shape=H['label_shape'],
            batch_size=H['batch_size'],
            num_preprocess_threads=4,
            shuffle=True,
        )
        train_data_iter = NPY_construct_Iter(
            H,
            img_lst=H['train_lst'],
            npy_lst=H['train_npy_path'],
            is_training=True,
            **common_kwargs)
        valid_data_iter = NPY_construct_Iter(
            H,
            img_lst=H['val_lst'],
            npy_lst=H['val_npy_path'],
            is_training=False,
            **common_kwargs)

        # Wire model, loss, metrics, iterators and summaries into H,
        # which doubles as the kwargs for train().
        # Loss alternatives available in metrics:
        #   - jaccard_separated_channels_with_center
        #   - logloss_with_center
        #   - MSE_segmentation_with_center
        H.update(
            model_graph=unet_model.unet,
            loss=metrics.MSE_segmentation_with_center,
            metrics=metrics.MSE_and_sensitivity_on_center,
            train_scopes=[],  # e.g. append 'logits' to fine-tune the head only
            train_iter=train_data_iter,
            valid_iter=valid_data_iter,
            train_image_summary=image_summaries.segmentation_image_summary,
            validation_image_summary=image_summaries.segmentation_image_summary,
            VARIABLES_TO_RESTORE=tf.contrib.slim.get_variables_to_restore(),
            UPDATE_OPS_COLLECTION=tf.GraphKeys.UPDATE_OPS,
        )

        train(**H)