Code Example #1
File: data.py  Project: YanCote/IFT6268-simclr
    def _input_fn(params):
        """Inner input function."""
        preprocess_fn_pretrain = get_preprocess_fn(
            is_training, color_distort=True)
        preprocess_fn_finetune = get_preprocess_fn(
            is_training, color_distort=False)

        def map_fn(data_point):
            """Produces multiple transformations of the same batch."""
            image = data_point['image']
            label = data_point['label']
            if FLAGS.train_mode == 'pretrain' or FLAGS.train_mode == 'eval':
                xs = []
                for _ in range(2):  # Two transformations
                    xs.append(preprocess_fn_pretrain(image))
                image = tf.concat(xs, -1)
            else:
                image = preprocess_fn_finetune(image)
            # if FLAGS.train_mode != 'pretrain':
            if metric:
                return image, label, 1.0, data_point.get('idx')
            else:
                return image, label, 1.0  # , data_point.get('idx')
            # else:
            #     return image, label, 1.0, data_point.get('idx')

        def map_fn2(image, label, mask):
            return (image, {'labels': label, 'mask': mask})

        def map_fn3(image, label, mask, idx):
            return (image, {'labels': label, 'mask': mask, 'idx': idx})


        dataset, info = chest_xray.XRayDataSet(data_path, config=None, train=is_training)
        if FLAGS.cache_dataset:
            dataset = dataset.cache()
        if is_training:
            buffer_multiplier = 50 if FLAGS.image_size <= 32 else 10
            dataset = dataset.shuffle(params['batch_size'] * buffer_multiplier)
            dataset = dataset.repeat(-1)
        dataset = dataset.map(map_fn,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.batch(
            params['batch_size'], drop_remainder=is_training)

        if use_multi_gpus:
            dataset = pad_to_batch(dataset, params['batch_size'])
            if metric:
                dataset = dataset.map(map_fn3,
                            num_parallel_calls=tf.data.experimental.AUTOTUNE)
            else:
                dataset = dataset.map(map_fn2,
                            num_parallel_calls=tf.data.experimental.AUTOTUNE)
            return dataset
        else:
            images, labels, mask = tf.data.make_one_shot_iterator(
                dataset).get_next()
            return images, {'labels': labels, 'mask': mask}
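
A minimal usage sketch for the input function above, mirroring the call made in compute_ssl_metric() (Code Example #2). The argument order of build_chest_xray_fn is assumed from that call (use_multi_gpus, data_path, builder, is_training, metric); the import path, data path and batch size are hypothetical.

# Sketch only; with use_multi_gpus=True the inner function returns a tf.data.Dataset.
import tensorflow as tf
from data import build_chest_xray_fn  # data.py above; import path is an assumption

input_fn = build_chest_xray_fn(True, '/path/to/NIH', None, False, metric=True)
dataset = input_fn({'batch_size': 32})  # TPUEstimator normally supplies params['batch_size']
images, targets = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
# targets holds 'labels', 'mask' and, because metric=True, 'idx'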
Code Example #2
def compute_ssl_metric():
    # SSL Metric computing
    tf.compat.v1.disable_eager_execution()

    data_path = "/Users/yancote/mila/IFT6268-simclr/NIH"
    data_path = FLAGS.data_dir
    hub_path = os.path.abspath('/Users/yancote/mila/IFT6268-simclr/models/29-11-2020-01-56-45/hub')  # ("./r50_1x_sk0/hub")
    hub_path = str(Path(FLAGS.checkpoint_path) / 'hub')
    module = hub.Module(hub_path, trainable=False)

    sess = tf.compat.v1.Session()

    nb_of_patients = 100
    features = {}

    chest_xray_dataset = build_chest_xray_fn(True, data_path, None, False,metric=True)({'batch_size': 100}).prefetch(1)
    _, info = chest_xray.XRayDataSet(data_path, train=False)
    chest_xray_dataset_itr = tf.compat.v1.data.make_one_shot_iterator(chest_xray_dataset)
    x = chest_xray_dataset_itr.get_next()
    chest_xray_dataset_init = chest_xray_dataset_itr.make_initializer(chest_xray_dataset)
    # Build the graph once, outside the run loop: split the two concatenated
    # augmented views and feed each through the frozen hub module.
    x1, x2 = tf.split(x[0], 2, -1)
    feat1_t, feat2_t, idx_t = module(x1), module(x2), x[1].get('idx')
    with sess.as_default():
        sess.run(tf.compat.v1.global_variables_initializer())
        for step in range(10):
            # Stop once we have nb_of_patients patients.
            if len(features) >= nb_of_patients:
                break
            feat1, feat2, idx_s = sess.run(fetches=(feat1_t, feat2_t, idx_t))
            for i in range(feat1.shape[0]):
                if len(features) >= nb_of_patients:
                    break
                idx = idx_s[i].decode("utf-8").split("_")[0]
                # Only add a single example for each patient.
                if features.get(idx) is None:
                    features[idx] = []
                    features[idx].append(feat1[i])
                    features[idx].append(feat2[i])

    # Save the hard work
    output = os.path.abspath("./eval_ssl/model_out")
    Path(output).mkdir(parents=True, exist_ok=True)
    file_name = os.path.join(output, 'outputs_{}.pickle'.format("test"))
    print("Saving outputs in: {}".format(file_name))
    # with open(file_name, 'wb') as handle:
    #     pickle.dump(features, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # Calculate mAP
    print("Calculating mAPs...")
    mAPs, quantiles = test_tools.test_mAP_outputs(epochs=[features], with_filepath=False)

    mAP_dict = {
        "P_mAP": mAPs[0][0],
        "P_mAP_var": mAPs[0][1],
        "P_mAP_quant_20p": quantiles[0][0],
        "P_mAP_median": quantiles[0][1],
        "P_mAP_quant_80p": quantiles[0][2]
    }
    pickle.dump(mAP_dict, open(os.path.join(FLAGS.checkpoint_path, 'mAP_result.p'), "wb"))
    print("mAP: {}, var: {}, quantiles 0.2: {}, median: {}, 0.8: {}".format(
            mAPs[0][0], mAPs[0][1], quantiles[0][0], quantiles[0][1], quantiles[0][2]), \
        file=open(os.path.join(FLAGS.checkpoint_path, 'mAP_result.txt'), 'w'))

    if mAPs is not None:
        print("\nResults:")
        print("mAP: {}, var: {}, quantiles 0.2: {}, median: {}, 0.8: {}".format(
            mAPs[0][0], mAPs[0][1], quantiles[0][0], quantiles[0][1], quantiles[0][2]))
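
The metrics are pickled to mAP_result.p in the checkpoint directory; a small sketch of reading them back (the directory is a placeholder for FLAGS.checkpoint_path, the key names are those of mAP_dict above):

import os
import pickle

checkpoint_path = '/path/to/checkpoint_dir'  # placeholder
with open(os.path.join(checkpoint_path, 'mAP_result.p'), 'rb') as f:
    result = pickle.load(f)
print(result['P_mAP'], result['P_mAP_var'], result['P_mAP_median'])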
Code Example #3
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    if FLAGS.create_hub:
        app.run(create_module_from_checkpoints)
    else:

        if not os.path.exists(FLAGS.model_dir):
            os.makedirs(FLAGS.model_dir)
            print("Created directory: {0}".format(os.path.abspath(FLAGS.model_dir)))    
                
        # Enable training summary.
        if FLAGS.train_summary_steps > 0:
            tf.config.set_soft_device_placement(True)

        # Choose dataset. 
        if FLAGS.dataset == "chest_xray":
            # Not really a builder, but it's compatible
            # TODO config
            #data_path = FLAGS.local_tmp_folder
            data_path = FLAGS.data_dir
            data_split = FLAGS.train_data_split
            print(f"***********************************************************************************")
            print("")
            print(f"DANGER WARNING ON SPLIT -> XRAY Data split:{data_split} SHOULD BE 0.9")
            print("")
            print(f"***********************************************************************************")

            builder, info = chest_xray.XRayDataSet(data_path, config=None, train=True, return_tf_dataset=False, split=data_split)
            build_input_fn = partial(data_lib.build_chest_xray_fn, FLAGS.use_multi_gpus, data_path)
            num_train_examples = info.get('num_examples')
            num_classes = info.get('num_classes')
            num_eval_examples = info.get('num_eval_examples')
            print(f"num_train_examples:{num_train_examples}, num_eval_examples:{num_eval_examples}")
        else:
            builder = tfds.builder(FLAGS.dataset, data_dir=FLAGS.data_dir)
            builder.download_and_prepare()
            num_train_examples = builder.info.splits[FLAGS.train_split].num_examples
            num_eval_examples = builder.info.splits[FLAGS.eval_split].num_examples
            num_classes = builder.info.features['label'].num_classes
            build_input_fn = data_lib.build_input_fn

        train_steps = model_util.get_train_steps(num_train_examples)
        eval_steps = int(math.ceil(num_eval_examples / FLAGS.eval_batch_size))
        epoch_steps = int(round(num_train_examples / FLAGS.train_batch_size))

        resnet.BATCH_NORM_DECAY = FLAGS.batch_norm_decay
        model = resnet.resnet_v1(
            resnet_depth=FLAGS.resnet_depth,
            width_multiplier=FLAGS.width_multiplier,
            cifar_stem=FLAGS.image_size <= 32)

        checkpoint_steps = (
            FLAGS.checkpoint_steps or (FLAGS.checkpoint_epochs * epoch_steps))

        cluster = None
        if FLAGS.use_tpu and FLAGS.master is None:
            if FLAGS.tpu_name:
                cluster = tf.distribute.cluster_resolver.TPUClusterResolver(
                    FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
            else:
                cluster = tf.distribute.cluster_resolver.TPUClusterResolver()
            tf.config.experimental_connect_to_cluster(cluster)
            tf.tpu.experimental.initialize_tpu_system(cluster)

        strategy = tf.distribute.MirroredStrategy() if not FLAGS.use_tpu and FLAGS.use_multi_gpus else None # Multi GPU?
        print("use_multi_gpus: {0}".format(FLAGS.use_multi_gpus))
        print("use MirroredStrategy: {0}".format(not FLAGS.use_tpu and FLAGS.use_multi_gpus))
        default_eval_mode = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V1
        sliced_eval_mode = tf.estimator.tpu.InputPipelineConfig.SLICED
        run_config = tf.estimator.tpu.RunConfig(
            tpu_config=tf.estimator.tpu.TPUConfig(
                iterations_per_loop=checkpoint_steps,
                eval_training_input_configuration=sliced_eval_mode
                if FLAGS.use_tpu else default_eval_mode),
            model_dir=FLAGS.model_dir,
            train_distribute=strategy, 
            eval_distribute=strategy,
            save_summary_steps=checkpoint_steps,
            save_checkpoints_steps=checkpoint_steps,
            keep_checkpoint_max=FLAGS.keep_checkpoint_max,
            master=FLAGS.master,
            cluster=cluster)
        estimator = tf.estimator.tpu.TPUEstimator(
            model_lib.build_model_fn(model, num_classes, num_train_examples, FLAGS.train_batch_size),
            config=run_config,
            train_batch_size=FLAGS.train_batch_size,
            eval_batch_size=FLAGS.eval_batch_size,
            use_tpu=FLAGS.use_tpu)
            

        # save flags for this experiment

        pickle.dump(FLAGS.flag_values_dict(), open(os.path.join(FLAGS.model_dir, 'experiment_flags.p'), "wb"))
        FLAGS.append_flags_into_file(os.path.join(FLAGS.model_dir, 'experiment_flags.txt'))


        # Train/Eval
        if FLAGS.mode == 'eval':
            for ckpt in tf.train.checkpoints_iterator(
                    run_config.model_dir, min_interval_secs=15):
                try:
                    result = perform_evaluation(
                        estimator=estimator,
                        input_fn=build_input_fn(builder, False),
                        eval_steps=eval_steps,
                        model=model,
                        num_classes=num_classes,
                        checkpoint_path=ckpt)
                except tf.errors.NotFoundError:
                    continue
                if result['global_step'] >= train_steps:
                    return
        else:
            estimator.train(
                build_input_fn(builder, True), max_steps=train_steps)
            if FLAGS.mode == 'train_then_eval':
                perform_evaluation(
                    estimator=estimator,
                    input_fn=build_input_fn(builder, False),
                    eval_steps=eval_steps,
                    model=model,
                    num_classes=num_classes)
        # Save the Hub in all cases
        # app.run(create_module_from_checkpoints)
        create_module_from_checkpoints(argv)

        # Compute SSL metric:
        if FLAGS.compute_ssl_metric:
            compute_ssl_metric()
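
For intuition on the step bookkeeping in main() (epoch_steps, eval_steps, checkpoint_steps), here is the same arithmetic with purely made-up numbers:

import math

# Hypothetical sizes, for illustration only.
num_train_examples = 100_000
num_eval_examples = 10_000
train_batch_size = 128
eval_batch_size = 128
checkpoint_epochs = 1

epoch_steps = int(round(num_train_examples / train_batch_size))   # 781
eval_steps = int(math.ceil(num_eval_examples / eval_batch_size))  # 79
checkpoint_steps = checkpoint_epochs * epoch_steps                # checkpoint/summary once per epoch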
Code Example #4
File: finetuning.py  Project: YanCote/IFT6268-simclr
def train(args, yml_config):
    with strategy.scope():

        # @title Load tensorflow datasets: we use the tensorflow flower dataset as an example
        batch_size = yml_config['finetuning']['batch']
        buffer_size = yml_config['finetuning']['buffer_size']
        dataset_name = yml_config['data_src']

        if dataset_name == 'tf_flowers':
            tfds_dataset, tfds_info = tfds.load(dataset_name,
                                                split='train',
                                                with_info=True)
            num_images = tfds_info.splits['train'].num_examples
            num_classes = tfds_info.features['label'].num_classes

            x = tfds_dataset.map(_preprocess).batch(batch_size)
            x = tf1.data.make_one_shot_iterator(x).get_next()

        elif dataset_name == 'chest_xray':
            if args.xray_path == '':
                data_path = yml_config['dataset']['chest_xray']
            else:
                data_path = args.xray_path
            train_dataset, tfds_info = chest_xray.XRayDataSet(data_path,
                                                              config=None,
                                                              train=True)
            num_images = int(np.floor(
                yml_config['finetuning']['train_data_ratio'] *
                tfds_info['num_examples']))
            num_classes = tfds_info['num_classes']

        print(f"Training: {num_images} images...")

        def _preprocess(x):
            x['image'] = preprocess_image(x['image'],
                                          224,
                                          224,
                                          is_training=False,
                                          color_distort=False)
            return x

        x_ds = train_dataset \
            .take(num_images) \
            .map(_preprocess, deterministic=False) \
            .shuffle(buffer_size)\
            .batch(yml_config['finetuning']['batch'])

        x_iter = tf1.data.make_one_shot_iterator(x_ds)
        x_init = x_iter.make_initializer(x_ds)
        x = x_iter.get_next()

        print(f"{type(x)} {type(x['image'])} {x['image']} {x['label']}")
        # @title Load module and construct the computation graph
        learning_rate = yml_config['finetuning']['learning_rate']
        momentum = yml_config['finetuning']['momentum']
        weight_decay = yml_config['finetuning']['weight_decay']
        epoch_save_step = yml_config['finetuning']['epoch_save_step']
        load_saver = yml_config['finetuning'].get('load_ckpt')

        # Load the base network and set it to non-trainable (for speedup fine-tuning)
        hub_path = str(
            Path(yml_config['finetuning']['pretrained_build']).resolve())
        hub_path = os.path.join(hub_path, 'hub')
        module = hub.Module(hub_path,
                            trainable=yml_config['finetuning']['train_resnet'])

        if yml_config['finetuning']['pretrained_model'] == 'ChestXRay':
            key = module(inputs=x['image'],
                         signature="projection-head-1",
                         as_dict=True)
        else:
            key = module(inputs=x['image'], as_dict=True)

        # Attach a trainable linear layer to adapt for the new task.
        if dataset_name == 'tf_flowers':
            with tf1.variable_scope('head_supervised_new',
                                    reuse=tf1.AUTO_REUSE):
                logits_t = tf1.layers.dense(inputs=key['default'],
                                            units=num_classes,
                                            name='proj_head')
            loss_t = tf1.reduce_mean(
                input_tensor=tf1.nn.softmax_cross_entropy_with_logits(
                    labels=tf1.one_hot(x['label'], num_classes),
                    logits=logits_t))
        elif dataset_name == 'chest_xray':
            with tf1.variable_scope('head_supervised_new',
                                    reuse=tf1.AUTO_REUSE):
                logits_t = tf1.layers.dense(inputs=key['default'],
                                            units=num_classes)
                cross_entropy = weighted_cel(labels=x['label'],
                                             logits=logits_t,
                                             bound=3.0)
                loss_t = tf1.reduce_mean(tf1.reduce_sum(cross_entropy, axis=1))

        # Setup optimizer and training op.
        if yml_config['finetuning']['optimizer'] == 'adam':
            optimizer = tf1.train.AdamOptimizer(learning_rate)
        elif yml_config['finetuning']['optimizer'] == 'lars':
            optimizer = LARSOptimizer(learning_rate,
                                      momentum=momentum,
                                      weight_decay=weight_decay,
                                      exclude_from_weight_decay=[
                                          'batch_normalization', 'bias',
                                          'head_supervised'
                                      ])
        else:
            raise RuntimeError("Optimizer not supported")

        variables_to_train = tf1.trainable_variables()
        train_op = optimizer.minimize(
            loss_t,
            global_step=tf1.train.get_or_create_global_step(),
            var_list=variables_to_train)

        print('Variables to train:', variables_to_train)

        # Add ops to save and restore all the variables.
        sess = tf1.Session()
        Saver = tf1.train.Saver()  # Default saves all variables
        current_time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
        directory = Path(args.output_dir)

        is_time_to_save_session = partial(model_ckpt.save_session,
                                          epoch_save_step,
                                          Saver,
                                          output=directory)
        if load_saver is not None:
            Saver.restore(sess, load_saver)
        else:
            sess.run(tf1.global_variables_initializer())

        # @title We fine-tune the new *linear layer* for just a few iterations.
        epochs = yml_config['finetuning']['epochs']

        # ===============Tensor board section ===============
        # with tf.name_scope('performance'):
        # tf_labels = tf1.placeholder(tf.int32, shape=[batch_size,num_classes], name='accuracy')
        tf_tot_acc_all_ph = tf1.placeholder(tf.float32,
                                            shape=None,
                                            name='accuracy_all_labels_ph')
        tf_tot_acc_all_summary = tf1.summary.scalar('accuracy_all_labels',
                                                    tf_tot_acc_all_ph)
        tf_tot_acc_per_class_ph = tf1.placeholder(tf.float32,
                                                  shape=None,
                                                  name='accuracy_per_class_ph')
        tf_tot_acc_per_class_summary = tf1.summary.scalar(
            'accuracy_per_class', tf_tot_acc_per_class_ph)
        tf_tot_acc_class_avg_ph = tf1.placeholder(
            tf.float32, shape=None, name='accuracy_per_class_averaged_ph')
        tf_tot_acc_class_avg_summary = tf1.summary.scalar(
            'accuracy_per_class_averaged', tf_tot_acc_class_avg_ph)
        tf_train_tot_loss_ph = tf1.placeholder(tf.float32,
                                               shape=None,
                                               name='train_tot_loss')
        tf_train_tot_loss_summary = tf1.summary.scalar('train_tot_loss',
                                                       tf_train_tot_loss_ph)
        tf_tot_auc_ph = tf1.placeholder(tf.float32, shape=None, name='auc_ph')
        tf_tot_auc_ph_summary = tf1.summary.scalar('auc', tf_tot_auc_ph)

        performance_summaries = tf1.summary.merge([
            tf_tot_acc_all_summary, tf_tot_acc_class_avg_summary,
            tf_train_tot_loss_summary, tf_tot_auc_ph_summary
        ])

        hyper_param = []
        print(
            f"yml_config[pretrained_build]= {yml_config['finetuning']['pretrained_build']} "
        )
        for item in yml_config['finetuning']:
            hyper_param.append(
                tf1.summary.text(
                    str(item),
                    tf.constant(str(yml_config['finetuning'][item])),
                    'HyperParam'))

        summ_writer = tf1.summary.FileWriter(directory / 'tb', sess.graph)
        tf.summary.record_if(yml_config['tensorboard'])
        # Limit the precision of floats...
        np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
        with sess.as_default() as scope:
            if yml_config['mlflow']:

                # log params in MLFLOW
                if args.mlflow_dir is None:
                    mlflow.set_tracking_uri(yml_config['mlflow_path'])
                else:
                    mlflow.set_tracking_uri(args.mlflow_dir)

                mlflow.set_experiment('results')
                mlflow.start_run()
                # open pickle file that contains the hyper params of pretuned
                fname = os.path.join(
                    yml_config['finetuning']['pretrained_build'],
                    'experiment_flags.p')
                if os.path.exists(fname):
                    with open(fname, 'rb') as f:
                        pretuned_params = pickle.load(f)
                    pretuned_params = {
                        'P-' + str(key).replace('/', '').replace(
                            '?', '').replace('$', ''): val
                        for key, val in pretuned_params.items()
                    }
                    mlflow.log_params(pretuned_params)

                # open pickle file that contains the hyper params of pretuned
                fname = os.path.join(
                    yml_config['finetuning']['pretrained_build'],
                    'mAP_result.p')
                if os.path.exists(fname):
                    with open(fname, 'rb') as f:
                        pretuned_metric = pickle.load(f)
                    mlflow.log_metrics(pretuned_metric)

                finetuned_params = {
                    'F-' + str(key).replace('/', ''): val
                    for key, val in yml_config['finetuning'].items()
                }

                mlflow.log_param('TB_Timestamp', current_time)

                mlflow.log_params(finetuned_params)

            fname = os.path.join(directory, 'finetuning_hyper_params.txt')
            with open(fname, 'w') as f:
                for key, value in yml_config['finetuning'].items():
                    f.write('%s:%s\n' % (key, value))

            writer = tf1.summary.FileWriter('./log', sess.graph)
            for index, summary_op in enumerate(hyper_param):
                text = sess.run(summary_op)
                summ_writer.add_summary(text, index)

            n_iter = int(num_images / batch_size)
            print(f"Batch:{batch_size}, n_iter:{n_iter} ")

            # =============== Main Loop (epoch) - START ===============
            for it in range(epochs):
                start_time_epoch = time.time()
                # Init dataset iterator
                sess.run(x_init)
                # Accuracy all = All class must be Correct
                # Accuracy per class = Score for each class
                # Accuracy class average: the average of the accuracy per class
                tot_acc_all = 0.0
                tot_acc_per_class = 0.0
                tot_acc_class_avg = 0.0
                train_tot_loss = 0.0
                epoch_acc_all = 0.0
                epoch_acc_per_class = 0.0
                epoch_acc_class_avg = 0.0
                #show_one_image(x['image'][0].eval())

                # =============== Main Loop (iteration) - START ===============
                all_labels = []
                all_logits = []
                for step in range(n_iter):

                    start_time_iter = time.time()
                    _, loss, image, logits, labels = sess.run(
                        fetches=(train_op, loss_t, x['image'], logits_t,
                                 x['label']))
                    # tf_labels = tf.convert_to_tensor(labels)
                    train_tot_loss += loss
                    all_labels.extend(labels)
                    if dataset_name == 'tf_flowers':
                        pred = logits.argmax(-1)
                        correct = np.sum(pred == labels)
                        acc_per_class = np.array([correct / float(batch_size)])
                    elif dataset_name == 'chest_xray':
                        # # New compute
                        logits_sig = scipy.special.expit(logits)
                        all_logits.extend(logits_sig)
                        pred = (logits_sig > 0.5).astype(np.float32)
                        acc_all = np.mean(
                            np.min(np.equal(pred, labels).astype(np.float32),
                                   axis=1))
                        acc_per_class = np.mean(np.equal(pred, labels).astype(
                            np.float32),
                                                axis=0)
                        acc_class_avg = np.mean(acc_per_class)
                        tot_acc_all += acc_all
                        tot_acc_per_class += acc_per_class
                        tot_acc_class_avg += acc_class_avg

                    # roc_auc_score can raise ValueError ("Only one class present in
                    # y_true. ROC AUC score is not defined in that case") when a label
                    # column contains only one class in the batch so far, e.g. when every
                    # sample seen is positive for Hernia.
                    try:
                        auc_cum = roc_auc_score(np.array(all_labels),
                                                np.array(all_logits))
                    except ValueError:
                        auc_cum = None

                    current_time_iter = time.time()
                    elapsed_time_iter = current_time_iter - start_time_iter

                    if yml_config['finetuning']['verbose_train_loop']:
                        print(
                            f"[Epoch {it + 1}/{epochs} Iter: {step}/{n_iter}] Model: {yml_config['finetuning']['pretrained_model']}, Total Loss: {train_tot_loss} Loss: {np.float32(loss)}"  # Batch Acc: {np.float32(acc_all)} "
                            f" AUC Cumulative: {auc_cum}")
                        print(f"Finished iteration:{step} in: " +
                              str(int(elapsed_time_iter)) + " sec")

                    # Break if the logits explode.
                    if np.isnan(np.sum(logits)):
                        print("Loss has exploded: NaN")
                        break

                epoch_acc_all = (tot_acc_all / n_iter)
                epoch_acc_per_class = (tot_acc_per_class / n_iter)
                epoch_acc_class_avg = (tot_acc_class_avg / n_iter)

                try:
                    epoch_auc = roc_auc_score(np.array(all_labels),
                                              np.array(all_logits),
                                              average=None)
                    epoch_auc_mean = epoch_auc.mean()
                    aucs = dict(zip(chest_xray.XR_LABELS.keys(), epoch_auc))
                    auc_scores = {
                        'AUC ' + str(key): val
                        for key, val in aucs.items()
                    }

                except ValueError:
                    epoch_auc = None
                    epoch_auc_mean = None

                print(
                    f"[Epoch {it + 1}/{epochs} Model: {yml_config['finetuning']['pretrained_model']}, Loss: {train_tot_loss} "
                    f" Train AUC: {epoch_auc_mean} AOC/Class {epoch_auc},")

                # Is it time to save the session?
                is_time_to_save_session(it, sess)

                current_time_epoch = time.time()
                elapsed_time_iter = current_time_epoch - start_time_epoch
                print(f"Finished EPOCH:{it + 1} in: " +
                      str(int(elapsed_time_iter)) + " sec")

                # ===================== Write Tensorboard summary ===============================
                # Execute the summaries defined above

                summ = sess.run(performance_summaries,
                                feed_dict={
                                    tf_tot_acc_all_ph: epoch_acc_all,
                                    tf_tot_acc_class_avg_ph:
                                    epoch_acc_class_avg,
                                    tf_train_tot_loss_ph: train_tot_loss,
                                    tf_tot_auc_ph: epoch_auc_mean
                                })

                # Write the obtained summaries to the file, so it can be displayed in the TensorBoard
                summ_writer.add_summary(summ, it)

                # =============== Main Loop (epoch) - END ===============

            print(f"Training Done")

            if yml_config['mlflow']:
                mlflow.log_metric('Total Train Accuracy', epoch_acc_all)
                mlflow.log_metric('Total Train Accuracy per class',
                                  np.mean(epoch_acc_per_class))
                mlflow.log_metric('Total Train Loss', train_tot_loss)
                if epoch_auc is not None:
                    mlflow.log_metrics(auc_scores)

            fname_final = str(directory / 'final.ckpt')
            ckpt_pt = Saver.save(sess=sess, save_path=fname_final)
            print(f"Final checkpoint saved in {fname_final}")
            return directory
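
train() only reads yml_config through the keys used above; a guessed, minimal configuration dict with exactly those keys and placeholder values (not the repository's real defaults) would look like this:

yml_config = {
    'data_src': 'chest_xray',
    'dataset': {'chest_xray': '/path/to/NIH'},        # hypothetical path
    'tensorboard': True,
    'mlflow': False,
    'mlflow_path': './mlruns',
    'finetuning': {
        'batch': 32,
        'buffer_size': 1000,
        'train_data_ratio': 0.9,
        'learning_rate': 1e-3,
        'momentum': 0.9,
        'weight_decay': 1e-6,
        'epochs': 10,
        'epoch_save_step': 5,
        'optimizer': 'adam',                          # or 'lars'
        'pretrained_model': 'ChestXRay',
        'pretrained_build': './models/<run-id>',      # folder that contains hub/
        'train_resnet': False,
        'load_ckpt': None,
        'verbose_train_loop': True,
    },
}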
Code Example #5
def evaluation(yml_config, args, module_path=None):

    if module_path is None:
        hub_path = os.path.abspath(
            yml_config['inference']['pretrained_hub_path'])
    else:
        hub_path = module_path
    module = hub.Module(hub_path, trainable=False)
    sess = tf1.Session()

    # TODO: verify that we are using the test dataset.
    if args.xray_path == '':
        data_path = yml_config['dataset']['chest_xray']
    else:
        data_path = args.xray_path
    test_dataset, tfds_info = chest_xray.XRayDataSet(data_path,
                                                     config=None,
                                                     train=False)
    num_images = int(np.floor(yml_config['finetuning']['eval_data_ratio'] *
                              tfds_info['num_eval_examples']))
    assert yml_config['finetuning']['eval_data_ratio']
    num_classes = tfds_info['num_classes']
    batch_size = yml_config['inference']['batch']

    n_iter = int(num_images / batch_size)

    def _preprocess(x):
        x['image'] = preprocess_image(x['image'],
                                      224,
                                      224,
                                      is_training=False,
                                      color_distort=False)
        return x

    x_ds = test_dataset \
        .take(num_images) \
        .map(_preprocess, deterministic=False) \
        .batch(batch_size)

    x_iter = tf1.data.make_one_shot_iterator(x_ds)
    x_init = x_iter.make_initializer(x_ds)
    x = x_iter.get_next()

    key = module(x['image'], as_dict=True)
    #cross_entropy = tf.nn.weighted_cross_entropy_with_logits(labels=x['label'], logits=key['default'],  pos_weight=yml_config['finetuning']['pos_weight_loss'])
    cross_entropy = weighted_cel(labels=x['label'],
                                 logits=key['default'],
                                 bound=3.0)
    loss = tf1.reduce_mean(tf1.reduce_sum(cross_entropy, axis=1))

    with sess.as_default():
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        all_labels = []
        all_logits = []
        val_tot_loss = 0
        for step in range(n_iter):
            _loss, logits, labels = sess.run(fetches=(loss, key['default'],
                                                      x['label']))
            logits_sig = scipy.special.expit(logits)
            all_logits.extend(logits_sig)
            all_labels.extend(labels)
            val_tot_loss += _loss

            try:
                auc_cum = roc_auc_score(np.array(all_labels),
                                        np.array(all_logits))
            except ValueError:
                auc_cum = None

            #if yml_config['finetuning']['verbose_train_loop']:
            print(
                f" [Iter: {step}/{n_iter}] Total Loss: {val_tot_loss} Loss: {np.float32(_loss)}  AUC Cumulative: {auc_cum}"
            )

        val_tot_loss_mean = val_tot_loss / n_iter
        try:
            epoch_auc = roc_auc_score(np.array(all_labels),
                                      np.array(all_logits),
                                      average=None)
            epoch_auc_mean = epoch_auc.mean()
            aucs = dict(zip(chest_xray.XR_LABELS.keys(), epoch_auc))
            auc_scores = {
                'T-AUC ' + str(key): val
                for key, val in aucs.items()
            }

        except ValueError:
            epoch_auc = None
            epoch_auc_mean = None

        if yml_config['mlflow']:
            mlflow.log_metric('Total Test Loss', val_tot_loss)
            mlflow.log_metric('Avg Test Loss', val_tot_loss_mean)

            if epoch_auc is not None:
                mlflow.log_metrics(auc_scores)
                mlflow.log_metric('Avg Test AUC', epoch_auc_mean)

        print(
            f"Validation Done! Model: {yml_config['finetuning']['pretrained_model']}, Total Loss: {val_tot_loss}, Mean Loss: {val_tot_loss_mean},"
            f" Validation AUC: {epoch_auc_mean} AOC/Class {epoch_auc},")
Code Example #6
File: inference.py  Project: YanCote/IFT6268-simclr
if dataset_name == 'tf_flowers':
    tfds_dataset, tfds_info = tfds.load(
        dataset_name, split='train', with_info=True)
    num_images = tfds_info.splits['train'].num_examples
    num_classes = tfds_info.features['label'].num_classes
    x = tfds_dataset.map(_preprocess).batch(batch_size)
    x = tf.data.make_one_shot_iterator(x).get_next()

elif dataset_name == 'chest_xray':
    if args.xray_path == '':
        data_path = yml_config['dataset']['chest_xray']
    else:
        data_path = args.xray_path
    train_dataset, tfds_info = chest_xray.XRayDataSet(data_path,
                                                      train_ratio=yml_config['finetuning']['train_data_ratio'],
                                                      config=None, train=True)
    num_images = tfds_info['num_examples']
    num_classes = tfds_info['num_classes']
    print(f"Training: {num_images} images...")

    x_ds = train_dataset \
        .take(num_images) \
        .map(_preprocess, deterministic=False) \
        .batch(batch_size)

    x_iter = tf.compat.v1.data.make_one_shot_iterator(x_ds)
    x_init = x_iter.make_initializer(x_ds)
    x = x_iter.get_next()
    print(f"{type(x)} {type(x['image'])} {x['image']} {x['label']}")
Code Example #7
def evaluation(yml_config, module_path=None, FLAGS=None):
    tf1.keras.backend.clear_session()
    data_path = FLAGS.xray_path
    test_dataset, tfds_info = chest_xray.XRayDataSet(data_path, config=None, train=False)
    num_images = int(np.floor(yml_config['finetuning']['eval_data_ratio'] * tfds_info['num_eval_examples']))
    assert yml_config['finetuning']['eval_data_ratio']
    num_classes = tfds_info['num_classes']
    batch_size = yml_config['inference']['batch']

    n_iter = int(num_images / batch_size)

    def _preprocess(x):
        x['image'] = preprocess_image(
            x['image'], 224, 224, is_training=False, color_distort=False)
        return x

    x_ds = test_dataset \
        .take(num_images) \
        .map(_preprocess, deterministic=False) \
        .batch(batch_size)

    x_iter = tf1.data.make_one_shot_iterator(x_ds)
    x_init = x_iter.make_initializer(x_ds)
    x = x_iter.get_next()
    resnet.BATCH_NORM_DECAY = FLAGS.batch_norm_decay
    model = resnet.resnet_v1(resnet_depth=50, width_multiplier=1, cifar_stem=False)  # 224x224 inputs, so no CIFAR stem


    key = model(x['image'], False)
    
    # Attach a trainable linear layer to adapt for the new task.
    with tf1.variable_scope('head_supervised_new', reuse=tf1.AUTO_REUSE):
        logits_t = tf1.layers.dense(inputs=key, units=14)
    
    cross_entropy = weighted_cel(labels=x['label'], logits=logits_t, bound = 3.0)
    loss_t = tf1.reduce_mean(tf1.reduce_sum(cross_entropy, axis=1))

    sess = tf1.Session()
    Saver = tf1.train.Saver() # Default saves all variables
    load_saver = module_path
    Saver.restore(sess, load_saver)

    with sess.as_default():
        all_labels = []
        all_logits = []
        val_tot_loss = 0
        for step in range(n_iter): 
            _loss, logits, labels = sess.run(fetches=(loss_t, logits_t, x['label']))
            logits_sig = scipy.special.expit(logits)
            all_logits.extend(logits_sig)
            all_labels.extend(labels)
            val_tot_loss += _loss

            try:
                auc_cum = roc_auc_score(np.array(all_labels),np.array(all_logits))
            except ValueError:
                auc_cum = None

            print(f" [Iter: {step}/{n_iter}] Total Loss: {val_tot_loss} Loss: {np.float32(_loss)}  AUC Cumulative: {auc_cum}")


        val_tot_loss_mean = val_tot_loss / n_iter
        try:
            epoch_auc = roc_auc_score(np.array(all_labels), np.array(all_logits), average=None)
            epoch_auc_mean = epoch_auc.mean()
            aucs = dict(zip(chest_xray.XR_LABELS.keys(), epoch_auc))
            auc_scores = {'T-AUC ' + str(key): val for key, val in aucs.items()}

        except ValueError:
            epoch_auc = None
            epoch_auc_mean = None
        
        if yml_config['mlflow']:
            mlflow.log_metric('Total Test Loss',val_tot_loss)
            mlflow.log_metric('Avg Test Loss',val_tot_loss_mean )

            if epoch_auc is not None:
                mlflow.log_metrics(auc_scores)
                mlflow.log_metric('Avg Test AUC', epoch_auc_mean)


        print(f"Validation Done! Model: {yml_config['finetuning']['pretrained_model']}, Total Loss: {val_tot_loss}, Mean Loss: {val_tot_loss_mean},"
                f" Validation AUC: {epoch_auc_mean} AOC/Class {epoch_auc},")