Code example #1
File: get_weights.py  Project: WN1695173791/examples
# Imports assumed for this snippet (TF 1.x with tf.keras). convert_ckpt_to_fp16
# is a project-local helper, defined elsewhere in the repository, that rewrites
# a float32 checkpoint as float16 and returns a tf.train.Saver for it.
import tarfile
from pathlib import Path

import tensorflow as tf
from tensorflow.keras import backend as keras_backend
from tensorflow.keras.applications import (DenseNet121, InceptionV3, MobileNet,
                                           MobileNetV2, NASNetMobile, ResNet50,
                                           Xception)
from tensorflow.keras.utils import get_file
def get_weights(save_dir: Path, model_name: str, dtype: str) -> str:
    """Download pre-trained imagenet weights for model.

    Args:
        save_dir: Path to where checkpoint must be downloaded.
        model_name: Type of image classification model, must be one of
        ("GoogleNet", "InceptionV1", "MobileNet", "MobileNetV2", "NASNetMobile", "DenseNet121",
         "ResNet50", "Xception", "InceptionV3") in all lower case.
        dtype: Data type of the network.

    Returns: Path to checkpoint file.

    """
    if isinstance(save_dir, str):
        save_dir = Path(save_dir)
    g = tf.Graph()
    with tf.Session(graph=g) as sess:
        keras_backend.set_floatx(dtype)
        keras_backend.set_session(sess)
        if model_name == "mobilenet":
            MobileNet(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "mobilenetv2":
            MobileNetV2(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "nasnetmobile":
            NASNetMobile(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "densenet121":
            DenseNet121(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "resnet50":
            ResNet50(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "xception":
            Xception(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name == "inceptionv3":
            InceptionV3(weights='imagenet')
            saver = tf.train.Saver()
        elif model_name in ("googlenet", "inceptionv1"):
            tar_file = get_file(
                fname='inceptionv1_tar.gz',
                origin='http://download.tensorflow.org/models/'
                       'inception_v1_2016_08_28.tar.gz')
            with tarfile.open(tar_file) as tar_file_reader:
                tar_file_reader.extractall(save_dir)
            if dtype == 'float16':
                saver = convert_ckpt_to_fp16(
                    Path(save_dir, 'inception_v1.ckpt').as_posix())
                sess.run(tf.global_variables_initializer())
            else:
                # The extracted checkpoint is already float32: return it
                # directly. (Without this branch, saver would be unbound on
                # the float32 path and the final save would fail.)
                return Path(save_dir, 'inception_v1.ckpt').as_posix()
        else:
            raise ValueError(
                "Requested model type = %s not one of ['GoogleNet', "
                "'InceptionV1', 'MobileNet', 'MobileNetV2', 'NASNetMobile', "
                "'DenseNet121', 'ResNet50', 'Xception', 'InceptionV3']."
                % model_name)
        save_dir.mkdir(parents=True, exist_ok=True)
        return saver.save(sess,
                          Path(save_dir, f"{model_name}.ckpt").as_posix())
Code example #2
def main(argv=None):

    # Set test phase
    K.set_learning_phase(0)

    # Set float default
    K.set_floatx('float32')

    handle_train_dir(FLAGS.train_dir)

    _print_header()

    surgery()
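K.set_learning_phase(0) above pins Keras to inference mode, so stochastic layers such as Dropout are inactive (handle_train_dir, _print_header and surgery are project-local helpers not shown here). A minimal, self-contained sketch of the learning-phase effect, using an invented toy model:

import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.models import Model

K.set_learning_phase(0)  # test phase: Dropout acts as a no-op

inputs = Input(shape=(4,))
model = Model(inputs, Dropout(0.5)(inputs))

x = np.ones((1, 4), dtype='float32')
# Identical outputs on repeated calls: no dropout noise in test phase.
print(model.predict(x))
print(model.predict(x))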
Code example #3
def main(argv=None):

    K.set_floatx('float32')

    print_flags(FLAGS)

    # Read and/or prepare the test config dictionary
    if FLAGS.test_config_file:
        with open(FLAGS.test_config_file, 'r') as yml_file:
            test_config = yaml.load(yml_file, Loader=yaml.FullLoader)
    else:
        test_config = {}
    test_config = prepare_test_config(test_config, FLAGS)

    # Load model
    model = load_model(FLAGS.model)

    # Open HDF5 file containing the data set and get images and labels
    hdf5_file = h5py.File(FLAGS.data_file, 'r')
    images_tr, images_tt, labels_tr, labels_tt, _ = train_val_split(
            hdf5_file, FLAGS.group_tr, FLAGS.group_tt, FLAGS.chunk_size)

    # Test
    results_dict = test(images_tt, labels_tt, images_tr, labels_tr, model,
                        test_config, FLAGS.batch_size, FLAGS.chunk_size)

    # Print and write results
    if FLAGS.output_dir:

        # The special value '-1' redirects output next to the model file
        if FLAGS.output_dir == '-1':
            FLAGS.output_dir = os.path.dirname(FLAGS.model)

        if FLAGS.append:
            write_mode = 'a'
        else:
            write_mode = 'w'

        if not os.path.exists(FLAGS.output_dir):
            os.makedirs(FLAGS.output_dir)
        output_file = os.path.join(FLAGS.output_dir,
                                   '{}.txt'.format(FLAGS.output_basename))
        write_test_results(results_dict, output_file, write_mode)
        output_file = os.path.join(FLAGS.output_dir, 
                                   '{}.yml'.format(FLAGS.output_basename))
        with open(output_file, write_mode) as f:
            results_dict = numpy_to_python(results_dict)
            yaml.dump(results_dict, f, default_flow_style=False)
    print_test_results(results_dict)

    # Close HDF5 File
    hdf5_file.close()
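yaml.dump cannot serialize NumPy scalars or arrays, which is presumably why results_dict goes through numpy_to_python first. That helper is project-local and not shown; a plausible minimal sketch (name taken from the call above, behavior assumed) is:

import numpy as np

def numpy_to_python(obj):
    """Recursively convert NumPy scalars/arrays to native Python types."""
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, np.generic):  # np.float32, np.int64, ...
        return obj.item()
    if isinstance(obj, dict):
        return {k: numpy_to_python(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [numpy_to_python(v) for v in obj]
    return obj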
Code example #4
def main(argv=None):

    # Set test phase
    K.set_learning_phase(0)

    # Set float default
    K.set_floatx('float32')

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    K.set_session(sess)
    
    _print_flags()

    # Define output file
    if FLAGS.do_write:
        output_file = os.path.join(
            os.path.dirname(FLAGS.model),
            'advacc_' + os.path.basename(FLAGS.model) + '_' +
            os.path.basename(FLAGS.attack_params_file).split('.')[0])
    else:
        output_file = None
    
    # Load model
    model = load_model(FLAGS.model)
    model = del_mse_nodes(model, verbose=1)
    model = ensure_softmax_output(model)
    
    # Load adversarial model
    if FLAGS.model_adv:
        model_adv = load_model(FLAGS.model_adv)
    else:
        model_adv = model

    # Open HDF5 file containing the data set and get images and labels
    hdf5_file = h5py.File(FLAGS.data_file, 'r')
    shuffle = (FLAGS.seed is not None) and (FLAGS.pct_test != 1.0)
    images, labels, hdf5_aux = data_input.hdf52dask(
        hdf5_file, FLAGS.group, FLAGS.chunk_size, shuffle, FLAGS.seed,
        FLAGS.pct_test)

    # Load image parameters
    with open(FLAGS.image_params_file, 'r') as yml_file:
        train_image_params = yaml.load(yml_file, Loader=yaml.FullLoader)
    image_params_dict = data_input.validation_image_params(
            **train_image_params)

    # Load attack parameters
    with open(FLAGS.attack_params_file, 'r') as yml_file:
        attack_params_dict = yaml.load(yml_file, Loader=yaml.FullLoader)

    test_rep_orig(FLAGS.data_file, FLAGS.group, FLAGS.chunk_size,
                  FLAGS.batch_size, model, train_image_params, 
                  train_image_params, 1, None, [])

    test(images, labels, FLAGS.batch_size, model, model_adv, image_params_dict, 
         attack_params_dict, output_file)

    # Close HDF5 File
    hdf5_file.close()

    # Close and remove aux HDF5 files
    for f in hdf5_aux:
        filename = f.filename
        f.close()
        os.remove(filename)
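The output_file expression near the top of this example builds the report name from the model path and the attack-config file name. With hypothetical values (not from the original project) it resolves like this:

import os

model = 'runs/cifar10/model_final'       # hypothetical FLAGS.model
attack_params_file = 'configs/fgsm.yml'  # hypothetical FLAGS.attack_params_file

output_file = os.path.join(
    os.path.dirname(model),
    'advacc_' + os.path.basename(model) + '_' +
    os.path.basename(attack_params_file).split('.')[0])
print(output_file)  # runs/cifar10/advacc_model_final_fgsm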
Code example #5
def main(argv=None):

    handle_train_dir(FLAGS.train_dir)

    # Print and write the flag arguments
    print_flags(FLAGS)
    write_flags(FLAGS)

    K.set_floatx('float32')

    # Read and/or prepare the train config dictionary
    if FLAGS.train_config_file:
        with open(FLAGS.train_config_file, 'r') as f_yml:
            train_config = yaml.load(f_yml, Loader=yaml.FullLoader)
    else:
        train_config = {}
    train_config = prepare_train_config(train_config, FLAGS)
    train_config = dict2namespace(train_config)

    # Set tensorflow and numpy seeds (weights initialization)
    if train_config.seeds.tf:
        tf.set_random_seed(train_config.seeds.tf)
    np.random.seed(train_config.seeds.np)

    # Open HDF5 file containing the data set
    hdf5_file = h5py.File(train_config.data.data_file, 'r')
    num_examples, num_classes, image_shape = dataset_characteristics(
        hdf5_file, train_config.data.group_tr, train_config.data.labels_id)
    train_config.data.n_classes = num_classes
    train_config.data.image_shape = image_shape

    # Determine the train and validation sets
    images_tr, images_val, labels_tr, labels_val, aux_hdf5 = \
            train_val_split(hdf5_file,
                            train_config.data.group_tr,
                            train_config.data.group_val,
                            train_config.data.chunk_size,
                            train_config.data.pct_tr,
                            train_config.data.pct_val,
                            seed=train_config.seeds.train_val,
                            shuffle=train_config.data.shuffle_train_val,
                            labels_id=train_config.data.labels_id)
    train_config.data.n_train = images_tr.shape[0]
    train_config.data.n_val = images_val.shape[0]

    # Data augmentation parameters
    with open(train_config.daug.daug_params_file, 'r') as f_yml:
        daug_params_tr = yaml.load(f_yml, Loader=yaml.FullLoader)
        if ((daug_params_tr['do_random_crop'] or
             daug_params_tr['do_central_crop']) and
                daug_params_tr['crop_size'] is not None):
            train_config.data.image_shape = daug_params_tr['crop_size']
    daug_params_tr['seed_daug'] = train_config.seeds.daug
    if train_config.daug.aug_per_img_val > 1:
        daug_params_val = daug_params_tr
        daug_params_val['seed_daug'] = train_config.seeds.daug
    else:
        daug_params_val = validation_image_params(train_config.daug.nodaug,
                                                  **daug_params_tr)
    train_config.daug.daug_params_tr = daug_params_tr
    train_config.daug.daug_params_val = daug_params_val

    # Adjust training parameters
    train_config = define_train_params(train_config,
                                       output_dir=FLAGS.train_dir)

    # Read invariance parameters
    if train_config.optimizer.invariance:
        with open(train_config.optimizer.daug_invariance_params_file,
                  'r') as f_yml:
            train_config.optimizer.daug_invariance_params = yaml.load(
                f_yml, Loader=yaml.FullLoader)
        with open(train_config.optimizer.class_invariance_params_file,
                  'r') as f_yml:
            train_config.optimizer.class_invariance_params = yaml.load(
                f_yml, Loader=yaml.FullLoader)

    # Get monitored metrics
    metrics, metric_names = handle_metrics(train_config.metrics)
    FLAGS.metrics = metric_names

    # Initialize the model
    model, model_cat, loss_weights = _model_setup(train_config, metrics,
                                                  FLAGS.resume_training)
    _model_print_save(model, FLAGS.train_dir)

    callbacks = _get_callbacks(train_config,
                               FLAGS.train_dir,
                               save_model_every=FLAGS.save_model_every,
                               track_gradients=FLAGS.track_gradients,
                               fmri_rdms=FLAGS.fmri_rdms,
                               loss_weights=loss_weights)

    # Write training configuration to disk
    output_file = os.path.join(
        FLAGS.train_dir,
        'train_config_' + time.strftime('%a_%d_%b_%Y_%H%M%S') + '.yml')
    with open(output_file, 'w') as f:
        yaml.dump(numpy_to_python(namespace2dict(train_config)),
                  f,
                  default_flow_style=False)

    # Initialize Training Progress Logger
    loggers = []
    if FLAGS.log_file_train:
        log_file = os.path.join(FLAGS.train_dir, FLAGS.log_file_train)
        loggers.append(
            TrainingProgressLogger(log_file, model, train_config, images_tr,
                                   labels_tr))
    if FLAGS.log_file_test:
        log_file = os.path.join(FLAGS.train_dir, FLAGS.log_file_test)
        loggers.append(
            TrainingProgressLogger(log_file, model, train_config, images_val,
                                   labels_val))

    # Train
    history, model = train(images_tr, labels_tr, images_val, labels_val, model,
                           model_cat, callbacks, train_config, loggers)

    # Save model
    model.save(os.path.join(FLAGS.train_dir, 'model_final'))

    # Test
    if FLAGS.test_config_file:
        with open(FLAGS.test_config_file, 'r') as f_yml:
            test_config = yaml.load(f_yml, Loader=yaml.FullLoader)
        test_config = prepare_test_config(test_config, FLAGS)

        test_results_dict = test(images_val, labels_val, images_tr, labels_tr,
                                 model, test_config,
                                 train_config.train.batch_size.val,
                                 train_config.data.chunk_size)

        # Write test results to YAML
        output_file = os.path.join(
            FLAGS.train_dir,
            'test_' + os.path.basename(FLAGS.test_config_file))
        with open(output_file, 'w') as f:
            yaml.dump(numpy_to_python(test_results_dict),
                      f,
                      default_flow_style=False)

        # Write test results to TXT (swap the file extension)
        output_file = os.path.splitext(output_file)[0] + '.txt'
        write_test_results(test_results_dict, output_file)

        # Print test results
        print_test_results(test_results_dict)

    # Close and remove aux HDF5 files
    hdf5_file.close()
    for f in aux_hdf5:
        filename = f.filename
        f.close()
        os.remove(filename)
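In this example, train_config is converted to a namespace (dict2namespace) for attribute-style access, then back to a dict (namespace2dict) before dumping to YAML. Both helpers are project-local and not shown; a plausible minimal sketch (names taken from the calls above, behavior assumed) is:

from types import SimpleNamespace

def dict2namespace(d):
    """Recursively convert a dict into nested SimpleNamespace objects."""
    if isinstance(d, dict):
        return SimpleNamespace(**{k: dict2namespace(v) for k, v in d.items()})
    return d

def namespace2dict(ns):
    """Inverse of dict2namespace."""
    if isinstance(ns, SimpleNamespace):
        return {k: namespace2dict(v) for k, v in vars(ns).items()}
    return ns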