Example #1
def main():
    arg_parser = train_app_MTMC.create_default_argument_parser("DukeMTMC-reID")
    arg_parser.add_argument("--dataset_dir",
                            help="Path to DukeMTMC dataset directory.",
                            default="./DukeMTMC-reID/DukeMTMC-reID")
    #print("######################arg_parser",arg_parser)
    arg_parser.add_argument(
        "--sdk_dir",
        help="Path to DukeMTMC baseline evaluation software.",
        default="resources/Market-1501-v15.09.15-baseline")
    args = arg_parser.parse_args()
    dataset = DukeMTMC(args.dataset_dir, num_validation_y=0.1, seed=1234)

    if args.mode == "train":
        train_x, train_y, _ = dataset.read_train()
        print("Train set size: %d images, %d identities" %
              (len(train_x), len(np.unique(train_y))))
        print("##############################", train_x[0])

        network_factory = net.create_network_factory(
            is_training=True,
            num_classes=MTMC.MAX_LABEL + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        train_kwargs = train_app_MTMC.to_train_kwargs(args)
        train_app_MTMC.train_loop(net.preprocess,
                                  network_factory,
                                  train_x,
                                  train_y,
                                  num_images_per_id=4,
                                  image_shape=MTMC.IMAGE_SHAPE,
                                  **train_kwargs)
    elif args.mode == "eval":
        valid_x, valid_y, camera_indices = dataset.read_validation()
        print("Validation set size: %d images, %d identities" %
              (len(valid_x), len(np.unique(valid_y))))

        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=MTMC.MAX_LABEL + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        eval_kwargs = train_app_MTMC.to_eval_kwargs(args)
        train_app_MTMC.eval_loop(net.preprocess,
                                 network_factory,
                                 valid_x,
                                 valid_y,
                                 camera_indices,
                                 image_shape=MTMC.IMAGE_SHAPE,
                                 **eval_kwargs)
    elif args.mode == "export":
        # Export one specific model.
        gallery_filenames, _, query_filenames, _, _ = dataset.read_test()

        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=MTMC.MAX_LABEL + 1,
            add_logits=False,
            reuse=None)
        gallery_features = train_app_MTMC.encode(net.preprocess,
                                                 network_factory,
                                                 args.restore_path,
                                                 gallery_filenames,
                                                 image_shape=MTMC.IMAGE_SHAPE)
        sio.savemat(os.path.join(args.sdk_dir, "feat_test.mat"),
                    {"features": gallery_features})

        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=MTMC.MAX_LABEL + 1,
            add_logits=False,
            reuse=True)
        query_features = train_app_MTMC.encode(net.preprocess,
                                               network_factory,
                                               args.restore_path,
                                               query_filenames,
                                               image_shape=MTMC.IMAGE_SHAPE)
        sio.savemat(os.path.join(args.sdk_dir, "feat_query.mat"),
                    {"features": query_features})
    elif args.mode == "finalize":
        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=MTMC.MAX_LABEL + 1,
            add_logits=False,
            reuse=None)
        train_app_MTMC.finalize(functools.partial(net.preprocess,
                                                  input_is_bgr=True),
                                network_factory,
                                args.restore_path,
                                image_shape=MTMC.IMAGE_SHAPE,
                                output_filename="./MTMC.ckpt")
    elif args.mode == "freeze":
        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=MTMC.MAX_LABEL + 1,
            add_logits=False,
            reuse=None)
        train_app_MTMC.freeze(functools.partial(net.preprocess,
                                                input_is_bgr=True),
                              network_factory,
                              args.restore_path,
                              image_shape=MTMC.IMAGE_SHAPE,
                              output_filename="./MTMC.pb")
    else:
        raise ValueError("Invalid mode argument.")
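The "export" mode above writes gallery and query embeddings to feat_test.mat and feat_query.mat under --sdk_dir. As a rough illustration of how those files can be consumed (not part of the original script), the following sketch ranks the gallery by cosine distance to the first query embedding; the paths follow the --sdk_dir default above, and the assumption that rows are per-image embeddings suitable for L2 normalisation is mine.

import os

import numpy as np
import scipy.io as sio

# Paths follow the --sdk_dir default used in the export mode above.
sdk_dir = "resources/Market-1501-v15.09.15-baseline"
gallery = sio.loadmat(os.path.join(sdk_dir, "feat_test.mat"))["features"]
query = sio.loadmat(os.path.join(sdk_dir, "feat_query.mat"))["features"]

# Normalise so that dot products are cosine similarities.
gallery = gallery / np.linalg.norm(gallery, axis=1, keepdims=True)
query = query / np.linalg.norm(query, axis=1, keepdims=True)

# Rank the gallery for the first query image (smallest cosine distance first).
ranking = np.argsort(1.0 - gallery.dot(query[0]))
print("Top-10 gallery indices for query 0:", ranking[:10])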
Example #2
def main():
    # output to file
    this_time = time.strftime('%Y-%m-%dT%H%M%S')
    sys.stdout = open(this_time + 'sdd.txt', 'w')
    sys.stderr = open(this_time + 'sdd.log', 'w')
    arg_parser = train_app.create_default_argument_parser("sdd")
    arg_parser.add_argument("--dataset_dir",
                            help="Path to Stanford drone dataset directory.",
                            default="../StanfordDroneDataset")
    arg_parser.add_argument("--sdk_dir",
                            help="Path to sdd baseline evaluation software.",
                            default="resources/StanfordDroneDataset-baseline")
    arg_parser.add_argument(
        "--annotation_file_name",
        help="Path to Stanford drone dataset annotation file.",
        default="/annotations/nexus/video0/annotations.txt")
    arg_parser.add_argument("--video_file_name",
                            help="Path to Stanford drone dataset video file.",
                            default="/videos/nexus/video0/video.mov")
    args = arg_parser.parse_args()
    dataset = sdd_dataset(args.dataset_dir,
                          args.annotation_file_name,
                          args.video_file_name,
                          num_validation_y=0.1,
                          seed=1234)

    if args.mode == "train":
        train_x, train_y, _ = dataset.read_train()
        print("Train set size: %d images, %d identities" %
              (len(train_x), len(np.unique(train_y))))
        network_factory = net.create_network_factory(
            is_training=True,
            num_classes=sdd.calculate_max_label(dataset._dataset_dir +
                                                dataset._annotation_file_name)
            + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        train_kwargs = train_app.to_train_kwargs(args)
        train_app.train_loop(net.preprocess,
                             network_factory,
                             train_x,
                             train_y,
                             num_images_per_id=4,
                             image_shape=sdd.IMAGE_SHAPE,
                             **train_kwargs)
    elif args.mode == "eval":
        valid_x, valid_y, camera_indices = dataset.read_validation()
        print("Validation set size: %d images, %d identities" %
              (len(valid_x), len(np.unique(valid_y))))

        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=sdd.calculate_max_label(dataset._dataset_dir +
                                                dataset._annotation_file_name)
            + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        eval_kwargs = train_app.to_eval_kwargs(args)
        train_app.eval_loop(net.preprocess,
                            network_factory,
                            valid_x,
                            valid_y,
                            camera_indices,
                            image_shape=sdd.IMAGE_SHAPE,
                            **eval_kwargs)
    elif args.mode == "export":
        # Export one specific model.
        gallery_filenames, _, query_filenames, _, _ = dataset.read_test()

        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=sdd.calculate_max_label(dataset._dataset_dir +
                                                dataset._annotation_file_name)
            + 1,
            add_logits=False,
            reuse=None)
        gallery_features = train_app.encode(net.preprocess,
                                            network_factory,
                                            args.restore_path,
                                            gallery_filenames,
                                            image_shape=sdd.IMAGE_SHAPE)
        sio.savemat(os.path.join(args.sdk_dir, "feat_test.mat"),
                    {"features": gallery_features})

        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=sdd.calculate_max_label(dataset._dataset_dir +
                                                dataset._annotation_file_name)
            + 1,
            add_logits=False,
            reuse=True)
        query_features = train_app.encode(net.preprocess,
                                          network_factory,
                                          args.restore_path,
                                          query_filenames,
                                          image_shape=sdd.IMAGE_SHAPE)
        sio.savemat(os.path.join(args.sdk_dir, "feat_query.mat"),
                    {"features": query_features})
    elif args.mode == "finalize":
        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=sdd.calculate_max_label(dataset._dataset_dir +
                                                dataset._annotation_file_name)
            + 1,
            add_logits=False,
            reuse=None)
        train_app.finalize(functools.partial(net.preprocess,
                                             input_is_bgr=True),
                           network_factory,
                           args.restore_path,
                           image_shape=sdd.IMAGE_SHAPE,
                           output_filename="./sdd.ckpt")
    elif args.mode == "freeze":
        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=sdd.calculate_max_label(dataset._dataset_dir +
                                                dataset._annotation_file_name)
            + 1,
            add_logits=False,
            reuse=None)
        train_app.freeze(functools.partial(net.preprocess, input_is_bgr=True),
                         network_factory,
                         args.restore_path,
                         image_shape=sdd.IMAGE_SHAPE,
                         output_filename="./sdd.pb")
    else:
        raise ValueError("Invalid mode argument.")
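The number of classes above is derived from sdd.calculate_max_label, whose definition is not shown on this page. A plausible minimal sketch, assuming the standard Stanford Drone Dataset annotation format in which the first whitespace-separated column of annotations.txt is the track (identity) ID, might look like this; only the name and call site come from the snippet above, the body is a guess:

def calculate_max_label(annotation_path):
    """Return the largest track ID found in an SDD annotations.txt file."""
    max_label = 0
    with open(annotation_path, "r") as f:
        for line in f:
            fields = line.split()
            if fields:
                max_label = max(max_label, int(fields[0]))
    return max_label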
Example #3
def main():
    arg_parser = train_app.create_default_argument_parser("vric")
    arg_parser.add_argument("--dataset_dir",
                            help="Path to MARS dataset directory.",
                            default="resources/MARS-evaluation-master")
    args = arg_parser.parse_args()
    dataset = VRIC(args.dataset_dir, num_validation_y=0.1, seed=1234)

    if args.mode == "train":
        train_x, train_y, _ = dataset.read_train()
        print("Train set size: %d images, %d identities" %
              (len(train_x), len(np.unique(train_y))))

        network_factory = net.create_network_factory(
            is_training=True,
            num_classes=vric.MAX_LABEL + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        train_kwargs = train_app.to_train_kwargs(args)
        train_app.train_loop(net.preprocess,
                             network_factory,
                             train_x,
                             train_y,
                             num_images_per_id=4,
                             image_shape=IMAGE_SHAPE,
                             **train_kwargs)
    elif args.mode == "eval":
        valid_x, valid_y, camera_indices = dataset.read_validation()
        print("Validation set size: %d images, %d identities" %
              (len(valid_x), len(np.unique(valid_y))))

        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=vric.MAX_LABEL + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        eval_kwargs = train_app.to_eval_kwargs(args)
        train_app.eval_loop(net.preprocess,
                            network_factory,
                            valid_x,
                            valid_y,
                            camera_indices,
                            image_shape=IMAGE_SHAPE,
                            num_galleries=20,
                            **eval_kwargs)
    elif args.mode == "export":
        filenames = dataset.read_test_filenames()

        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=vric.MAX_LABEL + 1,
            add_logits=False,
            reuse=None)
        features = train_app.encode(net.preprocess,
                                    network_factory,
                                    args.restore_path,
                                    filenames,
                                    image_shape=IMAGE_SHAPE)
        sio.savemat(os.path.join(args.dataset_dir, "feat_test.mat"),
                    {"features": features})
    elif args.mode == "finalize":
        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=vric.MAX_LABEL + 1,
            add_logits=False,
            reuse=None)
        train_app.finalize(functools.partial(net.preprocess,
                                             input_is_bgr=True),
                           network_factory,
                           args.restore_path,
                           image_shape=IMAGE_SHAPE,
                           output_filename="./vric.ckpt")
    elif args.mode == "freeze":
        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=vric.MAX_LABEL + 1,
            add_logits=False,
            reuse=None)
        train_app.freeze(functools.partial(net.preprocess, input_is_bgr=True),
                         network_factory,
                         args.restore_path,
                         image_shape=IMAGE_SHAPE,
                         output_filename="./vric.pb")
    else:
        raise ValueError("Invalid mode argument.")
Example #4
def main():
    arg_parser = train_app.create_default_argument_parser("veri")
    arg_parser.add_argument("--dataset_dir",
                            help="Path to the VeRi data",
                            default="./VeRi")
    #    arg_parser.add_argument(
    #        "--sdk_dir", help="Path to Market1501 baseline evaluation software.",
    #        default="resources/Market-1501-v15.09.15-baseline")
    args = arg_parser.parse_args()
    dataset = Veri(args.dataset_dir, num_validation_y=0.1, seed=1234)

    if args.mode == "train":
        train_x, train_y, _ = dataset.read_train()
        print("Train set size: %d images, %d identities" %
              (len(train_x), len(np.unique(train_y))))

        network_factory = net.create_network_factory(
            is_training=True,
            num_classes=veri.MAX_LABEL + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        train_kwargs = train_app.to_train_kwargs(args)
        train_app.train_loop(net.preprocess,
                             network_factory,
                             train_x,
                             train_y,
                             num_images_per_id=16,
                             image_shape=veri.IMAGE_SHAPE,
                             **train_kwargs)


#    elif args.mode == "eval":
#        valid_x, valid_y, camera_indices = dataset.read_validation()
#        print("Validation set size: %d images, %d identities" % (
#            len(valid_x), len(np.unique(valid_y))))
#
#        network_factory = net.create_network_factory(
#            is_training=False, num_classes=market1501.MAX_LABEL + 1,
#            add_logits=args.loss_mode == "cosine-softmax")
#        eval_kwargs = train_app.to_eval_kwargs(args)
#        train_app.eval_loop(
#            net.preprocess, network_factory, valid_x, valid_y, camera_indices,
#            image_shape=market1501.IMAGE_SHAPE, **eval_kwargs)
#    elif args.mode == "export":
#        # Export one specific model.
#        gallery_filenames, _, query_filenames, _, _ = dataset.read_test()
#
#        network_factory = net.create_network_factory(
#            is_training=False, num_classes=market1501.MAX_LABEL + 1,
#            add_logits=False, reuse=None)
#        gallery_features = train_app.encode(
#            net.preprocess, network_factory, args.restore_path,
#            gallery_filenames, image_shape=market1501.IMAGE_SHAPE)
#        sio.savemat(
#            os.path.join(args.sdk_dir, "feat_test.mat"),
#            {"features": gallery_features})
#
#        network_factory = net.create_network_factory(
#            is_training=False, num_classes=market1501.MAX_LABEL + 1,
#            add_logits=False, reuse=True)
#        query_features = train_app.encode(
#            net.preprocess, network_factory, args.restore_path,
#            query_filenames, image_shape=market1501.IMAGE_SHAPE)
#        sio.savemat(
#            os.path.join(args.sdk_dir, "feat_query.mat"),
#            {"features": query_features})
#    elif args.mode == "finalize":
#        network_factory = net.create_network_factory(
#            is_training=False, num_classes=market1501.MAX_LABEL + 1,
#            add_logits=False, reuse=None)
#        train_app.finalize(
#            functools.partial(net.preprocess, input_is_bgr=True),
#            network_factory, args.restore_path,
#            image_shape=market1501.IMAGE_SHAPE,
#            output_filename="./market1501.ckpt")
    elif args.mode == "freeze":
        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=veri.MAX_LABEL + 1,
            add_logits=False,
            reuse=None)
        train_app.freeze(functools.partial(net.preprocess, input_is_bgr=True),
                         network_factory,
                         args.restore_path,
                         image_shape=veri.IMAGE_SHAPE,
                         output_filename="./veri.pb")
    else:
        raise ValueError("Invalid mode argument.")
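The "freeze" mode above exports an inference graph to ./veri.pb. A minimal TensorFlow 1.x-style sketch of loading such a frozen graph is shown below; the input and output tensor names are assumptions, since they depend on how train_app.freeze names its placeholders in this project.

import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile("./veri.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(graph_def, name="net")

# Hypothetical tensor names; adjust to whatever train_app.freeze actually exports.
# images_in = graph.get_tensor_by_name("net/images:0")
# features_out = graph.get_tensor_by_name("net/features:0")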
Example #5
def main():
    arg_parser = train_app.create_default_argument_parser("bdd")
    arg_parser.add_argument(
        "--dataset_dir", help="Path to BDD tracking dataset directory.",
        default="data/bdd-tracking-reid")
    arg_parser.add_argument(
        "--sdk_dir", help="Path to BDD baseline evaluation software.",
        default="/path/to/bdd/sdk")
    args = arg_parser.parse_args()
    dataset = BddTracking(args.dataset_dir, num_validation_y=0.1, seed=1234)

    if args.mode == "train":
        train_x, train_y = dataset.read_train()
        print("Train set size: %d images, %d identities" % (
            len(train_x), len(np.unique(train_y))))
        
        network_factory = net.create_network_factory(
            is_training=True, num_classes=bdd.MAX_LABEL + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        train_kwargs = train_app.to_train_kwargs(args)
        train_app.train_loop(
            net.preprocess, network_factory, train_x, train_y,
            num_images_per_id=4, image_shape=bdd.IMAGE_SHAPE,
            **train_kwargs)
    elif args.mode == "eval":
        valid_x, valid_y = dataset.read_validation()
        print("Validation set size: %d images, %d identities" % (
            len(valid_x), len(np.unique(valid_y))))

        network_factory = net.create_network_factory(
            is_training=False, num_classes=bdd.MAX_LABEL + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        eval_kwargs = train_app.to_eval_kwargs(args)
        train_app.eval_loop(
            net.preprocess, network_factory, valid_x, valid_y, None,
            image_shape=bdd.IMAGE_SHAPE, **eval_kwargs)
    elif args.mode == "export":
        # Export one specific model.
        gallery_filenames, _, query_filenames, _, _ = dataset.read_test()

        network_factory = net.create_network_factory(
            is_training=False, num_classes=bdd.MAX_LABEL + 1,
            add_logits=False, reuse=None)
        gallery_features = train_app.encode(
            net.preprocess, network_factory, args.restore_path,
            gallery_filenames, image_shape=bdd.IMAGE_SHAPE)
        sio.savemat(
            os.path.join(args.sdk_dir, "feat_test.mat"),
            {"features": gallery_features})

        network_factory = net.create_network_factory(
            is_training=False, num_classes=bdd.MAX_LABEL + 1,
            add_logits=False, reuse=True)
        query_features = train_app.encode(
            net.preprocess, network_factory, args.restore_path,
            query_filenames, image_shape=bdd.IMAGE_SHAPE)
        sio.savemat(
            os.path.join(args.sdk_dir, "feat_query.mat"),
            {"features": query_features})
    elif args.mode == "finalize":
        network_factory = net.create_network_factory(
            is_training=False, num_classes=bdd.MAX_LABEL + 1,
            add_logits=False, reuse=None)
        train_app.finalize(
            functools.partial(net.preprocess, input_is_bgr=True),
            network_factory, args.restore_path,
            image_shape=bdd.IMAGE_SHAPE,
            output_filename="./bdd.ckpt")
    elif args.mode == "freeze":
        network_factory = net.create_network_factory(
            is_training=False, num_classes=bdd.MAX_LABEL + 1,
            add_logits=False, reuse=None)
        train_app.freeze(
            functools.partial(net.preprocess, input_is_bgr=True),
            network_factory, args.restore_path,
            image_shape=bdd.IMAGE_SHAPE,
            output_filename="/data1/haofeng/out/cml/cml-bdd-tracking-train/bdd-tracking-val.pb")
    else:
        raise ValueError("Invalid mode argument.")
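As in the other examples, behaviour is selected via the mode argument registered by train_app.create_default_argument_parser. The snippet ends without an entry point; a typical guard plus example invocations might look like the sketch below. The script filename and the exact --mode/--restore_path flag names are assumptions, since create_default_argument_parser is not shown here; --dataset_dir and --loss_mode come from the snippet above.

if __name__ == "__main__":
    # Example invocations (assumed flag names):
    #   python train_bdd.py --mode=train --dataset_dir=data/bdd-tracking-reid \
    #       --loss_mode=cosine-softmax
    #   python train_bdd.py --mode=freeze --restore_path=/path/to/checkpoint
    main()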
Example #6
def main():
    arg_parser = train_app.create_default_argument_parser("veri")
    arg_parser.add_argument("--dataset_dir",
                            help="Path to VeRi dataset directory.",
                            default="resources/VeRi")
    args = arg_parser.parse_args()
    dataset = VeRi(args.dataset_dir, num_validation_y=0.1, seed=1234)

    if args.mode == "train":
        train_x, train_y, _ = dataset.read_train()
        print("Train set size: %d images, %d identites" %
              (len(train_x), len(np.unique(train_y))))

        network_factory = net.create_network_factory(
            is_training=True,
            num_classes=veri.MAX_LABEL + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        train_kwargs = train_app.to_train_kwargs(args)
        train_app.train_loop(net.preprocess,
                             network_factory,
                             train_x,
                             train_y,
                             num_images_per_id=4,
                             image_shape=veri.IMAGE_SHAPE,
                             **train_kwargs)
    elif args.mode == "eval":
        valid_x, valid_y, camera_indices = dataset.read_validation()
        print("Validation set size: %d images, %d identites" %
              (len(valid_x), len(np.unique(valid_y))))

        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=veri.MAX_LABEL + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        eval_kwargs = train_app.to_eval_kwargs(args)
        train_app.eval_loop(net.preprocess,
                            network_factory,
                            valid_x,
                            valid_y,
                            camera_indices,
                            image_shape=veri.IMAGE_SHAPE,
                            **eval_kwargs)
    elif args.mode == "export":
        raise NotImplementedError()
    elif args.mode == "finalize":
        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=veri.MAX_LABEL + 1,
            add_logits=False,
            reuse=None)
        train_app.finalize(functools.partial(net.preprocess,
                                             input_is_bgr=True),
                           network_factory,
                           args.restore_path,
                           image_shape=veri.IMAGE_SHAPE,
                           output_filename="./veri.ckpt")
    elif args.mode == "freeze":
        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=veri.MAX_LABEL + 1,
            add_logits=False,
            reuse=None)
        train_app.freeze(functools.partial(net.preprocess, input_is_bgr=True),
                         network_factory,
                         args.restore_path,
                         image_shape=veri.IMAGE_SHAPE,
                         output_filename="./veri.pb")
    else:
        raise ValueError("Invalid mode argument.")
Example #7
def main():
    arg_parser = train_app.create_default_argument_parser("youtube_faces")
    arg_parser.add_argument("--dataset_dir",
                            help="path to youtube_faces dataset directory.",
                            default="/home/max/Desktop/yt_test_data")
    args = arg_parser.parse_args()
    dataset = Youtube_faces(args.dataset_dir, num_validation_y=0.1, seed=1234)

    if args.mode == "train":
        train_x, train_y, _ = dataset.read_train()
        print("Train set size: %d images, %d persons" %
              (len(train_x), len(np.unique(train_y))))

        network_factory = net.create_network_factory(
            is_training=True,
            num_classes=youtube_faces.MAX_LABEL + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        train_kwargs = train_app.to_train_kwargs(args)
        train_app.train_loop(net.preprocess,
                             network_factory,
                             train_x,
                             train_y,
                             num_images_per_id=4,
                             image_shape=youtube_faces.IMAGE_SHAPE,
                             **train_kwargs)

    elif args.mode == "eval":
        valid_x, valid_y, camera_indices = dataset.read_validation()
        print("Validation set size: %d images, %d persons" %
              (len(valid_x), len(np.unique(valid_y))))

        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=youtube_faces.MAX_LABEL + 1,
            add_logits=args.loss_mode == "cosine-softmax")
        eval_kwargs = train_app.to_eval_kwargs(args)
        train_app.eval_loop(net.preprocess,
                            network_factory,
                            valid_x,
                            valid_y,
                            camera_indices,
                            image_shape=youtube_faces.IMAGE_SHAPE,
                            **eval_kwargs)

    elif args.mode == "export":
        raise NotImplementedError()
    # elif args.mode == "export":
    #     # Export one specific model.
    #     gallery_filenames, _, query_filenames, _, _ = dataset.read_test()

    #     network_factory = net.create_network_factory(
    #         is_training=False, num_classes=market1501.MAX_LABEL + 1,
    #         add_logits=False, reuse=None)
    #     gallery_features = train_app.encode(
    #         net.preprocess, network_factory, args.restore_path,
    #         gallery_filenames, image_shape=market1501.IMAGE_SHAPE)
    #     sio.savemat(
    #         os.path.join(args.sdk_dir, "feat_test.mat"),
    #         {"features": gallery_features})

    #     network_factory = net.create_network_factory(
    #         is_training=False, num_classes=market1501.MAX_LABEL + 1,
    #         add_logits=False, reuse=True)
    #     query_features = train_app.encode(
    #         net.preprocess, network_factory, args.restore_path,
    #         query_filenames, image_shape=market1501.IMAGE_SHAPE)
    #     sio.savemat(
    #         os.path.join(args.sdk_dir, "feat_query.mat"),
    #         {"features": query_features})

    elif args.mode == "finalize":
        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=youtube_faces.MAX_LABEL + 1,
            add_logits=False,
            reuse=None)
        train_app.finalize(functools.partial(net.preprocess,
                                             input_is_bgr=True),
                           network_factory,
                           args.restore_path,
                           image_shape=youtube_faces.IMAGE_SHAPE,
                           output_filename="./youtube_faces.ckpt")
    elif args.mode == "freeze":
        network_factory = net.create_network_factory(
            is_training=False,
            num_classes=youtube_faces.MAX_LABEL + 1,
            add_logits=False,
            reuse=None)
        train_app.freeze(functools.partial(net.preprocess, input_is_bgr=True),
                         network_factory,
                         args.restore_path,
                         image_shape=youtube_faces.IMAGE_SHAPE,
                         output_filename="./youtube_faces.pb")
    else:
        raise ValueError("Invalid mode argument.")
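A quick way to sanity-check the checkpoint written by the "finalize" branch above is to list its variables; tf.train.list_variables only reads the checkpoint file, so the model graph does not need to be rebuilt. The path matches the output_filename used above (a small sketch, not part of the original script):

import tensorflow as tf

# List the variables stored in the finalized checkpoint written above.
for name, shape in tf.train.list_variables("./youtube_faces.ckpt"):
    print(name, shape)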