    # Define model
    # Resolve the model class dynamically from the `nets` module by name,
    # e.g. arch="ResNet" -> nets.ResNetModel(args).
    model = getattr(nets, args.arch + 'Model')(args)

    # Define attacker
    # Disable adversarial attacks when no PGD iterations are requested or when
    # evaluating a directory of checkpoints; otherwise attack with PGD.
    if args.attack_iter == 0 or args.eval_directory:
        attacker = NoOpAttacker()
    else:
        attacker = PGDAttacker(
            args.attack_iter,
            args.attack_epsilon,
            args.attack_step_size,
            # Training: 20% of attacks start from the clean image;
            # evaluation: always start from the perturbed point.
            prob_start_from_clean=0.2 if not args.eval else 0.0)
        if args.use_fp16xla:
            # Run the attacker's inner loop in fp16 with XLA compilation.
            attacker.USE_FP16 = True
            attacker.USE_XLA = True
    model.set_attacker(attacker)

    # Log the GPU status for debugging, then initialize Horovod.
    # hvd.init() must run before hvd.local_rank() is queried below.
    os.system("nvidia-smi")
    hvd.init()
    # TensorFlow performance tuning: dedicated per-GPU private threads plus
    # cuDNN persistent batch-norm and non-fused Winograd convolutions.
    # These env vars must be set before the TF session is created.
    gpu_thread_count = 2
    os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
    os.environ['TF_GPU_THREAD_COUNT'] = str(gpu_thread_count)
    os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

    # Session config: pin each Horovod rank to its own GPU, grow GPU memory
    # lazily, and cap per-process GPU memory at 45% — presumably so two
    # processes can share one GPU; verify against the launch configuration.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = str(hvd.local_rank())
    config.gpu_options.per_process_gpu_memory_fraction = 0.45
    run_barrier(config)
# --- Example #2 (second scraped sample from the same source; near-duplicate of the snippet above) ---
    # Define model
    # Resolve the model class dynamically from the `nets` module by name
    # (arch="ResNet" -> nets.ResNetModel(args)).
    model = getattr(nets, args.arch + 'Model')(args)

    # Define attacker
    # Use a no-op attacker when PGD is disabled (attack_iter == 0) or when
    # evaluating a directory of checkpoints; otherwise attack with PGD.
    if args.attack_iter == 0 or args.eval_directory:
        attacker = NoOpAttacker()
    else:
        attacker = PGDAttacker(
            args.attack_iter,
            args.attack_epsilon,
            args.attack_step_size,
            # Training: 20% of attacks start from the clean image;
            # evaluation: always start from the perturbed point.
            prob_start_from_clean=0.2 if not args.eval else 0.0)
        if args.use_fp16xla:
            # Run the attacker's inner loop in fp16 with XLA compilation.
            attacker.USE_FP16 = True
            attacker.USE_XLA = True
    model.set_attacker(attacker)

    # Log the GPU status for debugging, then initialize Horovod for the
    # multi-GPU evaluation/training that follows.
    os.system("nvidia-smi")
    hvd.init()
    if args.eval:
        sessinit = get_model_loader(args.load)
        if hvd.size() == 1:
            # single-GPU eval, slow
            ds = get_val_dataflow(args.data, args.batch)
            eval_on_ILSVRC12(model, sessinit, ds)
        else:
            logger.info("CMD: " + " ".join(sys.argv))
            cb = create_eval_callback("eval",
                                      model.get_inference_func(attacker),