Example #1
def verify(eran_model_path, input, epsilon):
    model, is_conv, means, stds = read_net(eran_model_path, 14, False)
    eran = ERAN(model)

    specLB = np.clip(input - epsilon, 0, 1)
    specUB = np.clip(input + epsilon, 0, 1)
    for i in range(0, 2):
        specLB[i] = input[i]
        specUB[i] = input[i]
    for i in range(2, 4):
        specLB[i] = np.clip(input[i] - epsilon, -1, 1)
        specUB[i] = np.clip(input[i] + epsilon, -1, 1)
    label, nn, _, _ = eran.analyze_box(specLB, specUB, 'deepzono', 1, 1)
    print("lable = {}".format(label))
    return label >= 0
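
A minimal usage sketch for the verify helper above (the network path, the input vector, and the epsilon value are illustrative assumptions, not part of the original example; read_net, ERAN, and numpy are imported as in the snippet):

import numpy as np

x = np.zeros(14, dtype=np.float64)                # placeholder input, matching read_net(..., 14, ...)
robust = verify("nets/example_net.pyt", x, 0.05)  # hypothetical network file
print("verified" if robust else "not verified")
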
Example #2
def verify(dnn: OperationGraph,
           phi: Expression,
           domain="deeppoly",
           timeout_lp=1.0,
           timeout_milp=1.0,
           use_area_heuristic=True,
           **kwargs: Dict[str, Any]):
    logger = logging.getLogger(__name__)
    dnn = dnn.simplify()
    phi.networks[0].concretize(dnn)

    result = UNSAT
    property_extractor = HalfspacePolytopePropertyExtractor(
        HyperRectangle, HalfspacePolytope)
    for prop in property_extractor.extract_from(~phi):
        if prop.input_constraint.num_variables > 1:
            raise ERANTranslatorError(
                "Unsupported network: More than 1 input variable")
        with tf.Session(graph=tf.Graph()) as tf_session:
            layers = as_layers(
                prop.suffixed_op_graph(),
                extra_layer_types=ERAN_LAYER_TYPES,
                translator_error=ERANTranslatorError,
            )
            input_interval = prop.input_constraint

            spec_lb = input_interval.lower_bounds[0]
            spec_ub = input_interval.upper_bounds[0]
            if len(spec_lb.shape) == 4:
                spec_lb = spec_lb.transpose((0, 2, 3, 1))
                spec_ub = spec_ub.transpose((0, 2, 3, 1))
            tf_graph = as_tf(layers, translator_error=ERANTranslatorError)
            eran_model = ERAN(tf_graph, session=tf_session)
            _, nn, nlb, nub = eran_model.analyze_box(spec_lb.flatten().copy(),
                                                     spec_ub.flatten().copy(),
                                                     domain, timeout_lp,
                                                     timeout_milp,
                                                     use_area_heuristic,
                                                     **kwargs)
            output_lower_bound = np.asarray(nlb[-1])
            output_upper_bound = np.asarray(nub[-1])
            logger.debug("output lower bound: %s", output_lower_bound)
            logger.debug("output upper bound: %s", output_upper_bound)
            result |= check(output_lower_bound, output_upper_bound)
        if result == SAT or result == UNKNOWN:
            return result

    return result
Example #3
def runMNIST():
    nnets = [
        nnetPrefix + "/mnist_relu_3_100_16bit.pyt",
    ]
    perturb = 3
    epsilon = 1.0
    numInputs = 784

    csvfile = open('../data/mnist_test.csv', 'r')
    images = csv.reader(csvfile, delimiter=',')
    tests = []
    for i, image in enumerate(images):
        if i == 10: break
        target = int(image[0])
        image = np.float64(image[1:])
        specLB = np.copy(image)
        specUB = np.copy(image)
        specLB = np.clip(specLB - perturb, 0, 255)
        specUB = np.clip(specUB + perturb, 0, 255)
        specLB = specLB / np.float64(255)
        specUB = specUB / np.float64(255)
        tests.append((target, specLB, specUB))

    for nnet in nnets:
        print("Network: " + nnet.split("/")[-1], file=sys.stderr)
        model, is_conv, means, stds = read_net(nnet, numInputs, False)
        eran = ERAN(model)
        for target, lb, ub in tests:
            res = getEps2(eran, lb, ub, target, 10)
            print("Result: " + str(res), file=sys.stderr)
            if abs(res[1]) < epsilon and abs(res[2]) < epsilon:
                print("Verified", file=sys.stderr)
            else:
                print("Failed", file=sys.stderr)
            exit(0)
Example #4
def runHAR():
    nnet = nnetPrefix + "/HAR_16bit.pyt"

    perturb = 0.1
    epsilon = 0.25
    numInputs = 561

    csvfile = open('../data/har_test.csv', 'r')
    testInputs = csv.reader(csvfile, delimiter=',')
    tests = []
    for i, testInput in enumerate(testInputs):
        if i == 10: break
        target = int(testInput[0])
        testInput = np.float64(testInput[1:])
        specLB = np.copy(testInput)
        specUB = np.copy(testInput)
        specLB = np.clip(specLB - perturb, -1, 1)
        specUB = np.clip(specUB + perturb, -1, 1)
        tests.append((target, specLB, specUB))

    model, is_conv, means, stds = read_net(nnet, numInputs, False)
    eran = ERAN(model)
    for target, lb, ub in tests:
        res = getEps2(eran, lb, ub, target, 6)
        print("Result: " + str(res), file=sys.stderr)
        if abs(res[1]) < epsilon and abs(res[2]) < epsilon:
            print("Verified", file=sys.stderr)
        else:
            print("Failed", file=sys.stderr)
        exit(0)
Example #5
def runACAS():
    epsilon = 0.05
    numInputs = 5

    nnet = "ACASXU_run2a_1_1_batch_2000_16bit.pyt"
    nnetPath = nnetPrefix + nnet
    model, is_conv, means, stds = read_net(nnetPath, numInputs, False)
    eran = ERAN(model)

    target = 0

    specUB = prop16UB
    specLB = prop16LB
    print("Property: " + str(16))
    print("Network: " + nnet)
    res = getEps2(eran, specLB, specUB, target, 5)
    print("Result: " + str(res))
    if abs(res[1]) < epsilon and abs(res[2]) < epsilon:
        print("Verified")
    else:
        print("Failed")

    specUB = prop26UB
    specLB = prop26LB
    print("Property: " + str(26))
    print("Network: " + nnet)
    res = getEps2(eran, specLB, specUB, target, 5)
    print("Result: " + str(res))
    if abs(res[1]) < epsilon and abs(res[2]) < epsilon:
        print("Verified")
    else:
        print("Failed")
Example #6
def getERAN(netname, num_pixels):
    is_trained_with_pytorch = True
    domain = 'refinepoly'
    model, is_conv, means, stds = read_tensorflow_net(
        netname, num_pixels, is_trained_with_pytorch,
        (domain == 'gpupoly' or domain == 'refinegpupoly'))
    eran = ERAN(model, is_onnx=False)
    return eran
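
A one-line usage sketch (the network path and pixel count are placeholders):

eran = getERAN("nets/mnist_relu.tf", 784)  # hypothetical TensorFlow network with 784 inputs
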
Example #7
def _start_acasxu_recursive(kwargs):
    """
    Utility method to start _acasxu_recursive through multiprocessing
    """
    network_file = kwargs['network_file']
    model, _ = read_onnx_net(network_file)
    eran = ERAN(model, is_onnx=True)
    global _failed_already

    return _acasxu_recursive(
        kwargs['specLB'], kwargs['specUB'], model, eran, kwargs['constraints'],
        _failed_already, kwargs['max_depth'], 0,
        kwargs['permitted_depth_extensions'], 0, kwargs['domain'],
        kwargs['timeout_lp'], kwargs['timeout_milp'],
        kwargs['use_default_heuristic'], kwargs['complete'])
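
Example #12 below dispatches this worker through a multiprocessing pool; a condensed sketch of that pattern (the box, constraints, network file, and pool size are placeholders, and _init is the initializer used in Example #12):

from multiprocessing import Pool, Value
import numpy as np

multi_bounds = [(np.zeros(5), np.ones(5))]      # placeholder: one box over a 5-dimensional input
constraints = []                                # placeholder output constraints

failed_already = Value('i', 1)
arguments = [{
    'specLB': lb, 'specUB': ub,
    'network_file': "acasxu.onnx",              # hypothetical ONNX file
    'constraints': constraints,
    'max_depth': 10, 'permitted_depth_extensions': 3,
    'domain': "deeppoly", 'timeout_lp': 1, 'timeout_milp': 1,
    'use_default_heuristic': True, 'complete': True,
} for lb, ub in multi_bounds]

pool = Pool(processes=2, initializer=_init, initargs=(failed_already,))
results = list(pool.imap_unordered(_start_acasxu_recursive, arguments))
all_verified = all(verified for verified, _ in results)
pool.terminate()
pool.join()
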
Example #8
def runACAS():
    epsilon = 0.05
    numInputs = 5

    erans = {}
    for nnet in ACASNnets:
        nnetPath = nnetPrefix + nnet
        model, is_conv, means, stds = read_net(nnetPath, numInputs, False)
        erans[nnet] = ERAN(model)

    for prop, test in ACASProps.items():
        target, specUB, specLB = test
        print("Property: " + str(prop))
        for nnet in ACASNnets:
            if re.search(ACASExps[prop],
                         nnet) and not re.search(ACASSkips[prop], nnet):
                print("Network: " + nnet)
                res = getEps2(erans[nnet], specLB, specUB, target, 5)
                print("Result: " + str(res))
                if abs(res[1]) < epsilon and abs(res[2]) < epsilon:
                    print("Verified")
                else:
                    print("Failed")
Example #9
    def __analyze_segmentation_eran(eran: ERAN,
                                    true_labels,
                                    valid_classes,
                                    num_total_classes,
                                    specLB,
                                    specUB,
                                    domain,
                                    timeout_lp,
                                    timeout_milp,
                                    use_default_heuristic,
                                    lexpr_weights=None,
                                    lexpr_cst=None,
                                    lexpr_dim=None,
                                    uexpr_weights=None,
                                    uexpr_cst=None,
                                    uexpr_dim=None,
                                    expr_size=0,
                                    testing=False,
                                    prop=-1,
                                    spatial_constraints=None):
        assert domain == "deeppoly", "domain isn't valid, must be 'deeppoly'"
        specLB = np.reshape(specLB, (-1, ))
        specUB = np.reshape(specUB, (-1, ))
        eran.nn = layers()
        eran.nn.specLB = specLB
        eran.nn.specUB = specUB

        execute_list, output_info = eran.optimizer.get_deeppoly(
            eran.nn, specLB, specUB, lexpr_weights, lexpr_cst, lexpr_dim,
            uexpr_weights, uexpr_cst, uexpr_dim, expr_size,
            spatial_constraints)
        analyzer = Analyzer(execute_list, eran.nn, domain, timeout_lp,
                            timeout_milp, None, use_default_heuristic, -1,
                            prop, testing)
        dominant_classes, nlb, nub = EranVerifier.__analyze_segmentation_analyzer(
            analyzer, true_labels, valid_classes, num_total_classes)
        return dominant_classes, nlb, nub
Example #10
def runMNIST3Pixel():
    nnets = [
        nnetPrefix + "/mnist_relu_3_100_16bit.pyt",
        nnetPrefix + "/mnist_relu_2_512_16bit.pyt",
        nnetPrefix + "/mnist_relu_4_1024_16bit.pyt"
    ]
    perturb = 3
    epsilon = 1.0
    numInputs = 784
    numPixels = 3

    csvfile = open('../data/mnist_test.csv', 'r')
    images = csv.reader(csvfile, delimiter=',')
    tests = []
    for i, image in enumerate(images):
        target = int(image[0])
        image = np.float64(image[1:])
        specLB = np.copy(image)
        specUB = np.copy(image)
        for j in range(numPixels):
            specLB[random_pixels[i][j]] = 0.0
            specUB[random_pixels[i][j]] = 255.0
        specLB = specLB / np.float64(255)
        specUB = specUB / np.float64(255)
        tests.append((target, specLB, specUB))

    for nnet in nnets:
        print("Network: " + nnet.split("/")[-1], file=sys.stderr)
        model, is_conv, means, stds = read_net(nnet, numInputs, False)
        eran = ERAN(model)
        for target, lb, ub in tests:
            res = getEps2(eran, lb, ub, target, 10)
            print("Result: " + str(res), file=sys.stderr)
            if abs(res[1]) < epsilon and abs(res[2]) < epsilon:
                print("Verified", file=sys.stderr)
            else:
                print("Failed", file=sys.stderr)
Example #11
    sess = tf.Session()
    if is_saved_tf_model:
        saver = tf.train.import_meta_graph(netname)
        saver.restore(sess, tf.train.latest_checkpoint(netfolder + '/'))
    else:
        with tf.gfile.GFile(netname, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            sess.graph.as_default()
            tf.graph_util.import_graph_def(graph_def, name='')
    ops = sess.graph.get_operations()
    last_layer_index = -1
    while ops[last_layer_index].type in non_layer_operation_types:
        last_layer_index -= 1
    eran = ERAN(
        sess.graph.get_tensor_by_name(ops[last_layer_index].name + ':0'), sess)

else:
    if (dataset == 'mnist'):
        num_pixels = 784
    elif (dataset == 'cifar10'):
        num_pixels = 3072
    elif (dataset == 'acasxu'):
        num_pixels = 5
    elif (zonotope_bool == True):
        num_pixels = len(zonotope)
    if is_onnx:
        is_trained_with_pytorch = True
        model, is_conv = read_onnx_net(netname)
        # this is to have different defaults for mnist and cifar10
        if dataset == 'cifar10':
Example #12
def verify_acasxu_style(
    network_file: str,
    means: np.ndarray,
    stds: np.ndarray,
    input_box: List[Tuple[np.ndarray, np.ndarray]],
    output_constraints: List[List[Tuple[int, int, float]]],
    timeout_lp=1,
    timeout_milp=1,
    max_depth=10,
    permitted_depth_extensions=3,
    use_default_heuristic=True,
    complete=True,
    progress_bar=True
) -> Optional[List[Tuple[np.ndarray, Tuple[int, int, float],
                         Optional[float]]]]:
    """
    Verifies a fully-connected (non-convolutional) neural network.

    Returns None if no counterexamples could be found and the network could not be verified.
    Returns an empty list if the network could be verified.
    Returns a list of inputs for which the network violates the constraints if the network could not be verified.
    """
    domain = "deeppoly"

    model, is_conv = read_onnx_net(network_file)
    eran = ERAN(model, is_onnx=True)

    # total will be updated later
    progress_bar = tqdm(total=1, disable=not progress_bar)

    specLB = [interval[0] for interval in input_box]
    specUB = [interval[1] for interval in input_box]
    _normalize(specLB, means, stds)
    _normalize(specUB, means, stds)

    counterexample_list = []
    # adex_holds stores whether the property still holds at x_adex (below);
    # if adex_holds is True, then x_adex is a spurious counterexample
    adex_holds = True

    verified_flag, nn, nlb, nub, _, x_adex = eran.analyze_box(
        specLB, specUB, _init_domain(domain), timeout_lp, timeout_milp,
        use_default_heuristic, output_constraints)

    if not verified_flag and x_adex is not None:
        adex_holds, _, _, _, _, _ = eran.analyze_box(x_adex, x_adex,
                                                     "deeppoly", timeout_lp,
                                                     timeout_milp,
                                                     use_default_heuristic,
                                                     output_constraints)
        if not adex_holds:
            verified_flag = False
            # we need to undo the input normalisation that was applied to the counterexamples
            counterexample_list.append(
                (np.array(x_adex) * stds + means, output_constraints))

    if not verified_flag and adex_holds:
        # expensive min/max gradient calculation
        nn.set_last_weights(output_constraints)
        grads_lower, grads_upper = nn.back_propagate_gradient(nlb, nub)
        assert len(grads_lower) == len(specLB), 'back_propagate_gradient did not yield gradients for inputs. ' \
                                                '(verify_acasxu_style only supports non-convolutional networks).'

        smears = [
            max(-grad_l, grad_u) * (u - l) for grad_l, grad_u, l, u in zip(
                grads_lower, grads_upper, specLB, specUB)
        ]
        smears = [1e-8 if smear == 0 else smear for smear in smears]
        split_multiple = 20 / np.sum(smears)

        num_splits = [int(np.ceil(smear * split_multiple)) for smear in smears]
        step_size = []
        for i in range(len(specLB)):  # for each input dimension
            if num_splits[i] == 0:
                num_splits[i] = 1
            step_size.append((specUB[i] - specLB[i]) / num_splits[i])

        start_val = np.copy(specLB)
        end_val = np.copy(specUB)
        # _, nn, _, _, _, _ = eran.analyze_box(
        #     specLB, specUB, domain,
        #     timeout_lp, timeout_milp, use_default_heuristic, output_constraints
        # )

        # generate all combinations of splits of the input dimensions
        multi_bounds = [(specLB.copy(), specUB.copy())]
        for d in range(len(specLB)):  # for each input dimension
            # for each split from the previous dimensions
            new_multi_bounds = []
            for specLB_, specUB_ in multi_bounds:
                for i in range(num_splits[d]):
                    specLB_ = specLB_.copy()
                    specUB_ = specUB_.copy()
                    specLB_[d] = start_val[d] + i * step_size[d]
                    specUB_[d] = np.fmin(end_val[d],
                                         start_val[d] + (i + 1) * step_size[d])
                    new_multi_bounds.append((specLB_, specUB_))
            multi_bounds = new_multi_bounds

        # print(f"len(multi_bounds)={len(multi_bounds)}\n"
        #       f"means={means}\n"
        #       f"stds={stds}\n"
        #       f"grads_lower={grads_lower}\n"
        #       f"grads_upper={grads_upper}\n"
        #       f"smeans={smears}\n"
        #       f"num_splits={num_splits}\n"
        #       f"step_size={step_size}\n"
        #       f"start_val={start_val}\n"
        #       f"end_val={end_val}")

        progress_bar.reset(total=len(multi_bounds) + 1)
        progress_bar.update()  # for the first analyze_box run

        failed_already = Value('i', 1)
        pool = None
        try:
            # sequential version
            # res = itertools.starmap(
            #    lambda lb, ub: _acasxu_recursive(lb, ub, model, eran, output_constraints, failed_already,
            #                                     25, 0, domain, timeout_lp, timeout_milp, use_default_heuristic,
            #                                     complete),
            #    multi_bounds
            # )
            arguments = [{
                'specLB': lb,
                'specUB': ub,
                'network_file': network_file,
                'constraints': output_constraints,
                'max_depth': max_depth,
                'permitted_depth_extensions': permitted_depth_extensions,
                'domain': domain,
                'timeout_lp': timeout_lp,
                'timeout_milp': timeout_milp,
                'use_default_heuristic': use_default_heuristic,
                'complete': complete
            } for lb, ub in multi_bounds]
            # using only half of the CPUs sped up the computation on the computers it was tested on
            # and also kept CPU utilisation high for all CPUs.
            pool = Pool(processes=os.cpu_count() // 2,
                        initializer=_init,
                        initargs=(failed_already, ))
            res = pool.imap_unordered(_start_acasxu_recursive, arguments)

            counterexample_list = []
            verified_flag = True
            for verified, counterexamples in res:
                if not verified:
                    verified_flag = False
                    if counterexamples is not None:
                        # convert counterexamples to numpy
                        counterexamples = [
                            (np.array(cx), constraint, value)
                            for cx, constraint, value in counterexamples
                        ]
                        # we need to undo the input normalisation that was applied to the counterexamples
                        counterexamples = [
                            (cx * stds + means, constraint, value)
                            for cx, constraint, value in counterexamples
                        ]
                        counterexample_list.extend(counterexamples)
                    else:
                        warning(
                            f"Property not verified for without counterexample."
                        )
                progress_bar.update()

        except Exception as ex:
            warning(f"Property not verified because of an exception: {ex}.")
            raise ex
        finally:
            progress_bar.close()
            if pool is not None:
                # make sure the Pool is properly closed
                pool.terminate()
                pool.join()
    else:
        # property has been verified in first analyze_box run.
        progress_bar.update()
        progress_bar.close()

    if not verified_flag and len(counterexample_list) > 0:
        info(
            f"Property not verified with counterexamples: {counterexample_list}."
        )
        return counterexample_list
    elif not verified_flag:
        info(f"Property not verified without counterexamples.")
        # raise RuntimeError("Property disproven, but no counterexample found.")
        return None
    else:
        info(f"Property verified.")
        return []
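
A hedged usage sketch for verify_acasxu_style (the network file, input box, and the single (i, j, c) output-constraint tuple are placeholders; the constraint encoding follows whatever convention the surrounding ERAN code expects and is not spelled out in this snippet; the normalisation constants are the ACAS Xu values from Example #13 below):

import numpy as np

means = np.array([1.9791091e+04, 0.0, 0.0, 650.0, 600.0])
stds = np.array([60261.0, 6.28318530718, 6.28318530718, 1100.0, 1200.0])
input_box = [(np.float64(0.0), np.float64(1.0)) for _ in range(5)]   # one interval per input dimension
output_constraints = [[(0, 1, 0.0)]]                                 # placeholder constraint

outcome = verify_acasxu_style("acasxu_1_1.onnx", means, stds,        # hypothetical ONNX file
                              input_box, output_constraints)
if outcome is None:
    print("not verified, no counterexample found")
elif len(outcome) == 0:
    print("verified")
else:
    print("{} counterexample(s) found".format(len(outcome)))
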
Example #13
def init_model():
    global netname, epsilon, domain, dataset, means, stds, is_conv
    netname = config.netname
    filename, file_extension = os.path.splitext(netname)
    assert config.netname, 'a network has to be provided for analysis.'

    is_trained_with_pytorch = file_extension == ".pyt"
    is_saved_tf_model = file_extension == ".meta"
    is_pb_file = file_extension == ".pb"
    is_tensorflow = file_extension == ".tf"
    is_onnx = file_extension == ".onnx"
    assert is_trained_with_pytorch or is_saved_tf_model or is_pb_file or is_tensorflow or is_onnx, "file extension not supported"

    epsilon = config.epsilon
    domain = config.domain
    assert domain in [
        'deepzono', 'refinezono', 'deeppoly', 'refinepoly', 'gpupoly',
        'refinegpupoly'
    ], "domain name can be either deepzono, refinezono, deeppoly, refinepoly, gpupoly, refinegpupoly"

    dataset = config.dataset
    #assert dataset in ['mnist', 'cifar10', 'acasxu', 'fashion'], "only mnist, cifar10, acasxu, and fashion datasets are supported"
    print("netname ", netname, " epsilon ", epsilon, " domain ", domain,
          " dataset ", dataset, "args complete ", config.complete,
          " complete ", complete, " timeout_lp ", config.timeout_lp)
    if (dataset == 'mnist'):
        num_pixels = 784
    elif (dataset == 'cifar10'):
        num_pixels = 3072
    elif (dataset == 'acasxu'):
        num_pixels = 5
    else:
        num_pixels = dataset
    if is_onnx:
        model, is_conv = read_onnx_net(netname)
    else:
        model, is_conv, means, stds = read_tensorflow_net(
            netname, num_pixels, is_trained_with_pytorch,
            (domain == 'gpupoly' or domain == 'refinegpupoly'))
    if domain == 'gpupoly' or domain == 'refinegpupoly':
        if is_onnx:
            translator = ONNXTranslator(model, True)
        else:
            translator = TFTranslator(model)
        operations, resources = translator.translate()
        optimizer = Optimizer(operations, resources)
        nn = layers()
        network, relu_layers, num_gpu_layers = optimizer.get_gpupoly(nn)
    else:
        eran = ERAN(model, is_onnx=is_onnx)

    if not is_trained_with_pytorch:
        if dataset == 'mnist' and not config.geometric:
            means = [0]
            stds = [1]
        elif dataset == 'acasxu':
            means = [1.9791091e+04, 0.0, 0.0, 650.0, 600.0]
            stds = [60261.0, 6.28318530718, 6.28318530718, 1100.0, 1200.0]
        else:
            means = [0]
            stds = [1]
    is_trained_with_pytorch = is_trained_with_pytorch or is_onnx

    if config.mean is not None:
        means = config.mean
        stds = config.std
    return eran
Example #14
def main():
    parser = argparse.ArgumentParser(description='Analyze NN.')
    parser.add_argument('--net', type=str, help='Neural network to analyze')
    parser.add_argument('--dataset', type=str, default='mnist', help='Dataset')
    parser.add_argument('--data_dir',
                        type=str,
                        help='Directory which contains data')
    parser.add_argument('--data_root',
                        type=str,
                        help='Directory which contains data')
    parser.add_argument('--num_params',
                        type=int,
                        default=0,
                        help='Number of transformation parameters')
    parser.add_argument('--num_tests',
                        type=int,
                        default=None,
                        help='Number of images to test')
    parser.add_argument('--from_test',
                        type=int,
                        default=0,
                        help='Index of the first image to test')
    parser.add_argument('--test_idx',
                        type=int,
                        default=None,
                        help='Index to test')
    parser.add_argument('--debug',
                        action='store_true',
                        help='Whether to display debug info')
    parser.add_argument('--attack',
                        action='store_true',
                        help='Whether to attack')
    parser.add_argument('--timeout_lp',
                        type=float,
                        default=1,
                        help='timeout for the LP solver')
    parser.add_argument('--timeout_milp',
                        type=float,
                        default=1,
                        help='timeout for the MILP solver')
    parser.add_argument(
        '--use_area_heuristic',
        type=str2bool,
        default=True,
        help='whether to use area heuristic for the DeepPoly ReLU approximation'
    )
    args = parser.parse_args()

    #LRJ add
    deepsymbol_input_folder = args.data_dir + '/Batch_Input_DeepG'
    #print(deepsymbol_input_folder)
    if not os.path.exists(deepsymbol_input_folder):
        os.makedirs(deepsymbol_input_folder)
    else:
        for root, dirs, files in os.walk(deepsymbol_input_folder):
            for name in files:
                if name.endswith('.in'):
                    os.remove(os.path.join(root, name))
                    #print("delete "+os.path.join(root,name))

    global n_rows, n_cols, n_channels
    if args.dataset == 'cifar10':
        n_rows, n_cols, n_channels = 32, 32, 3
    else:
        n_rows, n_cols, n_channels = 28, 28, 1

    filename, file_extension = os.path.splitext(args.net)

    is_trained_with_pytorch = False
    is_saved_tf_model = False

    if (file_extension == ".net" or file_extension == ".pyt"):
        is_trained_with_pytorch = True
    elif (file_extension == ".meta"):
        is_saved_tf_model = True
    elif (file_extension != ".tf"):
        print("file extension not supported")
        exit(1)

    is_conv = False

    if (is_saved_tf_model):
        netfolder = os.path.dirname(args.net)

        tf.logging.set_verbosity(tf.logging.ERROR)

        sess = tf.Session()
        saver = tf.train.import_meta_graph(args.net)
        saver.restore(sess, tf.train.latest_checkpoint(netfolder + '/'))
        eran = ERAN(sess.graph.get_tensor_by_name('logits:0'), sess)
    else:
        if args.dataset == 'mnist' or args.dataset == 'fashion':
            num_pixels = 784
        else:
            num_pixels = 3072
        model, is_conv, means, stds = read_net(args.net, num_pixels,
                                               is_trained_with_pytorch)
        eran = ERAN(model)

    csvfile = open('../../code/datasets/{}_test.csv'.format(args.dataset), 'r')
    tests = csv.reader(csvfile, delimiter=',')

    total, attacked, standard_correct, tot_time = 0, 0, 0, 0
    correct_box, correct_poly = 0, 0
    cver_box, cver_poly = [], []

    for i, test in enumerate(tests):
        if args.test_idx is not None and i != args.test_idx:
            continue

        attacks_file = os.path.join(args.data_dir, 'attack_{}.csv'.format(i))
        if args.num_tests is not None and i >= args.num_tests:
            break
        print('Test {}:'.format(i))

        #LRJ add
        current_test = i

        if args.dataset == 'mnist' or args.dataset == 'fashion':
            image = np.float64(test[1:len(test)])
        else:
            if is_trained_with_pytorch:
                image = np.float64(test[1:len(test)])
            else:
                image = np.float64(test[1:len(test)]) - 0.5

        spec_lb = np.copy(image)
        spec_ub = np.copy(image)

        if (is_trained_with_pytorch):
            normalize(spec_lb, means, stds, args.dataset, is_conv)
            normalize(spec_ub, means, stds, args.dataset, is_conv)

        label, nn, nlb, nub = eran.analyze_box(spec_lb, spec_ub, 'deeppoly',
                                               args.timeout_lp,
                                               args.timeout_milp,
                                               args.use_area_heuristic)
        print('Label: ', label)

        if label != int(test[0]):
            print('Label {}, but true label is {}, skipping...'.format(
                label, int(test[0])))
            print('Standard accuracy: {} percent'.format(standard_correct /
                                                         float(i + 1) * 100))
            continue
        else:
            standard_correct += 1
            print('Standard accuracy: {} percent'.format(standard_correct /
                                                         float(i + 1) * 100))

        dim = n_rows * n_cols * n_channels

        ok_box, ok_poly = True, True
        k = args.num_params + 1 + 1 + dim

        attack_imgs, checked, attack_pass = [], [], 0
        cex_found = False
        if args.attack:
            with open(attacks_file, 'r') as fin:
                lines = fin.readlines()
                for j in range(0, len(lines), args.num_params + 1):
                    params = [
                        float(line[:-1])
                        for line in lines[j:j + args.num_params]
                    ]
                    tokens = lines[j + args.num_params].split(',')
                    values = np.array(list(map(float, tokens)))

                    attack_lb = values[::2]
                    attack_ub = values[1::2]

                    if is_trained_with_pytorch:
                        normalize(attack_lb, means, stds, args.dataset,
                                  is_conv)
                        normalize(attack_ub, means, stds, args.dataset,
                                  is_conv)
                    else:
                        attack_lb -= 0.5
                        attack_ub -= 0.5
                    attack_imgs.append((params, attack_lb, attack_ub))
                    checked.append(False)

                    predict_label, _, _, _ = eran.analyze_box(
                        attack_lb[:dim], attack_ub[:dim], 'deeppoly',
                        args.timeout_lp, args.timeout_milp,
                        args.use_area_heuristic, 0)
                    if predict_label != int(test[0]):
                        print('counter-example, params: ', params,
                              ', predicted label: ', predict_label)
                        cex_found = True
                        break
                    else:
                        attack_pass += 1
        print('tot attacks: ', len(attack_imgs))
        specs_file = os.path.join(args.data_dir, '{}.csv'.format(i))
        #LRJ add
        spec_lb_all = []
        spec_ub_all = []
        begtime = time.time()
        with open(specs_file, 'r') as fin:
            lines = fin.readlines()
            print('Number of lines: ', len(lines))
            assert len(lines) % k == 0

            spec_lb = np.zeros(args.num_params + dim)
            spec_ub = np.zeros(args.num_params + dim)

            expr_size = args.num_params
            lexpr_cst, uexpr_cst = [], []
            lexpr_weights, uexpr_weights = [], []
            lexpr_dim, uexpr_dim = [], []

            ver_chunks_box, ver_chunks_poly, tot_chunks = 0, 0, 0

            for i, line in enumerate(lines):
                if i % k < args.num_params:
                    # read specs for the parameters
                    values = np.array(list(map(float, line[:-1].split(' '))))
                    assert values.shape[0] == 2
                    param_idx = i % k
                    spec_lb[dim + param_idx] = values[0]
                    spec_ub[dim + param_idx] = values[1]
                    if args.debug:
                        print('parameter %d: [%.4f, %.4f]' %
                              (param_idx, values[0], values[1]))
                elif i % k == args.num_params:
                    # read interval bounds for image pixels
                    values = np.array(list(map(float, line[:-1].split(','))))
                    #print(values)
                    #print(len(values))
                    spec_lb[:dim] = values[::2]
                    spec_ub[:dim] = values[1::2]
                    # if args.debug:
                    #     show_ascii_spec(spec_lb, spec_ub)
                elif i % k < k - 1:
                    # read polyhedra constraints for image pixels
                    tokens = line[:-1].split(' ')
                    assert len(tokens) == 2 + 2 * args.num_params + 1

                    bias_lower, weights_lower = float(tokens[0]), list(
                        map(float, tokens[1:1 + args.num_params]))
                    assert tokens[args.num_params + 1] == '|'
                    bias_upper, weights_upper = float(
                        tokens[args.num_params + 2]), list(
                            map(float, tokens[3 + args.num_params:]))

                    assert len(weights_lower) == args.num_params
                    assert len(weights_upper) == args.num_params

                    lexpr_cst.append(bias_lower)
                    uexpr_cst.append(bias_upper)
                    for j in range(args.num_params):
                        lexpr_dim.append(dim + j)
                        uexpr_dim.append(dim + j)
                        lexpr_weights.append(weights_lower[j])
                        uexpr_weights.append(weights_upper[j])
                else:
                    assert (line == 'SPEC_FINISHED\n')
                    for p_idx in range(args.num_params):
                        lexpr_cst.append(spec_lb[dim + p_idx])
                        for l in range(args.num_params):
                            lexpr_weights.append(0)
                            lexpr_dim.append(dim + l)
                        uexpr_cst.append(spec_ub[dim + p_idx])
                        for l in range(args.num_params):
                            uexpr_weights.append(0)
                            uexpr_dim.append(dim + l)
                    #LRJ Add data before normalize
                    if len(spec_lb_all):
                        spec_lb_all = np.minimum(spec_lb_all, spec_lb[:dim])
                    else:
                        spec_lb_all = list(spec_lb[:dim])

                    if len(spec_ub_all):
                        spec_ub_all = np.maximum(spec_ub_all, spec_ub[:dim])
                    else:
                        spec_ub_all = list(spec_ub[:dim])
                    #print(spec_lb_all)

                    if (is_trained_with_pytorch):
                        normalize(spec_lb[:dim], means, stds, args.dataset,
                                  is_conv)
                        normalize(spec_ub[:dim], means, stds, args.dataset,
                                  is_conv)
                    normalize_poly(args.num_params, lexpr_cst, lexpr_weights,
                                   lexpr_dim, uexpr_cst, uexpr_weights,
                                   uexpr_dim, means, stds, args.dataset)

                    for attack_idx, (attack_params, attack_lb,
                                     attack_ub) in enumerate(attack_imgs):
                        ok_attack = True
                        for j in range(num_pixels):
                            low, up = lexpr_cst[j], uexpr_cst[j]
                            for idx in range(args.num_params):
                                low += lexpr_weights[j * args.num_params +
                                                     idx] * attack_params[idx]
                                up += uexpr_weights[j * args.num_params +
                                                    idx] * attack_params[idx]
                            if low > attack_lb[j] + EPS or attack_ub[
                                    j] > up + EPS:
                                ok_attack = False
                        if ok_attack:
                            checked[attack_idx] = True
                            # print('checked ', attack_idx)
                    if args.debug:
                        print('Running the analysis...')

                    t_begin = time.time()
                    perturbed_label_poly, _, _, _ = eran.analyze_box(
                        spec_lb, spec_ub, 'deeppoly', args.timeout_lp,
                        args.timeout_milp, args.use_area_heuristic, 0,
                        lexpr_weights, lexpr_cst, lexpr_dim, uexpr_weights,
                        uexpr_cst, uexpr_dim, expr_size)
                    perturbed_label_box, _, _, _ = eran.analyze_box(
                        spec_lb[:dim], spec_ub[:dim], 'deeppoly',
                        args.timeout_lp, args.timeout_milp,
                        args.use_area_heuristic, 0)
                    t_end = time.time()
                    #LRJ add normalized data below
                    #print(spec_lb[:dim])
                    #if len(spec_lb_all):
                    #    spec_lb_all=np.minimum(spec_lb_all,spec_lb[:dim])
                    #else:
                    #    spec_lb_all=spec_lb[:dim]

                    #if len(spec_ub_all):
                    #    spec_ub_all=np.maximum(spec_ub_all,spec_ub[:dim])
                    #else:
                    #    spec_ub_all=spec_ub[:dim]

                    print('DeepG: ', perturbed_label_poly, '\tInterval: ',
                          perturbed_label_box, '\tlabel: ', label,
                          '[Time: %.4f]' % (t_end - t_begin))

                    tot_chunks += 1
                    if perturbed_label_box != label:
                        ok_box = False
                    else:
                        ver_chunks_box += 1

                    if perturbed_label_poly != label:
                        ok_poly = False
                    else:
                        ver_chunks_poly += 1

                    lexpr_cst, uexpr_cst = [], []
                    lexpr_weights, uexpr_weights = [], []
                    lexpr_dim, uexpr_dim = [], []
        #LRJ add
        #print(len(spec_lb_all))
        #print(len(spec_ub_all))
        in_file = os.path.join(deepsymbol_input_folder,
                               'in_{}.in'.format(current_test))
        fp = open(in_file, "w")
        for index in range(len(spec_lb_all)):
            fp.write(
                str(spec_lb_all[index]) + ' ' + str(spec_ub_all[index]) + '\n')
        fp.close()

        total += 1
        if ok_box:
            correct_box += 1
        if ok_poly:
            correct_poly += 1
        if cex_found:
            assert (not ok_box) and (not ok_poly)
            attacked += 1
        cver_poly.append(ver_chunks_poly / float(tot_chunks))
        cver_box.append(ver_chunks_box / float(tot_chunks))
        tot_time += time.time() - begtime

        print('Verified[box]: {}, Verified[poly]: {}, CEX found: {}'.format(
            ok_box, ok_poly, cex_found))
        assert not cex_found or not ok_box, 'ERROR! Found counter-example, but image was verified with box!'
        assert not cex_found or not ok_poly, 'ERROR! Found counter-example, but image was verified with poly!'

        print('Attacks found: %.2f percent, %d/%d' %
              (100.0 * attacked / total, attacked, total))
        print('[Box]  Provably robust: %.2f percent, %d/%d' %
              (100.0 * correct_box / total, correct_box, total))
        print('[Poly] Provably robust: %.2f percent, %d/%d' %
              (100.0 * correct_poly / total, correct_poly, total))
        print('Empirically robust: %.2f percent, %d/%d' %
              (100.0 * (total - attacked) / total, total - attacked, total))
        print('[Box]  Average chunks verified: %.2f percent' %
              (100.0 * np.mean(cver_box)))
        print('[Poly]  Average chunks verified: %.2f percent' %
              (100.0 * np.mean(cver_poly)))
        print('Average time: ', tot_time / total)
        print(
            'Verified:%d Box Robust:%d Poly Robust:%d Box Robust Percentage:%.2f Poly Robust Percentage:%.2f'
            % (total, correct_box, correct_poly, 100.0 * correct_box / total,
               100.0 * correct_poly / total))
Example #15
    return (y * std_deviations) + means


args = {
    "complete": False,
    "timeout_lp": 1,
    "timeout_milp": 1,
    "use_area_heuristic": True,
}

netname = sys.argv[1]
filename, file_extension = os.path.splitext(netname)

num_pixels = 5
model, is_conv, means, stds = read_net(netname, num_pixels, False)
eran = ERAN(model)


def extract_from_expr(expr, coeffs=5):
    coefficients = []
    for i in range(coeffs):
        coeff = elina_linexpr0_coeffref(expr, i)
        assert coeff.contents.discr == 1
        interval = coeff.contents.val.interval.contents
        assert interval.inf.contents.discr == 0
        assert interval.sup.contents.discr == 0
        inf = interval.inf.contents.val.dbl
        sup = interval.sup.contents.val.dbl
        coefficients.append([inf, sup])
    cst = elina_linexpr0_cstref(expr)
    assert cst.contents.discr == 1
Example #16
def _acasxu_recursive(specLB, specUB, model, eran: ERAN, constraints, failed_already, max_depth=10, depth=0,
                      permitted_depth_extensions=3, depth_extension=0,
                      domain="deeppoly", timeout_lp=1, timeout_milp=1,
                      use_default_heuristic=True, complete=True) \
        -> Tuple[bool, Optional[Sequence[Tuple[np.ndarray, Tuple[int, int, float]]]]]:
    # Returns whether the property holds, and potentially counterexamples as witnesses of violation.
    # The counterexamples are returned together with the atomic comparison constraint they violate.
    hold, nn, nlb, nub, _, _ = \
        eran.analyze_box(specLB, specUB, domain, timeout_lp, timeout_milp, use_default_heuristic, constraints)
    if hold:
        return True, []
    elif depth >= max_depth:
        if failed_already.value and complete:  # failed_already.value means "not falsified already"
            verified_flag, adv_examples, adv_values, adv_violated_constraints = \
                verify_network_with_milp(nn, specLB, specUB, nlb, nub, constraints)
            xs = []
            if verified_flag:
                return True, []
            else:
                if adv_examples is not None:
                    for adv_image, violated_constraint, adv_value in \
                            zip(adv_examples, adv_violated_constraints, adv_values):
                        # counterexamples found with MILP may be improper for constraints
                        # with multiple or-clauses: the or-clauses are treated individually,
                        # so a returned counterexample may violate an individual or-clause
                        # without necessarily violating the whole or-term.
                        hold, _, nlb, nub, _, x = eran.analyze_box(
                            adv_image, adv_image, domain, timeout_lp,
                            timeout_milp, use_default_heuristic, constraints)
                        # print(f'hold={hold}, adv_image={adv_image}')
                        if not hold:
                            info(
                                f"property violated at {adv_image} with violation {adv_value} output_score {nlb[-1]}"
                            )
                            failed_already.value = 0
                            # break  # do not break, try to find many counterexamples
                        else:  # spurious counterexample
                            info(
                                f"spurious counterexample found: {adv_image}  with violation {adv_value} "
                                f"output_score {nlb[-1]}")
                            # for spurious counterexamples we continue search, maybe we will still find a proper cx
                        xs.append((adv_image, violated_constraint, adv_value))
            if len(xs) > 0:  # counterexample found
                return False, xs
            # if verified_flag was false, but no concrete counterexamples were found
            # continue with bounds splitting (perform depth_extension) unless the number
            # of permitted depth extensions was also reached
            if depth_extension >= permitted_depth_extensions:
                return False, None
            # debug(f"Extending Recursion Depth ({depth_extension+1}. time)")
            depth_extension += 1
            depth = 0
        else:
            return False, None

    # grads = _estimate_grads(specLB, specUB, model, input_shape=eran.input_shape)
    # grads + small epsilon so if gradient estimation becomes 0 it will divide the biggest interval.
    # smears = np.multiply(grads + 0.00001, [u-l for u, l in zip(specUB, specLB)])

    nn.set_last_weights(constraints)
    grads_lower, grads_upper = nn.back_propagate_gradient(nlb, nub)
    smears = [
        max(-grad_l, grad_u) * (u - l) for grad_l, grad_u, l, u in zip(
            grads_lower, grads_upper, specLB, specUB)
    ]

    index = np.argmax(smears)
    m = (specLB[index] + specUB[index]) / 2

    result = failed_already.value
    xs = []
    if result:
        result1, xs1 = _acasxu_recursive(
            specLB, [ub if i != index else m for i, ub in enumerate(specUB)],
            model, eran, constraints, failed_already, max_depth, depth + 1,
            permitted_depth_extensions, depth_extension, domain, timeout_lp,
            timeout_milp, use_default_heuristic, complete)
        result = result and result1
        if xs1 is not None:
            xs.extend(xs1)
    if result:
        result2, xs2 = _acasxu_recursive(
            [lb if i != index else m for i, lb in enumerate(specLB)], specUB,
            model, eran, constraints, failed_already, max_depth, depth + 1,
            permitted_depth_extensions, depth_extension, domain, timeout_lp,
            timeout_milp, use_default_heuristic, complete)
        result = result and result2
        if xs2 is not None:
            xs.extend(xs2)
    return result, xs
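
The split heuristic in the middle of the function bisects the input dimension with the largest "smear", i.e. the width of its interval scaled by the worst-case bound on the output gradient. A standalone numeric illustration (the gradient bounds and the box are made-up values):

import numpy as np

grads_lower = np.array([-0.20, -0.05])    # placeholder lower gradient bounds
grads_upper = np.array([0.30, 0.10])      # placeholder upper gradient bounds
specLB = np.array([0.0, 0.0])
specUB = np.array([1.0, 2.0])

smears = np.maximum(-grads_lower, grads_upper) * (specUB - specLB)   # [0.3, 0.2]
index = int(np.argmax(smears))                                       # dimension 0 is split first
m = (specLB[index] + specUB[index]) / 2                              # bisect at the midpoint
print(index, m)                                                      # 0 0.5
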
Example #17
    i += 1
    if label == corr_label:
        prec += 1 
    if i % 100 == 0:
        print( prec , '/', i )
exit()
'''

from clever_wolf import *
import sys
sys.path.insert(0, '../ELINA/python_interface/')
from eran import ERAN
from config import config
config.dyn_krelu = False
config.numproc_krelu = 1
eran = ERAN(cut_model.tf_output, is_onnx=False)
imgLB = np.copy(img)
imgUB = np.copy(img)
label, nn, nlb, nub = eran.analyze_box(imgLB, imgUB, 'deepzono', 1, 1, True)
assert label == cut_model.y_true

cut_model.orig_image = image.copy()

if dataset == 'acasxu' or dataset == 'mortgage':
    corr_label = cut_model.y_true

# Create specLB/UB
if dataset == 'mnist':
    specLB = np.clip(image - epsilon, 0, 1)
    specUB = np.clip(image + epsilon, 0, 1)
elif dataset == 'mortgage':
Example #18
def verify_plain(network_file: str, means: np.ndarray, stds: np.ndarray,
                 input_box: List[Tuple[np.ndarray, np.ndarray]],
                 output_constraints: List[List[Tuple[int, int, float]]],
                 timeout_lp=1, timeout_milp=1, use_default_heuristic=True, use_milp=True, progress_bar=True) \
        -> Optional[Sequence[Tuple[np.ndarray, Tuple[int, int, float], Optional[float]]]]:
    """
    Verifies a neural network. This method also supports convolutional networks,
    but has much lower precision than ``verify_acasxu_style``, because
    it does not split the input bounds.

    Returns None if no counterexamples could be found and the network could not be verified.
    Returns an empty list if the network could be verified.
    Returns a list of inputs for which the network violates the constraints if the network could not be verified.
    """
    domain = "deeppoly"

    model, is_conv = read_onnx_net(network_file)
    eran = ERAN(model, is_onnx=True)

    specLB = [interval[0] for interval in input_box]
    specUB = [interval[1] for interval in input_box]
    _normalize(specLB, means, stds)
    _normalize(specUB, means, stds)

    counterexample_list = []
    # adex_holds stores whether the property still holds at x_adex (below);
    # if adex_holds is True, then x_adex is a spurious counterexample
    adex_holds = True

    with tqdm(total=3, disable=not progress_bar) as progress_bar:
        verified_flag, nn, nlb, nub, _, x_adex = eran.analyze_box(
            specLB, specUB, domain, timeout_lp, timeout_milp,
            use_default_heuristic, output_constraints)
        progress_bar.update()

        if not verified_flag and x_adex is not None:
            adex_holds, _, _, _, _, _ = eran.analyze_box(
                x_adex, x_adex, "deeppoly", timeout_lp, timeout_milp,
                use_default_heuristic, output_constraints)
            if not adex_holds:
                verified_flag = False
                # we need to undo the input normalisation that was applied to the counterexamples
                counterexample_list.append((np.array(x_adex) * stds + means,
                                            output_constraints, None))
        progress_bar.update()

        # try to find a counterexample with milp solving
        if not verified_flag and adex_holds and use_milp:
            verified_flag, adv_examples, adv_values, adv_violated_constraints = \
                verify_network_with_milp(nn, specLB, specUB, nlb, nub, output_constraints)
            if not verified_flag:
                # compare with milp solving code above
                if adv_examples is not None:
                    for adv_image, violated_constraint, adv_value in \
                            zip(adv_examples, adv_violated_constraints, adv_values):
                        hold, _, nlb, nub, _, x = eran.analyze_box(
                            adv_image, adv_image, domain, timeout_lp,
                            timeout_milp, use_default_heuristic,
                            output_constraints)
                        if not hold:
                            info(
                                f"property violated at {adv_image} with violation {adv_value} output_score {nlb[-1]}"
                            )
                        else:
                            info(
                                f"spurious counterexample found: {adv_image} with violation {adv_value} "
                                f"output_score {nlb[-1]}")
                        counterexample_list.append(
                            (np.array(adv_image) * stds + means,
                             violated_constraint, adv_value))
        progress_bar.update()

    if not verified_flag and len(counterexample_list) > 0:
        info(
            f"Property not verified with counterexamples: {counterexample_list}."
        )
        return counterexample_list
    elif not verified_flag:
        info(f"Property not verified without counterexamples.")
        # raise RuntimeError("Property disproven, but no counterexample found.")
        return None
    else:
        info(f"Property verified.")
        return []
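
Following the docstring, the return value distinguishes three outcomes; a minimal handling sketch (the network file, normalisation, box, and constraint are placeholders):

import numpy as np

means, stds = np.zeros(5), np.ones(5)                                # placeholder normalisation
input_box = [(np.float64(0.0), np.float64(1.0)) for _ in range(5)]
constraints = [[(0, 1, 0.0)]]                                        # placeholder output constraint

outcome = verify_plain("net.onnx", means, stds, input_box, constraints)  # hypothetical ONNX file
if outcome is None:
    print("not verified, and no counterexample found")
elif len(outcome) == 0:
    print("verified")
else:
    for x, violated_constraint, value in outcome:
        print("counterexample:", x, violated_constraint, value)
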
Example #19
    print("netname ", netname, " specnumber ", specnumber, " domain ", domain,
          " dataset ", dataset, "args complete ", args.complete, " complete ",
          complete, " timeout_lp ", args.timeout_lp)
else:
    print("netname ", netname, " epsilon ", epsilon, " domain ", domain,
          " dataset ", dataset, "args complete ", args.complete, " complete ",
          complete, " timeout_lp ", args.timeout_lp)
if (is_saved_tf_model):
    netfolder = os.path.dirname(netname)

    tf.logging.set_verbosity(tf.logging.ERROR)

    sess = tf.Session()
    saver = tf.train.import_meta_graph(netname)
    saver.restore(sess, tf.train.latest_checkpoint(netfolder + '/'))
    eran = ERAN(sess.graph.get_tensor_by_name('logits:0'), sess)

else:
    if (dataset == 'mnist'):
        num_pixels = 784
    elif (dataset == 'cifar10'):
        num_pixels = 3072
    else:
        num_pixels = 5
    model, is_conv, means, stds = read_net(netname, num_pixels,
                                           is_trained_with_pytorch)
    eran = ERAN(model)

correctly_classified_images = 0
verified_images = 0
total_images = 0
Example #20
                saver = tf.train.import_meta_graph(netname)
                saver.restore(sess, tf.train.latest_checkpoint(netfolder+'/'))
            else:
                with tf.gfile.GFile(netname, "rb") as f:
                    graph_def = tf.GraphDef()
                    graph_def.ParseFromString(f.read())
                    sess.graph.as_default()
                    tf.graph_util.import_graph_def(graph_def, name='')

            ops = sess.graph.get_operations()
            last_layer_index = -1
            while ops[last_layer_index].type in non_layer_operation_types:
                last_layer_index -= 1
            out_tensor = sess.graph.get_tensor_by_name(ops[last_layer_index].name + ':0')
            try:
                eran = ERAN(out_tensor, sess)
            except Exception as e:
                tested_file.write(', '.join([dataset, network, 'ERAN parse error message: ' + str(e)]) + '\n')
                tested_file.flush()
                continue

        else:
            if(dataset=='mnist'):
                num_pixels = 784
            elif (dataset=='cifar10'):
                num_pixels = 3072
            elif(dataset=='acasxu'):
                num_pixels = 5
            if is_onnx:
                model, is_conv = read_onnx_net(netname)
                # this is a hack and should be done nicer
Example #21
    def __init__(self, model):
        self.eran = ERAN(model=model, is_onnx=True)
Example #22
class EranVerifier:
    """Wrapper object for the ERAN verifier to have a clean interface with proper types."""
    __TIMEOUT_LP = 1_000_000
    __TIMEOUT_MILP = 1_000_000

    def __init__(self, model):
        self.eran = ERAN(model=model, is_onnx=True)

    def analyze_classification_box(self, bounds: Interval):
        (dominant_class, _, nlb, nub,
         _) = self.eran.analyze_box(specLB=bounds.lower_bound,
                                    specUB=bounds.upper_bound,
                                    domain='deeppoly',
                                    timeout_lp=EranVerifier.__TIMEOUT_LP,
                                    timeout_milp=EranVerifier.__TIMEOUT_MILP,
                                    use_default_heuristic=True,
                                    testing=True)
        return dominant_class, nlb, nub

    def analyze_classification_linear(self, bounds: Interval,
                                      constraints: LinearBounds,
                                      params: List[Interval]):
        res = self.__eran_resources(bounds, constraints, params)

        (dominant_class, _, nlb, nub,
         _) = self.eran.analyze_box(specLB=res.lower_bound,
                                    specUB=res.upper_bound,
                                    domain='deeppoly',
                                    timeout_lp=EranVerifier.__TIMEOUT_LP,
                                    timeout_milp=EranVerifier.__TIMEOUT_MILP,
                                    lexpr_weights=res.lexpr_weights,
                                    lexpr_cst=res.lexpr_cst,
                                    lexpr_dim=res.lexpr_dim,
                                    uexpr_weights=res.uexpr_weights,
                                    uexpr_cst=res.uexpr_cst,
                                    uexpr_dim=res.uexpr_dim,
                                    expr_size=res.expr_size,
                                    use_default_heuristic=True,
                                    testing=True)
        return dominant_class, nlb, nub

    def analyze_segmentation_box(self, bounds: Interval,
                                 correct_labels: np.ndarray,
                                 valid_labels: np.ndarray,
                                 num_total_classes: int):
        (dominant_classes, nlb,
         nub) = EranVerifier.__analyze_segmentation_eran(
             eran=self.eran,
             true_labels=correct_labels,
             valid_classes=valid_labels,
             num_total_classes=num_total_classes,
             specLB=bounds.lower_bound,
             specUB=bounds.upper_bound,
             domain='deeppoly',
             timeout_lp=EranVerifier.__TIMEOUT_LP,
             timeout_milp=EranVerifier.__TIMEOUT_MILP,
             use_default_heuristic=True,
             testing=True)
        return dominant_classes, nlb, nub

    def analyze_segmentation_linear(self, bounds: Interval,
                                    constraints: LinearBounds,
                                    params: List[Interval],
                                    correct_labels: np.ndarray,
                                    valid_labels: np.ndarray,
                                    num_total_classes: int):
        res = self.__eran_resources(bounds, constraints, params)

        (dominant_classes, nlb,
         nub) = EranVerifier.__analyze_segmentation_eran(
             eran=self.eran,
             true_labels=correct_labels,
             valid_classes=valid_labels,
             num_total_classes=num_total_classes,
             specLB=res.lower_bound,
             specUB=res.upper_bound,
             domain='deeppoly',
             timeout_lp=EranVerifier.__TIMEOUT_LP,
             timeout_milp=EranVerifier.__TIMEOUT_MILP,
             lexpr_weights=res.lexpr_weights,
             lexpr_cst=res.lexpr_cst,
             lexpr_dim=res.lexpr_dim,
             uexpr_weights=res.uexpr_weights,
             uexpr_cst=res.uexpr_cst,
             uexpr_dim=res.uexpr_dim,
             expr_size=res.expr_size,
             use_default_heuristic=True,
             testing=True)
        return dominant_classes, nlb, nub

    # adapted from ERAN/tf_verify/ERAN to support segmentation
    @staticmethod
    def __analyze_segmentation_eran(eran: ERAN,
                                    true_labels,
                                    valid_classes,
                                    num_total_classes,
                                    specLB,
                                    specUB,
                                    domain,
                                    timeout_lp,
                                    timeout_milp,
                                    use_default_heuristic,
                                    lexpr_weights=None,
                                    lexpr_cst=None,
                                    lexpr_dim=None,
                                    uexpr_weights=None,
                                    uexpr_cst=None,
                                    uexpr_dim=None,
                                    expr_size=0,
                                    testing=False,
                                    prop=-1,
                                    spatial_constraints=None):
        assert domain == "deeppoly", "domain isn't valid, must be 'deeppoly'"
        specLB = np.reshape(specLB, (-1, ))
        specUB = np.reshape(specUB, (-1, ))
        eran.nn = layers()
        eran.nn.specLB = specLB
        eran.nn.specUB = specUB

        execute_list, output_info = eran.optimizer.get_deeppoly(
            eran.nn, specLB, specUB, lexpr_weights, lexpr_cst, lexpr_dim,
            uexpr_weights, uexpr_cst, uexpr_dim, expr_size,
            spatial_constraints)
        analyzer = Analyzer(execute_list, eran.nn, domain, timeout_lp,
                            timeout_milp, None, use_default_heuristic, -1,
                            prop, testing)
        dominant_classes, nlb, nub = EranVerifier.__analyze_segmentation_analyzer(
            analyzer, true_labels, valid_classes, num_total_classes)
        return dominant_classes, nlb, nub

    # adapted from ERAN/tf_verify/Analyzer to support segmentation
    @staticmethod
    def __analyze_segmentation_analyzer(analyzer: Analyzer, true_labels,
                                        valid_classes, num_classes: int):

        element, nlb, nub = analyzer.get_abstract0()

        number_points = len(true_labels)
        dominant_classes = []

        for n in range(number_points):
            dominant_class = -1
            label_failed = []
            candidate_labels = valid_classes
            true_label = true_labels[n]

            certified = True
            for candidate_label in candidate_labels:
                point_offset = n * num_classes
                if candidate_label != true_label and not analyzer.is_greater(
                        analyzer.man, element, point_offset + true_label,
                        point_offset + candidate_label,
                        analyzer.use_default_heuristic):
                    certified = False
                    label_failed.append(candidate_label)
                    break
            if certified:
                dominant_class = true_label
            dominant_classes.append(dominant_class)

        elina_abstract0_free(analyzer.man, element)
        return dominant_classes, nlb, nub

    @staticmethod
    def __eran_resources(bounds: Interval, constraints: LinearBounds,
                         params: List[Interval]) -> EranResources:
        res = EranResources()

        res.lower_bound = np.append(bounds.lower_bound,
                                    [p.lower_bound for p in params])
        res.upper_bound = np.append(bounds.upper_bound,
                                    [p.upper_bound for p in params])

        expr_size = len(params)
        res.expr_size = expr_size
        num_indices = len(res.lower_bound.flatten())
        param_indices = np.arange(num_indices - expr_size, num_indices)

        res.lexpr_weights = np.append(constraints.lower_slope.flatten(),
                                      np.zeros(expr_size * expr_size))
        res.uexpr_weights = np.append(constraints.upper_slope.flatten(),
                                      np.zeros(expr_size * expr_size))

        res.lexpr_cst = np.append(constraints.lower_offset.flatten(),
                                  [p.lower_bound for p in params])
        res.uexpr_cst = np.append(constraints.upper_offset.flatten(),
                                  [p.upper_bound for p in params])

        res.lexpr_dim = np.repeat([param_indices], num_indices,
                                  axis=0).flatten()
        res.uexpr_dim = np.repeat([param_indices], num_indices,
                                  axis=0).flatten()

        assert len(res.lower_bound) == num_indices
        assert len(res.upper_bound) == num_indices
        assert len(res.lexpr_weights) == num_indices * expr_size
        assert len(res.uexpr_weights) == num_indices * expr_size
        assert len(res.lexpr_cst) == num_indices
        assert len(res.uexpr_cst) == num_indices
        assert len(res.lexpr_dim) == num_indices * expr_size
        assert len(res.uexpr_dim) == num_indices * expr_size
        return res
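A minimal usage sketch for the EranVerifier wrapper above (illustrative only: the keyword Interval constructor and the network path are assumptions; read_onnx_net and analyze_classification_box are used as defined in these examples):

import numpy as np

def certify_box(netname, image, epsilon):
    # read_onnx_net is the same loader used elsewhere in these examples.
    model, _ = read_onnx_net(netname)
    verifier = EranVerifier(model)
    # Interval is assumed to expose lower_bound/upper_bound arrays, as the
    # type hints above suggest; the keyword constructor here is hypothetical.
    bounds = Interval(lower_bound=np.clip(image - epsilon, 0.0, 1.0),
                      upper_bound=np.clip(image + epsilon, 0.0, 1.0))
    dominant_class, nlb, nub = verifier.analyze_classification_box(bounds)
    # A dominant_class of -1 means DeepPoly could not certify a single class.
    return dominant_class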
Example #23
0
    exit(1)


is_conv = False
mean = 0
std = 0

if(is_saved_tf_model):
    netfolder = os.path.dirname(netname) 

    tf.logging.set_verbosity(tf.logging.ERROR)

    sess = tf.Session()
    saver = tf.train.import_meta_graph(netname)
    saver.restore(sess, tf.train.latest_checkpoint(netfolder+'/'))
    eran = ERAN(sess.graph.get_tensor_by_name('logits:0'), sess)    

else:
    if(dataset=='mnist'):
        num_pixels = 784
    else:
        num_pixels = 3072

    model, is_conv, means, stds = read_net(netname, num_pixels, is_trained_with_pytorch)
    eran = ERAN(model)

correctly_classified_images = 0
verified_images = 0
total_images = 0

def normalize(image, means, stds):
Example #24
0
    sess = tf.Session()
    if is_saved_tf_model:
        saver = tf.train.import_meta_graph(netname)
        saver.restore(sess, tf.train.latest_checkpoint(netfolder + '/'))
    else:
        with tf.gfile.GFile(netname, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            sess.graph.as_default()
            tf.graph_util.import_graph_def(graph_def, name='')
    ops = sess.graph.get_operations()
    last_layer_index = -1
    while ops[last_layer_index].type in non_layer_operation_types:
        last_layer_index -= 1
    eran = ERAN(
        sess.graph.get_tensor_by_name(ops[last_layer_index].name + ':0'), sess)

else:
    if (zonotope_bool == True):
        num_pixels = len(zonotope)
    elif (dataset == 'mnist'):
        num_pixels = 784
    elif (dataset == 'cifar10'):
        num_pixels = 3072
    elif (dataset == 'acasxu'):
        num_pixels = 5
    if is_onnx:
        model, _ = read_onnx_net(netname)
        # this is to have different defaults for mnist and cifar10
    else:
        model, _, means, stds = read_tensorflow_net(netname, num_pixels,
Example #25
0
def clever_wolf(nn, cut_model, y_true, y_tar, specLB, specUB, domain, args):
    clever_start_time = time.time()
    if cut_model.y_tar is None:
        data, lb, ub = generate_initial_region_PGD(y_tar, 250)
        reset_pool((specLB, specUB))
        update_target_pool((y_true, y_tar))
        cut_model.update_target(y_true, y_tar)
        cut_model.reset_model(specLB, specUB)
        succ_attacks = data.shape[0]
        all_attacks = 250
        if not args.nowolf:
            samples = cut_model.sample_poly_under(250)
            pos_ex = sample_wolf_attacks(samples, 'pos_brent')
            succ_attacks += pos_ex.shape[0]
            all_attacks += 250
        print('Target', y_tar, succ_attacks, '/', all_attacks)
        if succ_attacks > 0:
            data, lb, ub = generate_initial_region_PGD(y_tar, 5000)
            reset_pool((specLB, specUB))
            update_target_pool((y_true, y_tar))
            cut_model.update_target(y_true, y_tar)
            cut_model.reset_model(specLB, specUB)

            if not args.nowolf:
                samples = cut_model.sample_poly_under(5000)
                pos_ex = sample_wolf_attacks(samples, 'pos_brent')
                if not pos_ex.shape[0] == 0:
                    data = np.concatenate((data, pos_ex))
            lb = np.min(data, axis=0)
            ub = np.max(data, axis=0)

            cut_model.update_bounds(lb, ub)
            cut_model.set_data(data)
            update_pool((lb, ub))
        else:
            return False
        s = time.time()
        config.dyn_krelu = False
        config.use_3relu = False
        config.use_2relu = False
        config.numproc_krelu = 24
        eran = ERAN(cut_model.tf_output, is_onnx=False)
        label, nn, nlb, nub = eran.analyze_box(lb, ub, 'deeppoly', 1, 1, True)
        print('Label:', label, 'Time:', time.time() - s, 's')
        if label == -1:
            cut_model.nlb = [np.array(lb) for lb in nlb]
            cut_model.nub = [np.array(ub) for ub in nub]
            lb, ub = cut_model.overapprox_box()
            cut_model.nlb.insert(0, lb)
            cut_model.nub.insert(0, ub)
        else:
            cut_model.save(model_name)
            print('Verified, time:', int(time.time() - clever_start_time))
            return True
    print('Init model')
    if args.obox_approx:
        cut_model.approx_obox = True
    process = psutil.Process(os.getpid())
    start_lp_sampling = args.nowolf
    method = None
    res = None
    hist = History(cut_model.input_size, cut_hist_size=5, update_hist_every=2)
    lp_params = (nn, domain, y_tar)
    wolf_params = [1000]
    for cut in range(args.max_cuts):

        sys.stdout.flush()

        print_vol(cut_model)

        gc.collect()
        print('\nCut:', cut, ', Time:', int(time.time() - clever_start_time),
              'sec,', 'Target:', y_tar, ',X_size', cut_model.data_size,
              ',Memory:',
              process.memory_info().rss / (1024 * 1024), '\n')

        if cut % args.save_every == 0:
            cut_model.save(model_name)

        if not start_lp_sampling and cut % args.choose_criterea_every == 0:
            method = choose_method(cut_model, hist, lp_params, wolf_params)
            print('Chosen new method:', method)
            if method is None:
                cut_model.save(model_name)
                print('Verified, time:', int(time.time() - clever_start_time))
                return True

        if not start_lp_sampling and method == 'Wolf':
            res = wolf_cut(cut_model, hist, *wolf_params)

        if start_lp_sampling or res == False or method == 'LP':
            if not start_lp_sampling and not method == 'LP':
                start_lp_sampling = start_lp_sampling or res == False
            verified, _ = lp_cut(cut_model, hist, *lp_params)

            if verified:
                cut_model.save(model_name)
                print('Verified, time:', int(time.time() - clever_start_time))
                return True

    cut_model.shrink_poly(nn, domain, y_tar)
    cut_model.save(model_name)
    print_vol(cut_model)
    print('Verified, time:', int(time.time() - clever_start_time))
    return True
    sess = tf.Session()
    if is_saved_tf_model:
        saver = tf.train.import_meta_graph(netname)
        saver.restore(sess, tf.train.latest_checkpoint(netfolder + '/'))
    else:
        with tf.gfile.GFile(netname, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            sess.graph.as_default()
            tf.graph_util.import_graph_def(graph_def, name='')
    ops = sess.graph.get_operations()
    last_layer_index = -1
    while ops[last_layer_index].type in non_layer_operation_types:
        last_layer_index -= 1
    eran = ERAN(
        sess.graph.get_tensor_by_name(ops[last_layer_index].name + ':0'), sess)

else:
    if (zonotope_bool == True):
        num_pixels = len(zonotope)
    elif (dataset == 'mnist'):
        num_pixels = 784
    elif (dataset == 'cifar10'):
        num_pixels = 3072
    elif (dataset == 'acasxu'):
        num_pixels = 5
    if is_onnx:
        model, is_conv = read_onnx_net(netname)
        # this is to have different defaults for mnist and cifar10
    else:
        model, is_conv, means, stds = read_tensorflow_net(
Example #27
0
                              tf.train.latest_checkpoint(netfolder + '/'))
            else:
                with tf.gfile.GFile(netname, "rb") as f:
                    graph_def = tf.GraphDef()
                    graph_def.ParseFromString(f.read())
                    sess.graph.as_default()
                    tf.graph_util.import_graph_def(graph_def, name='')

            ops = sess.graph.get_operations()
            last_layer_index = -1
            while ops[last_layer_index].type in non_layer_operation_types:
                last_layer_index -= 1
            out_tensor = sess.graph.get_tensor_by_name(
                ops[last_layer_index].name + ':0')
            try:
                eran = ERAN(out_tensor, sess)
            except Exception as e:
                tested_file.write(', '.join([
                    dataset, network, 'ERAN parse error trace: ' +
                    traceback.format_exc()
                ]) + '\n\n\n')
                tested_file.flush()
                continue

        else:
            if (dataset == 'mnist'):
                num_pixels = 784
            elif (dataset == 'cifar10'):
                num_pixels = 3072
            elif (dataset == 'acasxu'):
                num_pixels = 5
Example #28
0
    sess = tf.Session()
    if is_saved_tf_model:
        saver = tf.train.import_meta_graph(netname)
        saver.restore(sess, tf.train.latest_checkpoint(netfolder+'/'))
    else:
        with tf.gfile.GFile(netname, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            sess.graph.as_default()
            tf.graph_util.import_graph_def(graph_def, name='')
    ops = sess.graph.get_operations()
    last_layer_index = -1
    while ops[last_layer_index].type in non_layer_operation_types:
        last_layer_index -= 1
    model = sess.graph.get_tensor_by_name(ops[last_layer_index].name + ':0')
    eran = ERAN(model, sess)

else:
    if(zonotope_bool==True):
        num_pixels = len(zonotope)
    elif(dataset=='mnist'):
        num_pixels = 784
    elif (dataset=='cifar10'):
        num_pixels = 3072
    elif(dataset=='acasxu'):
        num_pixels = 5
    if is_onnx:
        model, is_conv = read_onnx_net(netname)
    else:
        model, is_conv, means, stds = read_tensorflow_net(netname, num_pixels, is_trained_with_pytorch)
    eran = ERAN(model, is_onnx=is_onnx)
Example #29
0
def clever_wolf(nn, cut_model, y_true, y_tar, specLB, specUB, domain, args,
                pgd_args1, pgd_args2):
    try:
        delattr(cut_model, 'shrink_box_best')
    except:
        pass
    clever_start_time = time.time()
    if cut_model.y_tar is None:
        reset_pool((specLB, specUB))
        update_target_pool((y_true, y_tar))
        data, _, lb, ub = generate_initial_region_PGD(250, *pgd_args1)
        cut_model.update_target(y_true, y_tar)
        cut_model.reset_model(specLB, specUB)
        succ_attacks = data.shape[0]
        all_attacks = 250
        if not args.nowolf:
            samples = cut_model.sample_poly_under(250)
            pos_ex = sample_wolf_attacks(samples, 'pos_brent')
            succ_attacks += pos_ex.shape[0]
            all_attacks += 250

        data2, _, lb2, ub2 = generate_initial_region_PGD(250, *pgd_args2)
        succ_attacks += data2.shape[0]
        all_attacks += 250
        print('Target', y_tar, succ_attacks, '/', all_attacks)
        if succ_attacks > 0:
            reset_pool((specLB, specUB))
            update_target_pool((y_true, y_tar))
            cut_model.update_target(y_true, y_tar)
            cut_model.reset_model(specLB, specUB)

            data, bounds, lb, ub = generate_initial_region_PGD(
                2500, *pgd_args1)
            data2, bounds2, lb2, ub2 = generate_initial_region_PGD(
                2500, *pgd_args2)
            if lb is None and lb2 is None:
                data = np.zeros((0, cut_model.input_size))
                bounds = np.zeros(0)
            elif lb is None:
                data = data2
                bounds = bounds2
            elif lb2 is not None:
                data = np.concatenate((data, data2))
                bounds = np.concatenate((bounds, bounds2))

            if not args.nowolf:
                samples = cut_model.sample_poly_under(5000)
                pos_ex = sample_wolf_attacks(samples, 'pos_brent')
                if not pos_ex.shape[0] == 0:
                    data = np.concatenate((data, pos_ex))

            if data.shape[0] == 0:
                return False

            lb = np.min(data, axis=0)
            ub = np.max(data, axis=0)

            cut_model.update_bounds(lb, ub)
            cut_model.set_data(data)
            cut_model.bounds = bounds
            update_pool((lb, ub))
        else:
            return False
        s = time.time()
        config.dyn_krelu = False
        config.use_3relu = False
        config.use_2relu = False
        config.numproc_krelu = 24

        norm_ser = dill.dumps(normalize)
        denorm_ser = dill.dumps(denormalize)
        cut_model.norm_func = norm_ser
        cut_model.denorm_func = denorm_ser
        cut_model.dataset = dataset
        cut_model.is_conv = is_conv
        cut_model.norm_args = (means, stds)
        lb, ub = cut_model.overapprox_box()
        cut_model.orig_over_lb = lb
        cut_model.orig_over_ub = ub

        eran = ERAN(cut_model.tf_output, is_onnx=False)
        label, nn, nlb, nub = eran.analyze_box(lb, ub, 'deeppoly', 1, 1, True)
        print('Label:', label, 'Time:', time.time() - s, 's')
        if label == -1:
            cut_model.nlb = [np.array(lb) for lb in nlb]
            cut_model.nub = [np.array(ub) for ub in nub]
            lb, ub = cut_model.overapprox_box()
            cut_model.nlb.insert(0, lb)
            cut_model.nub.insert(0, ub)
        else:
            cut_model.shrink_box_best = cut_model.overapprox_box()
            cut_model.save(model_name)
            print('Verified, time:', int(time.time() - clever_start_time))
            print_vol(cut_model)
            return True
    print('Init model')
    if args.obox_approx:
        cut_model.approx_obox = True
    process = psutil.Process(os.getpid())
    start_lp_sampling = args.nowolf
    method = None
    res = None
    lp_params = (nn, domain, y_tar)
    wolf_params = [1000]

    shrink = cut_model.copy()
    if 'shrink_box_best' in dir(shrink):
        del shrink.shrink_box_best

    print_vol(cut_model)
    sys.stdout.flush()
    lbs, ubs, lb_max, ub_max = shrink.create_underapprox_box_lp(
        y_tar, shrink_to=args.obox_init, baseline=args.baseline)
    if args.baseline:
        print('\nBaseline, Time:', int(time.time() - clever_start_time),
              'sec,', 'Target:', y_tar, '\n')
    if lb_max is not None:
        data = cut_model.data
        cut_model.obox = None
        cut_model.ubox = None
        cut_model.update_target(y_true, y_tar)
        cut_model.reset_model(lb_max, ub_max)
        cut_model.update_bounds(lb_max, ub_max)
        cut_model.nlb = [np.array(lb) for lb in nlb]
        cut_model.nub = [np.array(ub) for ub in nub]
        cut_model.nlb.insert(0, lb_max)
        cut_model.nub.insert(0, ub_max)
        cut_model.data = data
    if lbs is not None:
        cut_model.shrink_box_best = (np.array(lbs), np.array(ubs))
    else:
        print('Failed to converge')
        return False

    print_vol(cut_model)
    if args.baseline:
        cut_model.save(model_name, baseline=True)
        return True
    cut_model.p_c = 0.9
    for cut in range(50):

        cut_model.it_cut = cut
        cut_model.p_c *= 0.95
        if cut >= args.max_cuts:
            cut_model.p_c = 0
        sys.stdout.flush()
        '''
        if dataset == 'mortgage':
            project = np.array( [ False ] * cut_model.input_size )
            project[ bounds_keys ] = True
            W, lb, ub = cut_model.denorm_W( project, means, stds )
            lb_sh, ub_sh = cut_model.shrink_box_best[0].copy(), cut_model.shrink_box_best[1].copy()
            denormalize( lb_sh, dataset, means, stds, False )
            denormalize( ub_sh, dataset, means, stds, False )
            lb_sh = lb_sh[bounds_keys] 
            ub_sh = ub_sh[bounds_keys] 
            center = None
            pos_ex = None
            neg_ex = None
            v2 = draw2d_region( W, lb, ub, lb_sh, ub_sh, model_name + '_reg' + str(cut) + '.png', ( bounds_lb, bounds_ub ), center=center, pos_ex=pos_ex, neg_ex=neg_ex, draw_hp=W.shape[0] - 1 )
            print( 'Volume:', v2 )
        '''

        gc.collect()
        print('\nCut:', cut, ', Time:', int(time.time() - clever_start_time),
              'sec,', 'Target:', y_tar, ',Memory:',
              process.memory_info().rss / (1024 * 1024), '\n')

        verified, _ = lp_cut(cut_model, *lp_params)

        if verified:
            if dataset == 'mortgage':
                project = np.array([False] * cut_model.input_size)
                project[bounds_keys] = True
                W, lb, ub = cut_model.denorm_W(project, means, stds)

                lb_sh, ub_sh = cut_model.shrink_box_best[0].copy(
                ), cut_model.shrink_box_best[1].copy()
                denormalize(lb_sh, dataset, means, stds, False)
                denormalize(ub_sh, dataset, means, stds, False)
                lb_sh = lb_sh[bounds_keys]
                ub_sh = ub_sh[bounds_keys]

                center = None
                pos_ex = None
                neg_ex = None
                v2 = draw2d_region(W,
                                   lb,
                                   ub,
                                   lb_sh,
                                   ub_sh,
                                   model_name + '_reg' + str(cut + 1) + '.png',
                                   (bounds_lb, bounds_ub),
                                   center=center,
                                   pos_ex=pos_ex,
                                   neg_ex=neg_ex,
                                   draw_hp=W.shape[0] - 1)
                print('Volume:', v2)
            cut_model.save(model_name)
            print('Verified, time:', int(time.time() - clever_start_time))
            print_vol(cut_model)
            return True
    print('Failed to converge')
    return False
Example #30
0
def main():
    """Runs the ERAN analysis on the pendulum_continuous model.

    This happens in a few steps:
    1. ERAN doesn't specifically support HardTanh layers, so we translate the
       HardTanh into an equivalent set of ReLU layers using convert_htanh().
    2. We then read the controller network into an ERAN model.
    3. We use ERAN/DeepPoly to extract an abstract value describing the
       network's behavior over the initial set. Modulo floating-point
       imprecision, this abstract value is essentially two affine transforms A
       and B such that Ax <= f(x) <= Bx for all x in the initial set.
    4. We compute Ax and Bx for a particular point (0.35, 0.35) right on the
       edge of the initial set, and show that the range determined by DeepPoly
       (even after applying a concrete HardTanh at the end) is wide enough to
       mark that point as unsafe even on the first iteration.
    """
    # (1) Translate the model into an equivalent one without HardTanh.
    with_htanh_filename = sys.argv[1]
    no_htanh_filename = "/ovol/pendulum_continuous.no_htanh.eran"
    convert_htanh(with_htanh_filename, no_htanh_filename)
    # (2) Read it into ERAN.
    num_pixels = 2
    model, _, _, _ = read_net(no_htanh_filename, num_pixels, False)
    eran = ERAN(model)

    # (3) Extract an abstract value over the initial set.
    # (3a) Load model and init set into ERAN.
    initLB = np.array([-0.35, -0.35])
    initUB = np.array([0.35, 0.35])

    nn = layers()
    nn.specLB = initLB
    nn.specUB = initUB
    execute_list = eran.optimizer.get_deeppoly(initLB, initUB)
    # NOTE: 9 is just a placeholder specnumber to tell it we're using
    # ACAS.
    analyzer = Analyzer(execute_list, nn, "deeppoly", args["timeout_lp"],
                        args["timeout_milp"], 9, args["use_area_heuristic"])

    # (3b) Perform the analysis and extract the abstract values.
    element, _, _ = analyzer.get_abstract0()

    lexpr = get_lexpr_for_output_neuron(analyzer.man, element, 0)
    uexpr = get_uexpr_for_output_neuron(analyzer.man, element, 0)

    lexpr = np.array(extract_from_expr(lexpr))
    uexpr = np.array(extract_from_expr(uexpr))

    # (3c) Extract the output range for initLB based on the abstract value.
    lower_bound, upper_bound = compute_output_range(initLB, lexpr, uexpr)

    # Apply extra knowledge that -1 <= lower_bound <= upper_bound <= 1.
    lower_bound = max(lower_bound, -1.)
    upper_bound = min(upper_bound, 1.)

    post_lower, post_upper = post_bounds(initLB, lower_bound, upper_bound)
    post_lower, post_upper = post_lower.flatten(), post_upper.flatten()

    lower_safe = np.min(post_lower) >= -0.35
    upper_safe = np.max(post_upper) <= 0.35
    is_safe = lower_safe and upper_safe
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    if not is_safe:
        print("ERAN reported initLB unsafe after the first step.")
    else:
        print("Something changed; ERAN used to report initLB unsafe, but now"
              "it says it's safe.")
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")

    elina_abstract0_free(analyzer.man, element)
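A side note on step (3c) of the docstring above: once DeepPoly yields affine expressions with A·x + a <= f(x) <= B·x + b over the initial set, the output range over a box [l, u] follows from sign-splitting each coefficient. The sketch below is a self-contained illustration of that computation; it is not the compute_output_range used in this script, whose expression format (as returned by extract_from_expr) may differ.

import numpy as np

def affine_range_over_box(A, a, B, b, lower, upper):
    # Lower bound: minimize A.x + a over the box, taking lower where the
    # coefficient is non-negative and upper where it is negative.
    lo = a + np.sum(np.where(A >= 0, A * lower, A * upper))
    # Upper bound: maximize B.x + b over the box, symmetrically.
    hi = b + np.sum(np.where(B >= 0, B * upper, B * lower))
    return lo, hi

# Example over the pendulum initial set [-0.35, 0.35]^2 with made-up coefficients:
lo, hi = affine_range_over_box(np.array([0.5, -0.2]), 0.1,
                               np.array([0.6, -0.1]), 0.2,
                               np.array([-0.35, -0.35]),
                               np.array([0.35, 0.35]))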