Example #1
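# Robustness check for a small network: all inputs are first clipped to
# [0, 1], inputs 0-1 are then pinned to their exact values, and inputs 2-3
# are widened by +/- epsilon within [-1, 1] before a single 'deepzono'
# analysis. A non-negative returned label means one class dominates the box.
# Assumed context (ERAN's tf_verify modules), e.g.:
#   import numpy as np
#   from eran import ERAN
#   from read_net_file import read_net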
def verify(eran_model_path, input, epsilon):
    model, is_conv, means, stds = read_net(eran_model_path, 14, False)
    eran = ERAN(model)

    specLB = np.clip(input - epsilon, 0, 1)
    specUB = np.clip(input + epsilon, 0, 1)
    for i in range(0, 2):
        specLB[i] = input[i]
        specUB[i] = input[i]
    for i in range(2, 4):
        specLB[i] = np.clip(input[i] - epsilon, -1, 1)
        specUB[i] = np.clip(input[i] + epsilon, -1, 1)
    label, nn, _, _ = eran.analyze_box(specLB, specUB, 'deepzono', 1, 1)
    print("lable = {}".format(label))
    return label >= 0
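# Hypothetical usage (path and input values are placeholders):
#   x = np.zeros(14, dtype=np.float64)
#   verify("nets/example.tf", x, epsilon=0.05)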
Example #2
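# DNNV-style ERAN bridge: negate the property phi, extract halfspace-polytope
# sub-properties, lower each suffixed operation graph to ERAN layers via a TF
# graph, and fold the per-property analyze_box results into SAT/UNSAT/UNKNOWN.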
def verify(dnn: OperationGraph,
           phi: Expression,
           domain="deeppoly",
           timeout_lp=1.0,
           timeout_milp=1.0,
           use_area_heuristic=True,
           **kwargs: Dict[str, Any]):
    logger = logging.getLogger(__name__)
    dnn = dnn.simplify()
    phi.networks[0].concretize(dnn)

    result = UNSAT
    property_extractor = HalfspacePolytopePropertyExtractor(
        HyperRectangle, HalfspacePolytope)
    for prop in property_extractor.extract_from(~phi):
        if prop.input_constraint.num_variables > 1:
            raise ERANTranslatorError(
                "Unsupported network: More than 1 input variable")
        with tf.Session(graph=tf.Graph()) as tf_session:
            layers = as_layers(
                prop.suffixed_op_graph(),
                extra_layer_types=ERAN_LAYER_TYPES,
                translator_error=ERANTranslatorError,
            )
            input_interval = prop.input_constraint

            spec_lb = input_interval.lower_bounds[0]
            spec_ub = input_interval.upper_bounds[0]
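            # ERAN's TF backend expects NHWC inputs; transpose 4-D bounds from NCHW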
            if len(spec_lb.shape) == 4:
                spec_lb = spec_lb.transpose((0, 2, 3, 1))
                spec_ub = spec_ub.transpose((0, 2, 3, 1))
            tf_graph = as_tf(layers, translator_error=ERANTranslatorError)
            eran_model = ERAN(tf_graph, session=tf_session)
            _, nn, nlb, nub = eran_model.analyze_box(spec_lb.flatten().copy(),
                                                     spec_ub.flatten().copy(),
                                                     domain, timeout_lp,
                                                     timeout_milp,
                                                     use_area_heuristic,
                                                     **kwargs)
            output_lower_bound = np.asarray(nlb[-1])
            output_upper_bound = np.asarray(nub[-1])
            logger.debug("output lower bound: %s", output_lower_bound)
            logger.debug("output upper bound: %s", output_upper_bound)
            result |= check(output_lower_bound, output_upper_bound)
        if result == SAT or result == UNKNOWN:
            return result

    return result
Example #3
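        # Fragment: grid-split each of the 5 inputs (ACAS Xu-style spec) into
        # num_splits[i] slices and analyze the resulting sub-boxes one by one.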
            num_splits = [4, 5, 1, 20, 20]
        else:
            num_splits = [10, 10, 1, 10, 10]
        step_size = []
        for i in range(5):
            step_size.append((specUB[i] - specLB[i]) / num_splits[i])
        #sorted_indices = np.argsort(widths)
        #input_to_split = sorted_indices[0]
        #print("input to split ", input_to_split)

        #step_size = widths/num_splits
        start_val = np.copy(specLB)
        end_val = np.copy(specUB)
        flag = True
        _, nn, _, _ = eran.analyze_box(specLB, specUB, init_domain(domain),
                                       config.timeout_lp, config.timeout_milp,
                                       config.use_default_heuristic,
                                       constraints)
        start = time.time()
        #complete_list = []
        for i in range(num_splits[0]):
            specLB[0] = start_val[0] + i * step_size[0]
            specUB[0] = np.fmin(end_val[0],
                                start_val[0] + (i + 1) * step_size[0])

            for j in range(num_splits[1]):
                specLB[1] = start_val[1] + j * step_size[1]
                specUB[1] = np.fmin(end_val[1],
                                    start_val[1] + (j + 1) * step_size[1])

                for k in range(num_splits[2]):
                    specLB[2] = start_val[2] + k * step_size[2]
Example #4
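# Iterative polytope-cutting verification: seed a candidate adversarial region
# with PGD, optionally enlarge it with "wolf" sampling attacks, then alternate
# Wolf cuts and LP cuts on cut_model until DeepPoly verifies the region or
# args.max_cuts is exhausted.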
def clever_wolf(nn, cut_model, y_true, y_tar, specLB, specUB, domain, args):
    clever_start_time = time.time()
    if cut_model.y_tar is None:
        data, lb, ub = generate_initial_region_PGD(y_tar, 250)
        reset_pool((specLB, specUB))
        update_target_pool((y_true, y_tar))
        cut_model.update_target(y_true, y_tar)
        cut_model.reset_model(specLB, specUB)
        succ_attacks = data.shape[0]
        all_attacks = 250
        if not args.nowolf:
            samples = cut_model.sample_poly_under(250)
            pos_ex = sample_wolf_attacks(samples, 'pos_brent')
            succ_attacks += pos_ex.shape[0]
            all_attacks += 250
        print('Target', y_tar, succ_attacks, '/', all_attacks)
        if succ_attacks > 0:
            data, lb, ub = generate_initial_region_PGD(y_tar, 5000)
            reset_pool((specLB, specUB))
            update_target_pool((y_true, y_tar))
            cut_model.update_target(y_true, y_tar)
            cut_model.reset_model(specLB, specUB)

            if not args.nowolf:
                samples = cut_model.sample_poly_under(5000)
                pos_ex = sample_wolf_attacks(samples, 'pos_brent')
                if pos_ex.shape[0] != 0:
                    data = np.concatenate((data, pos_ex))
            lb = np.min(data, axis=0)
            ub = np.max(data, axis=0)

            cut_model.update_bounds(lb, ub)
            cut_model.set_data(data)
            update_pool((lb, ub))
        else:
            return False
        s = time.time()
        config.dyn_krelu = False
        config.use_3relu = False
        config.use_2relu = False
        config.numproc_krelu = 24
        eran = ERAN(cut_model.tf_output, is_onnx=False)
        label, nn, nlb, nub = eran.analyze_box(lb, ub, 'deeppoly', 1, 1, True)
        print('Label:', label, 'Time:', time.time() - s, 's')
        if label == -1:
            cut_model.nlb = [np.array(lb) for lb in nlb]
            cut_model.nub = [np.array(ub) for ub in nub]
            lb, ub = cut_model.overapprox_box()
            cut_model.nlb.insert(0, lb)
            cut_model.nub.insert(0, ub)
        else:
            cut_model.save(model_name)
            print('Verified, time:', int(time.time() - clever_start_time))
            return True
    print('Init model')
    if args.obox_approx:
        cut_model.approx_obox = True
    process = psutil.Process(os.getpid())
    start_lp_sampling = args.nowolf
    method = None
    res = None
    hist = History(cut_model.input_size, cut_hist_size=5, update_hist_every=2)
    lp_params = (nn, domain, y_tar)
    wolf_params = [1000]
    for cut in range(args.max_cuts):

        sys.stdout.flush()

        print_vol(cut_model)

        gc.collect()
        print('\nCut:', cut, ', Time:', int(time.time() - clever_start_time),
              'sec,', 'Target:', y_tar, ',X_size', cut_model.data_size,
              ',Memory:',
              process.memory_info().rss / (1024 * 1024), '\n')

        if cut % args.save_every == 0:
            cut_model.save(model_name)

        if not start_lp_sampling and cut % args.choose_criterea_every == 0:
            method = choose_method(cut_model, hist, lp_params, wolf_params)
            print('Chosen new method:', method)
            if method is None:
                cut_model.save(model_name)
                print('Verified, time:', int(time.time() - clever_start_time))
                return True

        if not start_lp_sampling and method == 'Wolf':
            res = wolf_cut(cut_model, hist, *wolf_params)

        if start_lp_sampling or res is False or method == 'LP':
            if not start_lp_sampling and method != 'LP':
                start_lp_sampling = start_lp_sampling or res is False
            verified, _ = lp_cut(cut_model, hist, *lp_params)

            if verified:
                cut_model.save(model_name)
                print('Verified, time:', int(time.time() - clever_start_time))
                return True

    cut_model.shrink_poly(nn, domain, y_tar)
    cut_model.save(model_name)
    print_vol(cut_model)
    print('Verified, time:', int(time.time() - clever_start_time))
    return True
Example #5
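    # Same per-dimension grid split as Example #3, with different split counts.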
        num_splits = [10, 9, 1, 5, 14]
    else:
        num_splits = [10, 10, 1, 10, 10]
    step_size = []
    for i in range(5):
        step_size.append((specUB[i] - specLB[i]) / num_splits[i])
    #sorted_indices = np.argsort(widths)
    #input_to_split = sorted_indices[0]
    #print("input to split ", input_to_split)

    #step_size = widths/num_splits
    start_val = np.copy(specLB)
    end_val = np.copy(specUB)
    flag = True
    _, nn, _, _ = eran.analyze_box(specLB, specUB, init_domain(domain),
                                   args.timeout_lp, args.timeout_milp,
                                   args.use_area_heuristic, specnumber)
    start = time.time()
    for i in range(num_splits[0]):
        specLB[0] = start_val[0] + i * step_size[0]
        specUB[0] = np.fmin(end_val[0], start_val[0] + (i + 1) * step_size[0])

        for j in range(num_splits[1]):
            specLB[1] = start_val[1] + j * step_size[1]
            specUB[1] = np.fmin(end_val[1],
                                start_val[1] + (j + 1) * step_size[1])

            for k in range(num_splits[2]):
                specLB[2] = start_val[2] + k * step_size[2]
                specUB[2] = np.fmin(end_val[2],
                                    start_val[2] + (k + 1) * step_size[2])
Example #6
def verify_plain(network_file: str, means: np.ndarray, stds: np.ndarray,
                 input_box: List[Tuple[np.ndarray, np.ndarray]],
                 output_constraints: List[List[Tuple[int, int, float]]],
                 timeout_lp=1, timeout_milp=1, use_default_heuristic=True, use_milp=True, progress_bar=True) \
        -> Optional[Sequence[Tuple[np.ndarray, Tuple[int, int, float], Optional[float]]]]:
    """
    Verifies a neural network. This method also supports convolutional networks,
    but has much lower precision than ``verify_acasxu_style``, because
    it does not split the input bounds.

    Returns None if no counterexamples could be found and the network could not be verified.
    Returns an empty list if the network could be verified.
    Returns a list of inputs for which the network violates the constraints if the network could not be verified.
    """
    domain = "deeppoly"

    model, is_conv = read_onnx_net(network_file)
    eran = ERAN(model, is_onnx=True)

    specLB = [interval[0] for interval in input_box]
    specUB = [interval[1] for interval in input_box]
    _normalize(specLB, means, stds)
    _normalize(specUB, means, stds)

    counterexample_list = []
    # adex_holds stores whether the property still holds at x_adex (below);
    # if adex_holds is True, then x_adex is a spurious counterexample
    adex_holds = True

    with tqdm(total=3, disable=not progress_bar) as progress_bar:
        verified_flag, nn, nlb, nub, _, x_adex = eran.analyze_box(
            specLB, specUB, domain, timeout_lp, timeout_milp,
            use_default_heuristic, output_constraints)
        progress_bar.update()

        if not verified_flag and x_adex is not None:
            adex_holds, _, _, _, _, _ = eran.analyze_box(
                x_adex, x_adex, "deeppoly", timeout_lp, timeout_milp,
                use_default_heuristic, output_constraints)
            if not adex_holds:
                verified_flag = False
                # we need to undo the input normalisation that was applied to the counterexamples
                counterexample_list.append((np.array(x_adex) * stds + means,
                                            output_constraints, None))
        progress_bar.update()

        # try to find a counterexample with milp solving
        if not verified_flag and adex_holds and use_milp:
            verified_flag, adv_examples, adv_values, adv_violated_constraints = \
                verify_network_with_milp(nn, specLB, specUB, nlb, nub, output_constraints)
            if not verified_flag:
                # compare with milp solving code above
                if adv_examples is not None:
                    for adv_image, violated_constraint, adv_value in \
                            zip(adv_examples, adv_violated_constraints, adv_values):
                        hold, _, nlb, nub, _, x = eran.analyze_box(
                            adv_image, adv_image, domain, timeout_lp,
                            timeout_milp, use_default_heuristic,
                            output_constraints)
                        if not hold:
                            info(
                                f"property violated at {adv_image} with violation {adv_value} output_score {nlb[-1]}"
                            )
                        else:
                            info(
                                f"spurious counterexample found: {adv_image} with violation {adv_value} "
                                f"output_score {nlb[-1]}")
                        counterexample_list.append(
                            (np.array(adv_image) * stds + means,
                             violated_constraint, adv_value))
        progress_bar.update()

    if not verified_flag and len(counterexample_list) > 0:
        info(
            f"Property not verified with counterexamples: {counterexample_list}."
        )
        return counterexample_list
    elif not verified_flag:
        info(f"Property not verified without counterexamples.")
        # raise RuntimeError("Property disproven, but no counterexample found.")
        return None
    else:
        info(f"Property verified.")
        return []
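# Hypothetical usage (file name, box, and constraint encoding are placeholders
# that only follow the type hints above):
#   box = [(np.array([0.0]), np.array([1.0]))] * 5
#   constraints = [[(0, 1, 0.0)]]
#   verify_plain("net.onnx", np.zeros(5), np.ones(5), box, constraints)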
Example #7
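# Geometric (DeepG-style) robustness analysis: for each test image, read the
# attack parameter file and the per-chunk spec file (parameter intervals,
# pixel interval bounds, and linear lower/upper polyhedra over the
# parameters), then compare plain interval analysis against DeepPoly with the
# polyhedral constraints attached.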
def main():
    parser = argparse.ArgumentParser(description='Analyze NN.')
    parser.add_argument('--net', type=str, help='Neural network to analyze')
    parser.add_argument('--dataset', type=str, default='mnist', help='Dataset')
    parser.add_argument('--data_dir',
                        type=str,
                        help='Directory which contains data')
    parser.add_argument('--data_root',
                        type=str,
                        help='Directory which contains data')
    parser.add_argument('--num_params',
                        type=int,
                        default=0,
                        help='Number of transformation parameters')
    parser.add_argument('--num_tests',
                        type=int,
                        default=None,
                        help='Number of images to test')
    parser.add_argument('--from_test',
                        type=int,
                        default=0,
                        help='Index of the first image to test')
    parser.add_argument('--test_idx',
                        type=int,
                        default=None,
                        help='Index to test')
    parser.add_argument('--debug',
                        action='store_true',
                        help='Whether to display debug info')
    parser.add_argument('--attack',
                        action='store_true',
                        help='Whether to attack')
    parser.add_argument('--timeout_lp',
                        type=float,
                        default=1,
                        help='timeout for the LP solver')
    parser.add_argument('--timeout_milp',
                        type=float,
                        default=1,
                        help='timeout for the MILP solver')
    parser.add_argument(
        '--use_area_heuristic',
        type=str2bool,
        default=True,
        help='whether to use area heuristic for the DeepPoly ReLU approximation'
    )
    args = parser.parse_args()

    #LRJ add
    deepsymbol_input_folder = args.data_dir + '/Batch_Input_DeepG'
    #print(deepsymbol_input_folder)
    if not os.path.exists(deepsymbol_input_folder):
        os.makedirs(deepsymbol_input_folder)
    else:
        for root, dirs, files in os.walk(deepsymbol_input_folder):
            for name in files:
                if name.endswith('.in'):
                    os.remove(os.path.join(root, name))
                    #print("delete "+os.path.join(root,name))

    global n_rows, n_cols, n_channels
    if args.dataset == 'cifar10':
        n_rows, n_cols, n_channels = 32, 32, 3
    else:
        n_rows, n_cols, n_channels = 28, 28, 1

    filename, file_extension = os.path.splitext(args.net)

    is_trained_with_pytorch = False
    is_saved_tf_model = False

    if (file_extension == ".net" or file_extension == ".pyt"):
        is_trained_with_pytorch = True
    elif (file_extension == ".meta"):
        is_saved_tf_model = True
    elif (file_extension != ".tf"):
        print("file extension not supported")
        exit(1)

    is_conv = False

    if (is_saved_tf_model):
        netfolder = os.path.dirname(args.net)

        tf.logging.set_verbosity(tf.logging.ERROR)

        sess = tf.Session()
        saver = tf.train.import_meta_graph(args.net)
        saver.restore(sess, tf.train.latest_checkpoint(netfolder + '/'))
        eran = ERAN(sess.graph.get_tensor_by_name('logits:0'), sess)
    else:
        if args.dataset == 'mnist' or args.dataset == 'fashion':
            num_pixels = 784
        else:
            num_pixels = 3072
        model, is_conv, means, stds = read_net(args.net, num_pixels,
                                               is_trained_with_pytorch)
        eran = ERAN(model)

    csvfile = open('../../code/datasets/{}_test.csv'.format(args.dataset), 'r')
    tests = csv.reader(csvfile, delimiter=',')

    total, attacked, standard_correct, tot_time = 0, 0, 0, 0
    correct_box, correct_poly = 0, 0
    cver_box, cver_poly = [], []

    for i, test in enumerate(tests):
        if args.test_idx is not None and i != args.test_idx:
            continue

        attacks_file = os.path.join(args.data_dir, 'attack_{}.csv'.format(i))
        if args.num_tests is not None and i >= args.num_tests:
            break
        print('Test {}:'.format(i))

        #LRJ add
        current_test = i

        if args.dataset == 'mnist' or args.dataset == 'fashion':
            image = np.float64(test[1:len(test)])
        else:
            if is_trained_with_pytorch:
                image = np.float64(test[1:len(test)])
            else:
                image = np.float64(test[1:len(test)]) - 0.5

        spec_lb = np.copy(image)
        spec_ub = np.copy(image)

        if (is_trained_with_pytorch):
            normalize(spec_lb, means, stds, args.dataset, is_conv)
            normalize(spec_ub, means, stds, args.dataset, is_conv)

        label, nn, nlb, nub = eran.analyze_box(spec_lb, spec_ub, 'deeppoly',
                                               args.timeout_lp,
                                               args.timeout_milp,
                                               args.use_area_heuristic)
        print('Label: ', label)

        if label != int(test[0]):
            print('Label {}, but true label is {}, skipping...'.format(
                label, int(test[0])))
            print('Standard accuracy: {} percent'.format(standard_correct /
                                                         float(i + 1) * 100))
            continue
        else:
            standard_correct += 1
            print('Standard accuracy: {} percent'.format(standard_correct /
                                                         float(i + 1) * 100))

        dim = n_rows * n_cols * n_channels

        ok_box, ok_poly = True, True
        k = args.num_params + 1 + 1 + dim

        attack_imgs, checked, attack_pass = [], [], 0
        cex_found = False
        if args.attack:
            with open(attacks_file, 'r') as fin:
                lines = fin.readlines()
                for j in range(0, len(lines), args.num_params + 1):
                    params = [
                        float(line[:-1])
                        for line in lines[j:j + args.num_params]
                    ]
                    tokens = lines[j + args.num_params].split(',')
                    values = np.array(list(map(float, tokens)))

                    attack_lb = values[::2]
                    attack_ub = values[1::2]

                    if is_trained_with_pytorch:
                        normalize(attack_lb, means, stds, args.dataset,
                                  is_conv)
                        normalize(attack_ub, means, stds, args.dataset,
                                  is_conv)
                    else:
                        attack_lb -= 0.5
                        attack_ub -= 0.5
                    attack_imgs.append((params, attack_lb, attack_ub))
                    checked.append(False)

                    predict_label, _, _, _ = eran.analyze_box(
                        attack_lb[:dim], attack_ub[:dim], 'deeppoly',
                        args.timeout_lp, args.timeout_milp,
                        args.use_area_heuristic, 0)
                    if predict_label != int(test[0]):
                        print('counter-example, params: ', params,
                              ', predicted label: ', predict_label)
                        cex_found = True
                        break
                    else:
                        attack_pass += 1
        print('tot attacks: ', len(attack_imgs))
        specs_file = os.path.join(args.data_dir, '{}.csv'.format(i))
        #LRJ add
        spec_lb_all = []
        spec_ub_all = []
        begtime = time.time()
        with open(specs_file, 'r') as fin:
            lines = fin.readlines()
            print('Number of lines: ', len(lines))
            assert len(lines) % k == 0

            spec_lb = np.zeros(args.num_params + dim)
            spec_ub = np.zeros(args.num_params + dim)

            expr_size = args.num_params
            lexpr_cst, uexpr_cst = [], []
            lexpr_weights, uexpr_weights = [], []
            lexpr_dim, uexpr_dim = [], []

            ver_chunks_box, ver_chunks_poly, tot_chunks = 0, 0, 0

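            # note: this inner loop reuses `i`, shadowing the outer test index
            # (preserved above as current_test)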
            for i, line in enumerate(lines):
                if i % k < args.num_params:
                    # read specs for the parameters
                    values = np.array(list(map(float, line[:-1].split(' '))))
                    assert values.shape[0] == 2
                    param_idx = i % k
                    spec_lb[dim + param_idx] = values[0]
                    spec_ub[dim + param_idx] = values[1]
                    if args.debug:
                        print('parameter %d: [%.4f, %.4f]' %
                              (param_idx, values[0], values[1]))
                elif i % k == args.num_params:
                    # read interval bounds for image pixels
                    values = np.array(list(map(float, line[:-1].split(','))))
                    #print(values)
                    #print(len(values))
                    spec_lb[:dim] = values[::2]
                    spec_ub[:dim] = values[1::2]
                    # if args.debug:
                    #     show_ascii_spec(spec_lb, spec_ub)
                elif i % k < k - 1:
                    # read polyhedra constraints for image pixels
                    tokens = line[:-1].split(' ')
                    assert len(tokens) == 2 + 2 * args.num_params + 1

                    bias_lower, weights_lower = float(tokens[0]), list(
                        map(float, tokens[1:1 + args.num_params]))
                    assert tokens[args.num_params + 1] == '|'
                    bias_upper, weights_upper = float(
                        tokens[args.num_params + 2]), list(
                            map(float, tokens[3 + args.num_params:]))

                    assert len(weights_lower) == args.num_params
                    assert len(weights_upper) == args.num_params

                    lexpr_cst.append(bias_lower)
                    uexpr_cst.append(bias_upper)
                    for j in range(args.num_params):
                        lexpr_dim.append(dim + j)
                        uexpr_dim.append(dim + j)
                        lexpr_weights.append(weights_lower[j])
                        uexpr_weights.append(weights_upper[j])
                else:
                    assert (line == 'SPEC_FINISHED\n')
                    for p_idx in range(args.num_params):
                        lexpr_cst.append(spec_lb[dim + p_idx])
                        for l in range(args.num_params):
                            lexpr_weights.append(0)
                            lexpr_dim.append(dim + l)
                        uexpr_cst.append(spec_ub[dim + p_idx])
                        for l in range(args.num_params):
                            uexpr_weights.append(0)
                            uexpr_dim.append(dim + l)
                    #LRJ Add Data before normalize
                    if len(spec_lb_all):
                        spec_lb_all = np.minimum(spec_lb_all, spec_lb[:dim])
                    else:
                        spec_lb_all = list(spec_lb[:dim])

                    if len(spec_ub_all):
                        spec_ub_all = np.maximum(spec_ub_all, spec_ub[:dim])
                    else:
                        spec_ub_all = list(spec_ub[:dim])
                    #print(spec_lb_all)

                    if (is_trained_with_pytorch):
                        normalize(spec_lb[:dim], means, stds, args.dataset,
                                  is_conv)
                        normalize(spec_ub[:dim], means, stds, args.dataset,
                                  is_conv)
                    normalize_poly(args.num_params, lexpr_cst, lexpr_weights,
                                   lexpr_dim, uexpr_cst, uexpr_weights,
                                   uexpr_dim, means, stds, args.dataset)

                    for attack_idx, (attack_params, attack_lb,
                                     attack_ub) in enumerate(attack_imgs):
                        ok_attack = True
                        for j in range(dim):  # dim == num_pixels here; num_pixels is undefined for saved TF models
                            low, up = lexpr_cst[j], uexpr_cst[j]
                            for idx in range(args.num_params):
                                low += lexpr_weights[j * args.num_params +
                                                     idx] * attack_params[idx]
                                up += uexpr_weights[j * args.num_params +
                                                    idx] * attack_params[idx]
                            if low > attack_lb[j] + EPS or attack_ub[
                                    j] > up + EPS:
                                ok_attack = False
                        if ok_attack:
                            checked[attack_idx] = True
                            # print('checked ', attack_idx)
                    if args.debug:
                        print('Running the analysis...')

                    t_begin = time.time()
                    perturbed_label_poly, _, _, _ = eran.analyze_box(
                        spec_lb, spec_ub, 'deeppoly', args.timeout_lp,
                        args.timeout_milp, args.use_area_heuristic, 0,
                        lexpr_weights, lexpr_cst, lexpr_dim, uexpr_weights,
                        uexpr_cst, uexpr_dim, expr_size)
                    perturbed_label_box, _, _, _ = eran.analyze_box(
                        spec_lb[:dim], spec_ub[:dim], 'deeppoly',
                        args.timeout_lp, args.timeout_milp,
                        args.use_area_heuristic, 0)
                    t_end = time.time()
                    #LRJ add normalized data below
                    #print(spec_lb[:dim])
                    #if len(spec_lb_all):
                    #    spec_lb_all=np.minimum(spec_lb_all,spec_lb[:dim])
                    #else:
                    #    spec_lb_all=spec_lb[:dim]

                    #if len(spec_ub_all):
                    #    spec_ub_all=np.maximum(spec_ub_all,spec_ub[:dim])
                    #else:
                    #    spec_ub_all=spec_ub[:dim]

                    print('DeepG: ', perturbed_label_poly, '\tInterval: ',
                          perturbed_label_box, '\tlabel: ', label,
                          '[Time: %.4f]' % (t_end - t_begin))

                    tot_chunks += 1
                    if perturbed_label_box != label:
                        ok_box = False
                    else:
                        ver_chunks_box += 1

                    if perturbed_label_poly != label:
                        ok_poly = False
                    else:
                        ver_chunks_poly += 1

                    lexpr_cst, uexpr_cst = [], []
                    lexpr_weights, uexpr_weights = [], []
                    lexpr_dim, uexpr_dim = [], []
        #LRJ add
        #print(len(spec_lb_all))
        #print(len(spec_ub_all))
        in_file = os.path.join(deepsymbol_input_folder,
                               'in_{}.in'.format(current_test))
        fp = open(in_file, "w")
        for index in range(len(spec_lb_all)):
            fp.write(
                str(spec_lb_all[index]) + ' ' + str(spec_ub_all[index]) + '\n')
        fp.close()

        total += 1
        if ok_box:
            correct_box += 1
        if ok_poly:
            correct_poly += 1
        if cex_found:
            assert (not ok_box) and (not ok_poly)
            attacked += 1
        cver_poly.append(ver_chunks_poly / float(tot_chunks))
        cver_box.append(ver_chunks_box / float(tot_chunks))
        tot_time += time.time() - begtime

        print('Verified[box]: {}, Verified[poly]: {}, CEX found: {}'.format(
            ok_box, ok_poly, cex_found))
        assert not cex_found or not ok_box, 'ERROR! Found counter-example, but image was verified with box!'
        assert not cex_found or not ok_poly, 'ERROR! Found counter-example, but image was verified with poly!'

        print('Attacks found: %.2f percent, %d/%d' %
              (100.0 * attacked / total, attacked, total))
        print('[Box]  Provably robust: %.2f percent, %d/%d' %
              (100.0 * correct_box / total, correct_box, total))
        print('[Poly] Provably robust: %.2f percent, %d/%d' %
              (100.0 * correct_poly / total, correct_poly, total))
        print('Empirically robust: %.2f percent, %d/%d' %
              (100.0 * (total - attacked) / total, total - attacked, total))
        print('[Box]  Average chunks verified: %.2f percent' %
              (100.0 * np.mean(cver_box)))
        print('[Poly]  Average chunks verified: %.2f percent' %
              (100.0 * np.mean(cver_poly)))
        print('Average time: ', tot_time / total)
        print(
            'Verified:%d Box Robust:%d Poly Robust:%d Box Robust Percentage:%.2f Poly Robust Percentage:%.2f'
            % (total, correct_box, correct_poly, 100.0 * correct_box / total,
               100.0 * correct_poly / total))
Example #8
        tests = get_tests(dataset, config.geometric)
    else:
        tests = open(config.input_box, 'r').read()

# Merge ffdnet with deeppoly
#test_input = np.asarray([1,1], dtype=np.float64)
specLB = np.asarray([-1, -1, -1, -1], dtype=np.float64)
specUB = np.asarray([1, 1, 1, 1], dtype=np.float64)
#specLB = np.copy(test_input)
#specUB = np.copy(test_input)
one_dim_LB = specLB.flatten()
one_dim_UB = specUB.flatten()
print("enter eran function")
start = time.time()
eran_result = eran.analyze_box(one_dim_LB, one_dim_UB, init_domain(domain),
                               config.timeout_lp, config.timeout_milp,
                               config.use_default_heuristic, args.two_lbs,
                               args.random_prune)
print("leave eran function")
end = time.time()
nub = np.array(eran_result[3])
nlb = np.array(eran_result[2])
print(nlb[-1])
print(nub[-1])
print(end - start, "seconds")
fullpath = "exeu_time.csv"
with open(fullpath, 'a+', newline='') as write_obj:
    csv_writer = csv.writer(write_obj)
    #csv_writer.writerow(list(nlb[-1]))
    #csv_writer.writerow(list(nub[-1]))
    net_name_list = netname.split("/")
    row = [
Example #9
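            # Fragment of a regression-test loop: analyze one image as a point
            # box (specLB == specUB) and log any analyzer exception per
            # (dataset, network, domain) combination.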
            specLB = np.copy(image)
            specUB = np.copy(image)
            test_input = np.copy(image)

            if config.mean is not None:
                normalize(specLB)
                normalize(specUB)

            print(', '.join([dataset, network, domain]), 'testing now')

            try:
                label, nn, nlb, nub, output_info = eran.analyze_box(
                    specLB,
                    specUB,
                    domain,
                    1,
                    1,
                    config.use_area_heuristic,
                    testing=True)

            except Exception as e:
                tested_file.write(', '.join([
                    dataset, network, domain, 'ERAN analyze error trace: ' +
                    traceback.format_exc()
                ]) + '\n\n\n')
                tested_file.flush()
                continue

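            # reshape the flat test input to NHWC for the TF-based model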
            if dataset == 'cifar10':
                input = np.array(test_input,
                                 dtype=np.float32).reshape([1, 32, 32, 3])
Example #10
                else:
                    image = (np.float64(test[1:len(test)]) / np.float64(255)) - 0.5

            specLB = np.copy(image)
            specUB = np.copy(image)
            test_input = np.copy(image)

            if is_trained_with_pytorch or is_onnx:
                normalize(specLB, means, stds)
                normalize(specUB, means, stds)
                normalize(test_input, means, stds)

            print(', '.join([dataset, network, domain]), 'testing now')

            try:
                label, nn, nlb, nub, output_info = eran.analyze_box(specLB, specUB, domain, 1, 1, True, testing=True)

            except Exception as e:
                tested_file.write(', '.join([dataset, network, domain, 'ERAN analyze error message: ' + str(e)]) + '\n')
                tested_file.flush()
                continue

            if dataset == 'cifar10':
                input = np.array(test_input, dtype=np.float32).reshape([1, 32, 32, 3])
            elif dataset == 'mnist':
                input = np.array(test_input, dtype=np.float32).reshape([1, 28, 28, 1])

            if is_onnx:
                input = input.transpose(0, 3, 1, 2)

            if is_onnx:
Example #11
      image = np.float64(test[1:len(test)]) / np.float64(255)
  else:
      if is_trained_with_pytorch:
          image = (np.float64(test[1:len(test)]) / np.float64(255))
      else:
          image = (np.float64(test[1:len(test)]) / np.float64(255)) - 0.5

  specLB = np.copy(image)
  specUB = np.copy(image)

  if is_trained_with_pytorch:
      normalize(specLB, means, stds)
      normalize(specUB, means, stds)

  label = eran.analyze_box(specLB, specUB, domain)
  if label == int(test[0]):
      if dataset == 'mnist':
          specLB = np.clip(image - epsilon, 0, 1)
          specUB = np.clip(image + epsilon, 0, 1)
      else:
          if is_trained_with_pytorch:
              specLB = np.clip(image - epsilon, 0, 1)
              specUB = np.clip(image + epsilon, 0, 1)
          else:
              specLB = np.clip(image - epsilon, -0.5, 0.5)
              specUB = np.clip(image + epsilon, -0.5, 0.5)
      if is_trained_with_pytorch:
          normalize(specLB, means, stds)
          normalize(specUB, means, stds)
      start = time.time()
Example #12
class EranVerifier:
    """Wrapper object for the ERAN verifier to have a clean interface with proper types."""
    __TIMEOUT_LP = 1_000_000
    __TIMEOUT_MILP = 1_000_000

    def __init__(self, model):
        self.eran = ERAN(model=model, is_onnx=True)

    def analyze_classification_box(self, bounds: Interval):
        (dominant_class, _, nlb, nub,
         _) = self.eran.analyze_box(specLB=bounds.lower_bound,
                                    specUB=bounds.upper_bound,
                                    domain='deeppoly',
                                    timeout_lp=EranVerifier.__TIMEOUT_LP,
                                    timeout_milp=EranVerifier.__TIMEOUT_MILP,
                                    use_default_heuristic=True,
                                    testing=True)
        return dominant_class, nlb, nub

    def analyze_classification_linear(self, bounds: Interval,
                                      constraints: LinearBounds,
                                      params: List[Interval]):
        res = self.__eran_resources(bounds, constraints, params)

        (dominant_class, _, nlb, nub,
         _) = self.eran.analyze_box(specLB=res.lower_bound,
                                    specUB=res.upper_bound,
                                    domain='deeppoly',
                                    timeout_lp=EranVerifier.__TIMEOUT_LP,
                                    timeout_milp=EranVerifier.__TIMEOUT_MILP,
                                    lexpr_weights=res.lexpr_weights,
                                    lexpr_cst=res.lexpr_cst,
                                    lexpr_dim=res.lexpr_dim,
                                    uexpr_weights=res.uexpr_weights,
                                    uexpr_cst=res.uexpr_cst,
                                    uexpr_dim=res.uexpr_dim,
                                    expr_size=res.expr_size,
                                    use_default_heuristic=True,
                                    testing=True)
        return dominant_class, nlb, nub

    def analyze_segmentation_box(self, bounds: Interval,
                                 correct_labels: np.ndarray,
                                 valid_labels: np.ndarray,
                                 num_total_classes: int):
        (dominant_classes, nlb,
         nub) = EranVerifier.__analyze_segmentation_eran(
             eran=self.eran,
             true_labels=correct_labels,
             valid_classes=valid_labels,
             num_total_classes=num_total_classes,
             specLB=bounds.lower_bound,
             specUB=bounds.upper_bound,
             domain='deeppoly',
             timeout_lp=EranVerifier.__TIMEOUT_LP,
             timeout_milp=EranVerifier.__TIMEOUT_MILP,
             use_default_heuristic=True,
             testing=True)
        return dominant_classes, nlb, nub

    def analyze_segmentation_linear(self, bounds: Interval,
                                    constraints: LinearBounds,
                                    params: List[Interval],
                                    correct_labels: np.ndarray,
                                    valid_labels: np.ndarray,
                                    num_total_classes: int):
        res = self.__eran_resources(bounds, constraints, params)

        (dominant_classes, nlb,
         nub) = EranVerifier.__analyze_segmentation_eran(
             eran=self.eran,
             true_labels=correct_labels,
             valid_classes=valid_labels,
             num_total_classes=num_total_classes,
             specLB=res.lower_bound,
             specUB=res.upper_bound,
             domain='deeppoly',
             timeout_lp=EranVerifier.__TIMEOUT_LP,
             timeout_milp=EranVerifier.__TIMEOUT_MILP,
             lexpr_weights=res.lexpr_weights,
             lexpr_cst=res.lexpr_cst,
             lexpr_dim=res.lexpr_dim,
             uexpr_weights=res.uexpr_weights,
             uexpr_cst=res.uexpr_cst,
             uexpr_dim=res.uexpr_dim,
             expr_size=res.expr_size,
             use_default_heuristic=True,
             testing=True)
        return dominant_classes, nlb, nub

    # adapted from ERAN/tf_verify/ERAN to support segmentation
    @staticmethod
    def __analyze_segmentation_eran(eran: ERAN,
                                    true_labels,
                                    valid_classes,
                                    num_total_classes,
                                    specLB,
                                    specUB,
                                    domain,
                                    timeout_lp,
                                    timeout_milp,
                                    use_default_heuristic,
                                    lexpr_weights=None,
                                    lexpr_cst=None,
                                    lexpr_dim=None,
                                    uexpr_weights=None,
                                    uexpr_cst=None,
                                    uexpr_dim=None,
                                    expr_size=0,
                                    testing=False,
                                    prop=-1,
                                    spatial_constraints=None):
        assert domain == "deeppoly", "domain isn't valid, must be 'deeppoly'"
        specLB = np.reshape(specLB, (-1, ))
        specUB = np.reshape(specUB, (-1, ))
        eran.nn = layers()
        eran.nn.specLB = specLB
        eran.nn.specUB = specUB

        execute_list, output_info = eran.optimizer.get_deeppoly(
            eran.nn, specLB, specUB, lexpr_weights, lexpr_cst, lexpr_dim,
            uexpr_weights, uexpr_cst, uexpr_dim, expr_size,
            spatial_constraints)
        analyzer = Analyzer(execute_list, eran.nn, domain, timeout_lp,
                            timeout_milp, None, use_default_heuristic, -1,
                            prop, testing)
        dominant_classes, nlb, nub = EranVerifier.__analyze_segmentation_analyzer(
            analyzer, true_labels, valid_classes, num_total_classes)
        return dominant_classes, nlb, nub

    # adapted from ERAN/tf_verify/Analyzer to support segmentation
    @staticmethod
    def __analyze_segmentation_analyzer(analyzer: Analyzer, true_labels,
                                        valid_classes, num_classes: int):

        element, nlb, nub = analyzer.get_abstract0()

        number_points = len(true_labels)
        dominant_classes = []

        for n in range(number_points):
            dominant_class = -1
            label_failed = []
            candidate_labels = valid_classes
            true_label = true_labels[n]

            certified = True
            for candidate_label in candidate_labels:
                point_offset = n * num_classes
                if candidate_label != true_label and not analyzer.is_greater(
                        analyzer.man, element, point_offset + true_label,
                        point_offset + candidate_label,
                        analyzer.use_default_heuristic):
                    certified = False
                    label_failed.append(candidate_label)
                    break
            if certified:
                dominant_class = true_label
            dominant_classes.append(dominant_class)

        elina_abstract0_free(analyzer.man, element)
        return dominant_classes, nlb, nub

    @staticmethod
    def __eran_resources(bounds: Interval, constraints: LinearBounds,
                         params: List[Interval]) -> EranResources:
        res = EranResources()

        res.lower_bound = np.append(bounds.lower_bound,
                                    [p.lower_bound for p in params])
        res.upper_bound = np.append(bounds.upper_bound,
                                    [p.upper_bound for p in params])

        expr_size = len(params)
        res.expr_size = expr_size
        num_indices = len(res.lower_bound.flatten())
        param_indices = np.arange(num_indices - expr_size, num_indices)

        res.lexpr_weights = np.append(constraints.lower_slope.flatten(),
                                      np.zeros(expr_size * expr_size))
        res.uexpr_weights = np.append(constraints.upper_slope.flatten(),
                                      np.zeros(expr_size * expr_size))

        res.lexpr_cst = np.append(constraints.lower_offset.flatten(),
                                  [p.lower_bound for p in params])
        res.uexpr_cst = np.append(constraints.upper_offset.flatten(),
                                  [p.upper_bound for p in params])

        res.lexpr_dim = np.repeat([param_indices], num_indices,
                                  axis=0).flatten()
        res.uexpr_dim = np.repeat([param_indices], num_indices,
                                  axis=0).flatten()

        assert len(res.lower_bound) == num_indices
        assert len(res.upper_bound) == num_indices
        assert len(res.lexpr_weights) == num_indices * expr_size
        assert len(res.uexpr_weights) == num_indices * expr_size
        assert len(res.lexpr_cst) == num_indices
        assert len(res.uexpr_cst) == num_indices
        assert len(res.lexpr_dim) == num_indices * expr_size
        assert len(res.uexpr_dim) == num_indices * expr_size
        return res
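# Hypothetical usage (model loading and the Interval type are assumed from the
# surrounding project):
#   verifier = EranVerifier(model)
#   dominant, nlb, nub = verifier.analyze_classification_box(
#       Interval(lower_bound=x - eps, upper_bound=x + eps))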
Example #13
    if (specnumber == 9):
        num_splits = [10, 9, 1, 5, 14]
    else:
        num_splits = [10, 10, 1, 10, 10]
    step_size = []
    for i in range(5):
        step_size.append((specUB[i] - specLB[i]) / num_splits[i])
    #sorted_indices = np.argsort(widths)
    #input_to_split = sorted_indices[0]
    #print("input to split ", input_to_split)

    #step_size = widths/num_splits
    start_val = np.copy(specLB)
    end_val = np.copy(specUB)
    flag = True
    _, nn, _, _ = eran.analyze_box(specLB, specUB, 'deepzono', args.timeout_lp,
                                   args.timeout_milp, specnumber)
    start = time.time()
    for i in range(num_splits[0]):
        specLB[0] = start_val[0] + i * step_size[0]
        specUB[0] = np.fmin(end_val[0], start_val[0] + (i + 1) * step_size[0])

        for j in range(num_splits[1]):
            specLB[1] = start_val[1] + j * step_size[1]
            specUB[1] = np.fmin(end_val[1],
                                start_val[1] + (j + 1) * step_size[1])

            for k in range(num_splits[2]):
                specLB[2] = start_val[2] + k * step_size[2]
                specUB[2] = np.fmin(end_val[2],
                                    start_val[2] + (k + 1) * step_size[2])
                for l in range(num_splits[3]):
Example #14
def verify_acasxu_style(
    network_file: str,
    means: np.ndarray,
    stds: np.ndarray,
    input_box: List[Tuple[np.ndarray, np.ndarray]],
    output_constraints: List[List[Tuple[int, int, float]]],
    timeout_lp=1,
    timeout_milp=1,
    max_depth=10,
    permitted_depth_extensions=3,
    use_default_heuristic=True,
    complete=True,
    progress_bar=True
) -> Optional[List[Tuple[np.ndarray, Tuple[int, int, float],
                         Optional[float]]]]:
    """
    Verifies a fully-connected (non-convolutional) neural network.

    Returns None if no counterexamples could be found and the network could not be verified.
    Returns an empty list if the network could be verified.
    Returns a list of inputs for which the network violates the constraints if the network could not be verified.
    """
    domain = "deeppoly"

    model, is_conv = read_onnx_net(network_file)
    eran = ERAN(model, is_onnx=True)

    # total will be updated later
    progress_bar = tqdm(total=1, disable=not progress_bar)

    specLB = [interval[0] for interval in input_box]
    specUB = [interval[1] for interval in input_box]
    _normalize(specLB, means, stds)
    _normalize(specUB, means, stds)

    counterexample_list = []
    # adex_holds stores whether the property still holds at x_adex (below);
    # if adex_holds is True, then x_adex is a spurious counterexample
    adex_holds = True

    verified_flag, nn, nlb, nub, _, x_adex = eran.analyze_box(
        specLB, specUB, _init_domain(domain), timeout_lp, timeout_milp,
        use_default_heuristic, output_constraints)

    if not verified_flag and x_adex is not None:
        adex_holds, _, _, _, _, _ = eran.analyze_box(x_adex, x_adex,
                                                     "deeppoly", timeout_lp,
                                                     timeout_milp,
                                                     use_default_heuristic,
                                                     output_constraints)
        if not adex_holds:
            verified_flag = False
            # we need to undo the input normalisation that was applied to the counterexamples
            counterexample_list.append(
                (np.array(x_adex) * stds + means, output_constraints))

    if not verified_flag and adex_holds:
        # expensive min/max gradient calculation
        nn.set_last_weights(output_constraints)
        grads_lower, grads_upper = nn.back_propagate_gradient(nlb, nub)
        assert len(grads_lower) == len(specLB), 'back_propagate_gradient did not yield gradients for inputs. ' \
                                                '(verify_acasxu_style only supports non-convolutional networks).'

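        # "Smear" heuristic: weight each input's width by its largest absolute
        # gradient bound, so more sensitive inputs are split more finely below.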
        smears = [
            max(-grad_l, grad_u) * (u - l) for grad_l, grad_u, l, u in zip(
                grads_lower, grads_upper, specLB, specUB)
        ]
        smears = [1e-8 if smear == 0 else smear for smear in smears]
        split_multiple = 20 / np.sum(smears)

        num_splits = [int(np.ceil(smear * split_multiple)) for smear in smears]
        step_size = []
        for i in range(len(specLB)):  # for each input dimension
            if num_splits[i] == 0:
                num_splits[i] = 1
            step_size.append((specUB[i] - specLB[i]) / num_splits[i])

        start_val = np.copy(specLB)
        end_val = np.copy(specUB)
        # _, nn, _, _, _, _ = eran.analyze_box(
        #     specLB, specUB, domain,
        #     timeout_lp, timeout_milp, use_default_heuristic, output_constraints
        # )

        # generate all combinations of splits of the input dimensions
        multi_bounds = [(specLB.copy(), specUB.copy())]
        for d in range(len(specLB)):  # for each input dimension
            # for each split from the previous dimensions
            new_multi_bounds = []
            for specLB_, specUB_ in multi_bounds:
                for i in range(num_splits[d]):
                    specLB_ = specLB_.copy()
                    specUB_ = specUB_.copy()
                    specLB_[d] = start_val[d] + i * step_size[d]
                    specUB_[d] = np.fmin(end_val[d],
                                         start_val[d] + (i + 1) * step_size[d])
                    new_multi_bounds.append((specLB_, specUB_))
            multi_bounds = new_multi_bounds

        # print(f"len(multi_bounds)={len(multi_bounds)}\n"
        #       f"means={means}\n"
        #       f"stds={stds}\n"
        #       f"grads_lower={grads_lower}\n"
        #       f"grads_upper={grads_upper}\n"
        #       f"smeans={smears}\n"
        #       f"num_splits={num_splits}\n"
        #       f"step_size={step_size}\n"
        #       f"start_val={start_val}\n"
        #       f"end_val={end_val}")

        progress_bar.reset(total=len(multi_bounds) + 1)
        progress_bar.update()  # for the first analyze_box run

        failed_already = Value('i', 1)
        pool = None
        try:
            # sequential version
            # res = itertools.starmap(
            #    lambda lb, ub: _acasxu_recursive(lb, ub, model, eran, output_constraints, failed_already,
            #                                     25, 0, domain, timeout_lp, timeout_milp, use_default_heuristic,
            #                                     complete),
            #    multi_bounds
            # )
            arguments = [{
                'specLB': lb,
                'specUB': ub,
                'network_file': network_file,
                'constraints': output_constraints,
                'max_depth': max_depth,
                'permitted_depth_extensions': permitted_depth_extensions,
                'domain': domain,
                'timeout_lp': timeout_lp,
                'timeout_milp': timeout_milp,
                'use_default_heuristic': use_default_heuristic,
                'complete': complete
            } for lb, ub in multi_bounds]
            # using only half of the CPUs sped up the computation on the computers it was tested on
            # and also kept CPU utilisation high for all CPUs.
            pool = Pool(processes=os.cpu_count() // 2,
                        initializer=_init,
                        initargs=(failed_already, ))
            res = pool.imap_unordered(_start_acasxu_recursive, arguments)

            counterexample_list = []
            verified_flag = True
            for verified, counterexamples in res:
                if not verified:
                    verified_flag = False
                    if counterexamples is not None:
                        # convert counterexamples to numpy
                        counterexamples = [
                            (np.array(cx), constraint, value)
                            for cx, constraint, value in counterexamples
                        ]
                        # we need to undo the input normalisation that was applied to the counterexamples
                        counterexamples = [
                            (cx * stds + means, constraint, value)
                            for cx, constraint, value in counterexamples
                        ]
                        counterexample_list.extend(counterexamples)
                    else:
                        warning(
                            "Property not verified without counterexample."
                        )
                progress_bar.update()

        except Exception as ex:
            warning(f"Property not verified because of an exception: {ex}.")
            raise ex
        finally:
            progress_bar.close()
            if pool is not None:
                # make sure the Pool is properly closed
                pool.terminate()
                pool.join()
    else:
        # property has been verified in first analyze_box run.
        progress_bar.update()
        progress_bar.close()

    if not verified_flag and len(counterexample_list) > 0:
        info(
            f"Property not verified with counterexamples: {counterexample_list}."
        )
        return counterexample_list
    elif not verified_flag:
        info(f"Property not verified without counterexamples.")
        # raise RuntimeError("Property disproven, but no counterexample found.")
        return None
    else:
        info(f"Property verified.")
        return []
Example #15
    if i % 100 == 0:
        print(prec, '/', i)
exit()
'''

from clever_wolf import *
import sys
sys.path.insert(0, '../ELINA/python_interface/')
from eran import ERAN
from config import config
config.dyn_krelu = False
config.numproc_krelu = 1
eran = ERAN(cut_model.tf_output, is_onnx=False)
imgLB = np.copy(img)
imgUB = np.copy(img)
label, nn, nlb, nub = eran.analyze_box(imgLB, imgUB, 'deepzono', 1, 1, True)
assert label == cut_model.y_true

cut_model.orig_image = image.copy()

if dataset == 'acasxu' or dataset == 'mortgage':
    corr_label = cut_model.y_true

# Create specLB/UB
if dataset == 'mnist':
    specLB = np.clip(image - epsilon, 0, 1)
    specUB = np.clip(image + epsilon, 0, 1)
elif dataset == 'mortgage':
    specLB = image.copy()
    specUB = image.copy()
    specLB[bounds_keys] = bounds_lb
Example #16
        num_splits = [10, 9, 1, 5, 14]
    else:
        num_splits = [10, 10, 1, 10, 10]
    step_size = []
    for i in range(5):
        step_size.append((specUB[i] - specLB[i]) / num_splits[i])
    #sorted_indices = np.argsort(widths)
    #input_to_split = sorted_indices[0]
    #print("input to split ", input_to_split)

    #step_size = widths/num_splits
    start_val = np.copy(specLB)
    end_val = np.copy(specUB)
    flag = True
    _, nn, _, _ = eran.analyze_box(specLB, specUB, init_domain(domain),
                                   args.timeout_lp, args.timeout_milp,
                                   args.use_area_heuristic, specnumber)
    start = time.time()
    for i in range(num_splits[0]):
        specLB[0] = start_val[0] + i * step_size[0]
        specUB[0] = np.fmin(end_val[0], start_val[0] + (i + 1) * step_size[0])

        for j in range(num_splits[1]):
            specLB[1] = start_val[1] + j * step_size[1]
            specUB[1] = np.fmin(end_val[1],
                                start_val[1] + (j + 1) * step_size[1])

            for k in range(num_splits[2]):
                specLB[2] = start_val[2] + k * step_size[2]
                specUB[2] = np.fmin(end_val[2],
                                    start_val[2] + (k + 1) * step_size[2])
def clever_wolf(nn, cut_model, y_true, y_tar, specLB, specUB, domain, args,
                pgd_args1, pgd_args2):
    try:
        delattr(cut_model, 'shrink_box_best')
    except AttributeError:
        pass
    clever_start_time = time.time()
    if cut_model.y_tar is None:
        reset_pool((specLB, specUB))
        update_target_pool((y_true, y_tar))
        data, _, lb, ub = generate_initial_region_PGD(250, *pgd_args1)
        cut_model.update_target(y_true, y_tar)
        cut_model.reset_model(specLB, specUB)
        succ_attacks = data.shape[0]
        all_attacks = 250
        if not args.nowolf:
            samples = cut_model.sample_poly_under(250)
            pos_ex = sample_wolf_attacks(samples, 'pos_brent')
            succ_attacks += pos_ex.shape[0]
            all_attacks += 250

        data2, _, lb2, ub2 = generate_initial_region_PGD(250, *pgd_args2)
        succ_attacks += data2.shape[0]
        all_attacks += 250
        print('Target', y_tar, succ_attacks, '/', all_attacks)
        if succ_attacks > 0:
            reset_pool((specLB, specUB))
            update_target_pool((y_true, y_tar))
            cut_model.update_target(y_true, y_tar)
            cut_model.reset_model(specLB, specUB)

            data, bounds, lb, ub = generate_initial_region_PGD(
                2500, *pgd_args1)
            data2, bounds2, lb2, ub2 = generate_initial_region_PGD(
                2500, *pgd_args2)
            if lb is None and lb2 is None:
                data = np.zeros((0, cut_model.input_size))
                bounds = np.zeros(0)
            elif lb is None:
                data = data2
                bounds = bounds2
            elif lb2 is not None:
                data = np.concatenate((data, data2))
                bounds = np.concatenate((bounds, bounds2))

            if not args.nowolf:
                samples = cut_model.sample_poly_under(5000)
                pos_ex = sample_wolf_attacks(samples, 'pos_brent')
                if pos_ex.shape[0] != 0:
                    data = np.concatenate((data, pos_ex))

            if data.shape[0] == 0:
                return False

            lb = np.min(data, axis=0)
            ub = np.max(data, axis=0)

            cut_model.update_bounds(lb, ub)
            cut_model.set_data(data)
            cut_model.bounds = bounds
            update_pool((lb, ub))
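            # At this point the region is re-seeded from the attack data: the
            # coordinate-wise min/max of all successful adversarial examples
            # form the tightest axis-aligned box containing them, and both the
            # cut model and the worker pool are reset to that box.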
        else:
            return False
        s = time.time()
        config.dyn_krelu = False
        config.use_3relu = False
        config.use_2relu = False
        config.numproc_krelu = 24

        norm_ser = dill.dumps(normalize)
        denorm_ser = dill.dumps(denormalize)
        cut_model.norm_func = norm_ser
        cut_model.denorm_func = denorm_ser
        cut_model.dataset = dataset
        cut_model.is_conv = is_conv
        cut_model.norm_args = (means, stds)
        lb, ub = cut_model.overapprox_box()
        cut_model.orig_over_lb = lb
        cut_model.orig_over_ub = ub

        eran = ERAN(cut_model.tf_output, is_onnx=False)
        label, nn, nlb, nub = eran.analyze_box(lb, ub, 'deeppoly', 1, 1, True)
        print('Label:', label, 'Time:', time.time() - s, 's')
        if label == -1:
            cut_model.nlb = [np.array(lb) for lb in nlb]
            cut_model.nub = [np.array(ub) for ub in nub]
            lb, ub = cut_model.overapprox_box()
            cut_model.nlb.insert(0, lb)
            cut_model.nub.insert(0, ub)
        else:
            cut_model.shrink_box_best = cut_model.overapprox_box()
            cut_model.save(model_name)
            print('Verified, time:', int(time.time() - clever_start_time))
            print_vol(cut_model)
            return True
    print('Init model')
    if args.obox_approx:
        cut_model.approx_obox = True
    process = psutil.Process(os.getpid())
    start_lp_sampling = args.nowolf
    method = None
    res = None
    lp_params = (nn, domain, y_tar)
    wolf_params = [1000]

    shrink = cut_model.copy()
    if hasattr(shrink, 'shrink_box_best'):
        del shrink.shrink_box_best

    print_vol(cut_model)
    sys.stdout.flush()
    lbs, ubs, lb_max, ub_max = shrink.create_underapprox_box_lp(
        y_tar, shrink_to=args.obox_init, baseline=args.baseline)
    if args.baseline:
        print('\nBaseline, Time:', int(time.time() - clever_start_time),
              'sec,', 'Target:', y_tar, '\n')
    if lb_max is not None:
        data = cut_model.data
        cut_model.obox = None
        cut_model.ubox = None
        cut_model.update_target(y_true, y_tar)
        cut_model.reset_model(lb_max, ub_max)
        cut_model.update_bounds(lb_max, ub_max)
        cut_model.nlb = [np.array(lb) for lb in nlb]
        cut_model.nub = [np.array(ub) for ub in nub]
        cut_model.nlb.insert(0, lb_max)
        cut_model.nub.insert(0, ub_max)
        cut_model.data = data
    if lbs is not None:
        cut_model.shrink_box_best = (np.array(lbs), np.array(ubs))
    else:
        print('Failed to converge')
        return False

    print_vol(cut_model)
    if args.baseline:
        cut_model.save(model_name, baseline=True)
        return True
    cut_model.p_c = 0.9
    for cut in range(50):

        cut_model.it_cut = cut
        cut_model.p_c *= 0.95
        if cut >= args.max_cuts:
            cut_model.p_c = 0
        sys.stdout.flush()
        '''
        if dataset == 'mortgage':
            project = np.array([False] * cut_model.input_size)
            project[bounds_keys] = True
            W, lb, ub = cut_model.denorm_W(project, means, stds)
            lb_sh, ub_sh = cut_model.shrink_box_best[0].copy(), cut_model.shrink_box_best[1].copy()
            denormalize(lb_sh, dataset, means, stds, False)
            denormalize(ub_sh, dataset, means, stds, False)
            lb_sh = lb_sh[bounds_keys]
            ub_sh = ub_sh[bounds_keys]
            center = None
            pos_ex = None
            neg_ex = None
            v2 = draw2d_region(W, lb, ub, lb_sh, ub_sh,
                               model_name + '_reg' + str(cut) + '.png',
                               (bounds_lb, bounds_ub), center=center,
                               pos_ex=pos_ex, neg_ex=neg_ex,
                               draw_hp=W.shape[0] - 1)
            print('Volume:', v2)
        '''

        gc.collect()
        print('\nCut:', cut, ', Time:', int(time.time() - clever_start_time),
              'sec,', 'Target:', y_tar, ', Memory:',
              process.memory_info().rss / (1024 * 1024), '\n')

        verified, _ = lp_cut(cut_model, *lp_params)

        if verified:
            if dataset == 'mortgage':
                project = np.array([False] * cut_model.input_size)
                project[bounds_keys] = True
                W, lb, ub = cut_model.denorm_W(project, means, stds)

                lb_sh, ub_sh = cut_model.shrink_box_best[0].copy(
                ), cut_model.shrink_box_best[1].copy()
                denormalize(lb_sh, dataset, means, stds, False)
                denormalize(ub_sh, dataset, means, stds, False)
                lb_sh = lb_sh[bounds_keys]
                ub_sh = ub_sh[bounds_keys]

                center = None
                pos_ex = None
                neg_ex = None
                v2 = draw2d_region(W,
                                   lb,
                                   ub,
                                   lb_sh,
                                   ub_sh,
                                   model_name + '_reg' + str(cut + 1) + '.png',
                                   (bounds_lb, bounds_ub),
                                   center=center,
                                   pos_ex=pos_ex,
                                   neg_ex=neg_ex,
                                   draw_hp=W.shape[0] - 1)
                print('Volume:', v2)
            cut_model.save(model_name)
            print('Verified, time:', int(time.time() - clever_start_time))
            print_vol(cut_model)
            return True
    print('Failed to converge')
    return False
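# A hypothetical invocation (argument values are illustrative, not from the
# original source); pgd_args1 / pgd_args2 bundle the settings consumed by
# generate_initial_region_PGD above:
#
#     verified = clever_wolf(nn, cut_model, y_true, y_tar, specLB, specUB,
#                            'deeppoly', args, pgd_args1, pgd_args2)
#     print('region verified' if verified else 'failed to converge')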
Example #18
0
def _acasxu_recursive(specLB, specUB, model, eran: ERAN, constraints, failed_already, max_depth=10, depth=0,
                      permitted_depth_extensions=3, depth_extension=0,
                      domain="deeppoly", timeout_lp=1, timeout_milp=1,
                      use_default_heuristic=True, complete=True) \
        -> Tuple[bool, Optional[Sequence[Tuple[np.ndarray, Tuple[int, int, float]]]]]:
    # Returns whether the constraint is violated, and potentially counterexamples as witness of violation.
    # The counterexamples are returned together with the atomic comparison constraint they violate.
    hold, nn, nlb, nub, _, _ = \
        eran.analyze_box(specLB, specUB, domain, timeout_lp, timeout_milp, use_default_heuristic, constraints)
    if hold:
        return True, []
    elif depth >= max_depth:
        if failed_already.value and complete:  # failed_already.value means "not falsified already"
            verified_flag, adv_examples, adv_values, adv_violated_constraints = \
                verify_network_with_milp(nn, specLB, specUB, nlb, nub, constraints)
            xs = []
            if verified_flag:
                return True, []
            else:
                if adv_examples is not None:
                    for adv_image, violated_constraint, adv_value in \
                            zip(adv_examples, adv_violated_constraints, adv_values):
                        # Counterexamples found with MILP may be improper for
                        # constraints with multiple or-clauses: the or-clauses
                        # are treated individually, so a returned counterexample
                        # violates an individual or-clause but does not
                        # necessarily violate the whole or-term.
                        hold, _, nlb, nub, _, x = eran.analyze_box(
                            adv_image, adv_image, domain, timeout_lp,
                            timeout_milp, use_default_heuristic, constraints)
                        # print(f'hold={hold}, adv_image={adv_image}')
                        if not hold:
                            info(
                                f"property violated at {adv_image} with violation {adv_value} output_score {nlb[-1]}"
                            )
                            failed_already.value = 0
                            # break  # do not break, try to find many counterexamples
                        else:  # spurious counterexample
                            info(
                                f"spurious counterexample found: {adv_image}  with violation {adv_value} "
                                f"output_score {nlb[-1]}")
                            # for spurious counterexamples we continue the search; a proper counterexample may still be found
                        xs.append((adv_image, violated_constraint, adv_value))
            if len(xs) > 0:  # counterexample found
                return False, xs
            # if verified_flag was false, but no concrete counterexamples were found
            # continue with bounds splitting (perform depth_extension) unless the number
            # of permitted depth extensions was also reached
            if depth_extension >= permitted_depth_extensions:
                return False, None
            # debug(f"Extending Recursion Depth ({depth_extension+1}. time)")
            depth_extension += 1
            depth = 0
        else:
            return False, None

    # grads = _estimate_grads(specLB, specUB, model, input_shape=eran.input_shape)
    # add a small epsilon to the gradients so that a zero gradient estimate still splits the widest interval.
    # smears = np.multiply(grads + 0.00001, [u-l for u, l in zip(specUB, specLB)])

    nn.set_last_weights(constraints)
    grads_lower, grads_upper = nn.back_propagate_gradient(nlb, nub)
    smears = [
        max(-grad_l, grad_u) * (u - l) for grad_l, grad_u, l, u in zip(
            grads_lower, grads_upper, specLB, specUB)
    ]

    index = np.argmax(smears)
    m = (specLB[index] + specUB[index]) / 2
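    # Smear example: with widths (u - l) = (0.2, 0.5) and gradient bounds
    # [-3, 1] and [-1, 2], the smears are 3 * 0.2 = 0.6 and 2 * 0.5 = 1.0,
    # so index 1 is chosen and that dimension is bisected at its midpoint m.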

    result = failed_already.value
    xs = []
    if result:
        result1, xs1 = _acasxu_recursive(
            specLB, [ub if i != index else m for i, ub in enumerate(specUB)],
            model, eran, constraints, failed_already, max_depth, depth + 1,
            permitted_depth_extensions, depth_extension, domain, timeout_lp,
            timeout_milp, use_default_heuristic, complete)
        result = result and result1
        if xs1 is not None:
            xs.extend(xs1)
    if result:
        result2, xs2 = _acasxu_recursive(
            [lb if i != index else m for i, lb in enumerate(specLB)], specUB,
            model, eran, constraints, failed_already, max_depth, depth + 1,
            permitted_depth_extensions, depth_extension, domain, timeout_lp,
            timeout_milp, use_default_heuristic, complete)
        result = result and result2
        if xs2 is not None:
            xs.extend(xs2)
    return result, xs
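# A minimal driver sketch (assumed, not from the original source): the shared
# flag lets parallel workers stop refining once any genuine counterexample is
# found, since _acasxu_recursive reads and writes failed_already.value.
#
#     from multiprocessing import Value
#     failed_already = Value('i', 1)  # 1 means "not falsified yet"
#     holds, cexs = _acasxu_recursive(specLB, specUB, model, eran,
#                                     constraints, failed_already)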