Example #1
def test_net_impl_2():
    """ Validate that my AcasNet implementation is correct given real interval inputs. """
    dom = IntervalDom()

    net = AcasNet(dom, 2, 1, [2]).to(device)
    inputs = torch.tensor([[[-2, -1], [-1, 1]], [[-0.5, 0.5], [1.5, 3]]],
                          device=device)
    inputs_lb = inputs[:, :, 0]
    inputs_ub = inputs[:, :, 1]

    with torch.no_grad():
        lin0 = net.all_linears[0]
        lin0.weight[0][0] = -0.5
        lin0.weight[0][1] = 0.5
        lin0.bias[0] = -1

        lin0.weight[1][0] = 0.5
        lin0.weight[1][1] = -0.5
        lin0.bias[1] = 1

        lin1 = net.all_linears[1]
        lin1.weight[0][0] = -1
        lin1.weight[0][1] = 1
        lin1.bias[0] = -1

    outs_lb, outs_ub = net(dom.Ele.by_intvl(inputs_lb, inputs_ub)).gamma()
    answer = torch.tensor([[[-1.5, 0]], [[-1.75, -0.5]]], device=device)
    answer_lb = answer[:, :, 0]
    answer_ub = answer[:, :, 1]

    assert torch.equal(outs_lb, answer_lb)
    assert torch.equal(outs_ub, answer_ub)
    return
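
The expected bounds in answer follow from plain interval arithmetic through the hard-coded weights. A minimal standalone sketch (no project code, assuming only standard interval propagation) that reproduces the first batch entry:

# Interval propagation through the hard-coded 2-2-1 network above,
# for the first batch entry x1 in [-2, -1], x2 in [-1, 1].
def mul(c, lo, hi):
    """ Multiply the interval [lo, hi] by the scalar c. """
    return (c * lo, c * hi) if c >= 0 else (c * hi, c * lo)

def affine(ws, b, intervals):
    """ Affine map w . x + b over a list of input intervals. """
    lo, hi = b, b
    for w, (l, u) in zip(ws, intervals):
        dl, du = mul(w, l, u)
        lo, hi = lo + dl, hi + du
    return lo, hi

relu = lambda lo, hi: (max(lo, 0.), max(hi, 0.))

x = [(-2., -1.), (-1., 1.)]
h1 = relu(*affine([-0.5, 0.5], -1., x))  # (0., 0.5)
h2 = relu(*affine([0.5, -0.5], 1., x))   # (0., 1.)
print(affine([-1., 1.], -1., [h1, h2]))  # (-1.5, 0.), matching answer[0]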
Example #2
def test_acas_input_optimizable():
    dp = DeeppolyDom()
    vi = IntervalDom()

    _tc4(vi)
    _tc4(dp)
    return
Example #3
def test_net_impl_1():
    dp = DeeppolyDom()
    vi = IntervalDom()

    _tc1(vi)
    _tc1(dp)
    return
Example #4
def test_acas_net_optimizable():
    dp = DeeppolyDom()
    vi = IntervalDom()

    _tc3(vi)
    _tc3(dp)
    return
Example #5
    def setup_rest(self, args: argparse.Namespace):
        """ Override this method to set up those not easily specified via command line arguments. """
        # validation
        assert not (args.no_pts and args.no_abs), 'must train on concrete points or abstractions (or both)'

        args.dom = {
            'deeppoly': DeeppolyDom(),
            'interval': IntervalDom()
        }[args.dom]

        if args.use_scheduler:
            # having a scheduler does improve the accuracy quite a bit
            args.scheduler_fn = lambda opti: ReduceLROnPlateau(
                opti, factor=0.8, patience=10)
        else:
            args.scheduler_fn = lambda opti: None
        return
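
For context, a minimal sketch of the argparse setup this method expects. The flag names mirror the attributes read above (dom, no_pts, no_abs, use_scheduler); the project's real parser lives elsewhere and may differ, so treat the defaults and help strings below as assumptions:

import argparse

# Hypothetical parser mirroring the attributes setup_rest() reads;
# the real project parser may use different defaults or extra flags.
parser = argparse.ArgumentParser()
parser.add_argument('--dom', choices=['deeppoly', 'interval'], default='deeppoly',
                    help='abstract domain to train and verify with')
parser.add_argument('--no_pts', action='store_true',
                    help='skip training on concrete points')
parser.add_argument('--no_abs', action='store_true',
                    help='skip training on abstractions')
parser.add_argument('--use_scheduler', action='store_true',
                    help='decay the learning rate on plateau')

args = parser.parse_args(['--dom', 'interval', '--use_scheduler'])
# setup_rest(args) then replaces args.dom with an IntervalDom() instance
# and attaches args.scheduler_fn.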
Example #6
def art(top_k: int = 10, lr: float = 1e-2, batch_size: int = 16):
    """ VErification Guided Abstracted Learning towards one prop: Train to get all split regions safe.
    :param top_k: if too large, later training epochs will incur many unnecessary refinements
    :param lr: 1e-3 needs 35 epochs, 1e-2 needs 11 epochs.
    :return: <spent epochs, spent time in seconds>
    """
    dom = IntervalDom()
    print(f'Using top_k={top_k}, lr={lr}, batch_size={batch_size}.')

    prop = DemoProp.property12(dom)
    print('For the 2-layer ReLU network, it should satisfy props:', prop.name)
    v = Bisecter(dom, prop)
    net = demo_net_inited(dom).to(device)

    in_lbs, in_ubs = prop.lbub(device)
    boxes_lb, boxes_ub = in_lbs, in_ubs

    opti = Adam(net.parameters(), lr=lr)

    epoch = 0
    total_loss = -1
    start = timer()

    orig_dists = go(dom, net, prop, boxes_lb, boxes_ub)
    print('Before everything, the mean/max dist are', orig_dists.mean(),
          orig_dists.max())

    results = []
    with torch.no_grad():
        dists = go(dom, net, prop, boxes_lb, boxes_ub)
        results.append((0, dists.max().item()))

    while total_loss != 0.:  # train/refine until the mean safety distance hits exactly 0
        epoch += 1

        trainset = AbsData(boxes_lb, boxes_ub)
        with torch.no_grad():
            dists = go(dom, net, prop, boxes_lb, boxes_ub)
        print(
            f'[{utils.time_since(start)}] At epoch {epoch}: loaded {len(trainset)} boxes for training,',
            f'min dist {dists.min()}, max dist {dists.max()}.')

        trainset_loader = data.DataLoader(trainset,
                                          batch_size=batch_size,
                                          shuffle=True)
        nbatches = ceil(len(trainset) / batch_size)

        for i, (batch_lb, batch_ub) in enumerate(trainset_loader):
            print(f'\rEpoch {epoch}: {i / nbatches * 100 :.2f}%', end='')
            opti.zero_grad()
            dists = go(dom, net, prop, batch_lb, batch_ub)
            loss = dists.mean()
            loss.backward()
            opti.step()

        # refine inputs
        boxes_lb, boxes_ub = v.split(boxes_lb,
                                     boxes_ub,
                                     None,
                                     net,
                                     top_k,
                                     stop_on_k_ops=1)

        with torch.no_grad():
            dists = go(dom, net, prop, boxes_lb, boxes_ub)
            total_loss = dists.mean()
            results.append((epoch, dists.max().item()))

        print(
            f'\r[{utils.time_since(start)}] After epoch {epoch}: total loss {total_loss}, '
            f'min dist {dists.min()}, max dist {dists.max()}.')
        pass

    print()
    for e, worst in results:
        print(f'({e}, {worst})')
    return epoch, timer() - start
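
A typical invocation, using the defaults; per the docstring above, lr=1e-2 should converge in roughly 11 epochs:

# Run the refinement-guided training loop and report its cost.
spent_epochs, spent_secs = art(top_k=10, lr=1e-2, batch_size=16)
print(f'All regions certified safe after {spent_epochs} epochs, {spent_secs:.1f}s.')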
Example #7
def demo_analysis():
    print('Analyzing the initial network and safety properties.')
    dom = IntervalDom()

    prop = DemoProp.property12(dom)
    v = Bisecter(dom, prop)
    net = demo_net_inited(dom).to(device)

    in_lbs, in_ubs = prop.lbub(device)
    boxes_lb, boxes_ub = in_lbs, in_ubs

    viols_lb, viols_ub = v.verify(boxes_lb, boxes_ub, None, net)
    print('Found Violation LB:', viols_lb[:10])
    print('Found Violation UB:', viols_ub[:10])

    pts = torch.tensor([[4., 1.]], device=device)
    print('Picked violation pts input:', pts)
    print('Picked violation pts output:', net(pts))

    def _inspect_refined(refined_lbs, refined_ubs, title: str):
        print('=====', title, '=====')
        print('Refined LBs:', refined_lbs)
        print('Refined UBs:', refined_ubs)
        dists = go(dom, net, prop, refined_lbs, refined_ubs)
        print(f'Dists: {dists}')
        print(f'Avg dist: {dists.mean()}, Max dist: {dists.max()}')
        print('=====', title, '=====')
        print()
        return

    # (1) inspect right after initialization
    ins = dom.Ele.by_intvl(boxes_lb, boxes_ub)
    outs = net(ins)
    print('Original LB:', boxes_lb)
    print('Original UB:', boxes_ub)
    print('Out LB after initialization:', outs.lb())
    print('Out UB after initialization:', outs.ub())
    _inspect_refined(boxes_lb, boxes_ub, 'Right after initialization')

    # (2) manually specify to split into 2 pieces, along dim 1
    _inspect_refined(torch.tensor([
        [0., 0.5],
        [2.5, 0.5],
    ], device=device), torch.tensor([
        [2.5, 2.5],
        [5., 2.5],
    ], device=device), 'Manual Split into 2, along dim1')  # intvl: max 9.375

    # (3) manually specify to split into 2 pieces, along dim 2
    _inspect_refined(torch.tensor([
        [0., 0.5],
        [0., 1.5],
    ], device=device), torch.tensor([
        [5., 1.5],
        [5., 2.5],
    ], device=device), 'Manual Split into 2, along dim2')  # intvl: max 11.625

    # (4) manually split into 4 pieces, all along dim1
    _inspect_refined(
        torch.tensor([
            [0., 0.5],
            [1.25, 0.5],
            [2.5, 0.5],
            [3.75, 0.5],
        ], device=device), torch.tensor([
            [1.25, 2.5],
            [2.5, 2.5],
            [3.75, 2.5],
            [5., 2.5],
        ], device=device), 'Manual Split into 4, all along dim1')  # intvl: max 8.125

    # (5) manually split into 4 pieces, all along dim2
    _inspect_refined(
        torch.tensor([
            [0., 0.5],
            [0., 1.0],
            [0., 1.5],
            [0., 2.0],
        ], device=device), torch.tensor([
            [5., 1.0],
            [5., 1.5],
            [5., 2.0],
            [5., 2.5],
        ], device=device), 'Manual Split into 4, all along dim2')  # intvl: max 11.5

    # (6) manually split into 4 pieces, along both dim1 and dim2
    _inspect_refined(
        torch.tensor([
            [0., 0.5],
            [0., 0.5],
            [2.5, 1.5],
            [2.5, 1.5],
        ], device=device), torch.tensor([
            [2.5, 1.5],
            [2.5, 1.5],
            [5., 2.5],
            [5., 2.5],
        ], device=device), 'Manual Split into 4, along both dim1 and dim2')  # intvl: max 6.875

    # (7) inspect heuristic splitting into 2 pieces, automatically
    split_lbs, split_ubs = v.split(boxes_lb,
                                   boxes_ub,
                                   None,
                                   net,
                                   batch_size=100,
                                   stop_on_k_ops=1)
    _inspect_refined(split_lbs, split_ubs, 'One Step Heuristic splitting')

    # (8) inspect naive splitting into K pieces in every dimension (not used; too similar to ReluVal)
    results = []
    for k in range(1, 11):
        split_lbs, split_ubs = naive_split(boxes_lb, boxes_ub, k)
        print(
            f'===== Naive Splitting into {k} pieces along every dimension ====='
        )
        dists = go(dom, net, prop, split_lbs, split_ubs)
        worst_dist = dists.max()
        avg_dist = dists.mean()
        print('After splitting, dists:', dists)
        print('After splitting into', k, 'pieces, worst distance:', worst_dist,
              'mean distance:', avg_dist)
        total_loss = dists.mean()
        print('Total loss after initialization:', total_loss)
        print()
        results.append((k, worst_dist.item(), avg_dist.item()))

    print('\n\nAfter everything:')
    _, worst_base, avg_base = results[0]
    for k, worst, avg in results:
        print(f'({k}, {1.0 - worst/worst_base}, {1.0 - avg/avg_base})')
    return
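
naive_split above is a project helper that is not shown. Assuming it grids each input box into k equal pieces along every dimension (the ReluVal-style baseline the comment alludes to), a minimal sketch; the real helper may differ in signature or output order:

import itertools
import torch

def naive_split_sketch(lb: torch.Tensor, ub: torch.Tensor, k: int):
    """ Hypothetical stand-in for naive_split(): grid each of the (N, D)
        boxes into k equal pieces per dimension, yielding N * k^D boxes. """
    step = (ub - lb) / k  # per-dimension stride of the grid
    out_lb, out_ub = [], []
    for idx in itertools.product(range(k), repeat=lb.shape[1]):
        off = torch.tensor(idx, dtype=lb.dtype, device=lb.device)
        out_lb.append(lb + off * step)
        out_ub.append(lb + (off + 1) * step)
    return torch.cat(out_lb, dim=0), torch.cat(out_ub, dim=0)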
def test_reluval(logs_dir: str = './reluval_logs/'):
    """ Use property 2 logs from ReluVal for thoroughly examination.
        Need to run ReluVal's script 2 first and prepare the logs in proper location.
    :param logs_dir: directory of logs
    """
    if not Path(logs_dir).is_dir():
        print(f'{logs_dir} is not a valid path for the logs.')
        return

    dom = IntervalDom()

    def validate_dnn(dnn, cex):
        """ Validate that the DNN outputs the same result as NNET does. """
        oi = torch.tensor([cex.inputs])
        oo = dnn(oi)
        oo = oo[0].detach().numpy()
        target = cex.outputs
        err = _errors(oo, target)

        print('My PyTorch:', oo)
        print('ReluVal C++:', target)
        print('Error:', err)
        return err

    def validate_by_prop(dnn, cex, prop_id: int = 2):
        """ It seems the computed outputs are quite different (10^-2 error). So confirm it's true CEX instead? """
        oi = torch.tensor([cex.inputs])
        oo = dnn(oi)

        if prop_id != 2:
            raise NotImplementedError()
        prop = AcasProp.property2(dom)

        e = dom.Ele.by_intvl(oo, oo)
        dist = prop.safe_dist(e)

        mse = nn.MSELoss()
        loss = mse(dist, torch.zeros_like(dist))
        print(f'My PyTorch loss for property{prop_id}: {loss}')
        return loss

    def validate_cex(c, log_path):
        log_name = Path(log_path).name
        prefix = 'ACASXU_run2a_'
        assert prefix in log_name
        net_id = log_name[len(prefix):len(prefix) + 3]  # e.g. '2_1' for ACASXU_run2a_2_1_batch_2000.nnet.log
        net_path = f'./acas_nets/ACASXU_run2a_{net_id}_batch_2000.nnet'
        print(net_path)
        dnn, mins, maxs = AcasNet.load_nnet(net_path, dom)

        # err = validate_dnn(dnn, c)
        err = validate_by_prop(dnn, c)
        print()
        return err

    reluval = ReluVal()
    log_files = [fn for fn in os.listdir(logs_dir) if fn.endswith('.nnet.log')]
    errs = []
    for log_name in log_files:
        with open(Path(logs_dir, log_name), 'r') as f:
            log_data = f.read()

        cexs = reluval.extract(log_data)
        for c in cexs:
            print('Validating', c)
            err = validate_cex(c, log_name)
            errs.append(err)
        pass

    print('Losses for forward propagation (each should be > 0, confirming the CEX is genuine):')
    for err in errs:
        print(err)
    print('Avg:', sum(errs) / len(errs))
    return
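
_errors above is also a project helper that is not shown; a plausible sketch, assuming it reports per-output relative error between the PyTorch forward pass and ReluVal's logged outputs:

import numpy as np

def _errors_sketch(outputs, targets):
    """ Hypothetical stand-in for _errors(): element-wise relative error
        between the PyTorch outputs and ReluVal's logged outputs. """
    outputs = np.asarray(outputs, dtype=float)
    targets = np.asarray(targets, dtype=float)
    return np.abs(outputs - targets) / np.maximum(np.abs(targets), 1e-12)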