Example #1
def show_stale_flows(args, sort_by='table'):
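    # Print stale flows from the ifm, acl, elan and l3vpn modules, mapping
    # each flow's dpnid to its compute host and attaching neutron port IPs.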
    config.get_models(
        args,
        {
            "elan_elan_instances",
            "elan_elan_interfaces",
            "ietf_interfaces_interfaces",
            "ietf_interfaces_interfaces_state",
            "interface_service_bindings_service_bindings",
            "l3vpn_vpn_interfaces",
            # "mip_mac",
            "neutron_neutron",
            "odl_fib_fib_entries",
            "odl_interface_meta_if_index_interface_map",
            "odl_l3vpn_vpn_instance_to_vpn_id",
            "odl_inventory_nodes_config",
            "odl_inventory_nodes_operational"
        })
    compute_map = config.gmodels.odl_inventory_nodes_operational.get_dpn_host_mapping()
    nports = config.gmodels.neutron_neutron.get_ports_by_key()

    for flow in utils.sort(get_stale_flows(['ifm', 'acl', 'elan', 'l3vpn']),
                           sort_by):
        host = compute_map.get(flow.get('dpnid'), flow.get('dpnid'))
        ip_list = get_ips_for_iface(nports, flow.get('ifname'))
        if ip_list:
            flow['iface-ips'] = ip_list
        result = 'Table:{}, Host:{}, FlowId:{}{}'.format(
            flow['table'], host, flow['id'], utils.show_optionals(flow))
        print(result)
Example #2
def analyze_inventory(args):
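    # Dump flows from the config or operational inventory for a single
    # openflow node, optionally filtered by interface name (args.ifname).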
    config.get_models(
        args,
        {"odl_inventory_nodes_config", "odl_inventory_nodes_operational"})

    if args.isConfig:
        nodes = config.gmodels.odl_inventory_nodes_config.get_clist_by_key()
        print "Inventory Config:"
    else:
        print "Inventory Operational:"
        nodes = config.gmodels.odl_inventory_nodes_operational.get_clist_by_key(
        )
    node = nodes.get("openflow:" + args.nodeid)
    if node is None:
        print "node: {} was not found".format("openflow:" + args.nodeid)
        return
    tables = node.get(Nodes.NODE_TABLE)
    # groups = node.get(Nodes.NODE_GROUP)
    flow_list = []
    print "Flows:"
    for table in tables:
        for flow in table.get('flow', []):
            if not args.ifname or args.ifname in utils.nstr(
                    flow.get('flow-name')):
                flow_dict = {
                    'table': table['id'],
                    'id': flow['id'],
                    'name': flow.get('flow-name'),
                    'flow': flow
                }
                flow_list.append(flow_dict)
    flows = sorted(flow_list, key=lambda x: x['table'])
    for flow in flows:
        print('Table:', flow['table'])
        print('FlowId:', flow['id'], 'FlowName:', flow.get('name'))
Example #3
def show_all_flows(args):
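    # Preload every model dump_flows may consult, then dump flows from
    # all modules.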
    config.get_models(
        args, {
            "elan_elan_instances", "elan_elan_interfaces",
            "ietf_interfaces_interfaces", "ietf_interfaces_interfaces_state",
            "interface_service_bindings_service_bindings",
            "l3vpn_vpn_interfaces", "neutron_neutron", "odl_fib_fib_entries",
            "odl_interface_meta_if_index_interface_map",
            "odl_l3vpn_vpn_instance_to_vpn_id", "odl_inventory_nodes_config",
            "odl_inventory_nodes_operational"
        })
    dump_flows(args, modules=['all'])
Example #4
File: show.py Project: ww09/netvirt
def show_stale_bindings(args):
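    # Print service bindings whose interface no longer exists, along with
    # the restconf config URL for each stale entry.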
    config.get_models(
        args, {
            "ietf_interfaces_interfaces",
            "interface_service_bindings_service_bindings"
        })
    stale_ids, bindings = flows.get_stale_bindings(args)
    for iface_id in sorted(stale_ids):
        for binding in bindings[iface_id].values():
            # if binding.get('bound-services'):
            path = get_data_path('bindings', binding)
            print(utils.format_json(bindings[iface_id]))
            print('http://{}:{}/restconf/config/{}'.format(
                args.ip, args.port, path))
Example #5
def show_dup_flows(args):
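    # Group ELAN flows by a duplicate-detection key and report every key
    # that maps to more than one flow, annotated with the owning host.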
    config.get_models(
        args,
        {
            "elan_elan_instances",
            "elan_elan_interfaces",
            "ietf_interfaces_interfaces",
            "ietf_interfaces_interfaces_state",
            "interface_service_bindings_service_bindings",
            "l3vpn_vpn_interfaces",
            # "mip_mac",
            "odl_fib_fib_entries",
            "odl_interface_meta_if_index_interface_map",
            "odl_l3vpn_vpn_instance_to_vpn_id",
            "odl_inventory_nodes_config",
            "odl_inventory_nodes_operational"
        })
    mmac = {}  # config.gmodels.mip_mac.get_entries_by_key()
    einsts = config.gmodels.elan_elan_instances.get_clist_by_key()
    compute_map = config.gmodels.odl_inventory_nodes_operational.get_dpn_host_mapping()

    flows = utils.sort(get_all_flows(['elan']), 'table')
    matches = collections.defaultdict(list)
    for flow in flows:
        dup_key = get_key_for_dup_detect(args, flow)
        if dup_key:
            # matches is a defaultdict(list), so append unconditionally
            matches[dup_key].append(flow)
    for k, v in matches.items():
        if len(v) > 1:
            dpnid = k.split(':')[0]
            host = compute_map.get(dpnid, dpnid)
            result = 'Host:{}, FlowCount:{}, MatchKey:{}, ElanTag:{}'.format(
                host, len(v), k, v[0].get('elan-tag'))
            print(result)
            for idx, flow in enumerate(v):
                result = "Duplicate"
                mac_addr = flow.get('dst-mac')
                if mac_addr and mmac.get(mac_addr):
                    result = is_correct_elan_flow(flow, mmac.get(mac_addr),
                                                  einsts, host)
                print('    {} Flow-{}:{}'.format(
                    result, idx,
                    utils.format_json(args,
                                      flow_parser.parse_flow(
                                          flow.get('flow')))))
Example #6
def show_elan_flows(args):
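    # Print all ELAN flows sorted by flow id, showing the trailing MAC of
    # the id, the owning host, table, optionals and the parsed flow body.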
    config.get_models(
        args, {
            "elan_elan_instances", "elan_elan_interfaces",
            "ietf_interfaces_interfaces", "ietf_interfaces_interfaces_state",
            "odl_interface_meta_if_index_interface_map",
            "odl_inventory_nodes_config", "odl_inventory_nodes_operational"
        })
    compute_map = config.gmodels.odl_inventory_nodes_operational.get_dpn_host_mapping()
    for flow in utils.sort(get_all_flows(args, modules=['elan']), 'id'):
        host = compute_map.get(flow.get('dpnid'), flow.get('dpnid'))
        result = 'MacHost:{}{}, Table:{}, FlowId:{}, {}, Flow:{}'.format(
            flow['id'][-17:], host, flow['table'], flow['id'],
            utils.show_optionals(flow),
            utils.format_json(args, flow_parser.parse_flow(flow['flow'])))
        print(result)
Example #7
def show_learned_mac_flows(args):
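    # Report learned-MAC flows whose MAC address is unknown to neutron
    # (see the table 50/51 check below).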
    config.get_models(
        args,
        {
            "elan_elan_instances",
            "elan_elan_interfaces",
            "ietf_interfaces_interfaces",
            "ietf_interfaces_interfaces_state",
            "interface_service_bindings_service_bindings",
            "l3vpn_vpn_interfaces",
            # "mip_mac",
            "neutron_neutron",
            "odl_fib_fib_entries",
            "odl_interface_meta_if_index_interface_map",
            "odl_l3vpn_vpn_instance_to_vpn_id",
            "odl_inventory_nodes_config",
            "odl_inventory_nodes_operational"
        })
    nports = config.gmodels.neutron_neutron.get_ports_by_key(key='mac-address')
    compute_map = config.gmodels.odl_inventory_nodes_operational.get_dpn_host_mapping()

    flows = utils.sort(get_all_flows(['elan']), 'table')
    for flow_info in flows:
        flow = flow_info.get('flow')
        dpnid = flow_info.get('dpnid')
        host = compute_map.get(dpnid, dpnid)
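        # Flag table 50 flows carrying the 300s learn idle-timeout whose
        # src-mac is not a neutron port, and table 51 flows whose dst-mac
        # is not a neutron port, as stale learned MACs.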
        if ((flow_info.get('table') == 50 and flow.get('idle-timeout') == 300
             and not nports.get(flow_info.get('src-mac')))
                or (flow_info.get('table') == 51
                    and not nports.get(flow_info.get('dst-mac')))):  # NOQA

            result = 'Table:{}, Host:{}, FlowId:{}{}'.format(
                flow_info.get('table'), host, flow.get('id'),
                utils.show_optionals(flow_info))
            print(result)
            print('Flow:{}'.format(
                utils.format_json(args, flow_parser.parse_flow(flow))))
Example #8
def main(outdir):
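    # NOTE: relies on module-level globals defined elsewhere in the script
    # (data_type, z_dim, device, test_batch_size, variance, k_value, npts,
    # train_batch_size, nepochs, num_clusters, clusterer_dict, args, ...).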
    for subdir in ['all', 'snapshots', 'clusters']:
        # exist_ok=True already handles pre-existing directories
        os.makedirs(os.path.join(outdir, subdir), exist_ok=True)

    if data_type == 'grid':
        get_data = inputs.get_data_grid
        percent_good = evaluation.percent_good_grid
    elif data_type == 'ring':
        get_data = inputs.get_data_ring
        percent_good = evaluation.percent_good_ring
    else:
        raise NotImplementedError()

    zdist = distributions.Normal(torch.zeros(z_dim, device=device),
                                 torch.ones(z_dim, device=device))
    z_test = zdist.sample((test_batch_size, ))

    x_test, y_test = get_test(get_data=get_data,
                              batch_size=test_batch_size,
                              variance=variance,
                              k_value=k_value,
                              device=device)

    x_cluster, _ = get_test(get_data=get_data,
                            batch_size=10000,
                            variance=variance,
                            k_value=k_value,
                            device=device)

    train_loader = get_dataset(get_data=get_data,
                               batch_size=train_batch_size,
                               npts=npts,
                               variance=variance,
                               k_value=k_value)

    def train(trainer, g, d, clusterer, exp_dir):
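        # Alternate discriminator/generator steps; periodically recluster,
        # visualize samples/clusters, snapshot weights and log metrics.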
        it = 0
        if os.path.exists(os.path.join(exp_dir, 'log.txt')):
            os.remove(os.path.join(exp_dir, 'log.txt'))

        for epoch in range(nepochs):
            for x_real, y in train_loader:
                z = zdist.sample((train_batch_size, ))
                x_real, y = x_real.to(device), y.to(device)
                y = clusterer.get_labels(x_real, y)

                dloss, _ = trainer.discriminator_trainstep(x_real, y, z)
                gloss = trainer.generator_trainstep(y, z)

                if it % args.recluster_every == 0 and args.clusterer != 'supervised':
                    if args.clusterer != 'burnin' or it >= args.burnin_time:
                        clusterer.recluster(discriminator, x_batch=x_real)

                if it % 1000 == 0:
                    x_fake = g(z_test, clusterer.get_labels(
                        x_test, y_test)).detach().cpu().numpy()

                    visualize_generated(x_fake,
                                        x_test.detach().cpu().numpy(), y, it,
                                        exp_dir)

                    visualize_clusters(x_test.detach().cpu().numpy(),
                                       clusterer.get_labels(x_test, y_test),
                                       it, exp_dir)

                    torch.save(
                        {
                            'generator': g.state_dict(),
                            'discriminator': d.state_dict(),
                            'g_optimizer': g_optimizer.state_dict(),
                            'd_optimizer': d_optimizer.state_dict()
                        },
                        os.path.join(exp_dir, 'snapshots', 'model_%d.pt' % it))

                if it % 1000 == 0:
                    g.eval()
                    d.eval()

                    x_fake = g(z_test, clusterer.get_labels(
                        x_test, y_test)).detach().cpu().numpy()
                    percent, modes, kl = percent_good(x_fake, var=variance)
                    log_message = (
                        f'[epoch {epoch} it {it}] dloss = {dloss}, '
                        f'gloss = {gloss}, prop_real = {percent}, '
                        f'modes = {modes}, kl = {kl}')
                    with open(os.path.join(exp_dir, 'log.txt'), 'a+') as f:
                        f.write(log_message + '\n')
                    print(log_message)

                it += 1

    # train a G/D from scratch
    generator, discriminator = get_models(args.model_type, 'conditional',
                                          num_clusters, args.d_act_dim, device)
    g_optimizer, d_optimizer = get_optimizers(generator, discriminator)
    trainer = Trainer(generator,
                      discriminator,
                      g_optimizer,
                      d_optimizer,
                      gan_type='standard',
                      reg_type='none',
                      reg_param=0)
    clusterer = clusterer_dict[args.clusterer](discriminator=discriminator,
                                               k_value=num_clusters,
                                               x_cluster=x_cluster)
    clusterer.recluster(discriminator=discriminator)
    train(trainer, generator, discriminator, clusterer, outdir)
Example #9
import contextlib
import argparse
import json
import time
import ast
from pathlib import Path

import sys
sys.setrecursionlimit(3000)  # Necessary for Glow

from config import get_datasets, get_models, get_config, get_schema, expand_grid


parser = argparse.ArgumentParser()

parser.add_argument("--model", choices=get_models())
parser.add_argument("--dataset", choices=get_datasets())
parser.add_argument("--baseline", action="store_true", help="Run baseline flow instead of CIF")
parser.add_argument("--num-seeds", type=int, default=1, help="Number of random seeds to use.")
parser.add_argument("--checkpoints", choices=["best-valid", "latest", "both", "none"], default="both", help="Type of checkpoints to save (default: %(default)s)")
parser.add_argument("--nosave", action="store_true", help="Don't save anything to disk")
parser.add_argument("--data-root", default="data/", help="Location of training data (default: %(default)s)")
parser.add_argument("--logdir-root", default="runs/", help="Location of log files (default: %(default)s)")

parser.add_argument("--config", default=[], action="append", help="Override config entries at runtime. Specify as `key=value' (e.g. `--config max_epochs=50'). Any config value can be overridden, but some (e.g. `model') may lead to unforeseen consequences, so this should be used with care.")

parser.add_argument("--load", help="Directory of a run to load. If this flag is specified, the following flags will be ignored silently: --model, --dataset, --baseline, --num-seeds, --checkpoints, --nosave, --data-root, --logdir-root. Their values *can* be overridden via --config, but this may lead to unforeseen consequences in some cases.")

parser.add_argument("--print-config", action="store_true", help="Print the full config and exit")
parser.add_argument("--print-schema", action="store_true", help="Print the model schema and exit")
parser.add_argument("--print-model", action="store_true", help="Print the model and exit")
Example #10
from PIL import Image
import pickle
import time
import os

import tensorflow as tf  # required for tf.contrib.slim below (TF 1.x API)

from perlin import create_perlin_noise

slim = tf.contrib.slim
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
max_epsilon = 20
num_iter = 100
momentum = 1
model_index = [8, 14, 13, 3, 2, 6, 10, 9]
rand_index = [0, 3]
eps = max_epsilon / num_iter
configs = {'batch_size': 64, 'epoch': 5}
model_ind, models = get_models(model_index)


def op_with_scalar_cast(a, b, f):
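    # Apply f(a, b); returns None when the operand types are incompatible.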
    try:
        return f(a, b)
    except (TypeError, ValueError):
        pass


def mul(a, b):
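    # Multiplication routed through op_with_scalar_cast.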
    def multiply(a, b):
        return a * b

    return op_with_scalar_cast(a, b, multiply)