# Example no. 1 (scraped sample-site separator; vote count: 0) —
# kept as a comment so the file remains parseable.
def running_config_snmp(context, config, words):
    """
    Append the snmp-server section of the running-config to 'config'.

    Reads the (expected singleton) 'snmp-server-config' row from the
    store and emits one 'snmp-server ...' line per field whose value
    differs from its default.  'words' is unused but kept for the
    running-config callback signature.
    """
    s_obj_type  = 'snmp-server-config'

    snmp_config = context.get_table_from_store(s_obj_type)
    if len(snmp_config) > 1:
        # table should be a singleton; warn and continue with the first row
        print('running_config_snmp: more than one snmp record')
    if len(snmp_config) == 0:
        return

    snmp_config = snmp_config[0]

    s_config = []
    if run_config.not_default_value(s_obj_type, 'community', snmp_config.get('community')):
        s_config.append('snmp-server community ro %s\n' %
                        utif.quote_string(snmp_config['community']))

    if run_config.not_default_value(s_obj_type, 'location', snmp_config.get('location')):
        s_config.append('snmp-server location %s\n' %
                        utif.quote_string(snmp_config['location']))

    if run_config.not_default_value(s_obj_type, 'contact', snmp_config.get('contact')):
        s_config.append('snmp-server contact %s\n' %
                        utif.quote_string(snmp_config['contact']))

    # fixed: previously indexed snmp_config['server-enable'] directly,
    # risking KeyError when the column is absent; use .get() like the
    # other fields above
    if run_config.not_default_value(s_obj_type, 'server-enable', snmp_config.get('server-enable')):
        s_config.append('snmp-server enable\n')

    if len(s_config):
        config.append('!\n')
        config += s_config
def running_config_host(context, config, words):
    """
    Add host details, including tags.
    """

    # When there are no tags and no host-aliases the hosts would not
    # need to be read; without a cheap way to count entries up front,
    # just enumerate the related tables once here.

    alias_by_host = create_obj_type_dict("host-alias", 'host')
    sec_ip_by_host = create_obj_type_dict("host-security-ip-address", 'host')
    sec_ap_by_host = create_obj_type_dict("host-security-attachment-point", 'host')
    switch_config = create_obj_type_dict('switch-interface-config', 'id')

    # Only configured hosts are enumerated: discovered hosts can't carry
    # configuration, so everything of interest hangs off 'host-config'
    # (configured hosts are joined after discovered ones for show commands).
    hosts = sdnsh.get_table_from_store("host-config")
    key = mi.pk('host-config')

    if words:
        if words[0] not in [entry[key] for entry in hosts]:
            return sdnsh.error_msg('No such host "%s"' % words[0])

    for entry in hosts:
        if words and words[0] != entry[key]:
            continue

        # accumulate this host's lines before extending 'config'
        lines = []
        a_s = ('address-space %s ' % entry['address-space']
               if entry.get('address-space', '') != 'default' else '')
        vlan = ('vlan %s ' % entry['vlan']
                if entry.get('vlan', '') != '' else '')
        lines.append('!\nhost %s%s%s\n' % (a_s, vlan, entry['mac']))

        # host alias for this id, when one exists
        if entry[key] in alias_by_host:
            lines.append("  host-alias %s\n" %
                         alias_by_host[entry[key]][0]['id'])

        # security policies: ip-address bindings ...
        if entry[key] in sec_ip_by_host:
            for ip in sec_ip_by_host[entry[key]]:
                lines.append("  security policy bind ip-address %s\n" % ip['ip-address'])

        # ... and attachment-point bindings
        if entry[key] in sec_ap_by_host:
            for ap in sec_ap_by_host[entry[key]]:
                lines.append("  security policy bind attachment-point %s %s\n" %
                             (ap.get('dpid', 'all'),
                              utif.quote_string(ap['if-name-regex'])))

        config += lines
def address_space_running_config_include_field(context, config,
        address_space, obj_type, field, value, indent, prefix = ""):
    """
    Append '<prefix><field> <value>' to 'config' when 'value' differs
    from the field's schema default; string-typed values are quoted.
    """
    if not context.mi.not_default_value(obj_type, field, value):
        return
    pad = ' ' * (indent + indent)
    if context.mi.is_field_string(obj_type, field):
        tail = " %s\n" % utif.quote_string(value)
    else:
        tail = ' %s\n' % value
    config.append(pad + prefix + field + tail)
def running_config_tacacs(context, config, words):
    t_obj_type  = 'tacacs-plus-config'
    th_obj_type = 'tacacs-plus-host'

    tacacs = context.get_table_from_store(t_obj_type)
    if len(tacacs) > 1:
        print 'running_config_tacacs: more than one tacacs record'

    tacacs_host = context.rest_query_objects(th_obj_type,
                                             {'orderby' : 'timestamp'})


    t_config = []

    if tacacs:
        tacacs = tacacs[0]

        if run_config.not_default_value(t_obj_type, 'tacacs-plus-authn', tacacs['tacacs-plus-authn']) and \
           run_config.not_default_value(t_obj_type, 'local-authn', tacacs['local-authn']):
            t_config.append('aaa authentication login default group tacacs+\n')
        elif run_config.not_default_value(t_obj_type, 'tacacs-plus-authn', tacacs['tacacs-plus-authn']):
            t_config.append('aaa authentication login default group tacacs+ local\n')

        if run_config.not_default_value(t_obj_type, 'tacacs-plus-authz', tacacs['tacacs-plus-authz']) and \
           run_config.not_default_value(t_obj_type, 'local-authz', tacacs['local-authz']):
            t_config.append('aaa authorization exec default group tacacs+\n')
        elif run_config.not_default_value(t_obj_type, 'tacacs-plus-authz', tacacs['tacacs-plus-authz']):
            t_config.append('aaa authorization exec default group tacacs+ local\n')

        if run_config.not_default_value(t_obj_type, 'tacacs-plus-acct', tacacs['tacacs-plus-acct']):
            t_config.append('aaa accounting exec default start-stop group tacacs+\n')

        if run_config.not_default_value(t_obj_type, 'key', tacacs['key']):
            t_config.append('tacacs server key %s\n' % tacacs['key'])

        if run_config.not_default_value(t_obj_type, 'timeout', tacacs['timeout']):
            t_config.append('tacacs server timeout %s\n' % tacacs['timeout'])

    for h in tacacs_host:
        if run_config.not_default_value(th_obj_type, 'key', h['key']):
            key = ' key %s' %  utif.quote_string(h['key'])
        else:
            key = ''

        t_config.append('tacacs server host %s%s\n' % (h['ip'], key))

    if len(t_config):   
        config.append('!\n')
        config += t_config
# Example no. 5 (scraped sample-site separator; vote count: 0) —
# kept as a comment so the file remains parseable.
def address_space_running_config_include_field(context,
                                               config,
                                               address_space,
                                               obj_type,
                                               field,
                                               value,
                                               indent,
                                               prefix=""):
    """
    Emit one running-config line for 'field' when its value differs
    from the schema default, quoting values of string-typed fields.
    """
    if context.mi.not_default_value(obj_type, field, value):
        is_string = context.mi.is_field_string(obj_type, field)
        rendered = (" %s\n" % utif.quote_string(value)) if is_string \
            else (' %s\n' % value)
        config.append(' ' * (indent + indent) + prefix + field + rendered)
import run_config
import utif


def running_config_tech_support(context, config, words):
    """
    Append configured tech-support commands to the running-config.

    Each 'tech-support-config' row becomes one
    "tech-support-config <cmd-type> '<cmd>'" line; a missing table is
    treated as having no entries (deliberate best-effort).
    """
    # waiting for new tag-mapping to arrive to rewrite
    try:
        tech_support_entries = context.get_table_from_store(
            'tech-support-config')
    except Exception:
        # fixed: 'except Exception, e' bound an unused py2-only variable;
        # the broad catch itself is a deliberate best-effort fallback
        tech_support_entries = []
    t_config = []
    for ts_entry in tech_support_entries:
        t_config.append(
            "tech-support-config %s '%s'\n" %
            (ts_entry['cmd-type'], utif.quote_string(ts_entry['cmd'])))
    if len(t_config) > 0:
        config.append('!\n')
        config += t_config


# Command-description fragment consumed by the CLI framework: a single
# mandatory 'running-config' enum argument whose only value is
# 'tech-support', selecting the tech-support section of the
# running-config ('doc' keys into the documentation catalog).
tag_running_config_tuple = (({
    'optional': False,
    'field': 'running-config',
    'type': 'enum',
    'values': 'tech-support',
    'short-help': 'Configuration for tech support',
    'doc': 'running-config|show-tech-support',
}, ), )

#
# Example no. 7 (scraped sample-site separator; vote count: 0) —
# kept as a comment so the file remains parseable.
def command_finished(words):
    """
    Record a completed command line to the record_urls trace file,
    quoting each token and joining with spaces; no-op when recording
    is disabled.
    """
    if not record_urls:
        return
    quoted = ' '.join(utif.quote_string(token) for token in words)
    record_urls.write('COMMAND "%s"\n' % quoted)
def command_finished(words):
    """Log the finished command (quoted tokens) to record_urls, if recording."""
    if record_urls:
        parts = [utif.quote_string(w) for w in words]
        record_urls.write('COMMAND "%s"\n' % ' '.join(parts))
# Example no. 9 (scraped sample-site separator; vote count: 0) —
# kept as a comment so the file remains parseable.
def collect_object_fields(
    obj_type, field, data, mode, completions, prefix=None, other=None, parent_field=None, parent_id=None, scoped=None
):
    """
    Returns the list of possible completions for a particular obj-type.

    Queries the store (or the rest-to-model shim) for 'obj_type' rows
    matching 'data' (narrowed by 'prefix' via a __startswith query),
    collects the distinct values of 'field', maps values back to their
    alias names where an alias obj-type applies, and returns the quoted
    values with a trailing-space delimiter.  Values already present in
    'completions' are skipped.  'scoped' seeds the search with key
    fields from the current CLI mode object; 'mode' names the submode
    that scoped lookup expects; 'other' selects an alternate obj-type
    used only to pick the alias table.
    """

    # work on a copy: the caller's dict is enriched with search terms below
    data = dict(data)
    if parent_field:
        data[parent_field] = parent_id
    if prefix:
        data[field + "__startswith"] = prefix

    key = mi.pk(obj_type)
    if scoped:
        obj_id = sdnsh.get_current_mode_obj()
        if sdnsh.current_mode() != mode:
            # XXX needs to be covered, shouldn't reach in like this
            for x in sdnsh.mode_stack:
                if x["mode_name"] == mode:
                    obj_id = x["obj"]
        obj_d = {key: obj_id}

        if obj_type in mi.alias_obj_types:
            # the submode ought to identify the foreign key
            data[mi.alias_obj_type_field(obj_type)] = obj_id
        else:
            # decompose the (possibly compound) mode key into its component
            # fields and fold any not already present into the search terms
            mi.split_compound_into_dict(obj_type, key, obj_d, is_prefix=True)
            for (k, v) in obj_d.items():
                if k != key and not k in data:
                    data[k] = v

    # if this is one of the obj_type's associated with aliases, should
    # the list of values be back-transformed into alias names?
    # yes, because if the current value has an inverse alias, the existing
    # inverse for the type implies that during a previous insert of this
    # value, it was converted from its alias name to the current name.
    #
    # collect the complete collection of aliases, since its likely
    # more than one back-to-alias conversion will be required, and query
    # its value before obj_type in the hope that it was recently cached.
    #
    alias_obj_type = mi.obj_type_related_config_obj_type(obj_type)
    if other and other in mi.alias_obj_type_xref:
        alias_obj_type = mi.alias_obj_type_xref[other][0]
    elif field != mi.pk(obj_type):
        if mi.is_foreign_key(obj_type, field):
            # completing a foreign key: use the referenced obj-type's alias
            (alias_obj_type, fk_name) = mi.foreign_key_references(obj_type, field)
            alias_obj_type = mi.obj_type_related_config_obj_type(alias_obj_type)
            if alias_obj_type in mi.alias_obj_type_xref:
                alias_obj_type = mi.alias_obj_type_xref[alias_obj_type][0]
            else:
                alias_obj_type = None
        else:
            if sdnsh.description:  # description debugging
                print "collect_object_fields: no alias for %s " "field %s not pk, and not fk" % (obj_type, field)
            alias_obj_type = None
    elif obj_type in mi.alias_obj_type_xref:
        alias_obj_type = mi.alias_obj_type_xref[obj_type][0]
    else:
        alias_obj_type = None

    # alias_dict maps a referenced value to the alias rows naming it;
    # alias_key is bound only when alias_obj_type was found (uses below
    # are guarded by membership tests against alias_dict)
    alias_dict = {}
    if alias_obj_type:
        foreign_field = mi.alias_obj_type_field(alias_obj_type)
        alias_dict = create_obj_type_dict(alias_obj_type, foreign_field)
        alias_key = mi.pk(alias_obj_type)

    # Remove any data fields which have values of None, these are fields
    # which are getting reset.
    for reset_fields in [x for x in data.keys() if data[x] == None]:
        del data[reset_fields]

    # collect complete obj_type
    if not mi.obj_type_has_model(obj_type):
        result = rest_to_model.get_model_from_url(obj_type, data)
    else:
        result = sdnsh.rest_query_objects(obj_type, data)
    check_rest_result(result)
    if sdnsh.description:  # description debugging
        print "collect_object_fields:", obj_type, field, data, result

    is_compound = mi.is_compound_key(obj_type, key)
    d = {}
    for item in result:
        if is_compound:
            mi.split_compound_into_dict(obj_type, key, item)
        value = item.get(field)
        # XXX hack to correctly format tag completions
        if obj_type == "tag" and field == "id":
            value = "%s.%s=%s" % tuple(value.split("|"))
        # remember to only add new items
        if value:
            if type(value) == list:
                # Need a mechanism to select values from the list, field's not enough
                # NOTE(review): inner loop variable shadows the outer 'item';
                # harmless today since 'item' isn't reused after this branch
                for item in value:
                    if utif.quote_string(str(item)) not in completions:
                        if str(item) in alias_dict:
                            alias_item = alias_dict[str(item)][0][alias_key]
                            # NOTE(review): assumes 'prefix' is a string when
                            # an alias match occurs — startswith(None) would
                            # raise; TODO confirm callers always pass a prefix
                            # in the alias case
                            if alias_item.startswith(prefix):
                                item = alias_item
                        d[utif.quote_string(str(item))] = None
            elif utif.quote_string(str(value)) not in completions:
                if str(value) in alias_dict:
                    alias_value = alias_dict[str(value)][0][alias_key]
                    if alias_value.startswith(prefix):
                        value = alias_value
                d[utif.quote_string(str(value))] = None

    # if there's an alias for this object, and a prefix is included,
    # then the alias'es which match also need to be directly included,
    # since its not clear whether the prefix applies to the actual
    # id or the alias.  since alias_dict is already the complete
    # collection of aliases for this obj-type, use it for matching names
    if alias_obj_type and prefix and prefix != "":
        alias_pk = mi.pk(alias_obj_type)
        for (n, v) in alias_dict.items():
            # 'n' here is the foreign key reference to this obj-type
            for item in [x[alias_pk] for x in v if x[alias_pk].startswith(prefix)]:
                if utif.quote_string(str(item)) not in completions:
                    d[utif.quote_string(str(item))] = None

    return utif.add_delim(list(d), " ")
# Example no. 10 (scraped sample-site separator; vote count: 0) —
# kept as a comment so the file remains parseable.
def complete_from_another(
    other,
    obj_type,
    field,
    data,
    completions,
    no_command,
    prefix=None,
    parent_field=None,
    parent_id=None,
    scoped=None,
    explicit=None,
):
    """
    Completion function used when another obj_type is used to populate
    values for the current obj_type

    the 'other' field identifies the obj_type to use to collect choices from,
    it can consist of two parts  other|field.  When field isn't described here,
    it comes from the description parameter, however, the 'field' value there may
    be in use to describe the value of the associated action.

    """
    if sdnsh.description:  # description debugging
        print "complete_from_another:", other, field, data, parent_field, parent_id, scoped

    # complete_from_another is intended to include other fields, which
    # shouldn't apply for a no command.
    if no_command:
        return

    if other.find("|") >= 0:
        parts = other.split("|")
        other = parts[0]
        field = parts[1]

    if not mi.obj_type_exists(other):
        raise error.CommandDescriptionError("Unknown obj-type/other: %s" % other)

    id = mi.pk(other)
    data = dict(data)
    if parent_field and parent_id:
        data[parent_field] = parent_id
    if prefix:
        data[field + "__startswith"] = prefix
    key = mi.pk(other)
    if scoped:
        key = mi.pk(other)
        if type(scoped) == str and scoped in data:
            obj_d = {key: data[scoped]}
        else:
            obj_d = {key: sdnsh.get_current_mode_obj()}
        mi.split_compound_into_dict(other, key, obj_d, is_prefix=True)
        for (k, v) in obj_d.items():
            if k != key and not k in data:
                data[k] = v
    if mi.is_primitive_compound_key(other, key):
        # try to use the field values to populate the primary key...
        value = ""
        s = mi.compound_key_separator(other, key)
        missing = None
        for kf in mi.deep_compound_key_fields(other, key):
            if kf in data:
                value += data[kf] + s
            else:
                # the fields must appear in order
                missing = kf
                break
        # For prefix extention to work here, the other field must have
        # named the field, for example switch's interface completion,
        # uses "other : 'port|number'"
        post_prefix_match = False
        if prefix:
            post_prefix_match = True
            if missing == field:
                value += prefix
                post_prefix_match = False
        if mi.obj_type_has_model(other):
            result = sdnsh.get_table_from_store(other, key, value)
        else:
            result = rest_to_model.get_model_from_url(other, {key: value})

        if post_prefix_match:
            # try to match the missing field, more work ought to be done
            # to identify whether the 'missing' field is the correect to match against
            #
            result = [x for x in result if field in x and str(x[field]).startswith(prefix)]
    elif mi.is_compound_key(other, key):
        search = {}
        if parent_id:
            from_id = {mi.pk(obj_type): parent_id}
            mi.split_compound_into_dict(obj_type, mi.pk(obj_type), from_id, is_prefix=True)
            # the field name used to collapse the result is the last
            # field in the compound key (id of 'other'),  this may need
            # improvement for other commands
            for deep_field in mi.deep_compound_key_fields(other, key):
                if deep_field in from_id:
                    search[deep_field] = from_id[deep_field]
                if deep_field in data:
                    search[deep_field] = data[deep_field]
        if scoped:
            # move known compound fields from obj_d into search.
            for deep_field in mi.deep_compound_key_fields(other, key):
                if deep_field in obj_d:
                    search[deep_field] = obj_d[deep_field]
        #
        # possibly other search keys?
        if prefix:
            search[field + "__startswith"] = prefix
        if explicit:
            search.clear()
            search[scoped] = data[scoped]
            if prefix:
                search[field + "__startswith"] = prefix
        if mi.obj_type_has_model(other):
            result = sdnsh.rest_query_objects(other, search)
        else:
            result = rest_to_model.get_model_from_url(other, search)
    elif mi.obj_type_has_field(other, field) and mi.is_primary_key(other, field):
        result = utif.add_delim(objects_starting_with(other, prefix), " ")
        completions.update(dict([[x, "%s selection" % pretty(other)] for x in result]))
        return
    elif mi.obj_type_has_field(obj_type, field) and mi.is_foreign_key(obj_type, field):
        # look up the values of the foreign key's from the other table
        (fk_obj_type, fk_fn) = mi.foreign_key_references(obj_type, field)
        result = sdnsh.get_table_from_store(fk_obj_type, fk_fn, prefix)
        field = fk_fn
    elif mi.obj_type_has_field(obj_type, field) and field == other:
        # In this situation, this obj_type has a field, which seems to be named
        # based on the other model's name, which seems to be requesting to
        # search the other model.
        field = mi.pk(other)
        result += utif.add_delim(objects_starting_with(other, prefix), " ")
        completions.update(dict([[x, "%s selection" % pretty(other)] for x in result]))
        return
    else:
        if mi.obj_type_has_model(other):
            result = sdnsh.rest_query_objects(other, data)
        else:
            result = rest_to_model.get_model_from_url(other, data)

    check_rest_result(result)
    if sdnsh.description:  # description debugging
        print "complete_from_another:", other, field, data, len(result)

    d = {}
    for item in result:
        value = item.get(field)
        # XXX hack to correctly format tag completions
        if other == "tag":
            value = "%s.%s=%s" % tuple(value.split("|"))
        # assume that 'values' are 'unique' within results
        if value and utif.quote_string(value) not in completions:
            d[utif.quote_string(str(value))] = None

    if sdnsh.description:  # description debugging
        print "complete_from_another: final", other, field, data, d.keys()

    result = utif.add_delim(list(d), " ")
    completions.update(dict([[x, "%s selection" % pretty(other)] for x in result]))
# show running tech_support_config [as]
#

import run_config
import utif

def running_config_tech_support(context, config, words):
    """
    Append configured tech-support commands to the running-config:
    one "tech-support-config <cmd-type> '<cmd>'" line per row of the
    'tech-support-config' table, preceded by a '!' separator.
    """
    # waiting for new tag-mapping to arrive to rewrite
    try:
        tech_support_entries = context.get_table_from_store('tech-support-config')
    except Exception:
        # fixed: dropped the unused py2-only 'e' binding; the broad catch
        # is a deliberate best-effort fallback when the table is absent
        tech_support_entries = []
    t_config = []
    for ts_entry in tech_support_entries:
        t_config.append("tech-support-config %s '%s'\n" % (ts_entry['cmd-type'], utif.quote_string(ts_entry['cmd'])))
    if len(t_config) > 0:
        config.append('!\n')
        config += t_config


# Command-description fragment: a single mandatory 'running-config' enum
# argument selecting the tech-support section of the running-config.
# NOTE(review): the scraped source was truncated here (the closing
# parentheses were missing, leaving the file unparseable); the literal is
# reconstructed to match the intact copy of this tuple earlier in the file.
tag_running_config_tuple = (
                            (
                             {
                             'optional'   : False,
                             'field'      : 'running-config',
                             'type'       : 'enum',
                             'values'     : 'tech-support',
                             'short-help' : 'Configuration for tech support',
                             'doc'        : 'running-config|show-tech-support',
                             },
                            ),
                           )
#
# --------------------------------------------------------------------------------


def running_config_tag(context, config, words):
    """
    Append persistent tag definitions and their match rules to the
    running-config.  Each persisted tag becomes a
    'tag <namespace>.<name>=<value>' line followed by its
    '  match [mac ...] [vlan ...] [switch ...]' lines; when 'words'
    names a single tag, only that tag is emitted.
    """
    # waiting for new tag-mapping to arrive to rewrite
    try:
        tag_mapping = create_obj_type_dict(context, 'tag-mapping', 'tag')
    except Exception:
        # fixed: dropped the unused py2-only 'e' binding; best-effort
        # fallback when the tag-mapping table is unavailable
        tag_mapping = {}

    t_config = []

    for tag in context.get_table_from_store('tag'):
        tag_name = '%s.%s=%s' % (tag['namespace'], tag['name'],
                                 utif.quote_string(tag['value']))
        if len(words) == 1 and tag_name != words[0]:
            continue
        if tag.get('persist', False):
            t_config.append('tag ' + tag_name + '\n')
            tms = tag_mapping.get(tag['id'], [])
            for tm in tms:
                m = '  match'
                if tm.get('mac', '') != '':
                    m += ' mac %s' % tm['mac']
                if tm.get('vlan', '') != '':
                    m += ' vlan %s' % tm['vlan']
                if tm.get('dpid', '') != '':
                    m += ' switch %s %s' % (tm['dpid'], tm.get('ifname', ''))
                t_config.append(m + '\n')

    # fixed: flush the collected section into the caller's config, matching
    # the other running_config_* helpers (the original built t_config but
    # never emitted it, so the function had no visible effect)
    if len(t_config):
        config.append('!\n')
        config += t_config
def collect_object_fields(obj_type,
                          field,
                          data,
                          mode,
                          completions,
                          prefix=None,
                          other=None,
                          parent_field=None,
                          parent_id=None,
                          scoped=None):
    """
    Returns the list of possible completions for a particular obj-type.

    Fetches rows of 'obj_type' matching 'data' (optionally narrowed by a
    'prefix' __startswith term), gathers the distinct values of 'field',
    back-converts values to alias names when an alias obj-type applies,
    and returns the quoted values delimited with a trailing space,
    skipping anything already in 'completions'.  'scoped' folds key
    fields from the current CLI mode object (of submode 'mode') into the
    search; 'other' only influences which alias table is chosen.
    """

    # copy first: the caller's dict gets enriched with search terms
    data = dict(data)
    if parent_field:
        data[parent_field] = parent_id
    if prefix:
        data[field + '__startswith'] = prefix

    key = mi.pk(obj_type)
    if scoped:
        obj_id = sdnsh.get_current_mode_obj()
        if sdnsh.current_mode() != mode:
            # XXX needs to be covered, shouldn't reach in like this
            for x in sdnsh.mode_stack:
                if x['mode_name'] == mode:
                    obj_id = x['obj']
        obj_d = {key: obj_id}

        if obj_type in mi.alias_obj_types:
            # the submode ought to identify the foreign key
            data[mi.alias_obj_type_field(obj_type)] = obj_id
        else:
            # split the (possibly compound) mode key into individual fields
            # and merge any not already present into the search terms
            mi.split_compound_into_dict(obj_type, key, obj_d, is_prefix=True)
            for (k, v) in obj_d.items():
                if k != key and not k in data:
                    data[k] = v

    # if this is one of the obj_type's associated with aliases, should
    # the list of values be back-transformed into alias names?
    # yes, because if the current value has an inverse alias, the existing
    # inverse for the type implies that during a previous insert of this
    # value, it was converted from its alias name to the current name.
    #
    # collect the complete collection of aliases, since its likely
    # more than one back-to-alias conversion will be required, and query
    # its value before obj_type in the hope that it was recently cached.
    #
    alias_obj_type = mi.obj_type_related_config_obj_type(obj_type)
    if other and other in mi.alias_obj_type_xref:
        alias_obj_type = mi.alias_obj_type_xref[other][0]
    elif field != mi.pk(obj_type):
        if mi.is_foreign_key(obj_type, field):
            # completing a foreign key: take the referenced type's alias
            (alias_obj_type,
             fk_name) = mi.foreign_key_references(obj_type, field)
            alias_obj_type = mi.obj_type_related_config_obj_type(
                alias_obj_type)
            if alias_obj_type in mi.alias_obj_type_xref:
                alias_obj_type = mi.alias_obj_type_xref[alias_obj_type][0]
            else:
                alias_obj_type = None
        else:
            if sdnsh.description:  # description debugging
                print 'collect_object_fields: no alias for %s ' \
                      'field %s not pk, and not fk' % (obj_type, field)
            alias_obj_type = None
    elif obj_type in mi.alias_obj_type_xref:
        alias_obj_type = mi.alias_obj_type_xref[obj_type][0]
    else:
        alias_obj_type = None

    # alias_dict: referenced value -> list of alias rows naming it;
    # alias_key is bound only when an alias obj-type was found (all later
    # uses sit behind membership tests on alias_dict)
    alias_dict = {}
    if alias_obj_type:
        foreign_field = mi.alias_obj_type_field(alias_obj_type)
        alias_dict = create_obj_type_dict(alias_obj_type, foreign_field)
        alias_key = mi.pk(alias_obj_type)

    # Remove any data fields which have values of None, these are fields
    # which are getting reset.
    for reset_fields in [x for x in data.keys() if data[x] == None]:
        del data[reset_fields]

    # collect complete obj_type
    if not mi.obj_type_has_model(obj_type):
        result = rest_to_model.get_model_from_url(obj_type, data)
    else:
        result = sdnsh.rest_query_objects(obj_type, data)
    check_rest_result(result)
    if sdnsh.description:  # description debugging
        print "collect_object_fields:", obj_type, field, data, result

    is_compound = mi.is_compound_key(obj_type, key)
    d = {}
    for item in result:
        if is_compound:
            mi.split_compound_into_dict(obj_type, key, item)
        value = item.get(field)
        # XXX hack to correctly format tag completions
        if obj_type == 'tag' and field == 'id':
            value = '%s.%s=%s' % tuple(value.split('|'))
        # remember to only add new items
        if value:
            if type(value) == list:
                # Need a mechanism to select values from the list, field's not enough
                # NOTE(review): this inner 'item' shadows the outer loop
                # variable; harmless since 'item' isn't reused afterwards
                for item in value:
                    if utif.quote_string(str(item)) not in completions:
                        if str(item) in alias_dict:
                            alias_item = alias_dict[str(item)][0][alias_key]
                            # NOTE(review): presumes 'prefix' is a string when
                            # an alias match occurs (startswith(None) raises)
                            # — TODO confirm callers
                            if alias_item.startswith(prefix):
                                item = alias_item
                        d[utif.quote_string(str(item))] = None
            elif utif.quote_string(str(value)) not in completions:
                if str(value) in alias_dict:
                    alias_value = alias_dict[str(value)][0][alias_key]
                    if alias_value.startswith(prefix):
                        value = alias_value
                d[utif.quote_string(str(value))] = None

    # if there's an alias for this object, and a prefix is included,
    # then the alias'es which match also need to be directly included,
    # since its not clear whether the prefix applies to the actual
    # id or the alias.  since alias_dict is already the complete
    # collection of aliases for this obj-type, use it for matching names
    if alias_obj_type and prefix and prefix != '':
        alias_pk = mi.pk(alias_obj_type)
        for (n, v) in alias_dict.items():
            # 'n' here is the foreign key reference to this obj-type
            for item in [
                    x[alias_pk] for x in v if x[alias_pk].startswith(prefix)
            ]:
                if utif.quote_string(str(item)) not in completions:
                    d[utif.quote_string(str(item))] = None

    return utif.add_delim(list(d), ' ')
def complete_from_another(other,
                          obj_type,
                          field,
                          data,
                          completions,
                          no_command,
                          prefix=None,
                          parent_field=None,
                          parent_id=None,
                          scoped=None,
                          explicit=None):
    """
    Completion function used when another obj_type is used to populate
    values for the current obj_type

    the 'other' field identifies the obj_type to use to collect choices from,
    it can consist of two parts  other|field.  When field isn't described here,
    it comes from the description parameter, however, the 'field' value there may
    be in use to describe the value of the associated action.

    """
    if sdnsh.description:  # description debugging
        print "complete_from_another:", other, field, data, parent_field, parent_id, scoped

    # complete_from_another is intended to include other fields, which
    # shouldn't apply for a no command.
    if no_command:
        return

    if other.find('|') >= 0:
        parts = other.split('|')
        other = parts[0]
        field = parts[1]

    if not mi.obj_type_exists(other):
        raise error.CommandDescriptionError("Unknown obj-type/other: %s" %
                                            other)

    id = mi.pk(other)
    data = dict(data)
    if parent_field and parent_id:
        data[parent_field] = parent_id
    if prefix:
        data[field + '__startswith'] = prefix
    key = mi.pk(other)
    if scoped:
        key = mi.pk(other)
        if type(scoped) == str and scoped in data:
            obj_d = {key: data[scoped]}
        else:
            obj_d = {key: sdnsh.get_current_mode_obj()}
        mi.split_compound_into_dict(other, key, obj_d, is_prefix=True)
        for (k, v) in obj_d.items():
            if k != key and not k in data:
                data[k] = v
    if mi.is_primitive_compound_key(other, key):
        # try to use the field values to populate the primary key...
        value = ""
        s = mi.compound_key_separator(other, key)
        missing = None
        for kf in mi.deep_compound_key_fields(other, key):
            if kf in data:
                value += data[kf] + s
            else:
                # the fields must appear in order
                missing = kf
                break
        # For prefix extention to work here, the other field must have
        # named the field, for example switch's interface completion,
        # uses "other : 'port|number'"
        post_prefix_match = False
        if prefix:
            post_prefix_match = True
            if missing == field:
                value += prefix
                post_prefix_match = False
        if mi.obj_type_has_model(other):
            result = sdnsh.get_table_from_store(other, key, value)
        else:
            result = rest_to_model.get_model_from_url(other, {key: value})

        if post_prefix_match:
            # try to match the missing field, more work ought to be done
            # to identify whether the 'missing' field is the correect to match against
            #
            result = [
                x for x in result
                if field in x and str(x[field]).startswith(prefix)
            ]
    elif mi.is_compound_key(other, key):
        search = {}
        if parent_id:
            from_id = {mi.pk(obj_type): parent_id}
            mi.split_compound_into_dict(obj_type,
                                        mi.pk(obj_type),
                                        from_id,
                                        is_prefix=True)
            # the field name used to collapse the result is the last
            # field in the compound key (id of 'other'),  this may need
            # improvement for other commands
            for deep_field in mi.deep_compound_key_fields(other, key):
                if deep_field in from_id:
                    search[deep_field] = from_id[deep_field]
                if deep_field in data:
                    search[deep_field] = data[deep_field]
        if scoped:
            # move known compound fields from obj_d into search.
            for deep_field in mi.deep_compound_key_fields(other, key):
                if deep_field in obj_d:
                    search[deep_field] = obj_d[deep_field]
        #
        # possibly other search keys?
        if prefix:
            search[field + '__startswith'] = prefix
        if explicit:
            search.clear()
            search[scoped] = data[scoped]
            if prefix:
                search[field + '__startswith'] = prefix
        if mi.obj_type_has_model(other):
            result = sdnsh.rest_query_objects(other, search)
        else:
            result = rest_to_model.get_model_from_url(other, search)
    elif mi.obj_type_has_field(other, field) and mi.is_primary_key(
            other, field):
        result = utif.add_delim(objects_starting_with(other, prefix), ' ')
        completions.update(
            dict([[x, "%s selection" % pretty(other)] for x in result]))
        return
    elif mi.obj_type_has_field(obj_type, field) and \
      mi.is_foreign_key(obj_type, field):
        # look up the values of the foreign key's from the other table
        (fk_obj_type, fk_fn) = mi.foreign_key_references(obj_type, field)
        result = sdnsh.get_table_from_store(fk_obj_type, fk_fn, prefix)
        field = fk_fn
    elif mi.obj_type_has_field(obj_type, field) and field == other:
        # In this situation, this obj_type has a field, which seems to be named
        # based on the other model's name, which seems to be requesting to
        # search the other model.
        field = mi.pk(other)
        result += utif.add_delim(objects_starting_with(other, prefix), ' ')
        completions.update(
            dict([[x, "%s selection" % pretty(other)] for x in result]))
        return
    else:
        if mi.obj_type_has_model(other):
            result = sdnsh.rest_query_objects(other, data)
        else:
            result = rest_to_model.get_model_from_url(other, data)

    check_rest_result(result)
    if sdnsh.description:  # description debugging
        print "complete_from_another:", other, field, data, len(result)

    d = {}
    for item in result:
        value = item.get(field)
        # XXX hack to correctly format tag completions
        if other == 'tag':
            value = '%s.%s=%s' % tuple(value.split('|'))
        # assume that 'values' are 'unique' within results
        if value and utif.quote_string(value) not in completions:
            d[utif.quote_string(str(value))] = None

    if sdnsh.description:  # description debugging
        print "complete_from_another: final", other, field, data, d.keys()

    result = utif.add_delim(list(d), ' ')
    completions.update(
        dict([[x, "%s selection" % pretty(other)] for x in result]))
def running_config_tag(context, config, words):
    # waiting for new tag-mapping to arrive to rewrite
    try:
        tag_mapping = create_obj_type_dict(context,
                                           'tag-mapping',
                                           'tag')
    except Exception, e:
        tag_mapping = {}


    t_config = []

    for tag in context.get_table_from_store('tag'):
        tag_name = '%s.%s=%s' % (tag['namespace'],
                                 tag['name'],
                                 utif.quote_string(tag['value']))
        if len(words) == 1 and tag_name != words[0]:
            continue
        if tag.get('persist', False): 
            t_config.append('tag ' + tag_name + '\n')
            tms = tag_mapping.get(tag['id'], [])
            for tm in tms:
                m = '  match'
                if tm.get('mac', '') != '':
                    m  += ' mac %s' % tm['mac']
                if tm.get('vlan', '') != '':
                    m += ' vlan %s' % tm['vlan']
                if tm.get('dpid', '') != '':
                    m += ' switch %s %s' % (tm['dpid'], tm.get('ifname', ''))
                t_config.append(m + '\n')