import numpy as np
from numba import i8
from numba.core.types import unicode_type
from numba.typed import Dict
from numba.experimental import structref

# KnowledgeBaseContext, numpy_type_map, py_type_map, exp_fixed_width,
# init_kb_data, Dict_Unicode_to_Enums, infer_type, _assert_map,
# enumerize_array and enumerize_str_array are module-local helpers
# assumed to come from the surrounding package.


def state_to_nb_objects(state, context=None):
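    '''Convert a dict-of-dicts state into typed numpy records and pack them
    into the context's jitted structs, grouped by object type. Every object
    dict must carry a 'type' key naming a registered spec.
    '''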
    c = KnowledgeBaseContext.get_context(context)

    tm = numpy_type_map
    #Throw the attribute values into tuples and find the lengths of
    #	all of the strings.
    data_by_type = {}
    mlens_by_type = {}
    for name, elm in state.items():
        # print(elm)
        assert 'type' in elm, "All objects need 'type' attribute to be numbalized."
        typ = elm['type']

        spec = c.registered_specs[typ]

        values = [
            elm.get(k, "" if s_obj['type'] == "string" else np.nan)
            for k, s_obj in spec.items() if k != 'type'
        ]
        assert len(values) == len(spec), \
            "Object {!r} with keys [{}] cannot be cast to type {!r} with attributes [{}]".format(
                name, ",".join(elm.keys()), typ, ",".join(spec.keys()))

        # Prepend the object's name so each record is (name, attr_1, ..., attr_n).
        elm_data = tuple([name] + values)

        data = data_by_type.get(typ, [])
        data.append(elm_data)
        data_by_type[typ] = data

        if (typ not in mlens_by_type):
            mlens_by_type[typ] = [0] * len(elm_data)
        mlens = mlens_by_type[typ]

        # Track the longest string seen for each string attribute so the
        # numpy dtype below can be sized to fit.
        for i, (attr, s_obj) in enumerate(spec.items()):
            attr_typ = s_obj['type']
            cast_fn = py_type_map[attr_typ]
            if (attr_typ == 'string'):
                L = len(cast_fn(elm.get(attr, "")))
                if (L > mlens[i + 1]): mlens[i + 1] = L

    #Make a fixed bitwidth numpy datatype for the tuples based off of
    #	the max string lengths per attribute.
    out = {}
    for spec_typ, len_arrs in mlens_by_type.items():
        #Pick string widths that fit into [20,80,320,...] to avoid jit recompiles
        spec = c.registered_specs[spec_typ]
        mlens = exp_fixed_width(np.array(len_arrs), 20)
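        # mlens[0] is the width reserved for the object's name; per-attribute
        # string widths start at index 1, matching the elm_data layout above.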
        dtype = [('__name__', tm['string'] % int(mlens[0]))]
        for i, (attr, s_obj) in enumerate(spec.items()):
            typ = s_obj['type']
            if (typ == 'string'):
                dtype.append((attr, tm['string'] % int(mlens[i + 1])))
            else:
                dtype.append((attr, tm[typ]))
        pack_from_numpy = c.jitstructs[spec_typ].pack_from_numpy
        # print(data_by_type[spec_typ],dtype)
        out[spec_typ] = pack_from_numpy(
            np.array(data_by_type[spec_typ], dtype=dtype), mlens)
    return out
def unenumerize_value(c, value, typ=None, context=None):
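    '''Map an enum id back to its original string or number using the
    context's backmaps; raises ValueError if no enum entry exists.
    '''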
    c = KnowledgeBaseContext.get_context(context)
    if (value in c.string_backmap):
        return c.string_backmap[value]
    elif (value in c.number_backmap):
        return c.number_backmap[value]
    else:
        raise ValueError("No enum for %r." % value)
def __new__(cls, context=None):
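    '''Allocate the StructRefProxy, initialize its knowledge-base data from
    the resolved context, and cache kb_data/context_data on the instance.
    '''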
    context_data = KnowledgeBaseContext.get_context(context).context_data
    kb_data = init_kb_data(context_data)
    self = structref.StructRefProxy.__new__(cls, kb_data, context_data)
    # _BaseContextful.__init__(self,context) #Maybe want this afterall
    self.kb_data = kb_data
    self.context_data = context_data
    return self
def nb_objects_to_enumerized(nb_objects, context=None):
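    '''Enumerize every packed object: maps each type name to an inner typed
    Dict of i8 enum arrays, filled by that type's jitted enumerize_nb_objs.
    '''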
    c = KnowledgeBaseContext.get_context(context)
    out = Dict.empty(unicode_type, Dict_Unicode_to_Enums)
    for typ, objs in nb_objects.items():
        enumerize_nb_objs = c.jitstructs[typ].enumerize_nb_objs

        out_typ = out[typ] = Dict.empty(unicode_type, i8[:])
        enumerize_nb_objs(objs, out_typ, c.string_enums, c.number_enums,
                          c.string_backmap, c.number_backmap, c.enum_counter)
    return out
def enumerize_value(c, value, typ=None, context=None):
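    '''Enumerize a single value: strings and numbers are interned into the
    context's enum maps (assigning a new id if needed) and their enum id is
    returned; any other type raises ValueError.
    '''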
    c = KnowledgeBaseContext.get_context(context)
    if (typ is None): typ = infer_type(value)
    if (typ == 'string'):
        value = str(value)
        _assert_map(value, c.string_enums, c.string_backmap, c.enum_counter)
        # print(value,typ)
        return c.string_enums[value]
    elif (typ == 'number'):
        value = float(value)
        _assert_map(value, c.number_enums, c.number_backmap, c.enum_counter)
        return c.number_enums[value]
    else:
        raise ValueError("Unrecognized type %r" % typ)
def enumerize(value, typ=None, context=None):
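    '''Enumerize a value of any supported kind: numpy arrays go through the
    array enumerizers, lists/tuples are handled element-wise, and scalars
    are delegated to c.enumerize_value.
    '''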
    c = KnowledgeBaseContext.get_context(context)
    if (isinstance(value, np.ndarray)):
        if (np.issubdtype(value.dtype, np.number)):
            return enumerize_array(value, c.number_enums, c.number_backmap,
                                   c.enum_counter)
        elif (np.issubdtype(value.dtype, np.str_)):
            return enumerize_str_array(value, c.string_enums, c.string_backmap,
                                       c.enum_counter)
        else:
            raise ValueError("Array dtype could not be enumerized: %s" % value.dtype)

    elif (isinstance(value, (list, tuple))):
        return [c.enumerize_value(x) for x in value]
    else:
        return c.enumerize_value(value)
def enumerized_to_vectorized(c,
                             enumerized_state,
                             return_inversion_data=False,
                             context=None):
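    '''Split an enumerized state into 'nominal' and 'continuous' arrays
    (plus optional inversion data) by calling the jitted
    enumerized_to_vectorized kernel, a separately compiled function of the
    same name rather than a recursive call to this wrapper.
    '''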
    c = KnowledgeBaseContext.get_context(context)

    nominal, continuous, inversion_data = enumerized_to_vectorized(
        enumerized_state,
        c.spec_flags['nominal'],
        c.number_backmap,
        return_inversion_data=return_inversion_data)
    out = {
        'nominal': nominal,
        'continuous': continuous,
        'inversion_data': inversion_data
    }
    return out
def object_to_nb_object(name, obj_d, context=None):
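    '''Cast a single object dict to its registered jitted struct type,
    coercing each attribute with the python-level cast for its spec type.
    '''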
    c = KnowledgeBaseContext.get_context(context)
    assert 'type' in obj_d, "Object %s missing required attribute 'type'" % name
    assert obj_d['type'] in c.registered_specs, \
       "Object specification not defined for %s" % obj_d['type']
    spec = c.registered_specs[obj_d['type']]
    o_struct_type = c.jitstructs[obj_d['type']]
    args = []
    for x, s_obj in spec.items():
        t = s_obj['type']
        try:
            args.append(py_type_map[t](obj_d[x]))
        except ValueError as e:
            raise ValueError("Cannot cast %r to %r in attribute %r of object %r" %
                             (obj_d[x], t, x, name)) from e

    obj = o_struct_type(*args)
    return obj