Example #1
def subvol_list(structType, model):
    #use dictionaries to store voxels corresponding to regions, region_classes (e.g. head) or regions/structures
    region_list = decode(model['regions'])
    region_dict = OrderedDict()
    region_struct_dict = OrderedDict()
    #create dictionary of voxels and volumes for each region
    reg_voxel = omdict((zip(model['grid'][:]['region'],
                            range(len(model['grid'])))))
    reg_voxel_vol = omdict((zip(model['grid'][:]['region'],
                                model['grid'][:]['volume'])))
    for regnum in reg_voxel.keys():
        region_dict[region_list[regnum]] = {
            'vox': reg_voxel.allvalues(regnum),
            'vol': sum(reg_voxel_vol.allvalues(regnum))
        }
        # for regions of more than one type, create dictionary of voxels and volumes for each type of each region
        if len(np.unique(
                model['grid'][reg_voxel.allvalues(regnum)]['type'])) > 1:
            types = decode(model['grid'][reg_voxel.allvalues(regnum)]['type'])
            struct_voxels = omdict((zip(types, reg_voxel.allvalues(regnum))))
            struct_vox_vol = omdict((zip(types,
                                         reg_voxel_vol.allvalues(regnum))))
            for struct in struct_voxels.keys():
                depth = model['grid'][struct_voxels.allvalues(
                    struct)]['y0'] - model['grid'][struct_voxels.allvalues(
                        struct)]['y2']
                #Depth is an array: for submembrane structures it holds a single value, for cytosol it holds different values; presently only one value is stored
                key = region_list[regnum] + struct[0:3]
                region_struct_dict[key] = {
                    'vox': struct_voxels.allvalues(struct),
                    'depth': depth[0],
                    'vol': sum(struct_vox_vol.allvalues(struct))
                }
    return region_list, region_dict, region_struct_dict
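
The grouping idiom above is worth isolating: initializing an omdict from (key, value) pairs keeps every pair, so allvalues(key) later returns the whole group of values stored under that key. Below is a minimal, self-contained sketch of the same pattern, using made-up region labels and volumes instead of the model['grid'] data used above:

from orderedmultidict import omdict

# hypothetical data: one region label and one volume per voxel index
regions = ['soma', 'dend', 'soma', 'dend', 'dend']
volumes = [0.5, 0.25, 0.5, 0.25, 0.25]

reg_voxel = omdict(zip(regions, range(len(regions))))
reg_voxel_vol = omdict(zip(regions, volumes))

for reg in reg_voxel.keys():                  # unique keys, in insertion order
    print(reg,
          reg_voxel.allvalues(reg),           # all voxel indices for this region
          sum(reg_voxel_vol.allvalues(reg)))  # total volume of the region
# soma [0, 2] 1.0
# dend [1, 3, 4] 0.75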
Example #2
def _load_one_model(filepath):
    lineNo = 0
    top_def = None
    cur_def = None
    with open(filepath, 'r+t') as strm:
        for line in strm:
            lineNo += 1

            # Strip out any comment
            pos = line.find('#')
            if pos >= 0:
                line = line[:pos]

            line = line.strip()
            if not line: # empty line, ignore!
                continue

            m = re.match(r'^[-]*\s*(?P<key>\w+)\s*:\s*(?P<value>.+)$', line)
            if not m:
                print(f'Failed to parse {filepath}:{lineNo}')
                continue  # TODO(HS): Should this be an error?

            key = m.group('key').strip()
            value = m.group('value').strip()
            if key in [ 'obj_def', 'class_def', 'group_def' ]:
                top_def = orderedmultidict.omdict([
                    ('type', key),
                    ('name', value),
                    ('extends', None),
                    ('subclasses', set()),
                ])
                cur_def = top_def

            elif key in ['name']:
                cur_def['vpiname'] = value

            elif key in ['extends']:
                top_def[key] = value

            elif key in ['property', 'class_ref', 'obj_ref', 'group_ref', 'class']:
                cur_def = orderedmultidict.omdict([
                    ('type', key),
                    ('name', value),
                ])
                top_def.add(key, cur_def)

            elif key in ['type', 'card', 'vpi', 'vpi_obj']:
                cur_def[key] = value

            else:
                print(f'Found unknown key "{key}" at {filepath}:{lineNo}')

    return top_def
Example #3
    def __init__(self, type):  # @ReservedAssignment
        super(NamedObjectMap, self).__init__()
        self.type = type
        self._data = omdict()

        if not issubclass(type, NamedObject):
            raise TypeError("type must be a NamedObject")
Example #4
    def __init__(self, file):
        self.file = file

        # Start processing the file at line 2, i.e. the first
        # non-header line.
        self.linenr = 2

        # Maintain a backlog of Lines that we know we will need later
        # on from some file
        self.backlog = omdict()
Example #5
def multi_spines(model):
    spine_dict = OrderedDict()
    #create list of spine voxels
    #first identify all spine voxels and spine labels
    groups = model['grid'][:]['group']
    newgroups = list()
    for n, i in enumerate(groups):
        #in python3 must explicitly decode from byte to string
        if isinstance(i, np.bytes_):
            newgroups.append(i.decode('UTF-8'))
        else:
            newgroups.append(i)
        #voxels without a group label are not part of a spine
        if newgroups[n] == '':
            newgroups[n] = 'nonspine'
    groups = newgroups
    spine_voxel = omdict(zip(groups, range(len(model['grid']))))
    spine_voxel_vol = omdict(zip(groups, model['grid'][:]['volume']))
    #create a unique set of spine labels
    newspinelist = spine_voxel.keys()
    for spine in newspinelist:
        spine_dict[spine] = {'vox': spine_voxel.allvalues(spine),
                             'vol': sum(spine_voxel_vol.allvalues(spine))}
    return newspinelist, spine_dict
Example #6
    def _parse_query(self):
        # type: () -> URI
        query = self.query if self.query is not None else ""
        query_dict = omdict()
        queries = query.split("&")
        query_items = []
        for q in queries:
            key, _, val = q.partition("=")
            val = unquote_plus(val.replace("+", " "))
            query_items.append((key, val))
        query_dict.load(query_items)
        return attr.evolve(self, query_dict=query_dict, query=query)
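
Both _parse_query variants in this listing build a list of (key, value) tuples and hand it to omdict.load(), which clears the dictionary and imports the pairs while preserving duplicate keys and their order, exactly what repeated query parameters need. A small standalone sketch with a made-up query string, independent of the URI class above:

from urllib.parse import unquote_plus
from orderedmultidict import omdict

query = "tag=python&tag=omdict&name=hello+world"
query_items = []
for q in query.split("&"):
    key, _, val = q.partition("=")
    query_items.append((key, unquote_plus(val)))

query_dict = omdict()
query_dict.load(query_items)

print(query_dict.getlist('tag'))   # ['python', 'omdict']
print(query_dict['name'])          # hello world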
Example #7
def test_normalize_params_omdict():
    """
    Order is guaranteed for omdicts.
    """
    from orderedmultidict import omdict
    dct = omdict()
    dct['q'] = 'office'
    dct['facet'] = 'on'
    dct.add('facet.field', 'network')
    dct.add('facet.field', 'runtime')
    tuples = normalize_params(dct)
    assert tuples == [('q', 'office'), ('facet', 'on'),
                      ('facet.field', 'network'), ('facet.field', 'runtime')]
Example #8
    def __setitem__(self, key, value):
        if not isinstance(value, self.type):
            raise TypeError("can only add " + self.type.__name__ + " objects")

        if isinstance(key, six.integer_types):
            self._data = omdict([
                (value.name, value) if i == key else (k, v)
                for i, (k, v) in enumerate(six.iteritems(self._data))
            ])
        else:
            if value.name != key:
                raise ValueError("key does not match name of " +
                                 self.type.__name__)

            self._data[key] = value
Example #9
    def _parse_query(self):
        # type: () -> URI
        query = self.query if self.query is not None else ""
        query_dict = omdict()
        queries = query.split("&")
        query_items = []
        subdirectory = self.subdirectory if self.subdirectory else None
        for q in queries:
            key, _, val = q.partition("=")
            val = unquote_plus(val.replace("+", " "))
            if key == "subdirectory" and not subdirectory:
                subdirectory = val
            else:
                query_items.append((key, val))
        query_dict.load(query_items)
        return attr.evolve(
            self, query_dict=query_dict, subdirectory=subdirectory, query=query
        )
Example #10
    def test_update_updateall(self):
        data, omd1, omd2 = omdict(), OneDimensionalOrderedMultidict(), OneDimensionalOrderedMultidict()

        # All permutations of (self.keys, self.values) and (self.keys,
        # self.valuelists).
        allitems = chain(product(self.keys, self.values),
                         product(self.keys, self.valuelists))

        # All updates of length one, two, three, and four items.
        iterators = [permutations(allitems, 1),
                     permutations(allitems, 2),
                     permutations(allitems, 3),
                     permutations(allitems, 4),
        ]

        for iterator in iterators:
            for update in iterator:
                data.update(update)
                omd1.update(update)
                omd2.updateall(update)
                for key in omd1.iterkeys():
                    if isinstance(data[key], list):
                        assert omd1[key] == data[key][-1]
                    else:
                        assert omd1[key] == data[key]
                for key in omd2.iterkeys():
                    data_values_unpacked = []
                    for value in data.getlist(key):
                        if isinstance(value, list):
                            data_values_unpacked.extend(value)
                        else:
                            data_values_unpacked.append(value)

                    assert omd2.getlist(key) == data_values_unpacked

        # Test different empty list value locations.
        update_tests = [([(1, None), (2, None)],
                         [(1, [1, 11]), (2, [2, 22])],
                         [(1, 11), (2, 22)]),
                        ([(1, None), (2, None)],
                         [(1, []), (1, 1), (1, 11)],
                         [(1, 11), (2, None)]),
                        ([(1, None), (2, None)],
                         [(1, 1), (1, []), (1, 11)],
                         [(1, 11), (2, None)]),
                        ([(1, None), (2, None)],
                         [(1, 1), (1, 11), (1, [])],
                         [(2, None)]),
        ]
        for init, update, result in update_tests:
            omd = OneDimensionalOrderedMultidict(init)
            omd.update(update)
            assert omd.allitems() == result

        updateall_tests = [([(1, None), (2, None)],
                            [(1, [1, 11]), (2, [2, 22])],
                            [(1, 1), (2, 2), (1, 11), (2, 22)]),
                           ([(1, None), (2, None)],
                            [(1, []), (1, 1), (1, 11)],
                            [(1, 1), (2, None), (1, 11)]),
                           ([(1, None), (2, None)],
                            [(1, 1), (1, []), (1, 11)],
                            [(1, 11), (2, None)]),
                           ([(1, None), (2, None)],
                            [(1, 1), (1, 11), (1, [])],
                            [(2, None)]),
        ]
        for init, update, result in updateall_tests:
            omd = OneDimensionalOrderedMultidict(init)
            omd.updateall(update)
            assert omd.allitems() == result
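
What this test pins down is the difference between update() and updateall(): update() keeps at most one value per key, replacing the existing value in place, while updateall() replaces existing values in place and appends any extra values for that key to the end, preserving overall order. A short illustration with hand-picked values, separate from the test fixtures above:

from orderedmultidict import omdict

omd = omdict([(1, 'a'), (2, 'b')])
omd.update([(1, 'x'), (1, 'y')])
print(omd.allitems())    # [(1, 'y'), (2, 'b')] - only the last value for key 1 survives

omd = omdict([(1, 'a'), (2, 'b')])
omd.updateall([(1, 'x'), (1, 'y')])
print(omd.allitems())    # [(1, 'x'), (2, 'b'), (1, 'y')] - 'x' replaces 'a' in place, 'y' is appended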
Example #11
    def __init__(self, groupdb):
        self.groupdb = groupdb
        self.initial_groups = omdict()
        self.new_groups = omdict()
Example #12
    def test_update_updateall(self):
        data, omd1, omd2 = omdict(), omdict1D(), omdict1D()

        # All permutations of (self.keys, self.values) and (self.keys,
        # self.valuelists).
        allitems = chain(product(self.keys, self.values),
                         product(self.keys, self.valuelists))

        # All updates of length one, two, three, and four items.
        iterators = [
            permutations(allitems, 1),
            permutations(allitems, 2),
            permutations(allitems, 3),
            permutations(allitems, 4),
        ]

        for iterator in iterators:
            for update in iterator:
                data.update(update)
                omd1.update(update)
                omd2.updateall(update)
                for key in omd1.iterkeys():
                    if isinstance(data[key], list):
                        assert omd1[key] == data[key][-1]
                    else:
                        assert omd1[key] == data[key]
                for key in omd2.iterkeys():
                    data_values_unpacked = []
                    for value in data.getlist(key):
                        if isinstance(value, list):
                            data_values_unpacked.extend(value)
                        else:
                            data_values_unpacked.append(value)

                    assert omd2.getlist(key) == data_values_unpacked

        # Test different empty list value locations.
        update_tests = [
            ([(1, None), (2, None)], [(1, [1, 11]), (2, [2, 22])], [(1, 11),
                                                                    (2, 22)]),
            ([(1, None), (2, None)], [(1, []), (1, 1), (1, 11)], [(1, 11),
                                                                  (2, None)]),
            ([(1, None), (2, None)], [(1, 1), (1, []), (1, 11)], [(1, 11),
                                                                  (2, None)]),
            ([(1, None), (2, None)], [(1, 1), (1, 11), (1, [])], [(2, None)]),
        ]
        for init, update, result in update_tests:
            omd = omdict1D(init)
            omd.update(update)
            assert omd.allitems() == result

        updateall_tests = [
            ([(1, None), (2, None)], [(1, [1, 11]), (2, [2, 22])], [(1, 1),
                                                                    (2, 2),
                                                                    (1, 11),
                                                                    (2, 22)]),
            ([(1, None), (2, None)], [(1, []), (1, 1), (1, 11)], [(1, 1),
                                                                  (2, None),
                                                                  (1, 11)]),
            ([(1, None), (2, None)], [(1, 1), (1, []), (1, 11)], [(1, 11),
                                                                  (2, None)]),
            ([(1, None), (2, None)], [(1, 1), (1, 11), (1, [])], [(2, None)]),
        ]
        for init, update, result in updateall_tests:
            omd = omdict1D(init)
            omd.updateall(update)
            assert omd.allitems() == result
Example #13
    def clear(self):
        self._data = omdict()
Example #14
#---------------------------------------- Preprocess test file sentences
preprocessed_sentences = []
test_trees = []
for sentence in test_sentences:
    processed = preprocess(sentence.split())
    preprocessed_sentences.append(processed)
    test_trees.append(cp.parse(processed))

#------------------------------ Create annotated test sentences both ways

dictionary_parsed_sentences = []
i = 0
spec_case = 0
for sentence, tree in zip(preprocessed_sentences, test_trees):
    sentence_dict = omdict()
    neural_sentence = ""
    for word_tuple in sentence:
        sentence_dict.add(word_tuple[0], "O")
        neural_sentence = neural_sentence + " " + word_tuple[0]

    for subtree in tree.subtrees(lambda t: t.label() == "NP"):
        probab = {
            "per": 0,
            "event": 0,
            "geo": 0,
            "org": 0,
            "obj": 0,
            "gpe": 0,
            "time": 0
        }