Example 1
        def FindMaxShare(self):
                """ Ordereddict which is used to Sort the values ///
                in given dict data structure && namedtuple which is used to create tuple-like object from objects"""

                try:
                        with open(self.file) as infile:
                                reader = csv.reader(infile)
                                Result = namedtuple('Result', ['Share', 'Year', 'Month'])
                                od = OrderedDict()
                                names = next(reader)[2:]
                                for name in names:
                                        od[name] = Result(0, 'Year', 'Month')
                                for row in reader:
                                        Year, Month = row[:2]
                                        """ zip function used take an two input sequence ///
                                        and return product of the sequnce"""
                                        for name, Share in zip(names, map(int, row[2:])):
                                                if od[name].Share < Share:
                                                        od[name] = Result(Share, Year, Month)
                        print "Excepted Result of the Function"
                        print od.items()

                except IOError as e:
                        print e
                except Exception as e:
                        print e
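A minimal sketch of the data shape the method above assumes (hypothetical rows: the first two columns are Year and Month, the remaining columns are per-name shares):

from collections import OrderedDict, namedtuple

# hypothetical CSV rows, already split into fields
rows = [["Year", "Month", "ALICE", "BOB"],
        ["2013", "Jan", "10", "5"],
        ["2013", "Feb", "7", "12"]]
Result = namedtuple('Result', ['Share', 'Year', 'Month'])
names = rows[0][2:]
od = OrderedDict((name, Result(0, 'Year', 'Month')) for name in names)
for row in rows[1:]:
    year, month = row[:2]
    for name, share in zip(names, map(int, row[2:])):
        if od[name].Share < share:
            od[name] = Result(share, year, month)
print(list(od.items()))
# [('ALICE', Result(Share=10, Year='2013', Month='Jan')),
#  ('BOB', Result(Share=12, Year='2013', Month='Feb'))]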
Example 2
def combine(**kwargs):
  """Generate combinations based on its keyword arguments.

  Two sets of returned combinations can be concatenated using +.  Their product
  can be computed using `times()`.

  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]`
         or `option=the_only_possibility`.

  Returns:
    a list of dictionaries for each combination. Keys in the dictionaries are
    the keyword argument names.  Each key has one value - one of the
    corresponding keyword argument values.
  """
  if not kwargs:
    return [OrderedDict()]

  sort_by_key = lambda k: k[0][0]
  kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
  first = list(kwargs.items())[0]

  rest = dict(list(kwargs.items())[1:])
  rest_combined = combine(**rest)

  key = first[0]
  values = first[1]
  if not isinstance(values, list):
    values = [values]

  return [
      OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
      for v in values
      for combined in rest_combined
  ]
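A quick usage sketch for combine() with hypothetical option names; every keyword becomes a key in each returned OrderedDict:

print(combine(mode=['graph', 'eager'], use_gpu=[True, False]))
# [OrderedDict([('mode', 'graph'), ('use_gpu', True)]),
#  OrderedDict([('mode', 'graph'), ('use_gpu', False)]),
#  OrderedDict([('mode', 'eager'), ('use_gpu', True)]),
#  OrderedDict([('mode', 'eager'), ('use_gpu', False)])]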
Example 3
    def insert(self, key, value, index=None, akey=None, bkey=None):
        tmp1 = OrderedDict()
        tmp2 = OrderedDict()
        if (index is not None) and isinstance(index, int):
            if index < len(self.keys()):
                for i in self.keys():
                    if self.indexofkey(i) < index:
                        tmp1[i] = self[i]
                    else:
                        tmp2[i] = self[i]
                self.clear()
                for k, v in tmp1.items():
                    self[k] = v
                self[key] = value
                for k, v in tmp2.items():
                    self[k] = v
                return self
            if index == len(self.keys()):
                self[key] = value
        if akey is not None:
            if akey in self.keys():
                self.insert(key, value, index=self.indexofkey(akey) + 1)
            else:
                raise KeyError(akey)
        if bkey is not None:
            if bkey in self.keys():
                self.insert(key, value, index=self.indexofkey(bkey))
            else:
                raise KeyError(bkey)
Example 4
def drawing_lines(start_date, end_date):

    (positive_dict, negative_dict, neutral_dict) = get_sentiment_dates(start_date, end_date)
    positive_dict = OrderedDict(sorted(positive_dict.items(), key=lambda t: t[0]))
    pos_keys = list(positive_dict.keys())[-30:]    # get the last 30 days
    pos_vals = list(positive_dict.values())[-30:]  # keep values aligned with keys

    negative_dict = OrderedDict(sorted(negative_dict.items(), key=lambda t: t[0]))
    neg_keys = list(negative_dict.keys())[-30:]    # get the last 30 days
    neg_vals = list(negative_dict.values())[-30:]  # keep values aligned with keys

    neutral_dict = OrderedDict(sorted(neutral_dict.items(), key=lambda t: t[0]))
    neu_keys = list(neutral_dict.keys())[-30:]    # get the last 30 days
    neu_vals = list(neutral_dict.values())[-30:]  # keep values aligned with keys

    figure_title = 'Sentiment between ' + start_date + ' and ' + end_date
    fig, ax = plt.subplots()
    ax.plot(pos_keys, pos_vals, 'o-', label='Positive')
    ax.plot(neg_keys, neg_vals, 'o-', label='Negative')
    ax.plot(neu_keys, neu_vals, 'o-', label='Neutral')
    fig.autofmt_xdate()
    plt.legend(shadow=True, fancybox=True)
    plt.title(figure_title)
    plt.show()

    return
Example 5
    def post(self, request, pk):
        """ Clean the data and save opening hours in the database.
        Old opening hours are purged before new ones are saved.
        """
        location = self.get_object()
        # open days, disabled widget data won't make it into request.POST
        present_prefixes = [x.split('-')[0] for x in request.POST.keys()]
        day_forms = OrderedDict()
        for day_no, day_name in WEEKDAYS:
            for slot_no in (1, 2):
                prefix = self.form_prefix(day_no, slot_no)
                # skip closed day as it would be invalid form due to no data
                if prefix not in present_prefixes:
                    continue
                day_forms[prefix] = (day_no, Slot(request.POST, prefix=prefix))

        if all([day_form[1].is_valid() for pre, day_form in day_forms.items()]):
            OpeningHours.objects.filter(company=location).delete()
            for prefix, day_form in day_forms.items():
                day, form = day_form
                opens, shuts = [str_to_time(form.cleaned_data[x])
                                for x in ('opens', 'shuts')]
                if opens != shuts:
                    OpeningHours(from_hour=opens, to_hour=shuts,
                                 company=location, weekday=day).save()
        return redirect(request.path_info)
Example 6
    def __init__(self, data, label=None, batch_size=1, shuffle=False,
                 last_batch_handle='pad', data_name='data',
                 label_name='softmax_label'):
        super(NDArrayIter, self).__init__(batch_size)

        self.data = _init_data(data, allow_empty=False, default_name=data_name)
        self.label = _init_data(label, allow_empty=True, default_name=label_name)

        # shuffle data
        if shuffle:
            idx = np.arange(self.data[0][1].shape[0])
            np.random.shuffle(idx)
            self.data = [(k, array(v.asnumpy()[idx], v.context)) for k, v in self.data]
            self.label = [(k, array(v.asnumpy()[idx], v.context)) for k, v in self.label]

        # batching
        if last_batch_handle == 'discard':
            new_n = self.data[0][1].shape[0] - self.data[0][1].shape[0] % batch_size
            data_dict = OrderedDict(self.data)
            label_dict = OrderedDict(self.label)
            for k, _ in self.data:
                data_dict[k] = data_dict[k][:new_n]
            for k, _ in self.label:
                label_dict[k] = label_dict[k][:new_n]
            self.data = list(data_dict.items())
            self.label = list(label_dict.items())

        self.data_list = [x[1] for x in self.data] + [x[1] for x in self.label]
        self.num_source = len(self.data_list)
        self.num_data = self.data_list[0].shape[0]
        assert self.num_data >= batch_size, \
            "batch_size need to be smaller than data size."
        self.cursor = -batch_size
        self.batch_size = batch_size
        self.last_batch_handle = last_batch_handle
Example 7
def test():
    from collections import OrderedDict as StdlibOrderedDict

    ordered_dict = OrderedDict(((1, 'a'), (2, 'b'), (3, 'c')))
    stdlib_ordered_dict = StdlibOrderedDict(((1, 'a'), (2, 'b'), (3, 'c')))
    
    assert ordered_dict == stdlib_ordered_dict
    assert stdlib_ordered_dict == ordered_dict
    assert ordered_dict.items() == stdlib_ordered_dict.items()
    assert ordered_dict.keys() == stdlib_ordered_dict.keys()
    assert ordered_dict.values() == stdlib_ordered_dict.values()
    
    ordered_dict.move_to_end(1)
    
    assert ordered_dict != stdlib_ordered_dict
    #assert stdlib_ordered_dict != ordered_dict
    assert ordered_dict.items() != stdlib_ordered_dict.items()
    assert ordered_dict.keys() != stdlib_ordered_dict.keys()
    assert ordered_dict.values() != stdlib_ordered_dict.values()
    
    del stdlib_ordered_dict[1]
    stdlib_ordered_dict[1] = 'a'
    
    assert ordered_dict == stdlib_ordered_dict
    assert stdlib_ordered_dict == ordered_dict
    assert ordered_dict.items() == stdlib_ordered_dict.items()
    assert ordered_dict.keys() == stdlib_ordered_dict.keys()
    assert ordered_dict.values() == stdlib_ordered_dict.values()
    
    assert ordered_dict == OrderedDict(stdlib_ordered_dict) == \
                                                            stdlib_ordered_dict
    assert ordered_dict == StdlibOrderedDict(ordered_dict) == \
                                                            stdlib_ordered_dict
    
Example 8
def walk_and_clean(data):
    """
    Recursively walks list of dicts (which may themselves embed lists and dicts),
    transforming namedtuples to OrderedDicts and
    using ``clean_key_name(k)`` to make keys into SQL-safe column names

    >>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
    >>> pprint(walk_and_clean(data))
    [OrderedDict([('a', 1)]),
     [OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
     OrderedDict([('f', OrderedDict([('g', 4)]))])]
    """
    # transform namedtuples to OrderedDicts
    if hasattr(data, '_fields'):
        data = OrderedDict((k,v) for (k,v) in zip(data._fields, data))
    # Recursively clean up child dicts and lists
    if hasattr(data, 'items') and hasattr(data, '__setitem__'):
        for (key, val) in data.items():
            data[key] = walk_and_clean(val)
    elif isinstance(data, list) or isinstance(data, tuple) \
         or hasattr(data, '__next__') or hasattr(data, 'next'):
        data = [walk_and_clean(d) for d in data]

    # Clean up any keys in this dict itself
    if hasattr(data, 'items'):
        original_keys = data.keys()
        tup = ((clean_key_name(k), v) for (k, v) in data.items())
        data = OrderedDict(tup)
        if len(data) < len(original_keys):
            raise KeyError('Cleaning up %s created duplicates' %
                           original_keys)
    return data
Example 9
def _init_data(data, allow_empty, default_name):
    """Convert data into canonical form."""
    assert (data is not None) or allow_empty
    if data is None:
        data = []

    if isinstance(data, (np.ndarray, NDArray)):
        data = [data]
    if isinstance(data, list):
        if not allow_empty:
            assert(len(data) > 0)
        if len(data) == 1:
            data = OrderedDict([(default_name, data[0])]) # pylint: disable=redefined-variable-type
        else:
            data = OrderedDict( # pylint: disable=redefined-variable-type
                [('_%d_%s' % (i, default_name), d) for i, d in enumerate(data)])
    if not isinstance(data, dict):
        raise TypeError("Input must be NDArray, numpy.ndarray, " + \
                "a list of them or dict with them as values")
    for k, v in data.items():
        if not isinstance(v, NDArray):
            try:
                data[k] = array(v)
            except Exception:
                raise TypeError(("Invalid type '%s' for %s, "  % (type(v), k)) + \
                    "should be NDArray or numpy.ndarray")

    return list(data.items())
Example 10
    def format(self, data, keys=None, group_by=None, domain=None):
        rows_dict = OrderedDict()
        tmp_data = OrderedDict()
        sorted_data = []
        value_chains = get_domain_configuration(domain).by_type_hierarchy
        for key, row in data.items():
            to_list = list(key)

            def find_name(lst, deep):
                for element in lst:
                    if deep == len(key)-3 and key[deep+1] == element.val:
                        return element.text
                    elif key[deep+1] == element.val:
                        return find_name(element.next, deep+1)

            name = find_name(value_chains, 0)
            to_list[2] = name
            tmp_data.update({tuple(to_list): row})
        if tmp_data:
            sorted_data = sorted(tmp_data.items(), key=lambda x: (x[0][0], x[0][2]))

        for row in sorted_data:
            formatted_row = self._format.format_row(row[1])
            if formatted_row[0] not in rows_dict:
                rows_dict[formatted_row[0]] = []
            rows_dict[formatted_row[0]].append(formatted_row[1])

        min_length = min([len(item[1]) for item in rows_dict.items()])

        for key, row in rows_dict.items():
            total_column = self.calculate_total_column(row)
            res = [key, total_column]
            res.extend(row[0:min_length])
            yield res
Example 11
def regulardict_to_ordereddict():
    """Sort a dict by its key, its value, or a custom rule; the caller can
    choose ascending or descending order.

    An OrderedDict does not actually build a brand-new dictionary. It only
    generates a new sequence of keys, and maintains that key sequence to
    decide the output order.

    If the keys and values of d are sortable numbers or strings and no complex
    rule is involved, sorting just by key or by value, the memory overhead of
    the resulting OrderedDict does not grow, because the sorted output can be
    produced on the fly whenever iter is called. A rule such as "sort by the
    second element of the value", however, brings extra memory overhead; that
    is the case in this example.
    """
    d = {"c": [1, 3],
         "a": [3, 2],
         "b": [2, 1]}

    print("{:=^100}".format("sort by value, ascend"))
    od1 = OrderedDict(sorted(list(d.items()),
                             key=lambda t: t[1],  # t[0] sorts by key, t[1] by value
                             reverse=False))  # True = descending, False = ascending
    for k, v in list(od1.items()):
        print(k, v)  # check that the output follows the requested order

    print("{:=^100}".format("sort by value[1], descend"))
    od2 = OrderedDict(sorted(list(d.items()),
                             key=lambda t: t[1][1],  # t[1][1] sorts by value[1]
                             reverse=True))
    for k, v in list(od2.items()):
        print(k, v)  # check that the output follows the requested order

    print("Memory size of the original dict: %s" % sys.getsizeof(d))  # 288
    print("Memory size of the ordered dict od1: %s" % sys.getsizeof(od1))  # 1304
    print("Memory size of the ordered dict od2: %s" % sys.getsizeof(od2))  # 1304
    print("d == od1? %s" % (d == od1))  # True
    print("d == od2? %s" % (d == od2))  # True
Example 12
async def pq_dtable_merge(current, past, _dsn_db=None):

    dins, dchg, ddel = _dtable_diff(current, past)
    table_name = current.__dset_item_class__.__name__

    dobj_cls = current.__dset_item_class__
    attrs = OrderedDict((attr_name, attr) for attr_name, attr in
                        iter_chain(dobj_cls.__dobject_key__.items(),
                                   dobj_cls.__dobject_att__.items()))

    seq_attrs = {}
    for n, attr in attrs.items():
        if issubclass(attr.type, dsequence):
            seq_attrs[n] = attr

    if dins.values:
        await _do_insert(table_name, seq_attrs, dins, _dsn_db)

    if dchg.values:
        await _do_update(table_name, seq_attrs, dchg, _dsn_db)

    if ddel.pkey_values:
        await _do_delete(table_name, ddel, _dsn_db)
Example 13
 def recursive_split(rect_key,rect_coords,category_idx,split_dir,gap):
     """
     given a key of the boxes and the data to analyze,
     split the key into several keys stratificated by the given
     category in the assigned direction
     """
     ticks = []
     category = categories[category_idx]
     chiave=rect_key
     divisione = OrderedDict()
     for tipo in category:
         divisione[tipo]=0.
         for k,v in counted.items():
             if k[len(rect_key)-1]!=tipo:
                 continue 
             if not all( k[k1]==v1 for k1,v1 in enumerate(rect_key[1:])):
                 continue
             divisione[tipo]+=v
     totali = 1.*sum(divisione.values())
     if totali: #check for empty categories
         divisione = OrderedDict( (k,v/totali) for k,v in divisione.items() )
     else:
         divisione = OrderedDict( (k,0.) for k,v in divisione.items() )
     prop = list(divisione.values())
     div_keys = list(divisione.keys())
     new_rects = split_rect(*rect_coords,proportion=prop,direction=split_dir,gap=gap)
     divisi = OrderedDict( (chiave+(k,),v) for k,v in zip(div_keys,new_rects))
     d = (split_dir == 'h')
     ticks = [ (k,O[d]+0.5*[h,w][d]) for k,(O,h,w) in zip(div_keys,new_rects) ]
     return divisi, list(zip(*ticks))
Example 14
    def __init__(self, dictionary):
        self.ES_source = ''
        self.Birds = ''
        self.BirdsAll = ''
        self.BirdsIUCN = ''
        self.BirdsUSFWS = ''


        self.NSF = ''
        self.NSP = ''
        self.NSM = ''
        self.Crop = ''
        self.Pesticide = ''
        self.IUCN_Amphibians = ''
        self.IUCN_Birds = ''
        self.IUCN_Mammals = ''
        self.IUCN_Mammals_Marine = ''
        self.IUCN_Coral = ''
        self.IUCN_Reptiles = ''
        self.IUCN_Seagrasses = ''
        self.IUCN_SeaCucumbers = ''
        self.IUCN_Mangrove = ''
        self.IUCN_MarineFish = ''
        self.USFWS_p = ''
        self.USFWS_l = ''


        dictionary = OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
        logger.info('===================')
        for k, v in dictionary.items():
            setattr(self, k, v)
Example 15
class ReaderCollection(Iterator):#Done
    def __init__(self, klasses, config, order=constant(0)):
        '''
        klasses : (dict) contains name:class pairs
        '''
        self.readers = OrderedDict([(name,klass.reader_class(config)(klass,config)) for name, klass in klasses.items()])
        self._peeks = OrderedDict()
        for name, reader in self.readers.items():
            try:
                self._peeks[name] = next(reader)
            except StopIteration: # OK
                self.readers[name].close()
        self.order = order
        self.update()
        
    def report(self):
        return dict([(name,reader.report()) for name,reader in self.readers.items()])
        
    def update(self):#TODO: This could be made more efficient by maintaining a sort.
        self._peek = None
        if self._peeks:
            for k, v in self._peeks.items():
                if v is not None:
                    if self._peek is None:
                        self._peek = k
                    elif v.container_key() <= self._peeks[self._peek].container_key():
                        if self.order(v) < self.order(self._peeks[self._peek]):
                            self._peek = k
                        elif v.container_key() < self._peeks[self._peek].container_key():
                            self._peek = k
        
    def peek(self):
        if self._peek is None:
            return None
        return self._peeks[self._peek]
    
    def __next__(self):
        if self._peek is None:
            raise StopIteration
        result = self._peeks[self._peek]
        if result is None:
            raise StopIteration
        try:
            self._peeks[self._peek] = next(self.readers[self._peek])
        except StopIteration:
            del self._peeks[self._peek]
            self.readers[self._peek].close()
        self.update()
        return result
    
    def __iter__(self):
        return self
    
    def close(self):
        for reader in self.readers.values():
            reader.close()
Example 16
def create_jobs():
    g1_components = range(1)
    g2_components = range(2)
    g3_components = range(2)
    component_groups = OrderedDict((('g1', g1_components), ('g2', g2_components), ('g3', g3_components)))

    api = api_select.api(__file__)
    def job(name, expect_order, params=None):
        api.job(name, exec_time=0.5, max_fails=0, expect_invocations=1, expect_order=expect_order, params=params)

    api.flow_job()
    job('prepare', 1)
    for gname, group in component_groups.items():
        for component in group:
            job('deploy_component_' + gname + '_' + str(component), 2)
    job('report_deploy', 3)
    job('prepare_tests', 3)
    job('test_ui', 4)
    job('test_x', 4)
    for gname, group in component_groups.items():
        for component in group:
            job('test_component_' + gname + '_' + str(component), 5)
    job('report', 6, params=(('s1', 'tst_regression', 'desc'), ('c1', ('complete', 'partial'), 'desc')))
    job('promote', 7)
    return api
Example 17
  def _topological_sort(self, goal_info_by_goal):
    dependees_by_goal = OrderedDict()

    def add_dependee(goal, dependee=None):
      dependees = dependees_by_goal.get(goal)
      if dependees is None:
        dependees = set()
        dependees_by_goal[goal] = dependees
      if dependee:
        dependees.add(dependee)

    for goal, goal_info in goal_info_by_goal.items():
      add_dependee(goal)
      for dependency in goal_info.goal_dependencies:
        add_dependee(dependency, goal)

    satisfied = set()
    while dependees_by_goal:
      count = len(dependees_by_goal)
      for goal, dependees in dependees_by_goal.items():
        unsatisfied = len(dependees - satisfied)
        if unsatisfied == 0:
          satisfied.add(goal)
          dependees_by_goal.pop(goal)
          yield goal_info_by_goal[goal]
          break
      if len(dependees_by_goal) == count:
        for dependees in dependees_by_goal.values():
          dependees.difference_update(satisfied)
        # TODO(John Sirois): Do a better job here and actually collect and print cycle paths
        # between Goals/Tasks.  The developer can most directly address that data.
        raise self.GoalCycleError('Cycle detected in goal dependencies:\n\t{0}'
                                  .format('\n\t'.join('{0} <- {1}'.format(goal, list(dependees))
                                                      for goal, dependees
                                                      in dependees_by_goal.items())))
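A compact standalone sketch of the same pass (hypothetical GoalInfo stand-ins; the real method yields goal_info objects and raises GoalCycleError). Goals that nothing depends on are yielded first:

from collections import OrderedDict, namedtuple

GoalInfo = namedtuple('GoalInfo', ['goal_dependencies'])
goal_info_by_goal = OrderedDict([
    ('compile', GoalInfo(goal_dependencies=[])),
    ('test', GoalInfo(goal_dependencies=['compile'])),
    ('bundle', GoalInfo(goal_dependencies=['compile'])),
    ('publish', GoalInfo(goal_dependencies=['test', 'bundle'])),
])

def topological_sort(goal_info_by_goal):
    # goal -> set of goals that depend on it (its dependees)
    dependees_by_goal = OrderedDict()
    for goal, info in goal_info_by_goal.items():
        dependees_by_goal.setdefault(goal, set())
        for dependency in info.goal_dependencies:
            dependees_by_goal.setdefault(dependency, set()).add(goal)
    satisfied = set()
    while dependees_by_goal:
        for goal, dependees in list(dependees_by_goal.items()):
            if not dependees - satisfied:  # all dependees already emitted
                satisfied.add(goal)
                del dependees_by_goal[goal]
                yield goal
                break
        else:
            raise ValueError('Cycle detected in goal dependencies')

print(list(topological_sort(goal_info_by_goal)))
# ['publish', 'test', 'bundle', 'compile']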
Example 18
def _init_data(data, allow_empty, default_name):
    """Convert data into canonical form."""
    assert (data is not None) or allow_empty
    if data is None:
        data = []

    if isinstance(data, (np.ndarray, minpy.array.Array)):
        data = [data]
    if isinstance(data, list):
        if not allow_empty:
            assert (len(data) > 0)
        if len(data) == 1:
            data = OrderedDict([(default_name, data[0])])
        else:
            data = OrderedDict([('_%d_%s' % (i, default_name), d)
                                for i, d in enumerate(data)])
    if not isinstance(data, dict):
        raise TypeError(
            "Input must be NDArray, numpy.ndarray, MinPy Array, or "
            "a list of them or dict with them as values.")
    for k, v in data.items():
        if not isinstance(v, (np.ndarray, minpy.array.Array)):
            raise TypeError(("Invalid type '%s' for %s, " % (type(
                v), k)) + "should be NDArray, numpy.ndarray, or MinPy Array.")

    return list(data.items())
Example 19
def generate_patterns(urls, prefix=''):
	def url_sort_key(t):
		#Put id matching at end
		if '(' in t[0]:
			return 1000
		#put empty even further behind
		if t[0]=='':
			return 1001
		return -len(t[0])
	if prefix != '':
		prefix += '/'
	urls = OrderedDict(sorted(urls.items(), key=url_sort_key))
	patterns = []
	for key, subset in urls.items():
		if isinstance(subset, dict):
			patterns += generate_patterns(subset, prefix+key)
		else:
			if type(subset).__name__ == 'type':
				subset = subset.as_view()
			elif type(subset).__name__ == 'function':
				pass
			if prefix == '/' or key == '':
				patterns.append(url(r'^'+prefix[:-1]+key, subset))
			else:
				patterns.append(url(r'^'+prefix+key, subset))
	return patterns
Example 20
def makeAllCompare(result1, result2):
    """A short to make all compare bar diagramms, using the module makePicture.py
    
    
    :param result1: The first result data that stored locally after computation.py processing.
    :type result: Dict.
    :param result2: The second result data that stored locally after computation.py processing.
    :type result: Dict.
    :returns: None
    """
    # first sort this input dict
    from collections import OrderedDict
    result1 = OrderedDict(sorted(result1.items(), key=lambda t: t[0]))
    result2 = OrderedDict(sorted(result2.items(), key=lambda t: t[0]))
    # get iterator for this two dict
    i1 = iter(result1)
    i2 = iter(result2)
    try:
        while True:
            k1 = next(i1)
            dict1 = result1[k1]
            k2 = next(i2)
            dict2 = result2[k2]
            if len(dict1.keys()) < 5 or len(dict2.keys()) < 5:
                continue
            makeCompareBar(dict1, dict2, k1, k2)
    except StopIteration:
        pass
    finally:
        del i1,i2
Example 21
class WeightedRandom(Balancer):
    def __init__(self, servers):
        self.serverlist = OrderedDict([(server, 0) for server in servers])
        allTestResults = [testServerPool([server[0] for server in self.serverlist.items()]) for _ in range(0, 5)]
        
        for testResults in allTestResults:
            for testResult in testResults.items():
                server = testResult[0]
                results = testResult[1]
                testScore = sum([result[1] for testPath, result in results.items()])
                self.serverlist[server] += testScore / 5
        self.serverlist = OrderedDict(sorted(self.serverlist.items(), key = lambda x: x[1]))
    
    def get_server(self, *args, **kwargs):
        servers = list(filter(lambda x: not x[1] == float('inf'), list(self.serverlist.items())))
        total = sum(weight for (server, weight) in servers)
        r = random.uniform(0, total)
        currentPos = 0
        for (server, weight) in servers:
            if currentPos + weight >= r:
                return server
            currentPos += weight
        assert False, 'failed to get server'
        
    def inform(self, action, server, *args, **kwargs):
        raise NotImplementedError
        
    def updateWeights(self):
        raise NotImplementedError
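The draw in get_server is a standard cumulative-weight selection; a minimal self-contained sketch of the same technique with hypothetical servers and weights:

import random
from collections import OrderedDict

weights = OrderedDict([('s1', 1.0), ('s2', 3.0), ('s3', 6.0)])
r = random.uniform(0, sum(weights.values()))
current = 0.0
for server, weight in weights.items():
    current += weight
    if current >= r:
        print('picked', server)  # 's3' wins about 60% of the time
        break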
Example 22
def humans_per_hour(game, **kwargs):
	data = []
	end_date = min(timezone.now(), game.end_date)
	end_td = end_date - game.start_date
	end_hour = end_td.days * 24 + round(end_td.seconds / 3600, 0)
	for dorm, dormName in DORMS:
		sh = game.get_active_players().filter(dorm=dorm).count() # starting humans in this dorm
		d = OrderedDict([(0, sh)])
		kills = Kill.objects.exclude(parent=None).filter(victim__game=game, victim__dorm=dorm).order_by('date')
		for index, kill in enumerate(kills, 1):
			kd = max(kill.date, game.start_date) - game.start_date
			hours = kd.days * 24 + round(kd.seconds / 3600, 1)
			d[min(hours, end_hour)] = sh - index # overwrite
		if end_hour not in d:
			d[end_hour] = d[next(reversed(d))]
		data.append({'name': dormName, 'data': list(d.items())})
	# add dataset for all dorms
	sh = game.get_active_players().count() - Kill.objects.filter(parent=None, killer__game=game).count() # subtract LZs
	d = OrderedDict([(0, sh)])
	kills = Kill.objects.exclude(parent=None).filter(victim__game=game).order_by('date')
	for index, kill in enumerate(kills, 1):
		kd = max(kill.date, game.start_date) - game.start_date
		hours = kd.days * 24 + round(kd.seconds / 3600, 1)
		d[min(hours, end_hour)] = sh - index # overwrite
	if end_hour not in d:
		d[end_hour] = d[next(reversed(d))]
	data.append({'name': 'ALL', 'data': list(d.items())})
	return data
Example 23
    def update_data_to_contain_top10_words(self, tagged_data, top10):
        """
        Takes in a data set and removes the occurrences of all words except the top 10 entropy words
        :param tagged_data tagged dataset
        :param top10 top 10 highest entropy words
        :return updated data set
        """
        sentence_index = range(len(tagged_data))
        indexed_sentences = OrderedDict(zip(sentence_index,tagged_data))
        for sentence_id, sentence in indexed_sentences.items():
            word_index = range(len(tagged_data[sentence_id]))
            indexed_words = OrderedDict(zip(word_index,tagged_data[sentence_id]))
            for word_id, tagged_word in list(indexed_words.items()):
                if tagged_word[0] not in top10:
                    del indexed_words[word_id]
            indexed_sentences[sentence_id] = list(indexed_words.values())

        sentence_index = range(len(tagged_data))
        updated_tagged_data = list(indexed_sentences.values())
        indexed_sentences = OrderedDict(zip(sentence_index, updated_tagged_data))
        for sentence_id, sentence in list(indexed_sentences.items()):
            if not sentence:
                del indexed_sentences[sentence_id]
        updated_tagged_data = list(indexed_sentences.values())
        return updated_tagged_data
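Deleting entries from a dict while iterating over it raises RuntimeError in Python 3, which is why the loops above iterate over a list snapshot of items(); a minimal sketch of the pattern:

from collections import OrderedDict

d = OrderedDict([('a', 1), ('b', 0), ('c', 2)])
for k, v in list(d.items()):  # snapshot, so deletion cannot invalidate the iterator
    if v == 0:
        del d[k]
print(d)  # OrderedDict([('a', 1), ('c', 2)])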
Example 24
def plot_nodes(node_positions, node_links=None, radius=1.0, scalefree=False, square=True, figsize=None):
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    nodes = []
    node_positions = OrderedDict(sorted(node_positions.items()))
    for node, position in node_positions.items():
        x, y, z = position
        ax.scatter(x, y)
        ax.annotate(node, xy=(x, y), xytext=(-10, 5), textcoords='offset points', ha='center', va='bottom')
        ax.add_patch(plt.Circle((x, y), radius=radius, edgecolor='b', fill=False, alpha=0.2))

    if node_links:
        # keys()/values() views are not indexable, so take a list snapshot
        positions = list(node_positions.values())
        for node, links in node_links.items():
            for link in links:
                x, y = zip(positions[node][0:2], positions[link][0:2])
                ax.plot(x, y, color='k', lw=1, alpha=1.0, linestyle=':')

    if square:
        ax.set_aspect('equal', adjustable='datalim')
    if scalefree:
        ax = format_axes(ax)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.grid(False)

    return fig
Example 25
 def get_round_results(self, competition, round, valid_session_rounds, scores):
     results = OrderedDict()
     for score in scores:
         session_entry = score.target.session_entry
         if session_entry.session_round.shot_round.id != round.id:
             continue
         if session_entry.session_round not in valid_session_rounds:
             continue
         categories = self.get_categories_for_entry(competition, session_entry.competition_entry)
         for category in categories:
             if category not in results:
                 results[category] = {}
             if session_entry.competition_entry not in results[category]:
                 results[category][session_entry.competition_entry] = []
             results[category][session_entry.competition_entry].append(score)
     for category, scores in list(results.items()):
         scores = OrderedDict((entry, rounds) for entry, rounds in scores.items() if len(rounds) >= 2)
         if not scores:
             results.pop(category)
             continue
         for entry in scores:
             scores[entry] = sorted(scores[entry], key=lambda s: s.target.session_entry.session_round.session.start)[:2]
         new_scores = [ScoreMock(
             disqualified=any(s.disqualified for s in sub_scores),
             retired=any(s.retired for s in sub_scores),
             target=sub_scores[0].target,
             score=sum(s.score for s in sub_scores),
             hits=sum(s.hits for s in sub_scores),
             golds=sum(s.golds for s in sub_scores),
             xs=sum(s.xs for s in sub_scores),
         ) for entry, sub_scores in scores.items()]
         if not self.leaderboard:
             new_scores = list(filter(lambda s: s.score > 0, new_scores))
         results[category] = self.sort_results(new_scores)
     return results
Example 26
def build_doc2vec_models(train):
    '''
    Given a dataframe (train), get a list of gensim TaggedDocuments that contain
    a list of the words in the document and the document id. Create three different
    doc2vec models (distributed bag of words, distributed model with averaging of
    vectors, and distributed model with concatenation of vectors). To get the
    300-feature vector representing each document, pass the document id to the model.
    Models are trained in multiple epochs, with the data sorted prior to each one.
    Save the models so they can be used later without needing to retrain.
    '''
    # Pass the entire training set, because this is how word2vec will
    # know what the 300-feature vector for that document is.
    # The sentiment values are not used at this time, so no data leakage
    cleaned_tagged_docs = clean_reviews(train, method='doc2vec')
    # The three models of doc2vec being tested share:
    # 300 feature size (to match word2vec and indico models)
    # window=5 (both sides) approximates a 10-word total window size
    # min_count=2 gets rid of unique words
    simple_models = [
    # PV-DM w/concatenation
    Doc2Vec(dm=1, dm_concat=1, size=300, window=5, negative=5, hs=0, min_count=1, workers=cores),
    # PV-DBOW
    Doc2Vec(dm=0, size=300, negative=5, hs=0, min_count=1, workers=cores),
    # PV-DM w/average
    Doc2Vec(dm=1, dm_mean=1, size=300, window=10, negative=5, hs=0, min_count=1, workers=cores),
    ]
    models_by_name = OrderedDict((str(model), model) for model in simple_models)
    # speed setup by sharing results of 1st model's vocabulary scan
    simple_models[0].build_vocab(cleaned_tagged_docs)
    # PV-DM/concat requires one special NULL word so it serves as template
    for model in simple_models[1:]:
        model.reset_from(simple_models[0])
    # start each model with basic values
    alpha, min_alpha, passes = (0.025, 0.001, 20)
    alpha_delta = (alpha - min_alpha) / passes
    print("START %s" % datetime.datetime.now())
    # run through multiple epochs, shuffling between, for best training
    for epoch in range(passes):
        shuffle(cleaned_tagged_docs)  # shuffling gets best results
        for name, train_model in models_by_name.items():
            # train
            duration = 'na'
            train_model.alpha, train_model.min_alpha = alpha, alpha
            with elapsed_timer() as elapsed:
                train_model.train(cleaned_tagged_docs)
                duration = '%.1f' % elapsed()
                if ((epoch + 1) % 5) == 0 or epoch == 0:
                    print("%i passes : %s %ss " % (epoch + 1, name, duration))
        print('completed pass %i at alpha %f' % (epoch + 1, alpha))
        alpha -= alpha_delta
    print("END %s" % str(datetime.datetime.now()))
    # save the final models so we don't need to do this again
    for name, train_model in models_by_name.items():
        train_model.init_sims(replace=True)
        name = name.replace('(','_')
        name = name.replace(')','')
        name = name.replace('/','-')
        name = name.replace(',','_')
        train_model.save(name)
    print "models saved"
Example 27
def light(fname, camera_options={}, geometry_options={},
          comment=None, verbose=False,
          overwrite=False, remove=[]):
          
    if os.path.isfile(fname) and not overwrite:
        R = input('this camera exists, overwrite? (y/n)').lower()
        if R != 'y':
            sys.exit()
    
    camera = OrderedDict()
    camera['camera.name'] = 'simple camera'       

    for K, V in camera_options.items():
        camera[K] = V   
    
    geometry = OrderedDict()
    geometry['geometry.azimuth'] =  0.0
    geometry['geometry.zenith'] =  0.0
    
    for K, V in geometry_options.items():
        geometry[K] = V
        
    ### concatenate settings
    camera_settings = OrderedDict(list(camera.items()) +
                                  list(geometry.items()))
                                  
    for drop in remove:
        del camera_settings[drop]
        
    write_camera(fname, camera_settings, comment, verbose)
Example 28
    def __init__(self, event):
        super(ContributionListGenerator, self).__init__(event)
        self.default_list_config = {'filters': {'items': {}}}

        session_empty = {None: _('No session')}
        track_empty = {None: _('No track')}
        type_empty = {None: _('No type')}
        session_choices = OrderedDict((unicode(s.id), s.title) for s in sorted(self.event.sessions,
                                                                               key=attrgetter('title')))
        track_choices = OrderedDict((unicode(t.id), t.title) for t in sorted(self.event.tracks,
                                                                             key=attrgetter('title')))
        type_choices = OrderedDict((unicode(t.id), t.name) for t in sorted(self.event.contribution_types,
                                                                           key=attrgetter('name')))
        self.static_items = OrderedDict([
            ('session', {'title': _('Session'),
                         'filter_choices': OrderedDict(list(session_empty.items()) +
                                                       list(session_choices.items()))}),
            ('track', {'title': _('Track'),
                       'filter_choices': OrderedDict(list(track_empty.items()) +
                                                     list(track_choices.items()))}),
            ('type', {'title': _('Type'),
                      'filter_choices': OrderedDict(list(type_empty.items()) +
                                                    list(type_choices.items()))}),
            ('status', {'title': _('Status'), 'filter_choices': {'scheduled': _('Scheduled'),
                                                                 'unscheduled': _('Not scheduled')}})
        ])

        self.list_config = self._get_config()
Example 29
    def show_page(self, success):
        """  Display main course list page """
        username = self.user_manager.session_username()
        user_info = self.database.users.find_one({"username": username})

        all_courses = self.course_factory.get_all_courses()

        # Display
        open_courses = {courseid: course for courseid, course in all_courses.items()
                        if self.user_manager.course_is_open_to_user(course, username, False) and
                        self.user_manager.course_is_user_registered(course, username)}
        open_courses = OrderedDict(sorted(iter(open_courses.items()), key=lambda x: x[1].get_name(self.user_manager.session_language())))

        last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": {"$in": list(open_courses.keys())}})
        except_free_last_submissions = []
        for submission in last_submissions:
            try:
                submission["task"] = open_courses[submission['courseid']].get_task(submission['taskid'])
                except_free_last_submissions.append(submission)
            except Exception:
                pass

        registerable_courses = {courseid: course for courseid, course in all_courses.items() if
                                not self.user_manager.course_is_user_registered(course, username) and
                                course.is_registration_possible(user_info)}

        registerable_courses = OrderedDict(sorted(iter(registerable_courses.items()), key=lambda x: x[1].get_name(self.user_manager.session_language())))

        return self.template_helper.get_renderer().mycourses(open_courses, registerable_courses, except_free_last_submissions, success)
Example 30
class IdleConnectionManager(object):
    def __init__(self, connections_max_idle_ms):
        if connections_max_idle_ms > 0:
            self.connections_max_idle = connections_max_idle_ms / 1000
        else:
            self.connections_max_idle = float('inf')
        self.next_idle_close_check_time = None
        self.update_next_idle_close_check_time(time.time())
        self.lru_connections = OrderedDict()

    def update(self, conn_id):
        # order should reflect last-update
        if conn_id in self.lru_connections:
            del self.lru_connections[conn_id]
        self.lru_connections[conn_id] = time.time()

    def remove(self, conn_id):
        if conn_id in self.lru_connections:
            del self.lru_connections[conn_id]

    def is_expired(self, conn_id):
        if conn_id not in self.lru_connections:
            return None
        return time.time() >= self.lru_connections[conn_id] + self.connections_max_idle

    def next_check_ms(self):
        now = time.time()
        if not self.lru_connections:
            return float('inf')
        elif self.next_idle_close_check_time <= now:
            return 0
        else:
            return int((self.next_idle_close_check_time - now) * 1000)

    def update_next_idle_close_check_time(self, ts):
        self.next_idle_close_check_time = ts + self.connections_max_idle

    def poll_expired_connection(self):
        if time.time() < self.next_idle_close_check_time:
            return None

        if not len(self.lru_connections):
            return None

        oldest_conn_id = None
        oldest_ts = None
        if OrderedDict is dict:
            # no real OrderedDict available (it fell back to plain dict),
            # so scan every connection for the oldest timestamp
            for conn_id, ts in self.lru_connections.items():
                if oldest_conn_id is None or ts < oldest_ts:
                    oldest_conn_id = conn_id
                    oldest_ts = ts
        else:
            # the first item of the OrderedDict is the least recently updated
            (oldest_conn_id, oldest_ts) = next(iter(self.lru_connections.items()))

        self.update_next_idle_close_check_time(oldest_ts)

        if time.time() >= oldest_ts + self.connections_max_idle:
            return (oldest_conn_id, oldest_ts)
        else:
            return None
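A quick sketch of the LRU bookkeeping done by update() above, with hypothetical connection ids: deleting and re-inserting a key moves it to the most-recently-used end, so the OrderedDict stays ordered from least to most recently used.

import time
from collections import OrderedDict

lru = OrderedDict()
for conn_id in ('a', 'b', 'c'):
    lru[conn_id] = time.time()
del lru['a']            # touch 'a': delete ...
lru['a'] = time.time()  # ... and re-insert at the most-recent end
print(next(iter(lru)))  # 'b' is now the least recently used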
Example 31
    def _api_query(
            self,
            verb: Literal['get', 'post'],
            method_type: Literal['Public', 'Private'],
            path: str,
            options: Optional[Dict] = None,
    ) -> Dict:
        """An IndependentrReserve query

        May raise RemoteError
        """
        url = f'{self.uri}/{method_type}/{path}'

        tries = QUERY_RETRY_TIMES
        while True:
            data = None
            log.debug(
                'IndependentReserve API Query',
                verb=verb,
                url=url,
                options=options,
            )
            if method_type == 'Private':
                nonce = int(time.time() * 1000)
                call_options = OrderedDict(options.copy()) if options else OrderedDict()
                call_options.update({
                    'nonce': nonce,
                    'apiKey': self.api_key,
                })
                # Make sure dict starts with apiKey, nonce
                call_options.move_to_end('nonce', last=False)
                call_options.move_to_end('apiKey', last=False)
                keys = [url] + [f'{k}={v}' for k, v in call_options.items()]
                message = ','.join(keys)
                signature = hmac.new(
                    self.secret,
                    msg=message.encode('utf-8'),
                    digestmod=hashlib.sha256,
                ).hexdigest().upper()
                # Make sure dict starts with apiKey, nonce, signature
                call_options['signature'] = str(signature)
                call_options.move_to_end('signature', last=False)
                call_options.move_to_end('nonce', last=False)
                call_options.move_to_end('apiKey', last=False)
                data = json.dumps(call_options, sort_keys=False)
            try:
                response = self.session.request(
                    method=verb,
                    url=url,
                    data=data,
                    timeout=DEFAULT_TIMEOUT_TUPLE,
                )
            except requests.exceptions.RequestException as e:
                raise RemoteError(f'IndependentReserve API request failed due to {str(e)}') from e

            if response.status_code not in (200, 429):
                raise RemoteError(
                    f'IndependentReserve api request for {response.url} failed with HTTP status '
                    f'code {response.status_code} and response {response.text}',
                )

            if response.status_code == 429:
                if tries >= 1:
                    backoff_seconds = 10 / tries
                    log.debug(
                        f'Got a 429 from IndependentReserve. Backing off for {backoff_seconds}')
                    gevent.sleep(backoff_seconds)
                    tries -= 1
                    continue

                # else
                raise RemoteError(
                    f'IndependentReserve api request for {response.url} failed with HTTP '
                    f'status code {response.status_code} and response {response.text}',
                )

            break  # else all good, we can break off the retry loop

        try:
            json_ret = json.loads(response.text)
        except JSONDecodeError as e:
            raise RemoteError('IndependentReserve returned invalid JSON response') from e

        return json_ret
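The move_to_end calls above only pin the parameter order before signing; a minimal sketch of that reordering with hypothetical values:

from collections import OrderedDict

call_options = OrderedDict([('primaryCurrencyCode', 'Xbt'), ('pageIndex', 1)])
call_options.update({'nonce': 1234567890, 'apiKey': 'my-api-key'})
# make sure the dict starts with apiKey, nonce
call_options.move_to_end('nonce', last=False)
call_options.move_to_end('apiKey', last=False)
print(','.join('%s=%s' % (k, v) for k, v in call_options.items()))
# apiKey=my-api-key,nonce=1234567890,primaryCurrencyCode=Xbt,pageIndex=1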
Example 32
def to_cvfd(
    vertdict,
    nodestart=None,
    nodestop=None,
    skip_hanging_node_check=False,
    verbose=False,
):
    """
    Convert a vertex dictionary into verts and iverts

    Parameters
    ----------
    vertdict
        vertdict is a dictionary {icell: [(x1, y1), (x2, y2), (x3, y3), ...]}

    nodestart : int
        starting node number. (default is zero)

    nodestop : int
        ending node number up to but not including. (default is len(vertdict))

    skip_hanging_node_check : bool
        skip the hanging node check.  this may only be necessary for quad-based
        grid refinement. (default is False)

    verbose : bool
        print messages to the screen. (default is False)

    Returns
    -------
    verts : ndarray
        array of x, y vertices

    iverts : list
        list containing a list for each cell

    """

    if nodestart is None:
        nodestart = 0
    if nodestop is None:
        nodestop = len(vertdict)
    ncells = nodestop - nodestart

    # First create vertexdict {(x1, y1): ivert1, (x2, y2): ivert2, ...} and
    # vertexlist [[ivert1, ivert2, ...], [ivert9, ivert10, ...], ...]
    # In the process, filter out any duplicate vertices
    vertexdict = OrderedDict()
    vertexlist = []
    xcyc = np.empty((ncells, 2), dtype=float)
    iv = 0
    nvertstart = 0
    if verbose:
        print("Converting vertdict to cvfd representation.")
        print("Number of cells in vertdict is: {}".format(len(vertdict)))
        print(
            "Cell {} up to {} (but not including) will be processed.".format(
                nodestart, nodestop
            )
        )
    for icell in range(nodestart, nodestop):
        points = vertdict[icell]
        nvertstart += len(points)
        xc, yc = centroid_of_polygon(points)
        xcyc[icell, 0] = xc
        xcyc[icell, 1] = yc
        ivertlist = []
        for p in points:
            pt = tuple(p)
            if pt in vertexdict:
                ivert = vertexdict[pt]
            else:
                vertexdict[pt] = iv
                ivert = iv
                iv += 1
            ivertlist.append(ivert)
        if ivertlist[0] != ivertlist[-1]:
            raise Exception("Cell {} not closed".format(icell))
        vertexlist.append(ivertlist)

    # next create vertex_cell_dict = {}; for each vertex, store list of cells
    # that use it
    nvert = len(vertexdict)
    if verbose:
        print("Started with {} vertices.".format(nvertstart))
        print("Ended up with {} vertices.".format(nvert))
        print(
            "Reduced total number of vertices by {}".format(nvertstart - nvert)
        )
        print("Creating dict of vertices with their associated cells")
    vertex_cell_dict = OrderedDict()
    for icell in range(nodestart, nodestop):
        ivertlist = vertexlist[icell]
        for ivert in ivertlist:
            if ivert in vertex_cell_dict:
                if icell not in vertex_cell_dict[ivert]:
                    vertex_cell_dict[ivert].append(icell)
            else:
                vertex_cell_dict[ivert] = [icell]
    if verbose:
        print("Done creating dict of vertices with their associated cells")

    # Now, go through each vertex and look at the cells that use the vertex.
    # For quadtree-like grids, there may be a need to add a new hanging node
    # vertex to the larger cell.
    vertexdict_keys = list(vertexdict.keys())
    if not skip_hanging_node_check:
        if verbose:
            print("Checking for hanging nodes.")
        finished = False
        while not finished:
            finished = True
            for ivert, cell_list in vertex_cell_dict.items():
                for icell1 in cell_list:
                    for icell2 in cell_list:

                        # skip if same cell
                        if icell1 == icell2:
                            continue

                        # skip if share face already
                        ivertlist1 = vertexlist[icell1]
                        ivertlist2 = vertexlist[icell2]
                        if shared_face(ivertlist1, ivertlist2):
                            continue

                        # don't share a face, so need to segment if necessary
                        segmented = segment_face(
                            ivert, ivertlist1, ivertlist2, vertexdict_keys
                        )
                        if segmented:
                            finished = False
        if verbose:
            print("Done checking for hanging nodes.")

    verts = np.array(vertexdict_keys)
    iverts = vertexlist

    return verts, iverts
Example 33
from collections import OrderedDict
favorite_languages = OrderedDict()

favorite_languages['jen'] = 'python'
favorite_languages['sarah'] = 'c'
favorite_languages['edward'] = 'ruby'
favorite_languages['phil'] = 'python'
for name, language in favorite_languages.items():
	print(name.title() + "'s favorite language is " +
		language.title() + ".")
Example 34
    def _cmake_cross_build_defines(self, the_os, os_ver):
        ret = OrderedDict()
        os_ver = get_env("CONAN_CMAKE_SYSTEM_VERSION", os_ver)
        toolchain_file = get_env("CONAN_CMAKE_TOOLCHAIN_FILE", "")

        if toolchain_file != "":
            logger.info("Setting Cross build toolchain file: %s" % toolchain_file)
            ret["CMAKE_TOOLCHAIN_FILE"] = toolchain_file
            return ret

        if self._cmake_system_name is False:
            return ret

        if self._cmake_system_name is not True:  # String not empty
            ret["CMAKE_SYSTEM_NAME"] = self._cmake_system_name
            ret["CMAKE_SYSTEM_VERSION"] = os_ver
        else:  # detect if we are cross building and the system name and version
            if cross_building(self._conanfile.settings):  # We are cross building
                build_os, _, host_os, _ = get_cross_building_settings(self._conanfile.settings)
                if host_os != build_os:
                    if the_os:  # the_os is the host (regular setting)
                        ret["CMAKE_SYSTEM_NAME"] = "Darwin" if the_os in ["iOS", "tvOS",
                                                                          "watchOS"] else the_os
                        if os_ver:
                            ret["CMAKE_SYSTEM_VERSION"] = os_ver
                    else:
                        ret["CMAKE_SYSTEM_NAME"] = "Generic"

        if ret:  # If enabled cross compile
            for env_var in ["CONAN_CMAKE_SYSTEM_PROCESSOR",
                            "CONAN_CMAKE_FIND_ROOT_PATH",
                            "CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM",
                            "CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY",
                            "CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE"]:

                value = os.getenv(env_var, None)
                if value:
                    ret[env_var] = value

            if self._conanfile and self._conanfile.deps_cpp_info.sysroot:
                sysroot_path = self._conanfile.deps_cpp_info.sysroot
            else:
                sysroot_path = os.getenv("CONAN_CMAKE_FIND_ROOT_PATH", None)

            if sysroot_path:
                # Needs to be set here, can't be managed in the cmake generator, CMake needs
                # to know about the sysroot before any other thing
                ret["CMAKE_SYSROOT"] = sysroot_path.replace("\\", "/")

            # Adjust Android stuff
            if self._os == "Android":
                arch_abi_settings = {"armv8": "arm64-v8a",
                                     "armv7": "armeabi-v7a",
                                     "armv7hf": "armeabi-v7a",
                                     "armv6": "armeabi-v6",
                                     "armv5": "armeabi"
                                     }.get(self._arch,
                                           self._arch)
                if arch_abi_settings:
                    ret["CMAKE_ANDROID_ARCH_ABI"] = arch_abi_settings

        logger.info("Setting Cross build flags: %s"
                    % ", ".join(["%s=%s" % (k, v) for k, v in ret.items()]))
        return ret
Example 35
def index():

    words = None

    forms = OrderedDict()
    forms['sample'] = WordCounterSample(g.current_lang)
    forms['paste'] = WordCounterPaste(
        'I am Sam\nSam I am\nThat Sam-I-am!\nThat Sam-I-am!\nI do not like that Sam-I-am!\nDo you like \ngreen eggs and ham?\nI do not like them, Sam-I-am.\nI do not like\ngreen eggs and ham.\nWould you like them \nhere or there?\nI would not like them\nhere or there.\nI would not like them anywhere.'
    )
    forms['upload'] = WordCounterUpload()
    forms['link'] = WordCounterLink()

    if request.method == 'POST':
        ignore_case = True
        ignore_stopwords = True

        btn_value = request.form['btn']
        sample_id = ''
        extras_to_save = {}

        if btn_value == 'paste':
            words = forms['paste'].data['area']
            ignore_case = forms[btn_value].data['ignore_case_paste']
            ignore_stopwords = forms[btn_value].data['ignore_stopwords_paste']
            logger.debug("New from paste: %d chars", len(words))
            title = _('your text')
        elif btn_value == 'upload':
            upload_file = forms['upload'].data['upload']
            words = process_upload(upload_file)
            ignore_case = forms[btn_value].data['ignore_case_upload']
            ignore_stopwords = forms[btn_value].data['ignore_stopwords_upload']
            title = upload_file.filename
            logger.debug("New from upload: %s", title)
        elif btn_value == 'sample':
            sample_source = forms['sample'].data['sample']
            samplename = filehandler.get_sample_title(sample_source)
            title = samplename
            ignore_case = forms[btn_value].data['ignore_case_sample']
            ignore_stopwords = forms[btn_value].data['ignore_stopwords_sample']
            sample_id = title + str(ignore_case) + str(ignore_stopwords)
            existing_doc_id = mongo.results_for_sample('wordcounter',
                                                       sample_id)
            if existing_doc_id is not None:
                logger.debug("Existing from sample: %s", sample_source)
                return redirect(request.url + 'results/' + existing_doc_id)
            logger.info("New from sample: %s", sample_source)
            sample_path = filehandler.get_sample_path(sample_source)
            logger.debug("  loading from %s", sample_path)
            words = filehandler.convert_to_txt(sample_path)
            extras_to_save = filehandler.get_sample(sample_source)
        elif btn_value == 'link':
            url = forms['link'].data['link']
            # TODO: should actually accept https
            if 'https://' in url:
                url = url.replace('https', 'http')
            elif 'http://' not in url:
                url = 'http://' + url
            logger.debug("New from link: %s", url)
            content = filehandler.download_webpage(url)
            words = content['text']
            ignore_case = forms[btn_value].data['ignore_case_link']
            ignore_stopwords = forms[btn_value].data['ignore_stopwords_link']
            title = _(content['title'])

        if words is not None:
            logger.debug("  about to process words")
            counts = process_words(words, ignore_case, ignore_stopwords,
                                   btn_value == 'sample')
            logger.debug("  finished counts, about to save")
            doc_id = mongo.save_words('wordcounter', counts, ignore_case,
                                      ignore_stopwords, title, sample_id,
                                      btn_value, extras_to_save)
            logger.debug("  saved")
            return redirect(request.url + 'results/' + doc_id + '?submit=true')

    return render_template('wordcounter.html',
                           forms=forms.items(),
                           tool_name='wordcounter',
                           max_file_size_in_mb=g.max_file_size_mb)
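
The view above delegates the actual counting to `process_words`, which is not shown here. A minimal sketch of what such a helper could look like (hypothetical: the real implementation, its stopword source, and its exact return format are assumptions):

from collections import Counter

def process_words(text, ignore_case, ignore_stopwords, is_sample=False,
                  stopwords=frozenset({'the', 'a', 'an', 'and', 'of', 'to'})):
    """Hypothetical stand-in: return (word, count) pairs, most frequent first."""
    if ignore_case:
        text = text.lower()
    tokens = text.split()
    if ignore_stopwords:
        tokens = [t for t in tokens if t.lower() not in stopwords]
    return Counter(tokens).most_common()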
Esempio n. 36
0
class ImageBuilderConfig:
    """
    Holds the complete image configuration settings. This includes
    * the image configuration itself as it is passed to the script
    * all noc block configurations found by the script
    * device configuration information as found in the bsp.yml of the device
      information passed to the script.
    """
    # pylint: disable=too-many-instance-attributes
    def __init__(self, config, blocks, device):
        self.noc_blocks = OrderedDict()
        self.stream_endpoints = OrderedDict()
        self.connections = []
        self.clk_domains = []
        self.block_ports = OrderedDict()
        self.io_ports = OrderedDict()
        self.clocks = OrderedDict()
        self.block_con = []
        self.io_port_con_ms = []
        self.io_port_con_bl = []
        self.clk_domain_con = []
        # read configuration from config dictionary
        self.__dict__.update(**config)
        self.blocks = blocks
        self.device = device
        self._check_configuration()
        self._update_sep_defaults()
        self._set_indices()
        self._collect_noc_ports()
        self._collect_io_ports()
        self._collect_clocks()
        self.pick_connections()
        self.pick_clk_domains()

    def _check_configuration(self):
        """
        Do plausibility checks on the current configuration
        """
        logging.info("Plausibility checks on the current configuration")
        failure = None
        if not any([bool(sep["ctrl"]) for sep in self.stream_endpoints.values()]):
            failure = "At least one streaming endpoint needs to have ctrl enabled"
        if failure:
            logging.error(failure)
            raise ValueError(failure)

    def _update_sep_defaults(self):
        """
        Update any missing stream endpoint attributes with default values
        """
        for sep in self.stream_endpoints:
            if "num_data_i" not in self.stream_endpoints[sep]:
                self.stream_endpoints[sep]["num_data_i"] = 1
            if "num_data_o" not in self.stream_endpoints[sep]:
                self.stream_endpoints[sep]["num_data_o"] = 1

    def _set_indices(self):
        """
        Add an index for each port of each stream endpoint and noc block.
        These indices are used to generate static_router.hex
        """
        start = 1
        i = 0
        for i, sep in enumerate(self.stream_endpoints.values()):
            sep["index"] = i + start
        start = start + i + 1
        for i, block in enumerate(self.noc_blocks.values()):
            block["index"] = start + i

    def _collect_noc_ports(self):
        """
        Create lookup table for noc blocks. The key is a tuple of block
        name, port name and flow direction. If any block port has num_ports > 1
        then unroll that port into multiple ports of the same name plus a
        number to make its name unique.
        """
        for name, block in self.noc_blocks.items():
            desc = self.blocks[block["block_desc"]]
            # Update per-instance parameters
            if not hasattr(desc, "parameters"):
                setattr(desc, "parameters", {})
            if "parameters" not in block:
                block["parameters"] = OrderedDict()
            for key in block["parameters"].keys():
                if key not in desc.parameters:
                    logging.error("Unknown parameter %s for block %s", key, name)
                    del block["parameters"][key]
            for param, value in desc.parameters.items():
                if param not in block["parameters"]:
                    block["parameters"][param] = value
            # Generate list of block ports, adding 'index' to each port's dict
            for direction in ("inputs", "outputs"):
                index = 0
                for port_name, port_info in desc.data[direction].items():
                    num_ports = 1
                    if "num_ports" in port_info:
                        parameter = port_info["num_ports"]
                        num_ports = parameter

                    # If num_ports isn't an integer, it could be an expression
                    # using values from the parameters section (e.g.,
                    # NUM_PORTS*NUM_BRANCHES for a stream-splitting block).
                    # If the parameter doesn't resolve to an integer, treat it
                    # as an expression that needs to be evaluated, hopefully to
                    # an integer.
                    if not isinstance(num_ports, int):
                        # Create a regex to find identifiers.
                        regex_ident = re.compile(r'[A-Za-z_][A-Za-z0-9_]*')

                        # Get a list of all identifiers in the num_ports
                        # expression and iterate over them all
                        idents = re.finditer(regex_ident, num_ports)
                        for ident in idents:
                            # If the identifier represents a valid parameter
                            # in the block, replace the identifier text with
                            # the value of the parameter. If no matching
                            # parameter is found, just leave the text in
                            # place. That may result in an exception being
                            # thrown from eval(), but we'll catch it and
                            # report an error a bit later on.
                            if ident[0] in block["parameters"]:
                                val = str(block["parameters"][ident[0]])
                                num_ports = re.sub(ident[0], val, num_ports)

                        # Now, with identifiers resolved to parameter values,
                        # attempt to evaluate the expression. If eval() fails,
                        # we'll catch the exception, num_ports will remain non-
                        # integral, and the if statement after the exception
                        # is caught will inform the user.
                        try:
                            num_ports = eval(num_ports)
                        except Exception:
                            pass

                    # Make sure the parameter resolved to a number
                    if not isinstance(num_ports, int):
                        logging.error(
                            "'num_ports' of port '%s' on block '%s' "
                            "resolved to invalid value of '%s'",
                            port_name, name, str(num_ports))
                        sys.exit(1)
                    if num_ports < 1 or num_ports > 64:
                        logging.error(
                            "'num_ports' of port '%s' on block '%s' "
                            "has invalid value '%s', must be in [1, 64]",
                            port_name, name, str(num_ports))
                        sys.exit(1)
                    if "num_ports" in port_info:
                        # If num_ports was a variable in the YAML, unroll into
                        # multiple ports
                        for i in range(num_ports):
                            new_port_info = port_info.copy()
                            new_port_info['index'] = index
                            index = index + 1
                            self.block_ports.update({(name, port_name + "_" \
                                + str(i), direction[:-1]) : new_port_info})
                    else:
                        port_info['index'] = index
                        self.block_ports.update(
                            {(name, port_name, direction[:-1]) : port_info})
                        index = index + 1
        ports = self.stream_endpoints
        for sep in self.stream_endpoints:
            inputs = {(sep, "in%d" % port, "input") :
                      ports[sep] for port in range(ports[sep]["num_data_i"])}
            self.block_ports.update(inputs)
            outputs = {(sep, "out%d" % port, "output") :
                       ports[sep] for port in range(ports[sep]["num_data_o"])}
            self.block_ports.update(outputs)

    def _collect_io_ports(self):
        """
        Create lookup table for io ports. The key is a tuple of block name
        (_device_ for io ports of the bsp), the io port name and flow
        direction.
        """
        for name, block in self.noc_blocks.items():
            desc = self.blocks[block["block_desc"]]
            if hasattr(desc, "io_ports"):
                self.io_ports.update({
                    (name, io, desc.io_ports[io]["drive"]):
                    desc.io_ports[io] for io in desc.io_ports})
        self.io_ports.update({
            ("_device_", io, self.device.io_ports[io]["drive"]):
            self.device.io_ports[io] for io in self.device.io_ports})

    def _collect_clocks(self):
        """
        Create lookup table for clocks. The key is a tuple of block name
        (_device_ for clocks of the bsp), the clock name and flow
        direction
        """
        for name, block in self.noc_blocks.items():
            desc = self.blocks[block["block_desc"]]
            if hasattr(desc, "clocks"):
                self.clocks.update({
                    (name, clk["name"]): clk for clk in desc.clocks})
        if hasattr(self.device, "clocks"):
            self.clocks.update({
                ("_device_", clk["name"]): clk for clk in self.device.clocks})
        # Add the implied clocks for the BSP
        self.clocks[("_device_", "rfnoc_ctrl")] = {"freq": '[]', "name": "rfnoc_ctrl"}
        self.clocks[("_device_", "rfnoc_chdr")] = {"freq": '[]', "name": "rfnoc_chdr"}

    def pick_clk_domains(self):
        """
        Filter clock domain list into a local list for easier access.
        Remaining connection items are printed as error and execution is
        aborted. Likewise, checks for unconnected clocks.
        """
        (self.clk_domain_con, self.clk_domains) = split(
            self.clk_domains, lambda con:
            (con["srcblk"], con["srcport"]) in self.clocks and
            (con["dstblk"], con["dstport"]) in self.clocks)

        # Check if there are unconnected clocks
        connected = [(con["dstblk"], con["dstport"]) for con in self.clk_domain_con]
        unconnected = []
        for clk in self.clocks:
            if clk[0] != "_device_" and \
               clk[1] not in ["rfnoc_ctrl", "rfnoc_chdr"] and \
               clk not in connected:
                unconnected.append(clk)
        if unconnected:
            logging.error("%d unresolved clk domain(s)", len(unconnected))
            for clk in unconnected:
                logging.error("    %s:%s", clk[0], clk[1])
            logging.error("Please specify the clock(s) to connect")
            sys.exit(1)

        if self.clk_domains:
            logging.error("%d Unresolved clk domain(s)", len(self.clk_domains))

            for connection in self.clk_domains:
                logging.error("    (%s-%s -> %s-%s)",
                              connection["srcblk"], connection["srcport"],
                              connection["dstblk"], connection["dstport"])
            logging.error("Source or destination domain not found")
            sys.exit(1)

    def pick_connections(self):
        """
        Sort connection list into three local lists for
         * input => output (block port to block port)
         * master => slave (io port to io port)
         * broadcaster => listener (io port to io port)
        Remaining connection items are printed as error and execution is
        aborted. Possible reasons are
         * undeclared block or io port
         * connection direction wrong (e.g. output => input)
         * mixed connection type (e.g. master => listener)
        """
        block_types = lambda type: filter(lambda key: key[2] == type, self.block_ports)
        io_types = lambda type: filter(lambda key: key[2] == type, self.io_ports)
        (self.block_con, self.connections) = split(
            self.connections, lambda con:
            (con["srcblk"], con["srcport"], "output") in block_types("output") and
            (con["dstblk"], con["dstport"], "input") in block_types("input"))
        (self.io_port_con_ms, self.connections) = split(
            self.connections, lambda con:
            (con["srcblk"], con["srcport"], "master") in io_types("master") and
            (con["dstblk"], con["dstport"], "slave") in  io_types("slave"))
        (self.io_port_con_bl, self.connections) = split(
            self.connections, lambda con:
            (con["srcblk"], con["srcport"], "broadcaster") in io_types("broadcaster") and
            (con["dstblk"], con["dstport"], "listener") in io_types("listener"))

        if self.connections:
            logging.error("%d Unresolved connection(s)", len(self.connections))

            for connection in self.connections:
                logging.error("    (%s-%s -> %s-%s)",
                              connection["srcblk"], connection["srcport"],
                              connection["dstblk"], connection["dstport"])
            logging.debug("    Make sure block ports are connected output "
                          "(src) to input (dst)")
            logging.debug("    Available block ports for connections:")
            for block in self.block_ports:
                logging.debug("        %s", (block,))
            logging.debug("    Make sure io ports are connected master      "
                          "(src) to slave    (dst)")
            logging.debug("                                  or broadcaster "
                          "(src) to listener (dst)")
            logging.debug("    Available io ports for connections:")
            for io_port in self.io_ports:
                logging.info("        %s", (io_port,))
            sys.exit(1)
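
The `num_ports` expression handling in `_collect_noc_ports` above can be isolated into a small standalone sketch (hypothetical helper name; the real builder inlines this logic):

import re

def resolve_num_ports(expr, parameters):
    """Substitute parameter identifiers in `expr` with their values, then try
    to evaluate; return the unevaluated string if evaluation fails, mirroring
    the builder's behaviour."""
    if isinstance(expr, int):
        return expr
    for ident in re.finditer(r'[A-Za-z_][A-Za-z0-9_]*', expr):
        if ident[0] in parameters:
            expr = re.sub(ident[0], str(parameters[ident[0]]), expr)
    try:
        return eval(expr)
    except Exception:
        return expr

# resolve_num_ports("NUM_PORTS*NUM_BRANCHES", {"NUM_PORTS": 2, "NUM_BRANCHES": 3}) -> 6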
Esempio n. 37
0
    """Apply `parse_file` to a directory"""
    for fn in os.listdir(dpath):
        ffn = os.path.join(dpath, fn)
        mdd.update(parse_file(ffn, mdd))
    return mdd


if __name__ == "__main__":
    print("# Table 1 - Author metadata - Running [{}]".format(sys.argv[0]))
    print("- Writing author metadata to [{}]".format(outf))
    # delete any old version, since we write in append mode
    if os.path.exists(outf):
        os.remove(outf)
    # collect table data
    corpus_md = OrderedDict()
    for period_name, dname in period_paths.items():
        md = {u"Europa": {"F": 0, "M": 0},
              u"América": {"F": 0, "M": 0},
              "sonnets": 0}
        if DBG:
            print("- {}".format(dname))
        corpus_md[period_name] = parse_dir(dname, md)
    # write out table data
    with open(outf, "a") as oufh:
        for per, vals in corpus_md.items():
            am = sum(vals[u"América"].values())
            eu = sum(vals["Europa"].values())
            oufh.write("# {}\n".format(per))
            oufh.write("- Sonnets: {}\n".format(vals["sonnets"]))
            oufh.write("- Authors: {}\n".format(am + eu))
            oufh.write("- Female: {}\n".format(vals["Europa"]["F"] + vals[u"América"]["F"]))
Esempio n. 38
0
import os
import json
import argparse
from collections import OrderedDict

# Parser for the script's arguments
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="The path to the categories' dir")
parser.add_argument("-o", "--output", help="The file path to save the result")
args = parser.parse_args()

data = OrderedDict()
for root, dirs, files in os.walk(args.directory):
    if not dirs:
        parts = root.split('/')
        subcategory = parts[-1]
        category = parts[-2]
        data.setdefault(category, OrderedDict())
        data[category].setdefault(subcategory, 0)
        for file in files:
            extension = file.split('.')[-1].lower()
            if extension in ['jpg', 'jpeg', 'png', 'gif']:
                data[category][subcategory] += 1
            else:
                print(root, file)

print(json.dumps(data, ensure_ascii=False, indent=4))
print('total', sum([c for k, s in data.items() for x, c in s.items()]))
if args.output:
    with open(args.output, 'w') as fp:
        json.dump(data, fp, ensure_ascii=False, indent=4)
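
For illustration, given a tree like the following (directory and file names are hypothetical), the script would print a nested count per category and subcategory:

#   categories/animals/cats/1.jpg
#   categories/animals/dogs/1.png
#   categories/animals/dogs/2.gif
#
# produces JSON shaped like:
#   {"animals": {"cats": 1, "dogs": 2}}
# followed by the line: total 3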
Esempio n. 39
0
class Network:
    def __init__(
        self,
        name=None,  # Network name. Used to select TensorFlow name and variable scopes.
        func=None,  # Fully qualified name of the underlying network construction function.
        **static_kwargs
    ):  # Keyword arguments to be passed in to the network construction function.

        self._init_fields()
        self.name = name
        self.static_kwargs = dict(static_kwargs)

        # Init build func.
        module, self._build_func_name = import_module(func)
        self._build_module_src = inspect.getsource(module)
        self._build_func = find_obj_in_module(module, self._build_func_name)

        # Init graph.
        self._init_graph()
        self.reset_vars()

    def _init_fields(self):
        self.name = None  # User-specified name, defaults to build func name if None.
        self.scope = None  # Unique TF graph scope, derived from the user-specified name.
        self.static_kwargs = dict(
        )  # Arguments passed to the user-supplied build func.
        self.num_inputs = 0  # Number of input tensors.
        self.num_outputs = 0  # Number of output tensors.
        self.input_shapes = [
            []
        ]  # Input tensor shapes (NC or NCHW), including minibatch dimension.
        self.output_shapes = [
            []
        ]  # Output tensor shapes (NC or NCHW), including minibatch dimension.
        self.input_shape = []  # Short-hand for input_shapes[0].
        self.output_shape = []  # Short-hand for output_shapes[0].
        self.input_templates = []  # Input placeholders in the template graph.
        self.output_templates = []  # Output tensors in the template graph.
        self.input_names = []  # Name string for each input.
        self.output_names = []  # Name string for each output.
        self.vars = OrderedDict()  # All variables (localname => var).
        self.trainables = OrderedDict(
        )  # Trainable variables (localname => var).
        self._build_func = None  # User-supplied build function that constructs the network.
        self._build_func_name = None  # Name of the build function.
        self._build_module_src = None  # Full source code of the module containing the build function.
        self._run_cache = dict()  # Cached graph data for Network.run().

    def _init_graph(self):
        # Collect inputs.
        self.input_names = []
        for param in inspect.signature(self._build_func).parameters.values():
            if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
                self.input_names.append(param.name)
        self.num_inputs = len(self.input_names)
        assert self.num_inputs >= 1

        # Choose name and scope.
        if self.name is None:
            self.name = self._build_func_name
        self.scope = tf.get_default_graph().unique_name(self.name.replace(
            '/', '_'),
                                                        mark_as_used=False)

        # Build template graph.
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            assert tf.get_variable_scope().name == self.scope
            with absolute_name_scope(
                    self.scope):  # ignore surrounding name_scope
                with tf.control_dependencies(
                        None):  # ignore surrounding control_dependencies
                    self.input_templates = [
                        tf.placeholder(tf.float32, name=name)
                        for name in self.input_names
                    ]
                    out_expr = self._build_func(*self.input_templates,
                                                is_template_graph=True,
                                                **self.static_kwargs)

        # Collect outputs.
        assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        self.output_templates = [
            out_expr
        ] if is_tf_expression(out_expr) else list(out_expr)
        self.output_names = [
            t.name.split('/')[-1].split(':')[0] for t in self.output_templates
        ]
        self.num_outputs = len(self.output_templates)
        assert self.num_outputs >= 1

        # Populate remaining fields.
        self.input_shapes = [
            shape_to_list(t.shape) for t in self.input_templates
        ]
        self.output_shapes = [
            shape_to_list(t.shape) for t in self.output_templates
        ]
        self.input_shape = self.input_shapes[0]
        self.output_shape = self.output_shapes[0]
        self.vars = OrderedDict([
            (self.get_var_localname(var), var)
            for var in tf.global_variables(self.scope + '/')
        ])
        self.trainables = OrderedDict([
            (self.get_var_localname(var), var)
            for var in tf.trainable_variables(self.scope + '/')
        ])

    # Run initializers for all variables defined by this network.
    def reset_vars(self):
        run([var.initializer for var in self.vars.values()])

    # Run initializers for all trainable variables defined by this network.
    def reset_trainables(self):
        run([var.initializer for var in self.trainables.values()])

    # Get TensorFlow expression(s) for the output(s) of this network, given the inputs.
    def get_output_for(self, *in_expr, return_as_list=False, **dynamic_kwargs):
        assert len(in_expr) == self.num_inputs
        all_kwargs = dict(self.static_kwargs)
        all_kwargs.update(dynamic_kwargs)
        with tf.variable_scope(self.scope, reuse=True):
            assert tf.get_variable_scope().name == self.scope
            named_inputs = [
                tf.identity(expr, name=name)
                for expr, name in zip(in_expr, self.input_names)
            ]
            out_expr = self._build_func(*named_inputs, **all_kwargs)
        assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        if return_as_list:
            out_expr = [out_expr
                        ] if is_tf_expression(out_expr) else list(out_expr)
        return out_expr

    # Get the local name of a given variable, excluding any surrounding name scopes.
    def get_var_localname(self, var_or_globalname):
        assert is_tf_expression(var_or_globalname) or isinstance(
            var_or_globalname, str)
        globalname = var_or_globalname if isinstance(
            var_or_globalname, str) else var_or_globalname.name
        assert globalname.startswith(self.scope + '/')
        localname = globalname[len(self.scope) + 1:]
        localname = localname.split(':')[0]
        return localname

    # Find variable by local or global name.
    def find_var(self, var_or_localname):
        assert is_tf_expression(var_or_localname) or isinstance(
            var_or_localname, str)
        return self.vars[var_or_localname] if isinstance(
            var_or_localname, str) else var_or_localname

    # Get the value of a given variable as NumPy array.
    # Note: This method is very inefficient -- prefer to use tfutil.run(list_of_vars) whenever possible.
    def get_var(self, var_or_localname):
        return self.find_var(var_or_localname).eval()

    # Set the value of a given variable based on the given NumPy array.
    # Note: This method is very inefficient -- prefer to use tfutil.set_vars() whenever possible.
    def set_var(self, var_or_localname, new_value):
        return set_vars({self.find_var(var_or_localname): new_value})

    # Pickle export.
    def __getstate__(self):
        return {
            'version': 2,
            'name': self.name,
            'static_kwargs': self.static_kwargs,
            'build_module_src': self._build_module_src,
            'build_func_name': self._build_func_name,
            'variables':
            list(zip(self.vars.keys(), run(list(self.vars.values()))))
        }

    # Pickle import.
    def __setstate__(self, state):
        self._init_fields()

        # Execute custom import handlers.
        for handler in network_import_handlers:
            state = handler(state)

        # Set basic fields.
        assert state['version'] == 2
        self.name = state['name']
        self.static_kwargs = state['static_kwargs']
        self._build_module_src = state['build_module_src']
        self._build_func_name = state['build_func_name']

        # Parse imported module.
        module = imp.new_module('_tfutil_network_import_module_%d' %
                                len(_network_import_modules))
        exec(self._build_module_src, module.__dict__)
        self._build_func = find_obj_in_module(module, self._build_func_name)
        _network_import_modules.append(module)  # avoid gc

        # Init graph.
        self._init_graph()
        self.reset_vars()
        set_vars(
            {self.find_var(name): value
             for name, value in state['variables']})

    # Create a clone of this network with its own copy of the variables.
    def clone(self, name=None):
        net = object.__new__(Network)
        net._init_fields()
        net.name = name if name is not None else self.name
        net.static_kwargs = dict(self.static_kwargs)
        net._build_module_src = self._build_module_src
        net._build_func_name = self._build_func_name
        net._build_func = self._build_func
        net._init_graph()
        net.copy_vars_from(self)
        return net

    # Copy the values of all variables from the given network.
    def copy_vars_from(self, src_net):
        assert isinstance(src_net, Network)
        name_to_value = run(
            {name: src_net.find_var(name)
             for name in self.vars.keys()})
        set_vars({
            self.find_var(name): value
            for name, value in name_to_value.items()
        })

    # Copy the values of all trainable variables from the given network.
    def copy_trainables_from(self, src_net):
        assert isinstance(src_net, Network)
        name_to_value = run(
            {name: src_net.find_var(name)
             for name in self.trainables.keys()})
        set_vars({
            self.find_var(name): value
            for name, value in name_to_value.items()
        })

    # Construct a TensorFlow op that updates the variables of this network
    # to be slightly closer to those of the given network.
    def setup_as_moving_average_of(self,
                                   src_net,
                                   beta=0.99,
                                   beta_nontrainable=0.0):
        assert isinstance(src_net, Network)
        with absolute_name_scope(self.scope):
            with tf.name_scope('MovingAvg'):
                ops = []
                for name, var in self.vars.items():
                    if name in src_net.vars:
                        cur_beta = beta if name in self.trainables else beta_nontrainable
                        new_value = lerp(src_net.vars[name], var, cur_beta)
                        ops.append(var.assign(new_value))
                return tf.group(*ops)

    # Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
    def run(
        self,
        *in_arrays,
        return_as_list=False,  # True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
        print_progress=False,  # Print progress to the console? Useful for very large input arrays.
        minibatch_size=None,  # Maximum minibatch size to use, None = disable batching.
        num_gpus=1,  # Number of GPUs to use.
        out_mul=1.0,  # Multiplicative constant to apply to the output(s).
        out_add=0.0,  # Additive constant to apply to the output(s).
        out_shrink=1,  # Shrink the spatial dimensions of the output(s) by the given factor.
        out_dtype=None,  # Convert the output to the specified data type.
        **dynamic_kwargs
    ):  # Additional keyword arguments to pass into the network construction function.

        assert len(in_arrays) == self.num_inputs
        num_items = in_arrays[0].shape[0]
        if minibatch_size is None:
            minibatch_size = num_items
        key = str([
            list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add,
            out_shrink, out_dtype
        ])

        # Build graph.
        if key not in self._run_cache:
            with absolute_name_scope(self.scope +
                                     '/Run'), tf.control_dependencies(None):
                in_split = list(
                    zip(*[tf.split(x, num_gpus)
                          for x in self.input_templates]))
                out_split = []
                for gpu in range(num_gpus):
                    with tf.device('/gpu:%d' % gpu):
                        out_expr = self.get_output_for(*in_split[gpu],
                                                       return_as_list=True,
                                                       **dynamic_kwargs)
                        if out_mul != 1.0:
                            out_expr = [x * out_mul for x in out_expr]
                        if out_add != 0.0:
                            out_expr = [x + out_add for x in out_expr]
                        if out_shrink > 1:
                            ksize = [1, 1, out_shrink, out_shrink]
                            out_expr = [
                                tf.nn.avg_pool(x,
                                               ksize=ksize,
                                               strides=ksize,
                                               padding='VALID',
                                               data_format='NCHW')
                                for x in out_expr
                            ]
                        if out_dtype is not None:
                            if tf.as_dtype(out_dtype).is_integer:
                                out_expr = [tf.round(x) for x in out_expr]
                            out_expr = [
                                tf.saturate_cast(x, out_dtype)
                                for x in out_expr
                            ]
                        out_split.append(out_expr)
                self._run_cache[key] = [
                    tf.concat(outputs, axis=0) for outputs in zip(*out_split)
                ]

        # Run minibatches.
        out_expr = self._run_cache[key]
        out_arrays = [
            np.empty([num_items] + shape_to_list(expr.shape)[1:],
                     expr.dtype.name) for expr in out_expr
        ]
        for mb_begin in range(0, num_items, minibatch_size):
            if print_progress:
                print('\r%d / %d' % (mb_begin, num_items), end='')
            mb_end = min(mb_begin + minibatch_size, num_items)
            mb_in = [src[mb_begin:mb_end] for src in in_arrays]
            mb_out = tf.get_default_session().run(
                out_expr, dict(zip(self.input_templates, mb_in)))
            for dst, src in zip(out_arrays, mb_out):
                dst[mb_begin:mb_end] = src

        # Done.
        if print_progress:
            print('\r%d / %d' % (num_items, num_items))
        if not return_as_list:
            out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(
                out_arrays)
        return out_arrays

    # Returns a list of (name, output_expr, trainable_vars) tuples corresponding to
    # individual layers of the network. Mainly intended to be used for reporting.
    def list_layers(self):
        patterns_to_ignore = [
            '/Setter', '/new_value', '/Shape', '/strided_slice', '/Cast',
            '/concat'
        ]
        all_ops = tf.get_default_graph().get_operations()
        all_ops = [
            op for op in all_ops
            if not any(p in op.name for p in patterns_to_ignore)
        ]
        layers = []

        def recurse(scope, parent_ops, level):
            prefix = scope + '/'
            ops = [
                op for op in parent_ops
                if op.name == scope or op.name.startswith(prefix)
            ]

            # Does not contain leaf nodes => expand immediate children.
            if level == 0 or all('/' in op.name[len(prefix):] for op in ops):
                visited = set()
                for op in ops:
                    suffix = op.name[len(prefix):]
                    if '/' in suffix:
                        suffix = suffix[:suffix.index('/')]
                    if suffix not in visited:
                        recurse(prefix + suffix, ops, level + 1)
                        visited.add(suffix)

            # Otherwise => interpret as a layer.
            else:
                layer_name = scope[len(self.scope) + 1:]
                layer_output = ops[-1].outputs[0]
                layer_trainables = [
                    op.outputs[0] for op in ops
                    if op.type.startswith('Variable')
                    and self.get_var_localname(op.name) in self.trainables
                ]
                layers.append((layer_name, layer_output, layer_trainables))

        recurse(self.scope, all_ops, 0)
        return layers

    # Print a summary table of the network structure.
    def print_layers(self, title=None, hide_layers_with_no_params=False):
        if title is None: title = self.name
        print()
        print('%-28s%-12s%-24s%-24s' %
              (title, 'Params', 'OutputShape', 'WeightShape'))
        print('%-28s%-12s%-24s%-24s' % (('---', ) * 4))

        total_params = 0
        for layer_name, layer_output, layer_trainables in self.list_layers():
            weights = [
                var for var in layer_trainables
                if var.name.endswith('/weight:0')
            ]
            num_params = sum(
                np.prod(shape_to_list(var.shape)) for var in layer_trainables)
            total_params += num_params
            if hide_layers_with_no_params and num_params == 0:
                continue

            print('%-28s%-12s%-24s%-24s' %
                  (layer_name, num_params if num_params else '-',
                   layer_output.shape,
                   weights[0].shape if len(weights) == 1 else '-'))

        print('%-28s%-12s%-24s%-24s' % (('---', ) * 4))
        print('%-28s%-12s%-24s%-24s' % ('Total', total_params, '', ''))
        print()

    # Construct summary ops to include histograms of all trainable parameters in TensorBoard.
    def setup_weight_histograms(self, title=None):
        if title is None: title = self.name
        with tf.name_scope(None), tf.device(None), tf.control_dependencies(
                None):
            for localname, var in self.trainables.items():
                if '/' in localname:
                    p = localname.split('/')
                    name = title + '_' + p[-1] + '/' + '_'.join(p[:-1])
                else:
                    name = title + '_toplevel/' + localname
                tf.summary.histogram(name, var)


#----------------------------------------------------------------------------
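
The minibatch loop inside `Network.run()` follows a common NumPy pattern that can be shown in isolation (a sketch of the batching pattern only, not of the TensorFlow execution):

import numpy as np

def run_in_minibatches(fn, arr, minibatch_size):
    """Allocate the full output once, then fill it slice by slice,
    as Network.run() does with its out_arrays."""
    out = np.empty_like(arr)
    for mb_begin in range(0, arr.shape[0], minibatch_size):
        mb_end = min(mb_begin + minibatch_size, arr.shape[0])
        out[mb_begin:mb_end] = fn(arr[mb_begin:mb_end])
    return out

# run_in_minibatches(lambda x: x * 2.0, np.ones((10, 3)), 4).shape  # -> (10, 3)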
Esempio n. 40
0
class Emitter:
    '''
    Event emitter that keeps observers per key and hands emitted events to a
    dispatcher, which delivers them back through `notify`.
    '''
    def __init__(self, event=None, area=None, dispatcher=None):

        self.listeners = OrderedDict()

        self._event_clz = Event if event is None else event

        self._dispatcher = Dispatcher() if dispatcher is None else dispatcher

        self.area = area

    @property
    def dispatcher(self):
        return self._dispatcher

    @dispatcher.setter
    def dispatcher(self, new_dispatcher):

        self._dispatcher = new_dispatcher

    def subscribe(self, observer, key=None):

        self.listeners.setdefault(key, []).append(observer)

    def bind(self, subscriber):

        # get all methods of subscriber
        methods = inspect.getmembers(subscriber, inspect.ismethod)

        # trim down the list to only those with our decorator attrib
        methods = [(name, method) for name, method in methods
                   if hasattr(method, Listener.DECORATOR_MARK)]

        # subscribe all of the ones that match our event type
        for method_name, method in methods:

            for listener in getattr(method, Listener.DECORATOR_MARK):

                event_clz = listener.event_clz
                key = listener.key
                area = listener.area

                if event_clz == self._event_clz and area == self.area:
                    self.subscribe(method, key=key)

    def __call__(self, key=None, **kwargs):

        event = self._event_clz(emitter=self, **kwargs)

        self._dispatcher.enqueue(emitter=self, event=event, key=key)

    def notify(self, event, key):

        for observer_key, observers in self.listeners.items():

            if observer_key != key:
                continue

            for observer in observers:
                observer(event)
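
A minimal usage sketch of the Emitter; the stub Event and Dispatcher below are assumptions standing in for the module's real classes, and dispatch happens synchronously for brevity:

class _StubEvent:
    def __init__(self, emitter=None, **kwargs):
        self.emitter = emitter
        self.payload = kwargs

class _StubDispatcher:
    def enqueue(self, emitter, event, key=None):
        emitter.notify(event, key)  # deliver immediately instead of queueing

emitter = Emitter(event=_StubEvent, dispatcher=_StubDispatcher())
emitter.subscribe(lambda ev: print('got', ev.payload), key='resize')
emitter(key='resize', width=800, height=600)  # -> got {'width': 800, 'height': 600}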
Esempio n. 41
0
class Step(object):
    item_result_class = ItemResult
    successful_statuses = []
    error_statuses = []

    def __init__(self, name):
        self.name = name
        self._results = OrderedDict()
        self.start_time = 0
        self.finish_time = 0

    @property
    def time_taken(self):
        return self.finish_time - self.start_time

    @property
    def number_of_items(self):
        return len(self._results)

    def add_item(self, item):
        result = self.item_result_class(item)
        self._results[item] = result
        return result

    def start(self):
        self.start_time = time.time()

    def finish(self):
        self.finish_time = time.time()

    def items_for_status(self, status):
        return [result for item, result in self._results.items()
                if result.status == status]

    def items_for_statuses(self, statuses):
        items = []
        for status in statuses:
            items += self.items_for_status(status)
        return items

    @property
    def all_items(self):
        return list(itervalues(self._results))

    def get_infos(self):
        raise NotImplementedError

    def __getitem__(self, key):
        return self._results[key]

    @property
    def successful_results(self):
        results = []
        for successful_status in self.successful_statuses:
            results += self.items_for_status(successful_status)
        return results

    @property
    def error_results(self):
        results = []
        for error_status in self.error_statuses:
            results += self.items_for_status(error_status)
        return results

    @property
    def successful(self):
        return not self.has_errors

    @property
    def has_errors(self):
        return len(self.error_results) > 0
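
A hedged sketch of how a concrete step might be driven (the status strings are assumptions, and ItemResult is assumed to expose a writable `status` attribute, as `items_for_status` implies):

class CompileStep(Step):
    successful_statuses = ['passed']
    error_statuses = ['failed']

step = CompileStep('compile')
step.start()
step.add_item('module_a').status = 'passed'
step.add_item('module_b').status = 'failed'
step.finish()
print(step.number_of_items, step.successful, step.error_results)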
Esempio n. 42
0
class Space(Makeable, metaclass=ABCMeta):
    """
    Space class (based on and compatible with openAI Spaces).
    Provides a classification for state-, action-, reward- and other core spaces.
    """
    # Global unique Space ID.
    _ID = -1

    def __init__(self, shape=None, value=None, main_axes=None):
        """
        Args:
            shape (Optional[Tuple[int]]): The shape of this Space, not including any main axes.

            value (any): A value to directly assign to this Space. Use "zeros" for all zeros initialization, "random",
                for a random-sample initialization.

            main_axes (Optional[List[Str]]): A list of names of main axes for this Space in the correct order.
                E.g. ["B", "T"] for adding a batch and a time rank.
                Alternatively to pure names, a tuple can be passed in for a name/dimension pair giving the exact
                dimension of the axis, e.g. [("B", 500), "T"] would create a Space with batch size 500 and a time
                axis of unknown dimension.
        """
        super().__init__()

        self.shape = shape

        # Parent Space for usage in nested ContainerSpace structures.
        self.parent = None

        # Convenience flag to quickly check whether a Space is possibly time-major
        # (only if the "T" axis comes before the "B" axis).
        #self.time_major = None
        # The main axes of this Space (can be customized, but usually contain "B" (batch), and/or "T" (time)).
        self.main_axes = OrderedDict()
        if main_axes is not None:
            for main_axis in force_list(main_axes):
                if isinstance(main_axis, (tuple, list)):
                    self._add_main_axis(main_axis[0], position=-1, dimension=main_axis[1])
                elif isinstance(main_axis, dict):
                    assert len(main_axis) == 1
                    self._add_main_axis(list(main_axis.keys())[0], position=-1, dimension=list(main_axis.values())[0])
                else:
                    self._add_main_axis(main_axis, position=-1)

        # Each space has an optional value, that can store data of that space.
        self.value = None
        # Always double-check initial values if given.
        if value is not None:
            if value == "zeros":
                self.assign(self.sample(fill_value=0), check=True)
            elif value == "random":
                self.assign(self.sample(), check=True)
            else:
                self.assign(value, check=True)

    @abstractmethod
    def get_shape(self, include_main_axes=False, main_axis_value=None, **kwargs):
        """
        Returns the shape of this Space as a tuple with certain additional axes at the front (main-axes) or the back
        (e.g. categories in Int Spaces).

        Args:
            include_main_axes (bool): Whether to include all main-axes in the returned tuple as None.
            main_axis_value (any): The value to use for the main-axes iff `include_main_axes` is True.

        Returns:
            tuple: The shape of this Space as a tuple.
        """
        raise NotImplementedError

    @property
    def rank(self):
        """
        Returns:
            int: The rank of the Space, not including main-axes
            (e.g. 3 for a space with shape=(10, 7, 5) OR 2 for a space with shape=(1,2) and main-axes "B" and "T").
        """
        return len(self.shape)

    @property
    def reduction_axes(self):
        """
        Returns:
            List[int]: A list of axes to be reduced by any tf.reduce... operation sparing out the main-axes.
                E.g.: [-1, -2, -3] for a space with shape=(2,4,6) and any number of main_axes.
        """
        return list(reversed(range(-self.rank, 0)))

    @abstractmethod
    def structure(self):
        """
        Returns a corresponding (possibly nested primitive) structure (dict, tuple) with 0 values at the leaves
        (primitive Spaces).
        """
        raise NotImplementedError

    @property
    @abstractmethod
    def flat_dim(self):
        """
        Returns:
            int: The length of a flattened vector derived from this Space.
        """
        raise NotImplementedError

    def as_one_hot_float_space(self):
        """
        Returns:
            This Space except that all int elements/sub-components, etc.. have been replaced by their corresponding
            one-hot Float counterparts. E.g. An Int(3, shape=(2,)) will convert to Float(0.0, 1.0, shape=(2,3))).
            A Dict/Tuple convert each of their child Space through this method as well.
        """
        return copy.deepcopy(self)  # default: return copy of self

    @abstractmethod
    def sample(self, size=None, fill_value=None, **kwargs):
        """
        Uniformly randomly samples an element from this space. This is for testing purposes, e.g. to simulate
        a random environment.

        Args:
            size (Optional[int,Tuple[int]): The size of the `main_axes` to use for sampling.
                E.g.:
                - main_axes=["B"] + `size`=None -> return a batch of 1.
                - main_axes=["B"] + `size`=5 -> return a batch of 5.
                - main_axes={"B": 10} + `size`=5 -> return a batch of 5 (meaning: ignores axis' fixed-size).
                - main_axes=["T"] + `size`=5 -> return a time-series of len 5.
                - main_axes={"T": 10} + `size`=5 -> return a time-series of len 5 (meaning: ignores axis' fixed-size).
                - main_axes=["B", "T"] + `size`=5 -> ERROR (must provide both main_axes).
                - main_axes=["B", "T"] + `size`=(5, 2) -> return a batch of 5 with time-series of len 2.
                - main_axes=["T", "B"] + `size`=(5, 2) -> return a time-series of len 5 with batches of 2 (time major).
                - main_axes={"B": 5, "T": 2} + `size`=None -> return a batch of 5 with time-series of len 2.
                - main_axes=["B", "T"] + `size`=None -> return a batch of 1 with time-series of len 1.

            fill_value (Optional[any]): The number or initializer specifier to fill the sample. Can be used to create
                a (non-random) sample with a certain fill value in all elements.
                TODO: support initializer spec-strings like 'normal', 'truncated_normal', etc..

        Returns:
            any: The sampled element(s).
        """
        raise NotImplementedError

    @abstractmethod
    def contains(self, sample):
        """
        Checks whether this space contains the given sample (including all main-axes).

        Args:
            sample: The element to check.

        Returns:
            bool: Whether sample is a valid member of this space.
        """
        raise NotImplementedError

    @abstractmethod
    def zeros(self, size=None):
        """
        Args:
            size (Optional): See `Space.sample()`.

        Returns:
            np.ndarray: `size` zero samples where all values are zero and have the correct type.
        """
        raise NotImplementedError

    def assign(self, value, check=False):
        """
        Overrides our value with the given one.

        Args:
            value (any): The new value to assign to `self.value`.
            check (bool): If True, double check the new value against this Space.
        """
        if check is True:
            assert self.contains(value)
        self.value = value

    def with_batch(self, position=0, dimension=None):
        """
        Returns a deepcopy of this Space, but with "B" added to the given position and set to the provided dimension.

        Args:
            position (int): The position at which to add the batch axis.
            dimension (Optional[int]): The dimension of the batch axis, None for no particular dimension.

        Returns:
            Space: The deepcopy of this Space, but with "B" axis.
        """
        cp = self.copy()
        cp._add_main_axis("B", position=position, dimension=dimension)
        return cp

    def with_time(self, position=0, dimension=None):
        """
        Returns a deepcopy of this Space, but with "T" added to the given position and set to the provided dimension.

        Args:
            position (int): The position at which to add the time axis.
            dimension (Optional[int]): The dimension of the time axis, None for no particular dimension.

        Returns:
            Space: The deepcopy of this Space, but with "T" axis.
        """
        cp = self.copy()
        cp._add_main_axis("T", position=position, dimension=dimension)
        return cp

    def with_axes(self, main_axes):
        """
        Returns a deepcopy of this Space, but with "T" added to the given position and set to the provided dimension.

        Args:
            main_axes (Optional[List[Str]]): A list of names of main axes for this Space in the correct order.
                E.g. ["B", "T"] for adding a batch and a time rank.
                Alternatively to pure names, a tuple can be passed in for a name/dimension pair giving the exact
                dimension of the axis, e.g. [("B", 500), "T"] would create a Space with batch size 500 and a time
                axis of unknown dimension.

        Returns:
            Space: The deepcopy of this Space, but with "T" axis.
        """
        cp = self.copy()
        if main_axes is not None:
            # If `main_axes` is already taken from another Space.
            if isinstance(main_axes, OrderedDict):
                main_axes = list(main_axes.items())
            for main_axis in force_list(main_axes):
                if isinstance(main_axis, (tuple, list)):
                    cp._add_main_axis(main_axis[0], position=-1, dimension=main_axis[1])
                else:
                    cp._add_main_axis(main_axis, position=-1)
        return cp

    def strip_axes(self):
        """
        Returns a deepcopy of this Space, but with all main axes removed.

        Returns:
            Space: The deepcopy of this Space, but without any main axis.
        """
        cp = self.copy()
        if hasattr(self, "main_axes"):
            for axis in self.main_axes:
                cp._remove_main_axis(axis)
        return cp

    @abstractmethod
    def create_variable(self):  #, name, is_input_feed=False, is_python=False, local=False, **kwargs):
        """
        Returns a numpy variable that matches the space's shape.

        #Args:
        #    name (str): The name for the variable.

        #    is_input_feed (bool): Whether the returned object should be an input placeholder,
        #        instead of a full variable.

        #    is_python (bool): Whether to create a python-based (np) variable (list) or a backend-specific one.
        #        Note: When using pytorch or tf, `is_python` should be False.

        #    local (bool): Whether the variable must not be shared across the network.
        #        Default: False.

        #Keyword Args:
        #    To be passed on to backend-specific methods (e.g. trainable, initializer, etc..).

        Returns:
            any: A numpy/python variable.
        """
        raise NotImplementedError

    @abstractmethod
    def create_keras_input(self):
        raise NotImplementedError

    def get_top_level_container(self):
        """
        Returns:
            Space: The top-most container containing this Space. This returned top-level container has no more
                parents above it. None if this Space does not belong to a ContainerSpace.
        """
        top_level = top_level_check = self
        while top_level_check is not None:
            top_level = top_level_check
            top_level_check = top_level.parent
        return top_level

    def copy(self):
        """
        Copies this Space safely and returns the copy.

        Returns:
            Space: A copy of this Space, including the stored value (if any).
        """
        parent_safe = None
        if hasattr(self, "parent"):
            parent_safe = self.parent
            self.parent = None

        ret = copy.deepcopy(self)

        if hasattr(self, "parent"):
            self.parent = parent_safe
        return ret

    def _add_main_axis(self, name, position=-1, dimension=None):
        """
        Adds a main_axis for this Space (and of all child Spaces in a ContainerSpace).

        Args:
            name (str): The name of the axis, e.g. "batch".

            position (int): At which position (within the main-axes) shall we add this new one? Negative numbers will
                add the new axis at the nth position before the end.

            dimension (Optional[int]): The exact dimension of this axis (or None for unspecified).
        """
        # Do not allow to insert a main axis within the value-body of the space. All main-axes must come at the
        # beginning of the Space.
        assert position <= len(self.main_axes), \
            "ERROR: Main-axis of {} must be inserted within first {} positions.".format(self, len(self.main_axes))

        #new_axis = name in self.main_axes
        #new_shape = []
        #if hasattr(self, "value"):
        #    new_shape = list(self.get_shape(include_main_axes=True))

        new_main_axes = OrderedDict()
        for i, (key, value) in enumerate(self.main_axes.items()):
            if i == position or (position < 0 and i == len(self.main_axes) + position):
                new_main_axes[name] = dimension or True
                #new_shape = new_shape[:i] + [dimension or 1] + new_shape[i:]
            # In case axis already exists, do not add twice or override with old dimension.
            if key != name:
                new_main_axes[key] = value
        # Special case, add to very end.
        if (position == -1 and len(self.main_axes) == 0) or position == len(self.main_axes):
            new_main_axes[name] = dimension or True
            #if not new_axis:
            #    new_shape.append(dimension or 1)
        self.main_axes = new_main_axes
        # Recheck time-major flag.
        #self.time_major = True if "T" in self.main_axes and list(self.main_axes.keys()).index("T") == 0 else False
        # Change our value (add axis at given position).
        if hasattr(self, "value") and self.value is not None:
            new_shape = list(self.get_shape(include_main_axes=True, main_axis_value=-1))
            self.value = np.reshape(self.value, newshape=new_shape)

    def _remove_main_axis(self, name):
        if name not in self.main_axes:
            return
        del self.main_axes[name]

        # Recheck time-major flag.
        #self.time_major = True if "T" in self.main_axes and list(self.main_axes.keys()).index("T") == 0 else False

        # Change our value (remove axis at given position -> can only remove if its dimension is 1?).
        if hasattr(self, "value") and self.value is not None:
            new_shape = [i if i is not None else -1 for i in self.get_shape(include_main_axes=True)]
            self.value = np.reshape(self.value, newshape=new_shape)

    @abstractmethod
    def __repr__(self):
        raise NotImplementedError

    @abstractmethod
    def __eq__(self, other):
        raise NotImplementedError

    def _get_np_shape(self, size=None):
        """
        Helper to determine which shape one should pass to the numpy random
        funcs when sampling from this Space. Depends on `size`, the `shape` of
        this Space, and the configured `main_axes`.

        Args:
            size: See `self.sample()`.

        Returns:
            Tuple[int]: Shape to use for numpy random sampling.
        """
        # Default dims according to self.main_axes (use one for undefined dimensions).
        if size is None:
            return tuple([i if i is not None else 1 for i in self.get_shape(include_main_axes=True)])

        # With one axis.
        if isinstance(size, int):
            assert len(self.main_axes) == 1,\
                "ERROR: `size` must be a tuple of len {} (number of main-axes)!".format(len(self.main_axes))
            return (size,) + self.shape

        # With one or more axes (given as tuple); use the given sizes for the main axes.
        elif isinstance(size, (tuple, list)):
            assert len(size) == len(self.main_axes),\
                "ERROR: `size` must be of len {} (number of main-axes)!".format(len(self.main_axes))
            return tuple(size) + self.shape

        raise SurrealError("`size` must be int or tuple/list!")

    @classmethod
    def from_spec(cls, spec=None, **kwargs):
        """
        Handles special case that we are trying to construct a Space from a not-yet ready "variables:.." specification.
        In this case, returns None, in all other cases, constructs the Space from_spec as usual.
        """
        if isinstance(spec, str) and re.search(r'^variables:', spec):
            return None
        return super(Space, cls).make(spec, **kwargs)

    # TODO: Same procedure as for DataOpRecords. Maybe unify somehow (misc ancestor class: IDable).
    @staticmethod
    def get_id():
        Space._ID += 1
        return Space._ID

    def __hash__(self):
        return hash(self.id)
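
A usage sketch based on the docstrings above (Float is assumed to be a concrete Space subclass from the same library; shown as comments because the abstract base cannot be instantiated directly):

# s = Float(shape=(3, 2), main_axes=[("B", 500)])   # batch axis with fixed size 500
# s.rank             # -> 2 (main axes are not counted)
# s.reduction_axes   # -> [-1, -2]
# batch = s.sample(size=10)                  # a batch of 10 samples, shape (10, 3, 2)
# s_bt = s.strip_axes().with_batch().with_time(position=1)  # main axes ["B", "T"]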
Esempio n. 43
0
class ChangeModuleAmmo(ContextMenuCombined):

    def __init__(self):
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()
        # Format: {type ID: set of loadable charges}
        self.loadableChargesCache = {}
        # Translations for the missile categories, as the text here is auto-generated via damage attributes
        self.ddMissileChargeCatTrans = {
            'em': _t('EM'),
            'thermal': _t('Thermal'),
            'explosive': _t('Explosive'),
            'kinetic': _t('Kinetic'),
            'mixed': _t('Mixed')}
        self.oreChargeCatTrans = OrderedDict([
            ('a1', _t('Asteroid Simple')),
            ('a2', _t('Asteroid Coherent')),
            ('a3', _t('Asteroid Variegated')),
            ('a4', _t('Asteroid Complex')),
            ('a5', _t('Asteroid Abyssal')),
            ('a6', _t('Asteroid Mercoxit')),
            ('r4', _t('Moon Ubiquitous')),
            ('r8', _t('Moon Common')),
            ('r16', _t('Moon Uncommon')),
            ('r32', _t('Moon Rare')),
            ('r64', _t('Moon Exceptional')),
            ('misc', _t('Misc'))])

    def display(self, callingWindow, srcContext, mainItem, selection):
        if srcContext not in ('fittingModule', 'projectedModule'):
            return False

        if self.mainFrame.getActiveFit() is None:
            return False

        self.mainCharges = self._getAmmo(mainItem)
        if not self.mainCharges:
            return False

        self.module = mainItem
        self.selection = selection
        self.srcContext = srcContext
        return True

    def getText(self, callingWindow, itmContext, mainItem, selection):
        return _t('Charge')

    def _getAmmo(self, mod):
        if mod.itemID is None:
            return set()
        if mod.itemID not in self.loadableChargesCache:
            self.loadableChargesCache[mod.itemID] = Ammo.getInstance().getModuleFlatAmmo(mod)
        return self.loadableChargesCache[mod.itemID]

    def _addCharge(self, menu, charge):
        id_ = ContextMenuCombined.nextID()
        name = charge.name if charge is not None else _t('Empty')
        self.chargeEventMap[id_] = charge
        item = wx.MenuItem(menu, id_, name)
        menu.Bind(wx.EVT_MENU, self.handleAmmoSwitch, item)
        item.charge = charge
        if charge is not None and charge.iconID is not None:
            bitmap = BitmapLoader.getBitmap(charge.iconID, 'icons')
            if bitmap is not None:
                item.SetBitmap(bitmap)
        return item

    @staticmethod
    def _addSeparator(m, text):
        id_ = ContextMenuCombined.nextID()
        m.Append(id_, '─ %s ─' % text)
        m.Enable(id_, False)

    def getSubMenu(self, callingWindow, context, mainItem, selection, rootMenu, i, pitem):
        msw = 'wxMSW' in wx.PlatformInfo
        menu = wx.Menu()
        self.chargeEventMap = {}
        modType, chargeDict = Ammo.getInstance().getModuleStructuredAmmo(self.module, ammo=self.mainCharges)
        if modType == 'ddTurret':
            self._addSeparator(menu, _t('Long Range'))
            menuItems = []
            for charges in chargeDict.values():
                if len(charges) == 1:
                    menuItems.append(self._addCharge(rootMenu if msw else menu, charges[0]))
                else:
                    baseCharge = charges[0]
                    menuItem = self._addCharge(rootMenu if msw else menu, baseCharge)
                    menuItems.append(menuItem)
                    subMenu = wx.Menu()
                    subMenu.Bind(wx.EVT_MENU, self.handleAmmoSwitch)
                    menuItem.SetSubMenu(subMenu)
                    self._addSeparator(subMenu, _t('Less Damage'))
                    for charge in charges:
                        subMenu.Append(self._addCharge(rootMenu if msw else subMenu, charge))
                    self._addSeparator(subMenu, _t('More Damage'))
            for menuItem in menuItems:
                menu.Append(menuItem)
            self._addSeparator(menu, _t('Short Range'))
        elif modType == 'ddMissile':
            menuItems = []
            for chargeCatName, charges in chargeDict.items():
                menuItem = wx.MenuItem(menu, wx.ID_ANY, self.ddMissileChargeCatTrans.get(chargeCatName, chargeCatName))
                bitmap = BitmapLoader.getBitmap("%s_small" % chargeCatName, "gui")
                if bitmap is not None:
                    menuItem.SetBitmap(bitmap)
                menuItems.append(menuItem)
                subMenu = wx.Menu()
                subMenu.Bind(wx.EVT_MENU, self.handleAmmoSwitch)
                menuItem.SetSubMenu(subMenu)
                self._addSeparator(subMenu, _t('Less Damage'))
                for charge in charges:
                    subMenu.Append(self._addCharge(rootMenu if msw else subMenu, charge))
                self._addSeparator(subMenu, _t('More Damage'))
            for menuItem in menuItems:
                menu.Append(menuItem)
        elif modType == 'miner':
            menuItems = []
            for catHandle, catLabel in self.oreChargeCatTrans.items():
                charges = chargeDict.get(catHandle)
                if not charges:
                    continue
                if len(charges) == 1:
                    menuItems.append(self._addCharge(rootMenu if msw else menu, charges[0]))
                else:
                    menuItem = wx.MenuItem(menu, wx.ID_ANY, catLabel)
                    menuItems.append(menuItem)
                    subMenu = wx.Menu()
                    subMenu.Bind(wx.EVT_MENU, self.handleAmmoSwitch)
                    menuItem.SetSubMenu(subMenu)
                    for charge in charges:
                        subMenu.Append(self._addCharge(rootMenu if msw else subMenu, charge))
            for menuItem in menuItems:
                menu.Append(menuItem)
        elif modType == 'general':
            for charge in chargeDict['general']:
                menu.Append(self._addCharge(rootMenu if msw else menu, charge))
        menu.Append(self._addCharge(rootMenu if msw else menu, None))
        return menu

    def handleAmmoSwitch(self, event):
        charge = self.chargeEventMap.get(event.Id, False)
        if charge is False:
            event.Skip()
            return

        fitID = self.mainFrame.getActiveFit()
        sFit = Fit.getInstance()
        fit = sFit.getFit(fitID)
        # Switch in selection or all modules, depending on modifier key state and settings
        switchAll = sFit.serviceFittingOptions['ammoChangeAll'] is not (wx.GetMouseState().GetModifiers() in (wx.MOD_ALT, wx.MOD_CONTROL))
        # Resolve command type and module container once for the source context.
        if self.srcContext == 'fittingModule':
            command = cmd.GuiChangeLocalModuleChargesCommand
            modContainer = fit.modules
        elif self.srcContext == 'projectedModule':
            command = cmd.GuiChangeProjectedModuleChargesCommand
            modContainer = fit.projectedModules
        else:
            return
        if switchAll:
            positions = getSimilarModPositions(modContainer, self.module)
        else:
            positions = []
            for position, mod in enumerate(modContainer):
                if mod in self.selection:
                    modCharges = self._getAmmo(mod)
                    if modCharges.issubset(self.mainCharges):
                        positions.append(position)
        self.mainFrame.command.Submit(command(
                fitID=fitID,
                positions=positions,
                chargeItemID=charge.ID if charge is not None else None))
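The chargeEventMap pattern above maps generated wx menu-item IDs to their payloads; since None is a legal payload (the "Empty" entry), a False sentinel distinguishes "not one of our menu items" from "empty charge". A minimal sketch of that dispatch without wx (all names hypothetical):

charge_event_map = {}  # mirrors self.chargeEventMap above

def add_charge(menu_id, charge):
    # None is a legal payload (the "Empty" entry), so the lookup below must
    # distinguish "unknown id" from "payload is None".
    charge_event_map[menu_id] = charge

def handle_switch(event_id):
    charge = charge_event_map.get(event_id, False)  # False = "not one of ours"
    if charge is False:
        return "skipped"  # wx would call event.Skip() here
    return "switch to %s" % (charge if charge is not None else "Empty")

add_charge(101, "Antimatter Charge S")
add_charge(102, None)
print(handle_switch(101))  # -> switch to Antimatter Charge S
print(handle_switch(102))  # -> switch to Empty
print(handle_switch(999))  # -> skipped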
Esempio n. 44
0
    def apply_updates(self):
        devices = list(self._dev_grads.keys())
        total_grads = sum(len(grads) for grads in self._dev_grads.values())
        assert len(devices) >= 1 and total_grads >= 1
        ops = []
        with absolute_name_scope(self.scope):

            # Cast gradients to FP32 and calculate partial sum within each device.
            dev_grads = OrderedDict()  # device => [(grad, var), ...]
            for dev_idx, dev in enumerate(devices):
                with tf.name_scope('ProcessGrads%d' % dev_idx), tf.device(dev):
                    sums = []
                    for gv in zip(*self._dev_grads[dev]):
                        assert all(v is gv[0][1] for g, v in gv)
                        g = [tf.cast(g, tf.float32) for g, v in gv]
                        g = g[0] if len(g) == 1 else tf.add_n(g)
                        sums.append((g, gv[0][1]))
                    dev_grads[dev] = sums

            # Sum gradients across devices.
            if len(devices) > 1:
                with tf.name_scope('SumAcrossGPUs'), tf.device(None):
                    for var_idx in range(len(self._grad_shapes)):
                        g = [dev_grads[dev][var_idx][0] for dev in devices]
                        g = tf.contrib.nccl.all_sum(g)
                        for dev, gg in zip(devices, g):
                            dev_grads[dev][var_idx] = (
                                gg, dev_grads[dev][var_idx][1])

            # Apply updates separately on each device.
            for dev_idx, (dev, grads) in enumerate(dev_grads.items()):
                with tf.name_scope('ApplyGrads%d' % dev_idx), tf.device(dev):

                    # Scale gradients as needed.
                    if self.use_loss_scaling or total_grads > 1:
                        with tf.name_scope('Scale'):
                            coef = tf.constant(np.float32(1.0 / total_grads),
                                               name='coef')
                            coef = self.undo_loss_scaling(coef)
                            grads = [(g * coef, v) for g, v in grads]

                    # Check for overflows.
                    with tf.name_scope('CheckOverflow'):
                        grad_ok = tf.reduce_all(
                            tf.stack([
                                tf.reduce_all(tf.is_finite(g))
                                for g, v in grads
                            ]))

                    # Update weights and adjust loss scaling.
                    with tf.name_scope('UpdateWeights'):
                        opt = self._dev_opt[dev]
                        ls_var = self.get_loss_scaling_var(dev)
                        if not self.use_loss_scaling:
                            ops.append(
                                tf.cond(grad_ok,
                                        lambda: opt.apply_gradients(grads),
                                        tf.no_op))
                        else:
                            ops.append(
                                tf.cond(
                                    grad_ok,
                                    lambda: tf.group(
                                        tf.assign_add(ls_var, self.loss_scaling_inc),
                                        opt.apply_gradients(grads)),
                                    lambda: tf.group(
                                        tf.assign_sub(ls_var, self.loss_scaling_dec))))

                    # Report statistics on the last device.
                    if dev == devices[-1]:
                        with tf.name_scope('Statistics'):
                            ops.append(
                                autosummary(self.id + '/learning_rate',
                                            self.learning_rate))
                            ops.append(
                                autosummary(self.id + '/overflow_frequency',
                                            tf.where(grad_ok, 0, 1)))
                            if self.use_loss_scaling:
                                ops.append(
                                    autosummary(self.id + '/loss_scaling_log2',
                                                ls_var))

            # Group everything into a single op.
            return tf.group(*ops, name='TrainingOp')
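The zip(*...) grouping above regroups the per-variable partial gradients from each minibatch chunk so they can be summed before being applied. A plain NumPy sketch of that regrouping (names illustrative, no TensorFlow needed):

import numpy as np
from collections import OrderedDict

v1, v2 = "var1", "var2"  # stand-ins for tf.Variable handles
dev_grad_lists = [
    [(np.ones(2), v1), (np.ones(3), v2)],           # grads from minibatch chunk 0
    [(np.ones(2) * 2, v1), (np.ones(3) * 2, v2)],   # grads from minibatch chunk 1
]

sums = OrderedDict()
for gv in zip(*dev_grad_lists):               # gv = ((g0, v), (g1, v))
    assert all(v is gv[0][1] for _, v in gv)  # same variable in every slot
    sums[gv[0][1]] = sum(g for g, _ in gv)    # partial sum, like tf.add_n above

print(sums[v1])  # -> [3. 3.]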
Esempio n. 45
0
def read_infile(file_path):

    option_dict = OrderedDict()
    optional_opts = OrderedDict()

    optional_opts['start'] = ['0']
    optional_opts['stop'] = ['-1']
    optional_opts['unitcell'] = ['unitcell.dat']
    optional_opts['pop'] = ['pop.dat']
    optional_opts['center'] = ['center.dat']
    optional_opts['origin'] = ['origin.dat']
    optional_opts['O_idxs'] = ['O_idxs.dat']
    optional_opts['theta'] = ['theta.dat']
    optional_opts['phi'] = ['phi.dat']
    optional_opts['psi'] = ['psi.dat']
    optional_opts['xx1_wat'] = ['xx1_wat.dat']
    optional_opts['xx2_wat'] = ['xx2_wat.dat']
    optional_opts['yy_wat'] = ['yy_wat.dat']
    optional_opts['zz_wat'] = ['zz_wat.dat']
    optional_opts['O_frac'] = ['O_frac.dat']
    optional_opts['H1_frac'] = ['H1_frac.dat']
    optional_opts['H2_frac'] = ['H2_frac.dat']
    optional_opts['frames'] = ['frames.dat']
    optional_opts['dims'] = ['10', '10', '10']
    optional_opts['xx_ref'] = ['None']
    optional_opts['zz_ref'] = ['None']
    optional_opts['water'] = ['water']
    optional_opts['center_sele'] = ['None']

    required_opts = ['trajin', 'parm', 'xx', 'zz']

    if not os.path.exists(file_path):
        raise IOError("File %s not found." % file_path)

    with open(file_path, 'r') as file:
        for line in file:
            l = line.strip().split()

            if len(l) == 0:
                continue
            if l[0].startswith('#'):
                continue

            if len(l) == 1:
                raise IOError("Option %s not understood." % l[0])
            else:
                option_dict[l[0]] = list()
                for val in l[1:]:
                    option_dict[l[0]].append(val)

    for key in required_opts:
        if key not in option_dict:
            raise IOError("Keyword %s not found." % key)

    for key, value in optional_opts.items():
        if key not in option_dict:
            option_dict[key] = value

    option_dict['dims'][0] = float(option_dict['dims'][0])
    option_dict['dims'][1] = float(option_dict['dims'][1])
    option_dict['dims'][2] = float(option_dict['dims'][2])

    option_dict['start'][0] = int(option_dict['start'][0])
    option_dict['stop'][0] = int(option_dict['stop'][0])

    sele_opts = ['xx', 'zz', 'center_sele', 'xx_ref', 'zz_ref', 'water']

    # Re-insert the spaces that were lost when multi-word selection strings were split.
    for sele_opt in sele_opts:
        for i in range(len(option_dict[sele_opt]) - 1):
            option_dict[sele_opt][i] += " "

    return option_dict
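The defaults handling above boils down to: parse user options first, then copy in each optional default only if the key is absent. A minimal sketch with made-up option values:

from collections import OrderedDict

optional_opts = OrderedDict([
    ('start', ['0']), ('stop', ['-1']), ('dims', ['10', '10', '10'])])
option_dict = OrderedDict([('stop', ['500'])])  # pretend: parsed from the file

# Only fill in defaults for options the user did not set.
for key, value in optional_opts.items():
    if key not in option_dict:
        option_dict[key] = value

print(list(option_dict.items()))
# -> [('stop', ['500']), ('start', ['0']), ('dims', ['10', '10', '10'])]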
Esempio n. 46
0
    def load_state_dict(self, state_dict: OrderedDict):
        for key, val in state_dict.items():
            self.variables_dict[key].load_data_in_place(val)
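A minimal sketch of the in-place state-dict loading above; the Variable class here is a hypothetical stand-in for whatever variables_dict actually holds:

from collections import OrderedDict
import numpy as np

class Variable:
    """Hypothetical stand-in for the values of variables_dict above."""
    def __init__(self, data):
        self.data = data
    def load_data_in_place(self, val):
        # Overwrite the existing buffer without rebinding the attribute.
        np.copyto(self.data, val)

variables_dict = {'w': Variable(np.zeros(3))}
state_dict = OrderedDict([('w', np.array([1.0, 2.0, 3.0]))])
for key, val in state_dict.items():
    variables_dict[key].load_data_in_place(val)
print(variables_dict['w'].data)  # -> [1. 2. 3.]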
Esempio n. 47
0
def sg_ips(accounts):
    '''Collect trusted ips'''

    try:
        from mpl_toolkits.basemap import Basemap
    except ImportError:
        print(
            "ERROR: You must install basemap for mpl_toolkits. There is no pip for it."
        )
        print("See https://matplotlib.org/basemap/users/installing.html")
        print("\nSteps:")
        print("mkdir -p tmp; cd tmp")
        print(
            "curl https://codeload.github.com/matplotlib/basemap/tar.gz/v1.1.0 --output basemap-1.1.0.tar.gz"
        )
        print("tar -zxvf basemap-1.1.0.tar.gz")
        print("cd basemap-1.1.0/")
        print("python setup.py install")
        print("cd ..")
        print("rm -rf basemap-1.1.0*")
        print("cd ..")
        exit(-1)

    import geoip2.database
    import matplotlib as mpl
    mpl.use('TkAgg')
    import matplotlib.pyplot as plt

    # Used to sort by country
    cidr_dict = {}

    # Locations for graphing
    latlong = {'longitude': [], 'latitude': []}

    try:
        asn_reader = geoip2.database.Reader('./data/GeoLite2-ASN.mmdb')
        city_reader = geoip2.database.Reader('./data/GeoLite2-City.mmdb')
    except OSError:
        # geoip files do not exist. Tell the user.
        print(
            "ERROR: You must download the geoip files GeoLite2-ASN.mmdb and GeoLite2-City.mmdb"
        )
        print(
            "from https://dev.maxmind.com/geoip/geoip2/geolite2/ and put them in ./data/"
        )
        print("\nSteps:")
        print("mkdir -p data; cd data")
        print("\n# Get city data")
        print(
            "curl http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz --output GeoLite2-City.tar.gz"
        )
        print("tar -zxvf GeoLite2-City.tar.gz")
        print("mv GeoLite2-City_*/GeoLite2-City.mmdb .")
        print("\n# Get ASN data")
        print(
            "curl http://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN.tar.gz --output GeoLite2-ASN.tar.gz"
        )
        print("tar -zxvf GeoLite2-ASN.tar.gz")
        print("mv GeoLite2-ASN*/GeoLite2-ASN.mmdb .")
        print("\n# Clean up")
        print("rm -rf GeoLite2-City_*")
        print("rm -rf GeoLite2-ASN_*")
        print("rm -rf GeoLite2-*.tar.gz")
        print("cd ..")
        exit(-1)

    # Dictionary containing cidr as the key, and the security group descriptions
    # as set for the value
    cidrs = {}

    # Get the cidrs used by each account
    for account in accounts:
        get_cidrs_for_account(account, cidrs)

    # Get info about each cidr
    for cidr in cidrs:
        # Get description text from security groups
        description = ""
        if len(cidrs[cidr]) > 0:
            description = "|".join(cidrs[cidr])
        description = description.encode('ascii', 'ignore').decode('ascii')

        ip = IPNetwork(cidr)
        if ip.size > 2048:
            print('WARNING: Large CIDR {} contains {} IPs in it'.format(
                cidr, ip.size))

        # Look up the cidr in the databases
        location = city_reader.city(str(ip.ip))
        try:
            asn = asn_reader.asn(str(ip.ip))
            isp = asn.autonomous_system_organization
            # Convert to ascii
            isp = isp.encode('ascii', 'ignore').decode('ascii')
        except geoip2.errors.AddressNotFoundError:
            print('WARNING: Unknown CIDR {}'.format(cidr))
            isp = "Unknown"

        # Collect the longitude and latitude locations for graphing
        latlong['longitude'].append(location.location.longitude)
        latlong['latitude'].append(location.location.latitude)

        # Format the place name
        location_name_parts = []
        city = location.city.name
        state = location.subdivisions.most_specific.name
        country = location.country.name

        # Reduce amount of text
        if country == "United States":
            country = "US"
        elif country == "United Kingdom":
            country = "UK"

        if isp == "MCI Communications Services d/b/a Verizon Business":
            isp = "MCI"
        isp = isp.replace(", Inc.", "")
        isp = isp.replace(" Ltd. ", "")
        isp = isp.replace("Group PLC", "")
        isp = isp.replace("Akamai International B.V. ", "Akamai")

        if city is not None:
            city = city.encode('ascii', 'ignore').decode('ascii')
            location_name_parts.append(city)

        if state is not None:
            state = state.encode('ascii', 'ignore').decode('ascii')
            location_name_parts.append(state)

        if country is not None:
            country = country.encode('ascii', 'ignore').decode('ascii')
            location_name_parts.append(country)

        location_name = ', '.join(location_name_parts)
        if location_name == "":
            location_name = "Unknown"
        location_name = location_name.encode('ascii', 'ignore').decode('ascii')

        # Collect information about the cidrs in a way that can be sorted
        cidr_dict["{}-{}-{}-{}".format(country, state, city, cidr)] = {
            'cidr': cidr,
            'description': description,
            'location': location_name,
            'isp': isp
        }

    # Sort the cidrs
    sorted_cidrs = OrderedDict(sorted(cidr_dict.items()))

    # Print them in sorted order
    for _, cidr in sorted_cidrs.items():
        print('{}\t {}\t {}\t {}'.format(cidr['cidr'].ljust(18),
                                         cidr['description'].ljust(20),
                                         cidr['location'].ljust(50),
                                         cidr['isp']))

    # Save image
    fig, ax = plt.subplots()
    earth = Basemap(ax=ax)
    earth.drawcoastlines(color='#778877', linewidth=0.5)
    ax.scatter(
        latlong['longitude'],
        latlong['latitude'],
        15,  # size
        c='red',
        alpha=1,
        zorder=10)
    ax.set_xlabel("Trusted IP locations")
    fig.set_size_inches(8, 6)
    fig.savefig('trusted_ips.png', pad_inches=0, bbox_inches='tight')
    print("Image saved to {}".format(
        path.join(path.dirname(path.realpath('__file__')), 'trusted_ips.png')))
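The composite key "{country}-{state}-{city}-{cidr}" built above makes a plain OrderedDict(sorted(...)) order records by location. A minimal sketch with made-up records:

from collections import OrderedDict

cidr_dict = {
    'US-California-San Jose-8.8.8.0/24': {'cidr': '8.8.8.0/24', 'isp': 'Google'},
    'DE-Hesse-Frankfurt-3.120.0.0/14': {'cidr': '3.120.0.0/14', 'isp': 'Amazon'},
}
sorted_cidrs = OrderedDict(sorted(cidr_dict.items()))
for _, rec in sorted_cidrs.items():
    print('{}\t{}'.format(rec['cidr'].ljust(18), rec['isp']))
# The DE row prints before US because keys sort lexicographically country-first.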
Esempio n. 48
0
def plot_horoscope(time, location):
    # Alias the parameters to the short names used throughout this function
    # (the original body refers to `tt` and `loc_coord`).
    tt = time
    loc_coord = location

    fig = plt.figure(figsize=(14, 8))

    plt.axis('equal')
    plt.axis('off')

    asc = get_ascendant(tt, loc_coord)
    print('Ascendant \t\t{:.2f} \t{}'.format(asc, astrol_coords(asc)))

    # The whole chart is rotated by (180 - asc) degrees, which places the
    # ascendant at 180 deg, i.e. on the left edge of the wheel.
    plt.scatter(
        35 * math.cos(math.radians((asc.value + (180 - asc.value)) % 360)),
        35 * math.sin(math.radians((asc.value + (180 - asc.value)) % 360)),
        marker='o',
        color='white',
        s=600,
        zorder=88)
    plt.text(35 * math.cos(math.radians(
        (asc.value + (180 - asc.value)) % 360)),
             35 * math.sin(math.radians(
                 (asc.value + (180 - asc.value)) % 360)),
             'Asc',
             fontsize=14,
             ha='center',
             va='center',
             color='0.5',
             zorder=89)

    dsc = ((asc.value + 180) % 360) * u.deg
    print('Descendant \t\t{:.2f} \t{}'.format(dsc, astrol_coords(dsc)))

    mid = get_midheaven(tt, loc_coord)
    print('Midheaven \t\t{:.2f} \t{}'.format(mid, astrol_coords(mid)))

    plt.scatter(
        35 * math.cos(math.radians((mid.value + (180 - asc.value)) % 360)),
        35 * math.sin(math.radians((mid.value + (180 - asc.value)) % 360)),
        marker='o',
        color='white',
        s=600,
        zorder=88)
    plt.text(35 * math.cos(math.radians(
        (mid.value + (180 - asc.value)) % 360)),
             35 * math.sin(math.radians(
                 (mid.value + (180 - asc.value)) % 360)),
             'Mᶜ',
             fontsize=14,
             ha='center',
             va='center',
             color='0.5',
             zorder=89)

    ic = ((mid.value + 180) % 360) * u.deg
    print('Imum coeli \t\t{:.2f} \t{}'.format(ic, astrol_coords(ic)))

    # Cycle over houses
    cusps = get_cusps(asc, mid)
    for i in range(len(cusps[:-1])):
        c = cusps[i]
        if i + 1 in [1, 4, 7, 10]:
            style = '-'
        else:
            style = ':'
        plt.plot([
            10 * math.cos(math.radians(c % 360)),
            40 * math.cos(math.radians(c % 360))
        ], [
            10 * math.sin(math.radians(c % 360)),
            40 * math.sin(math.radians(c % 360))
        ],
                 linestyle=style,
                 color='0.5')
        plt.text(20 * math.cos(math.radians((c + 15) % 360)),
                 20 * math.sin(math.radians((c + 15) % 360)),
                 i + 1,
                 fontsize=10,
                 ha='center',
                 va='center',
                 color='0.5')

    # Cycle over zodiac
    for i, s in zip([0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330], [
            ' ♈ ', ' ♉ ', ' ♊ ', ' ♋ ', ' ♌ ', ' ♍ ', ' ♎ ', ' ♏ ', ' ♐ ',
            ' ♑ ', ' ♒ ', ' ♓ '
    ]):
        i += 180 - asc.value
        plt.plot([
            40 * math.cos(math.radians(
                (i) % 360)), 60 * math.cos(math.radians((i) % 360))
        ], [
            40 * math.sin(math.radians(
                (i) % 360)), 60 * math.sin(math.radians((i) % 360))
        ],
                 color='0.5')
        for j in [10, 20]:
            plt.plot([
                40 * math.cos(math.radians(
                    (i + j) % 360)), 44 * math.cos(math.radians((i + j) % 360))
            ], [
                40 * math.sin(math.radians(
                    (i + j) % 360)), 44 * math.sin(math.radians((i + j) % 360))
            ],
                     linestyle='-',
                     color='0.5')
        for j in [5, 15, 25]:
            plt.plot([
                40 * math.cos(math.radians(
                    (i + j) % 360)), 42 * math.cos(math.radians((i + j) % 360))
            ], [
                40 * math.sin(math.radians(
                    (i + j) % 360)), 42 * math.sin(math.radians((i + j) % 360))
            ],
                     linestyle='-',
                     color='0.5')
        for j in [
                1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 21, 22,
                23, 24, 26, 27, 28, 29
        ]:
            plt.plot([
                40 * math.cos(math.radians(
                    (i + j) % 360)), 41 * math.cos(math.radians((i + j) % 360))
            ], [
                40 * math.sin(math.radians(
                    (i + j) % 360)), 41 * math.sin(math.radians((i + j) % 360))
            ],
                     linestyle='-',
                     color='0.5')

        if s in [' ♈ ', ' ♌ ', ' ♐ ']:
            element = 'tomato'  # fire
        elif s in [' ♉ ', ' ♍ ', ' ♑ ']:
            element = 'darkseagreen'  # earth
        elif s in [' ♊ ', ' ♎ ', ' ♒ ']:
            element = 'goldenrod'  # air
        elif s in [' ♋ ', ' ♏ ', ' ♓ ']:
            element = 'royalblue'  # water

        plt.text(50 * math.cos(math.radians((i + 15) % 360)),
                 50 * math.sin(math.radians((i + 15) % 360)),
                 s,
                 fontsize=30,
                 ha='center',
                 va='center',
                 color=element)

    # circles
    plot_circle(10)
    plot_circle(40)
    plot_circle(60)

    #planets = ['☉ Sun', '☾ Moon', '☿ Mercury', '♀ Venus', '♂ Mars', '♃ Jupiter', '♄ Saturn', '⛢ Uranus', '♆ Neptune']

    planets = {
        'Sun': {
            'symbol': ' ☉ ',
            'pos': 1
        },
        'Moon': {
            'symbol': ' ☾ ',
            'pos': 2
        },
        'Mercury': {
            'symbol': ' ☿ ',
            'pos': 3
        },
        'Venus': {
            'symbol': ' ♀ ',
            'pos': 4
        },
        'Mars': {
            'symbol': ' ♂ ',
            'pos': 5
        },
        'Jupiter': {
            'symbol': ' ♃ ',
            'pos': 6
        },
        'Saturn': {
            'symbol': ' ♄ ',
            'pos': 7
        },
        'Uranus': {
            'symbol': ' ♅ ',
            'pos': 8
        },
        'Neptune': {
            'symbol': ' ♆ ',
            'pos': 9
        },
    }

    planets = OrderedDict(sorted(planets.items(), key=lambda t: t[1]['pos']))

    # Cycle over planets
    for name in planets:
        coord = get_body(name, tt)
        coord.equinox = tt

        planets[name]['coord'] = coord
        planets[name]['ang'] = coord.geocentrictrueecliptic.lon
        planets[name]['sign'] = get_zodiac(planets[name]['ang'])
        planets[name]['astrol'] = astrol_coords(planets[name]['ang'])
        planets[name]['house'] = get_house(planets[name]['ang'], asc, mid)

    txt = ''
    for name in planets:
        print('{: >10}\t\t{:.2f} \t{} \t{:.0f}'.format(name,
                                                       planets[name]['ang'],
                                                       planets[name]['astrol'],
                                                       planets[name]['house']))

        planets[name]['x_pos'] = math.cos(
            math.radians(planets[name]['ang'].deg + (180 - asc.value)))
        planets[name]['y_pos'] = math.sin(
            math.radians(planets[name]['ang'].deg + (180 - asc.value)))

        plt.scatter(40 * planets[name]['x_pos'],
                    40 * planets[name]['y_pos'],
                    marker='o',
                    color='white',
                    s=300,
                    zorder=99)
        # plt.text(40*planets[name]['x_pos']+2,
        #          40*planets[name]['y_pos'],
        #          name+'\n'+planets[name]['astrol']+'\n'+str(planets[name]['house']),
        #          zorder=999)
        plt.text(40 * planets[name]['x_pos'],
                 40 * planets[name]['y_pos'],
                 planets[name]['symbol'],
                 fontsize=15,
                 color='orange',
                 ha='center',
                 va='center',
                 zorder=999)

        #txt += planets[name]['symbol']+' '+name+'\t'+planets[name]['astrol']+'\t'+str(planets[name]['house'])+'\n'
        txt += '{} {: <10}  {}  H{:.0f}\n'.format(planets[name]['symbol'],
                                                  name,
                                                  planets[name]['astrol'],
                                                  planets[name]['house'])

    plt.text(60, 40, txt, zorder=999, fontname='DejaVu Sans Mono')

    # Aspects
    aspects = {
        'conjunction': {
            'angle': 0,
            'color': 'green'
        },
        'sextile': {
            'angle': 60,
            'color': 'lightseagreen'
        },
        'square': {
            'angle': 90,
            'color': 'red'
        },
        'trine': {
            'angle': 120,
            'color': 'blue'
        },
        'opposition': {
            'angle': 180,
            'color': 'purple'
        },
    }
    aspects = OrderedDict(sorted(aspects.items(), key=lambda t: t[1]['angle']))
    all_aspects = []
    for name in planets:
        ang = planets[name]['ang']
        for name2 in planets:
            if name2 == name:
                continue
            ang2 = planets[name2]['ang']
            aspect = abs(ang.value - ang2.value)
            if aspect > 180:
                aspect = 360 - aspect
            olim = 8
            if name in ['Sun', 'Moon'] or name2 in ['Sun', 'Moon']:
                olim = 10

            for aname in aspects:
                if aspects[aname]['angle'] - olim <= abs(
                        aspect) <= aspects[aname]['angle'] + olim:
                    n1, n2 = sorted([name, name2])

                    string = '{} and {} in {} orb {:.0f}°'.format(
                        n1, n2, aname, aspect - aspects[aname]['angle'])

                    if string not in all_aspects:
                        print(string)

                        plt.plot([
                            40 * planets[name]['x_pos'],
                            40 * planets[name2]['x_pos']
                        ], [
                            40 * planets[name]['y_pos'],
                            40 * planets[name2]['y_pos']
                        ],
                                 color=aspects[aname]['color'])

                        all_aspects += [string]

    asp_txt = '\n'.join(sorted(all_aspects))

    plt.text(60, -70, asp_txt, zorder=999, fontname='DejaVu Sans Mono')

    plt.tight_layout()
    plt.show()
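Both planets and aspects above use the same idiom: sort a dict of dicts by a nested field and freeze the resulting order in an OrderedDict. A minimal sketch:

from collections import OrderedDict

aspects = {
    'square': {'angle': 90, 'color': 'red'},
    'conjunction': {'angle': 0, 'color': 'green'},
    'trine': {'angle': 120, 'color': 'blue'},
}
aspects = OrderedDict(sorted(aspects.items(), key=lambda t: t[1]['angle']))
print(list(aspects))  # -> ['conjunction', 'square', 'trine']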
Esempio n. 49
0
def dash_report(
    info=None,
    sessions=None,
    tags=None,
    signals=None,
    recreate_plots=None,
    video_only=None,
):
    """Create a web report dash app.

    Parameters
    ----------

    info : dict
        patient info
    sessions : list
        list of session dirs
    tags : list
        tags for dynamic gait trials
    signals : ProgressSignals
        instance of ProgressSignals, used to send progress updates across threads
    recreate_plots : bool
        force recreation of report
    video_only : bool
        Create a video-only report. C3D data will not be read.
    """

    if recreate_plots is None:
        recreate_plots = False

    if video_only is None:
        video_only = False

    # relative width of left panel (1-12)
    # 3-session comparison uses narrower video panel
    # LEFT_WIDTH = 8 if len(sessions) == 3 else 7
    LEFT_WIDTH = 8
    VIDS_TOTAL_HEIGHT = 88  # % of browser window height

    if len(sessions) < 1 or len(sessions) > 3:
        raise ValueError('Need a list of one to three sessions')
    is_comparison = len(sessions) > 1
    report_name = _report_name(sessions)
    info = info or sessionutils.default_info()

    # tags for dynamic trials
    # if doing a comparison, pick representative trials only
    dyn_tags = tags or (cfg.eclipse.repr_tags if is_comparison else cfg.eclipse.tags)
    # this tag will be shown in the menu for static trials
    static_tag = 'Static'

    # get the camera labels
    # reduce to a set, since there may be several labels for given id
    camera_labels = set(cfg.general.camera_labels.values())
    # add camera labels for overlay videos
    # XXX: may cause trouble if labels already contain the string 'overlay'
    camera_labels_overlay = [lbl + ' overlay' for lbl in camera_labels]
    camera_labels.update(camera_labels_overlay)
    # build dict of videos for given tag / camera label
    # videos will be listed in session order
    vid_urls = dict()
    all_tags = dyn_tags + [static_tag] + cfg.eclipse.video_tags
    for tag in all_tags:
        vid_urls[tag] = dict()
        for camera_label in camera_labels:
            vid_urls[tag][camera_label] = list()

    # collect all session enfs into dict
    enfs = {session: dict() for session in sessions}
    data_enfs = list()  # enfs that are used for data
    signals.progress.emit('Collecting trials...', 0)
    for session in sessions:
        if signals.canceled:
            return None
        enfs[session] = dict(dynamic=dict(), static=dict(), vid_only=dict())
        # collect dynamic trials for each tag
        for tag in dyn_tags:
            dyns = sessionutils.get_enfs(session, tags=tag, trial_type='dynamic')
            if len(dyns) > 1:
                logger.warning('multiple tagged trials (%s) for %s' % (tag, session))
            dyn_trial = dyns[-1:]
            enfs[session]['dynamic'][tag] = dyn_trial  # may be empty list
            if dyn_trial:
                data_enfs.extend(dyn_trial)
        # require at least one dynamic trial for each session
        if not any(enfs[session]['dynamic'][tag] for tag in dyn_tags):
            raise GaitDataError('No tagged dynamic trials found for %s' % (session))
        # collect static trial (at most 1 per session)
        # -prefer enfs that have a corresponding c3d file, even for a video-only report
        # (so that the same static gets used for both video-only and full reports)
        # -prefer the newest enf file
        sts = sessionutils.get_enfs(session, trial_type='static')
        for st in reversed(sts):  # newest first
            st_c3d = sessionutils.enf_to_trialfile(st, '.c3d')
            if op.isfile(st_c3d):
                static_trial = [st]
                break
        else:
            # no c3ds were found - just pick the latest static trial
            static_trial = sts[-1:]
        enfs[session]['static'][static_tag] = static_trial
        if static_trial:
            data_enfs.extend(static_trial)
        # collect video-only dynamic trials
        for tag in cfg.eclipse.video_tags:
            dyn_vids = sessionutils.get_enfs(session, tags=tag)
            if len(dyn_vids) > 1:
                logger.warning(
                    'multiple tagged video-only trials (%s) for %s' % (tag, session)
                )
            enfs[session]['vid_only'][tag] = dyn_vids[-1:]

    # collect all videos for given tag and camera, listed in session order
    signals.progress.emit('Finding videos...', 0)
    for session in sessions:
        for trial_type in enfs[session]:
            for tag, enfs_this in enfs[session][trial_type].items():
                if enfs_this:
                    enf = enfs_this[0]  # only one enf per tag and session
                    for camera_label in camera_labels:
                        overlay = 'overlay' in camera_label
                        real_camera_label = (
                            camera_label[: camera_label.find(' overlay')]
                            if overlay
                            else camera_label
                        )
                        c3d = enf_to_trialfile(enf, 'c3d')
                        vids_this = videos.get_trial_videos(
                            c3d,
                            camera_label=real_camera_label,
                            vid_ext='.ogv',
                            overlay=overlay,
                        )
                        if vids_this:
                            vid = vids_this[0]
                            url = '/static/%s' % op.split(vid)[1]
                            vid_urls[tag][camera_label].append(url)

    # build dcc.Dropdown options list for cameras and tags
    # list cameras which have videos for any tag
    opts_cameras = list()
    for camera_label in sorted(camera_labels):
        if any(vid_urls[tag][camera_label] for tag in all_tags):
            opts_cameras.append({'label': camera_label, 'value': camera_label})
    # list tags which have videos for any camera
    opts_tags = list()
    for tag in all_tags:
        if any(vid_urls[tag][camera_label] for camera_label in camera_labels):
            opts_tags.append({'label': '%s' % tag, 'value': tag})
    # add null entry in case we got no videos at all
    if not opts_tags:
        opts_tags.append({'label': 'No videos', 'value': 'no videos', 'disabled': True})

    # this whole section is only needed if we have c3d data
    if not video_only:
        # see whether we can load report figures from disk
        data_c3ds = [enf_to_trialfile(enffile, 'c3d') for enffile in data_enfs]
        digest = numutils.files_digest(data_c3ds)
        logger.debug('report data digest: %s' % digest)
        # data is always saved into alphabetically first session
        data_dir = sorted(sessions)[0]
        data_fn = op.join(data_dir, 'web_report_%s.dat' % digest)
        if op.isfile(data_fn) and not recreate_plots:
            logger.debug('loading saved report data from %s' % data_fn)
            signals.progress.emit('Loading saved report...', 0)
            with open(data_fn, 'rb') as f:
                saved_report_data = pickle.load(f)
        else:
            saved_report_data = dict()
            logger.debug('no saved data found or recreate forced')

        # make Trial instances for all dynamic and static trials
        # this is currently needed even if saved report is used
        trials_dyn = list()
        trials_static = list()
        _trials_avg = dict()
        for session in sessions:
            _trials_avg[session] = list()
            for tag in dyn_tags:
                if enfs[session]['dynamic'][tag]:
                    if signals.canceled:
                        return None
                    c3dfile = enf_to_trialfile(enfs[session]['dynamic'][tag][0], 'c3d')
                    tri = Trial(c3dfile)
                    trials_dyn.append(tri)
                    _trials_avg[session].append(tri)
            if enfs[session]['static'][static_tag]:
                c3dfile = enf_to_trialfile(enfs[session]['static'][static_tag][0], 'c3d')
                tri = Trial(c3dfile)
                trials_static.append(tri)

        emg_layout = None
        tibial_torsion = dict()

        # stuff that's needed to (re)create the figures
        if not saved_report_data:
            age = None
            if info['hetu'] is not None:
                # compute subject age at session time
                session_dates = [
                    sessionutils.get_session_date(session) for session in sessions
                ]
                ages = [age_from_hetu(info['hetu'], d) for d in session_dates]
                age = max(ages)

            # create Markdown text for patient info
            patient_info_text = '##### %s ' % (
                info['fullname'] if info['fullname'] else 'Name unknown'
            )
            if info['hetu']:
                patient_info_text += '(%s)' % info['hetu']
            patient_info_text += '\n\n'
            # if age:
            #     patient_info_text += 'Age at measurement time: %d\n\n' % age
            if info['report_notes']:
                patient_info_text += info['report_notes']

            model_normaldata = dict()
            avg_trials = list()

            # load normal data for gait models
            signals.progress.emit('Loading normal data...', 0)
            for fn in cfg.general.normaldata_files:
                ndata = normaldata.read_normaldata(fn)
                model_normaldata.update(ndata)
            if age is not None:
                age_ndata_file = normaldata.normaldata_age(age)
                if age_ndata_file:
                    age_ndata = normaldata.read_normaldata(age_ndata_file)
                    model_normaldata.update(age_ndata)

            # make average trials for each session
            avg_trials = [
                AvgTrial.from_trials(_trials_avg[session], sessionpath=session)
                for session in sessions
            ]
            # read some extra data from trials and create supplementary data
            for tr in trials_dyn:
                # read tibial torsion for each trial and make supplementary traces
                # these will only be shown for KneeAnglesZ (knee rotation) variable
                tors = dict()
                tors['R'], tors['L'] = (
                    tr.subj_params['RTibialTorsion'],
                    tr.subj_params['LTibialTorsion'],
                )
                if tors['R'] is None or tors['L'] is None:
                    logger.warning(
                        'could not read tibial torsion values from %s' % tr.trialname
                    )
                    continue
                # include torsion info for all cycles; this is useful when plotting
                # isolated cycles
                max_cycles = cfg.plot.max_cycles['model']
                cycs = tr.get_cycles(cfg.plot.default_cycles['model'])[:max_cycles]

                for cyc in cycs:
                    tibial_torsion[cyc] = dict()
                    for ctxt in tors:
                        var_ = ctxt + 'KneeAnglesZ'
                        tibial_torsion[cyc][var_] = dict()
                        # x = % of gait cycle
                        tibial_torsion[cyc][var_]['t'] = np.arange(101)
                        # static tibial torsion value as function of x
                        # convert radians -> degrees
                        tibial_torsion[cyc][var_]['data'] = (
                            np.ones(101) * tors[ctxt] / np.pi * 180
                        )
                        tibial_torsion[cyc][var_]['label'] = 'Tib. tors. (%s) %s' % (
                            ctxt,
                            tr.trialname,
                        )

            # in the EMG layout, keep channels that are active in any of the trials
            # (done once over all dynamic trials, hence outside the per-trial loop above)
            signals.progress.emit('Reading EMG data', 0)
            try:
                emgs = [tr.emg for tr in trials_dyn]
                emg_layout = layouts.rm_dead_channels_multitrial(
                    emgs, cfg.layouts.std_emg
                )
                if not emg_layout:
                    emg_layout = 'disabled'
            except GaitDataError:
                emg_layout = 'disabled'

        # define layouts
        # FIXME: should be definable in config
        _layouts = OrderedDict(
            [
                ('Patient info', 'patient_info'),
                ('Kinematics', cfg.layouts.lb_kinematics),
                ('Kinematics average', 'kinematics_average'),
                ('Static kinematics', 'static_kinematics'),
                ('Static EMG', 'static_emg'),
                ('Kinematics + kinetics', cfg.layouts.lb_kin_web),
                ('Kinetics', cfg.layouts.lb_kinetics_web),
                ('EMG', emg_layout),
                ('Kinetics-EMG left', cfg.layouts.lb_kinetics_emg_l),
                ('Kinetics-EMG right', cfg.layouts.lb_kinetics_emg_r),
                ('Muscle length', cfg.layouts.musclelen),
                ('Torso kinematics', cfg.layouts.torso),
                ('Time-distance variables', 'time_dist'),
            ]
        )
        # pick desired single variables from model and append
        # Py2: dict merge below can be done more elegantly once Py2 is dropped
        pig_singlevars_ = models.pig_lowerbody.varlabels_noside.copy()
        pig_singlevars_.update(models.pig_lowerbody_kinetics.varlabels_noside)
        pig_singlevars = sorted(pig_singlevars_.items(), key=lambda item: item[1])
        singlevars = OrderedDict(
            [(varlabel, [[var]]) for var, varlabel in pig_singlevars]
        )
        _layouts.update(singlevars)

        # add supplementary data for normal layouts
        supplementary_default = dict()
        supplementary_default.update(tibial_torsion)

        dd_opts_multi_upper = list()
        dd_opts_multi_lower = list()

        # loop through the layouts, create or load figures
        report_data_new = dict()
        for k, (label, layout) in enumerate(_layouts.items()):
            signals.progress.emit('Creating plot: %s' % label, 100 * k / len(_layouts))
            if signals.canceled:
                return None
            # for comparison report, include session info in plot legends and
            # use session specific line style
            emg_mode = None
            if is_comparison:
                legend_type = cfg.web_report.comparison_legend_type
                style_by = cfg.web_report.comparison_style_by
                color_by = cfg.web_report.comparison_color_by
                if cfg.web_report.comparison_emg_rms:
                    emg_mode = 'rms'
            else:
                legend_type = cfg.web_report.legend_type
                style_by = cfg.web_report.style_by
                color_by = cfg.web_report.color_by

            try:
                if saved_report_data:
                    logger.debug('loading %s from saved report data' % label)
                    if label not in saved_report_data:
                        # will be caught, resulting in empty menu item
                        raise RuntimeError
                    else:
                        figdata = saved_report_data[label]
                else:
                    logger.debug('creating figure data for %s' % label)
                    if isinstance(layout, basestring):  # handle special layout codes
                        if layout == 'time_dist':
                            figdata = timedist.do_comparison_plot(
                                sessions, big_fonts=True, backend='plotly'
                            )
                        elif layout == 'patient_info':
                            figdata = patient_info_text
                        elif layout == 'static_kinematics':
                            layout_ = cfg.layouts.lb_kinematics
                            figdata = plot_trials(
                                trials_static,
                                layout_,
                                model_normaldata=False,
                                cycles='unnormalized',
                                legend_type='short_name_with_cyclename',
                                style_by=style_by,
                                color_by=color_by,
                                big_fonts=True,
                            )
                        elif layout == 'static_emg':
                            layout_ = cfg.layouts.std_emg
                            figdata = plot_trials(
                                trials_static,
                                layout_,
                                model_normaldata=False,
                                cycles='unnormalized',
                                legend_type='short_name_with_cyclename',
                                style_by=style_by,
                                color_by=color_by,
                                big_fonts=True,
                            )
                        elif layout == 'kinematics_average':
                            layout_ = cfg.layouts.lb_kinematics
                            figdata = plot_trials(
                                avg_trials,
                                layout_,
                                style_by=style_by,
                                color_by=color_by,
                                model_normaldata=model_normaldata,
                                big_fonts=True,
                            )
                        elif layout == 'disabled':
                            # will be caught, resulting in empty menu item
                            raise RuntimeError
                        else:  # unrecognized layout; this is not caught by us
                            raise Exception('Unrecognized layout: %s' % layout)

                    else:  # regular gaitutils layout
                        figdata = plot_trials(
                            trials_dyn,
                            layout,
                            model_normaldata=model_normaldata,
                            emg_mode=emg_mode,
                            legend_type=legend_type,
                            style_by=style_by,
                            color_by=color_by,
                            supplementary_data=supplementary_default,
                            big_fonts=True,
                        )
                # save newly created data
                if not saved_report_data:
                    if isinstance(figdata, go.Figure):
                        # serialize go.Figures before saving
                        # this makes them much faster for pickle to handle
                        # apparently dcc.Graph can eat the serialized json directly,
                        # so no need to do anything on load
                        figdata_ = figdata.to_plotly_json()
                    else:
                        figdata_ = figdata
                    report_data_new[label] = figdata_

                # make the upper and lower panel graphs from figdata, depending
                # on data type
                def _is_base64(s):
                    try:
                        if isinstance(s, str):  # normalize so the comparison also works on Py3
                            s = s.encode()
                        return base64.b64encode(base64.b64decode(s)) == s
                    except Exception:
                        return False

                # this is for old style timedist figures that were in base64
                # encoded svg
                if layout == 'time_dist' and _is_base64(figdata):
                    graph_upper = html.Img(
                        src='data:image/svg+xml;base64,{}'.format(figdata),
                        id='gaitgraph%d' % k,
                        style={'height': '100%'},
                    )
                    graph_lower = html.Img(
                        src='data:image/svg+xml;base64,{}'.format(figdata),
                        id='gaitgraph%d' % (len(_layouts) + k),
                        style={'height': '100%'},
                    )
                elif layout == 'patient_info':
                    graph_upper = dcc.Markdown(figdata)
                    graph_lower = graph_upper
                else:
                    # plotly fig -> dcc.Graph
                    graph_upper = dcc.Graph(
                        figure=figdata, id='gaitgraph%d' % k, style={'height': '100%'}
                    )
                    graph_lower = dcc.Graph(
                        figure=figdata,
                        id='gaitgraph%d' % (len(_layouts) + k),
                        style={'height': '100%'},
                    )
                dd_opts_multi_upper.append({'label': label, 'value': graph_upper})
                dd_opts_multi_lower.append({'label': label, 'value': graph_lower})

            except (RuntimeError, GaitDataError) as e:  # could not create a figure
                logger.warning(u'failed to create figure for %s: %s' % (label, e))
                # insert the menu options but make them disabled
                dd_opts_multi_upper.append(
                    {'label': label, 'value': label, 'disabled': True}
                )
                dd_opts_multi_lower.append(
                    {'label': label, 'value': label, 'disabled': True}
                )
                continue

        opts_multi, mapper_multi_upper = _make_dropdown_lists(dd_opts_multi_upper)
        opts_multi, mapper_multi_lower = _make_dropdown_lists(dd_opts_multi_lower)

        # if plots were newly created, save them to disk
        if not saved_report_data:
            logger.debug('saving report data into %s' % data_fn)
            signals.progress.emit('Saving report data to disk...', 99)
            with open(data_fn, 'wb') as f:
                pickle.dump(report_data_new, f, protocol=-1)

    def make_left_panel(split=True, upper_value='Kinematics', lower_value='Kinematics'):
        """Helper to make the left graph panels. If split=True, make two stacked panels"""

        # the upper graph & dropdown
        items = [
            dcc.Dropdown(
                id='dd-vars-upper-multi',
                clearable=False,
                options=opts_multi,
                value=upper_value,
            ),
            html.Div(
                id='div-upper', style={'height': '50%'} if split else {'height': '100%'}
            ),
        ]

        if split:
            # add the lower one
            items.extend(
                [
                    dcc.Dropdown(
                        id='dd-vars-lower-multi',
                        clearable=False,
                        options=opts_multi,
                        value=lower_value,
                    ),
                    html.Div(id='div-lower', style={'height': '50%'}),
                ]
            )

        return html.Div(items, style={'height': '80vh'})

    # create the app
    app = dash.Dash('gaitutils')
    # use local packaged versions of JavaScript libs etc. (no internet needed)
    app.css.config.serve_locally = True
    app.scripts.config.serve_locally = True
    app.title = _report_name(sessions, long_name=False)

    # this is for generating the classnames in the CSS
    num2words = {
        1: 'one',
        2: 'two',
        3: 'three',
        4: 'four',
        5: 'five',
        6: 'six',
        7: 'seven',
        8: 'eight',
        9: 'nine',
        10: 'ten',
        11: 'eleven',
        12: 'twelve',
    }
    classname_left = '%s columns' % num2words[LEFT_WIDTH]
    classname_right = '%s columns' % num2words[12 - LEFT_WIDTH]

    if video_only:
        app.layout = html.Div(
            [  # row
                html.Div(
                    [  # single main div
                        dcc.Dropdown(
                            id='dd-camera',
                            clearable=False,
                            options=opts_cameras,
                            value='Front camera',
                        ),
                        dcc.Dropdown(
                            id='dd-video-tag',
                            clearable=False,
                            options=opts_tags,
                            value=opts_tags[0]['value'],
                        ),
                        html.Div(id='videos'),
                    ],
                    className='12 columns',
                ),
            ],
            className='row',
        )
    else:  # the two-panel layout with graphs and video
        app.layout = html.Div(
            [  # row
                html.Div(
                    [  # left main div
                        html.H6(report_name),
                        dcc.Checklist(
                            id='split-left',
                            options=[{'label': 'Two panels', 'value': 'split'}],
                            value=[],
                        ),
                        # need split=True so that both panels are in initial layout
                        html.Div(make_left_panel(split=True), id='div-left-main'),
                    ],
                    className=classname_left,
                ),
                html.Div(
                    [  # right main div
                        dcc.Dropdown(
                            id='dd-camera',
                            clearable=False,
                            options=opts_cameras,
                            value='Front camera',
                        ),
                        dcc.Dropdown(
                            id='dd-video-tag',
                            clearable=False,
                            options=opts_tags,
                            value=opts_tags[0]['value'],
                        ),
                        html.Div(id='videos'),
                    ],
                    className=classname_right,
                ),
            ],
            className='row',
        )

        @app.callback(
            Output('div-left-main', 'children'),
            [Input('split-left', 'value')],
            [State('dd-vars-upper-multi', 'value')],
        )
        def update_panel_layout(split_panels, upper_value):
            split = 'split' in split_panels
            return make_left_panel(split, upper_value=upper_value)

        @app.callback(
            Output('div-upper', 'children'), [Input('dd-vars-upper-multi', 'value')]
        )
        def update_contents_upper_multi(sel_var):
            return mapper_multi_upper[sel_var]

        @app.callback(
            Output('div-lower', 'children'), [Input('dd-vars-lower-multi', 'value')]
        )
        def update_contents_lower_multi(sel_var):
            return mapper_multi_lower[sel_var]

    def _video_elem(title, url, max_height):
        """Create a video element with title"""
        if not url:
            return 'No video found'
        vid_el = html.Video(
            src=url,
            controls=True,
            loop=True,
            preload='auto',
            title=title,
            style={'max-height': max_height, 'max-width': '100%'},
        )
        # return html.Div([title, vid_el])  # titles above videos
        return vid_el

    @app.callback(
        Output('videos', 'children'),
        [Input('dd-camera', 'value'), Input('dd-video-tag', 'value')],
    )
    def update_videos(camera_label, tag):
        """Create a list of video divs according to camera and tag selection"""
        if tag == 'no videos':
            return 'No videos found'
        vid_urls_ = vid_urls[tag][camera_label]
        if not vid_urls_:
            return 'No videos found'
        nvids = len(vid_urls_)
        max_height = str(int(VIDS_TOTAL_HEIGHT / nvids)) + 'vh'
        return [_video_elem('video', url, max_height) for url in vid_urls_]

    # add a static route to serve session data; be careful when exposing this outside firewalls
    @app.server.route('/static/<resource>')
    def serve_file(resource):
        for session in sessions:
            filepath = op.join(session, resource)
            if op.isfile(filepath):
                return flask.send_from_directory(session, resource)
        # Flask views must not return None; raise a 404 for missing files instead
        flask.abort(404)

    # add shutdown method - see http://flask.pocoo.org/snippets/67/
    @app.server.route('/shutdown')
    def shutdown():
        logger.debug('Received shutdown request...')
        _shutdown_server()
        return 'Server shutting down...'

    # inject some info of our own
    app._gaitutils_report_name = report_name

    # XXX: the Flask app ends up with a logger named 'gaitutils', which has a default
    # stderr handler. Since the logger hierarchy mirrors the package hierarchy,
    # all gaitutils package loggers propagate their messages into the app logger,
    # so messages get shown multiple times. As a dirty fix, we disable the
    # handlers for the app logger (messages still get shown, since they propagate to the root logger)
    app.logger.handlers = []

    return app
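
# A minimal sketch of serving the returned app. The builder name
# `make_dash_report` and the port are assumptions for illustration;
# `run_server` is the standard entry point for Dash 1.x apps configured
# like the one above.
app = make_dash_report(sessions)  # hypothetical wrapper around the code above
app.run_server(host='127.0.0.1', port=8050, debug=False)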
Esempio n. 50
0
def try_randomize_boss_rewards(self):
    if not self.options.get("progression_dungeons"):
        raise Exception(
            "Cannot randomize boss rewards when progress items are not allowed in dungeons."
        )

    boss_reward_items = []
    total_num_rewards = int(self.options.get("num_race_mode_dungeons"))

    unplaced_progress_items_degrouped = []
    for item_name in self.logic.unplaced_progress_items:
        if item_name in self.logic.progress_item_groups:
            unplaced_progress_items_degrouped += self.logic.progress_item_groups[item_name]
        else:
            unplaced_progress_items_degrouped.append(item_name)

    # Try to make all the rewards be Triforce Shards.
    # May not be possible if the player chose to start with too many shards.
    num_additional_rewards_needed = total_num_rewards
    triforce_shards = [
        item_name for item_name in unplaced_progress_items_degrouped
        if item_name.startswith("Triforce Shard ")
    ]
    self.rng.shuffle(triforce_shards)
    boss_reward_items += triforce_shards[0:num_additional_rewards_needed]

    # If we still need more rewards, use sword upgrades.
    # May still not fill up all 4 slots if the player starts with 8 shards and a sword.
    num_additional_rewards_needed = total_num_rewards - len(boss_reward_items)
    if num_additional_rewards_needed > 0:
        sword_upgrades = [
            item_name for item_name in unplaced_progress_items_degrouped
            if item_name == "Progressive Sword"
        ]
        boss_reward_items += sword_upgrades[0:num_additional_rewards_needed]

    # If we still need more rewards, use bow upgrades.
    # May still not fill up all 4 slots if the player starts with 8 shards and is in swordless mode.
    num_additional_rewards_needed = total_num_rewards - len(boss_reward_items)
    if num_additional_rewards_needed > 0:
        bow_upgrades = [
            item_name for item_name in unplaced_progress_items_degrouped
            if item_name == "Progressive Bow"
        ]
        boss_reward_items += bow_upgrades[0:num_additional_rewards_needed]

    possible_additional_rewards = [
        "Hookshot", "Progressive Shield", "Boomerang"
    ]

    # If we STILL need more rewards, use the Hookshot, Mirror Shield, and Boomerang.
    num_additional_rewards_needed = total_num_rewards - len(boss_reward_items)
    if num_additional_rewards_needed > 0:
        additional_rewards = [
            item_name for item_name in unplaced_progress_items_degrouped
            if item_name in possible_additional_rewards
        ]
        boss_reward_items += additional_rewards[0:num_additional_rewards_needed]

    self.rng.shuffle(boss_reward_items)

    if len(boss_reward_items) != total_num_rewards:
        raise Exception("Number of boss reward items is incorrect: " +
                        ", ".join(boss_reward_items))

    possible_boss_locations = [
        loc for loc in self.logic.remaining_item_locations
        if self.logic.item_locations[loc]["Original item"] == "Heart Container"
    ]

    if len(possible_boss_locations) != 6:
        raise Exception("Number of boss item locations is incorrect: " +
                        ", ".join(possible_boss_locations))

    boss_reward_locations = OrderedDict()

    # Decide what reward item to place in each boss location.
    for item_name in boss_reward_items:
        if self.dungeons_only_start and "Dragon Roost Cavern - Gohma Heart Container" in possible_boss_locations:
            location_name = "Dragon Roost Cavern - Gohma Heart Container"
        elif self.dungeons_only_start and "Forbidden Woods - Kalle Demos Heart Container" in possible_boss_locations:
            location_name = "Forbidden Woods - Kalle Demos Heart Container"
        else:
            location_name = self.rng.choice(possible_boss_locations)
        possible_boss_locations.remove(location_name)
        boss_reward_locations[location_name] = item_name

    # Verify that the dungeon boss rewards were placed in a way that allows them all to be accessible.
    locations_valid = validate_boss_reward_locations(self,
                                                     boss_reward_locations)

    # If the dungeon boss reward locations are not valid, a new set of dungeon boss reward locations will be generated.
    if not locations_valid:
        return False

    # Remove any Triforce Shards we're about to use from the progress item group, and add them as ungrouped progress items instead.
    for group_name, group_item_names in self.logic.progress_item_groups.items():
        items_to_remove_from_group = [
            item_name for item_name in group_item_names
            if item_name in boss_reward_items
        ]
        for item_name in items_to_remove_from_group:
            self.logic.progress_item_groups[group_name].remove(item_name)
        if group_name in self.logic.unplaced_progress_items:
            for item_name in items_to_remove_from_group:
                self.logic.unplaced_progress_items.append(item_name)

        if len(self.logic.progress_item_groups[group_name]) == 0:
            if group_name in self.logic.unplaced_progress_items:
                self.logic.unplaced_progress_items.remove(group_name)

    for location_name, item_name in boss_reward_locations.items():
        self.logic.set_prerandomization_item_location(location_name, item_name)
        self.race_mode_required_locations.append(location_name)

        dungeon_name, _ = self.logic.split_location_name_by_zone(location_name)
        self.race_mode_required_dungeons.append(dungeon_name)

    banned_dungeons = []
    for boss_location_name in possible_boss_locations:
        dungeon_name, _ = self.logic.split_location_name_by_zone(
            boss_location_name)
        banned_dungeons.append(dungeon_name)

    for location_name in self.logic.item_locations:
        zone_name, _ = self.logic.split_location_name_by_zone(location_name)
        if self.logic.is_dungeon_location(
                location_name) and zone_name in banned_dungeons:
            self.race_mode_banned_locations.append(location_name)
        elif location_name == "Mailbox - Letter from Orca" and "Forbidden Woods" in banned_dungeons:
            self.race_mode_banned_locations.append(location_name)
        elif location_name == "Mailbox - Letter from Baito" and "Earth Temple" in banned_dungeons:
            self.race_mode_banned_locations.append(location_name)
        elif location_name == "Mailbox - Letter from Aryll" and "Forsaken Fortress" in banned_dungeons:
            self.race_mode_banned_locations.append(location_name)
        elif location_name == "Mailbox - Letter from Tingle" and "Forsaken Fortress" in banned_dungeons:
            self.race_mode_banned_locations.append(location_name)

    return True
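
# The function above returns False when validation fails, so a caller is
# expected to retry until a valid placement is found. A minimal sketch of that
# retry loop; `randomizer` and the attempt cap are assumptions, and the real
# code presumably resets placement state between attempts:
for attempt in range(50):
    if randomizer.try_randomize_boss_rewards():
        break
else:
    raise Exception("Could not randomize boss rewards in 50 attempts")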
Esempio n. 51
0
def init_func_preproc_wf(
    aroma_melodic_dim,
    bold2t1w_dof,
    bold_file,
    cifti_output,
    debug,
    dummy_scans,
    err_on_aroma_warn,
    fmap_bspline,
    fmap_demean,
    force_syn,
    freesurfer,
    ignore,
    low_mem,
    medial_surface_nan,
    omp_nthreads,
    output_dir,
    output_spaces,
    regressors_all_comps,
    regressors_dvars_th,
    regressors_fd_th,
    reportlets_dir,
    t2s_coreg,
    use_aroma,
    use_bbr,
    use_syn,
    layout=None,
    num_bold=1,
):
    """
    This workflow controls the functional preprocessing stages of FMRIPREP.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from fmriprep.workflows.bold import init_func_preproc_wf
        from collections import namedtuple, OrderedDict
        BIDSLayout = namedtuple('BIDSLayout', ['root'])
        wf = init_func_preproc_wf(
            aroma_melodic_dim=-200,
            bold2t1w_dof=9,
            bold_file='/completely/made/up/path/sub-01_task-nback_bold.nii.gz',
            cifti_output=False,
            debug=False,
            dummy_scans=None,
            err_on_aroma_warn=False,
            fmap_bspline=True,
            fmap_demean=True,
            force_syn=True,
            freesurfer=True,
            ignore=[],
            low_mem=False,
            medial_surface_nan=False,
            omp_nthreads=1,
            output_dir='.',
            output_spaces=OrderedDict([
                ('MNI152Lin', {}), ('fsaverage', {'density': '10k'}),
                ('T1w', {}), ('fsnative', {})]),
            regressors_all_comps=False,
            regressors_dvars_th=1.5,
            regressors_fd_th=0.5,
            reportlets_dir='.',
            t2s_coreg=False,
            use_aroma=False,
            use_bbr=True,
            use_syn=True,
            layout=BIDSLayout('.'),
            num_bold=1,
        )

    **Parameters**

        aroma_melodic_dim : int
            Maximum number of components identified by MELODIC within ICA-AROMA
            (default is -200, i.e. no limitation).
        bold2t1w_dof : 6, 9 or 12
            Degrees-of-freedom for BOLD-T1w registration
        bold_file : str
            BOLD series NIfTI file
        cifti_output : bool
            Generate bold CIFTI file in output spaces
        debug : bool
            Enable debugging outputs
        dummy_scans : int or None
            Number of volumes to consider as non-steady state
        err_on_aroma_warn : bool
            Raise an error, instead of a warning, when ICA-AROMA fails
        fmap_bspline : bool
            **Experimental**: Fit B-Spline field using least-squares
        fmap_demean : bool
            Demean voxel-shift map during unwarp
        force_syn : bool
            **Temporary**: Always run SyN-based SDC
        freesurfer : bool
            Enable FreeSurfer functional registration (bbregister) and resampling
            BOLD series to FreeSurfer surface meshes.
        ignore : list
            Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
        low_mem : bool
            Write uncompressed .nii files in some cases to reduce memory usage
        medial_surface_nan : bool
            Replace medial wall values with NaNs on functional GIFTI files
        omp_nthreads : int
            Maximum number of threads an individual process may use
        output_dir : str
            Directory in which to save derivatives
        output_spaces : OrderedDict
            Ordered dictionary where keys are TemplateFlow ID strings (e.g. ``MNI152Lin``,
            ``MNI152NLin6Asym``, ``MNI152NLin2009cAsym``, or ``fsLR``), strings designating
            nonstandard references (e.g. ``T1w`` or ``anat``, ``sbref``, ``run``, etc.),
            or paths pointing to custom templates organized in a TemplateFlow-like structure.
            Values of the dictionary aggregate modifiers (e.g. the value for the key ``MNI152Lin``
            could be ``{'resolution': 2}`` if one wants the resampling to be done on the 2mm
            resolution version of the selected template).
        regressors_all_comps : bool
            Return all CompCor component time series instead of the top fraction
        regressors_dvars_th : float
            Criterion for flagging DVARS outliers
        regressors_fd_th : float
            Criterion for flagging framewise displacement outliers
        reportlets_dir : str
            Absolute path of a directory in which reportlets will be temporarily stored
        t2s_coreg : bool
            For multiecho EPI, use the calculated T2*-map for T2*-driven coregistration
        use_aroma : bool
            Perform ICA-AROMA on MNI-resampled functional series
        use_bbr : bool or None
            Enable/disable boundary-based registration refinement.
            If ``None``, test BBR result for distortion before accepting.
            When using ``t2s_coreg``, BBR will be enabled by default unless
            explicitly specified otherwise.
        use_syn : bool
            **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
            If fieldmaps are present and enabled, this is not run by default.
        layout : BIDSLayout
            BIDSLayout structure to enable metadata retrieval
        num_bold : int
            Total number of BOLD files that have been set for preprocessing
            (default is 1)

    **Inputs**

        bold_file
            BOLD series NIfTI file
        t1_preproc
            Bias-corrected structural template image
        t1_brain
            Skull-stripped ``t1_preproc``
        t1_mask
            Mask of the skull-stripped template image
        t1_seg
            Segmentation of preprocessed structural image, including
            gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
        t1_tpms
            List of tissue probability maps in T1w space
        anat2std_xfm
            ANTs-compatible affine-and-warp transform file
        std2anat_xfm
            ANTs-compatible affine-and-warp transform file (inverse)
        subjects_dir
            FreeSurfer SUBJECTS_DIR
        subject_id
            FreeSurfer subject ID
        t1_2_fsnative_forward_transform
            LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
        t1_2_fsnative_reverse_transform
            LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w


    **Outputs**

        bold_t1
            BOLD series, resampled to T1w space
        bold_mask_t1
            BOLD series mask in T1w space
        bold_std
            BOLD series, resampled to template space
        bold_mask_std
            BOLD series mask in template space
        confounds
            TSV of confounds
        surfaces
            BOLD series, resampled to FreeSurfer surfaces
        aroma_noise_ics
            Noise components identified by ICA-AROMA
        melodic_mix
            FSL MELODIC mixing matrix
        bold_cifti
            BOLD CIFTI image
        cifti_variant
            combination of target spaces for `bold_cifti`


    **Subworkflows**

        * :py:func:`~fmriprep.workflows.bold.util.init_bold_reference_wf`
        * :py:func:`~fmriprep.workflows.bold.stc.init_bold_stc_wf`
        * :py:func:`~fmriprep.workflows.bold.hmc.init_bold_hmc_wf`
        * :py:func:`~fmriprep.workflows.bold.t2s.init_bold_t2s_wf`
        * :py:func:`~fmriprep.workflows.bold.registration.init_bold_t1_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`
        * :py:func:`~fmriprep.workflows.bold.confounds.init_bold_confs_wf`
        * :py:func:`~fmriprep.workflows.bold.confounds.init_ica_aroma_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_std_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
        * :py:func:`~fmriprep.workflows.bold.resampling.init_bold_surf_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.pepolar.init_pepolar_unwarp_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_fmap_estimator_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_sdc_unwarp_wf`
        * :py:func:`~fmriprep.workflows.fieldmap.init_nonlinear_sdc_wf`

    """
    from .resampling import NONSTANDARD_REFERENCES
    from ..fieldmap.base import init_sdc_wf  # Avoid circular dependency (#1066)

    # Filter out standard spaces to a separate dict
    std_spaces = OrderedDict([(key, modifiers)
                              for key, modifiers in output_spaces.items()
                              if key not in NONSTANDARD_REFERENCES])
    volume_std_spaces = OrderedDict([(key, modifiers)
                                     for key, modifiers in std_spaces.items()
                                     if not key.startswith('fs')])

    ref_file = bold_file
    mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
    bold_tlen = 10
    multiecho = isinstance(bold_file, list)

    if multiecho:
        tes = [layout.get_metadata(echo)['EchoTime'] for echo in bold_file]
        ref_file = dict(zip(tes, bold_file))[min(tes)]

    if os.path.isfile(ref_file):
        bold_tlen, mem_gb = _create_mem_gb(ref_file)

    wf_name = _get_wf_name(ref_file)
    LOGGER.log(
        25, ('Creating bold processing workflow for "%s" (%.2f GB / %d TRs). '
             'Memory resampled/largemem=%.2f/%.2f GB.'), ref_file,
        mem_gb['filesize'], bold_tlen, mem_gb['resampled'], mem_gb['largemem'])

    sbref_file = None
    # For doc building purposes
    if not hasattr(layout, 'parse_file_entities'):
        LOGGER.log(25, 'No valid layout: building empty workflow.')
        metadata = {
            'RepetitionTime': 2.0,
            'SliceTiming': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
            'PhaseEncodingDirection': 'j',
        }
        fmaps = [{
            'suffix': 'phasediff',
            'phasediff': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_phasediff.nii.gz',
            'magnitude1': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude1.nii.gz',
            'magnitude2': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude2.nii.gz',
        }]
        run_stc = True
        multiecho = False
    else:
        # Find associated sbref, if possible
        entities = layout.parse_file_entities(ref_file)
        entities['suffix'] = 'sbref'
        entities['extension'] = ['nii', 'nii.gz']  # Overwrite extensions
        files = layout.get(return_type='file', **entities)
        refbase = os.path.basename(ref_file)
        if 'sbref' in ignore:
            LOGGER.info("Single-band reference files ignored.")
        elif files and multiecho:
            LOGGER.warning("Single-band reference found, but not supported in "
                           "multi-echo workflows at this time. Ignoring.")
        elif files:
            sbref_file = files[0]
            sbbase = os.path.basename(sbref_file)
            if len(files) > 1:
                LOGGER.warning(
                    "Multiple single-band reference files found for {}; using "
                    "{}".format(refbase, sbbase))
            else:
                LOGGER.log(
                    25, "Using single-band reference file {}".format(sbbase))
        else:
            LOGGER.log(25,
                       "No single-band-reference found for {}".format(refbase))

        metadata = layout.get_metadata(ref_file)

        # Find fieldmaps. Options: (phase1|phase2|phasediff|epi|fieldmap|syn)
        fmaps = []
        if 'fieldmaps' not in ignore:
            for fmap in layout.get_fieldmap(ref_file, return_list=True):
                if fmap['suffix'] == 'phase':
                    LOGGER.warning("""\
Found phase1/2 type of fieldmaps, which are not currently supported. \
fMRIPrep will discard them for susceptibility distortion correction. \
Please, follow up on this issue at \
https://github.com/poldracklab/fmriprep/issues/1655.""")
                else:
                    fmap['metadata'] = layout.get_metadata(
                        fmap[fmap['suffix']])
                    fmaps.append(fmap)

        # Run SyN if forced or in the absence of fieldmap correction
        if force_syn or (use_syn and not fmaps):
            fmaps.append({'suffix': 'syn'})

        # Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
        run_stc = ("SliceTiming" in metadata and 'slicetiming' not in ignore
                   and (_get_series_len(ref_file) > 4 or "TooShort"))

    # Check if MEEPI for T2* coregistration target
    if t2s_coreg and not multiecho:
        LOGGER.warning(
            "No multiecho BOLD images found for T2* coregistration. "
            "Using standard EPI-T1 coregistration.")
        t2s_coreg = False

    # By default, force-bbr for t2s_coreg unless user specifies otherwise
    if t2s_coreg and use_bbr is None:
        use_bbr = True

    # Build workflow
    workflow = Workflow(name=wf_name)
    workflow.__desc__ = """

Functional data preprocessing

: For each of the {num_bold} BOLD runs found per subject (across all
tasks and sessions), the following preprocessing was performed.
""".format(num_bold=num_bold)

    workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""

    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_file', 'subjects_dir', 'subject_id', 't1_preproc', 't1_brain',
        't1_mask', 't1_seg', 't1_tpms', 't1_aseg', 't1_aparc', 'anat2std_xfm',
        'std2anat_xfm', 'template', 'joint_anat2std_xfm', 'joint_std2anat_xfm',
        'joint_template', 't1_2_fsnative_forward_transform',
        't1_2_fsnative_reverse_transform'
    ]),
                        name='inputnode')
    inputnode.inputs.bold_file = bold_file
    if sbref_file is not None:
        from niworkflows.interfaces.images import ValidateImage
        val_sbref = pe.Node(ValidateImage(in_file=sbref_file),
                            name='val_sbref')

    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1',
        'bold_aparc_t1', 'bold_std', 'bold_std_ref', 'bold_mask_std',
        'bold_aseg_std', 'bold_aparc_std', 'bold_native', 'bold_cifti',
        'cifti_variant', 'cifti_variant_key', 'surfaces', 'confounds',
        'aroma_noise_ics', 'melodic_mix', 'nonaggr_denoised_file',
        'confounds_metadata'
    ]),
                         name='outputnode')

    # BOLD buffer: an identity used as a pointer to either the original BOLD
    # or the STC'ed one for further use.
    boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']),
                         name='boldbuffer')

    summary = pe.Node(FunctionalSummary(
        slice_timing=run_stc,
        registration=('FSL', 'FreeSurfer')[freesurfer],
        registration_dof=bold2t1w_dof,
        pe_direction=metadata.get("PhaseEncodingDirection"),
        tr=metadata.get("RepetitionTime")),
                      name='summary',
                      mem_gb=DEFAULT_MEMORY_MIN_GB,
                      run_without_submitting=True)
    summary.inputs.dummy_scans = dummy_scans

    # CIfTI output: currently, we only support fsaverage{5,6}
    cifti_spaces = set(s for s in output_spaces.keys()
                       if s in ('fsaverage5', 'fsaverage6'))
    fsaverage_den = output_spaces.get('fsaverage', {}).get('den')
    if fsaverage_den:
        cifti_spaces.add(FSAVERAGE_DENSITY[fsaverage_den])
    cifti_output = cifti_output and cifti_spaces
    func_derivatives_wf = init_func_derivatives_wf(
        bids_root=layout.root,
        cifti_output=cifti_output,
        freesurfer=freesurfer,
        metadata=metadata,
        output_dir=output_dir,
        output_spaces=output_spaces,
        standard_spaces=list(std_spaces.keys()),
        use_aroma=use_aroma,
    )

    workflow.connect([
        (outputnode, func_derivatives_wf, [
            ('bold_t1', 'inputnode.bold_t1'),
            ('bold_t1_ref', 'inputnode.bold_t1_ref'),
            ('bold_aseg_t1', 'inputnode.bold_aseg_t1'),
            ('bold_aparc_t1', 'inputnode.bold_aparc_t1'),
            ('bold_mask_t1', 'inputnode.bold_mask_t1'),
            ('bold_native', 'inputnode.bold_native'),
            ('confounds', 'inputnode.confounds'),
            ('surfaces', 'inputnode.surfaces'),
            ('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
            ('melodic_mix', 'inputnode.melodic_mix'),
            ('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
            ('bold_cifti', 'inputnode.bold_cifti'),
            ('cifti_variant', 'inputnode.cifti_variant'),
            ('cifti_variant_key', 'inputnode.cifti_variant_key'),
            ('confounds_metadata', 'inputnode.confounds_metadata'),
        ]),
    ])

    # Generate a tentative boldref
    bold_reference_wf = init_bold_reference_wf(omp_nthreads=omp_nthreads)
    bold_reference_wf.inputs.inputnode.dummy_scans = dummy_scans
    if sbref_file is not None:
        workflow.connect([
            (val_sbref, bold_reference_wf, [('out_file',
                                             'inputnode.sbref_file')]),
        ])

    # Top-level BOLD splitter
    bold_split = pe.Node(FSLSplit(dimension='t'),
                         name='bold_split',
                         mem_gb=mem_gb['filesize'] * 3)

    # HMC on the BOLD
    bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
                                   mem_gb=mem_gb['filesize'],
                                   omp_nthreads=omp_nthreads)

    # calculate BOLD registration to T1w
    bold_reg_wf = init_bold_reg_wf(name='bold_reg_wf',
                                   freesurfer=freesurfer,
                                   use_bbr=use_bbr,
                                   bold2t1w_dof=bold2t1w_dof,
                                   mem_gb=mem_gb['resampled'],
                                   omp_nthreads=omp_nthreads,
                                   use_compression=False)

    # apply BOLD registration to T1w
    bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
                                             freesurfer=freesurfer,
                                             use_fieldwarp=(fmaps is not None
                                                            or use_syn),
                                             multiecho=multiecho,
                                             mem_gb=mem_gb['resampled'],
                                             omp_nthreads=omp_nthreads,
                                             use_compression=False)

    # get confounds
    bold_confounds_wf = init_bold_confs_wf(
        mem_gb=mem_gb['largemem'],
        metadata=metadata,
        regressors_all_comps=regressors_all_comps,
        regressors_fd_th=regressors_fd_th,
        regressors_dvars_th=regressors_dvars_th,
        name='bold_confounds_wf')
    bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]

    # Apply transforms in 1 shot
    # Only use uncompressed output if AROMA is to be run
    bold_bold_trans_wf = init_bold_preproc_trans_wf(
        mem_gb=mem_gb['resampled'],
        omp_nthreads=omp_nthreads,
        use_compression=not low_mem,
        use_fieldwarp=(fmaps is not None or use_syn),
        name='bold_bold_trans_wf')
    bold_bold_trans_wf.inputs.inputnode.name_source = ref_file

    # SLICE-TIME CORRECTION (or bypass) #############################################
    if run_stc is True:  # bool('TooShort') == True, so check True explicitly
        bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
        workflow.connect([
            (bold_reference_wf, bold_stc_wf, [('outputnode.skip_vols',
                                               'inputnode.skip_vols')]),
            (bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
        ])
        if not multiecho:
            workflow.connect([(bold_reference_wf, bold_stc_wf, [
                ('outputnode.bold_file', 'inputnode.bold_file')
            ])])
        else:  # for meepi, iterate through stc_wf for all workflows
            meepi_echos = boldbuffer.clone(name='meepi_echos')
            meepi_echos.iterables = ('bold_file', bold_file)
            workflow.connect([(meepi_echos, bold_stc_wf,
                               [('bold_file', 'inputnode.bold_file')])])
    elif not multiecho:  # STC is too short or False
        # bypass STC from original BOLD to the splitter through boldbuffer
        workflow.connect([(bold_reference_wf, boldbuffer,
                           [('outputnode.bold_file', 'bold_file')])])
    else:
        # for meepi, iterate over all meepi echos to boldbuffer
        boldbuffer.iterables = ('bold_file', bold_file)

    # SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
    bold_sdc_wf = init_sdc_wf(fmaps,
                              metadata,
                              omp_nthreads=omp_nthreads,
                              debug=debug,
                              fmap_demean=fmap_demean,
                              fmap_bspline=fmap_bspline)
    # If no standard space is given, use the default for SyN-SDC
    if not volume_std_spaces or 'MNI152NLin2009cAsym' in volume_std_spaces:
        bold_sdc_wf.inputs.inputnode.template = 'MNI152NLin2009cAsym'
    else:
        bold_sdc_wf.inputs.inputnode.template = next(iter(volume_std_spaces))

    if not fmaps:
        LOGGER.warning('SDC: no fieldmaps found or they were ignored (%s).',
                       ref_file)
    elif fmaps[0]['suffix'] == 'syn':
        LOGGER.warning(
            'SDC: no fieldmaps found or they were ignored. '
            'Using EXPERIMENTAL "fieldmap-less SyN" correction '
            'for dataset %s.', ref_file)
    else:
        LOGGER.log(
            25, 'SDC: fieldmap estimation of type "%s" intended for %s found.',
            fmaps[0]['suffix'], ref_file)

    # Overwrite ``out_path_base`` of sdcflows' DataSinks
    for node in bold_sdc_wf.list_node_names():
        if node.split('.')[-1].startswith('ds_'):
            bold_sdc_wf.get_node(node).interface.out_path_base = 'fmriprep'

    # MULTI-ECHO EPI DATA #############################################
    if multiecho:
        from .util import init_skullstrip_bold_wf
        skullstrip_bold_wf = init_skullstrip_bold_wf(name='skullstrip_bold_wf')

        inputnode.inputs.bold_file = ref_file  # Replace reference with first echo

        join_echos = pe.JoinNode(
            niu.IdentityInterface(fields=['bold_files']),
            joinsource=('meepi_echos' if run_stc is True else 'boldbuffer'),
            joinfield=['bold_files'],
            name='join_echos')

        # create optimal combination, adaptive T2* map
        bold_t2s_wf = init_bold_t2s_wf(echo_times=tes,
                                       mem_gb=mem_gb['resampled'],
                                       omp_nthreads=omp_nthreads,
                                       t2s_coreg=t2s_coreg,
                                       name='bold_t2smap_wf')

        workflow.connect([
            (skullstrip_bold_wf, join_echos,
             [('outputnode.skull_stripped_file', 'bold_files')]),
            (join_echos, bold_t2s_wf, [('bold_files', 'inputnode.bold_file')]),
        ])

    # MAIN WORKFLOW STRUCTURE #######################################################
    workflow.connect([
        # Generate early reference
        (inputnode, bold_reference_wf, [('bold_file', 'inputnode.bold_file')]),
        # BOLD buffer has slice-time corrected if it was run, original otherwise
        (boldbuffer, bold_split, [('bold_file', 'in_file')]),
        # HMC
        (bold_reference_wf, bold_hmc_wf,
         [('outputnode.raw_ref_image', 'inputnode.raw_ref_image'),
          ('outputnode.bold_file', 'inputnode.bold_file')]),
        (bold_reference_wf, summary, [('outputnode.algo_dummy_scans',
                                       'algo_dummy_scans')]),
        # EPI-T1 registration workflow
        (
            inputnode,
            bold_reg_wf,
            [
                ('t1_brain', 'inputnode.t1_brain'),
                ('t1_seg', 'inputnode.t1_seg'),
                # Undefined if --no-freesurfer, but this is safe
                ('subjects_dir', 'inputnode.subjects_dir'),
                ('subject_id', 'inputnode.subject_id'),
                ('t1_2_fsnative_reverse_transform',
                 'inputnode.t1_2_fsnative_reverse_transform')
            ]),
        (inputnode, bold_t1_trans_wf, [('bold_file', 'inputnode.name_source'),
                                       ('t1_brain', 'inputnode.t1_brain'),
                                       ('t1_mask', 'inputnode.t1_mask'),
                                       ('t1_aseg', 'inputnode.t1_aseg'),
                                       ('t1_aparc', 'inputnode.t1_aparc')]),
        # unused if multiecho, but this is safe
        (bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms',
                                          'inputnode.hmc_xforms')]),
        (bold_reg_wf, bold_t1_trans_wf, [('outputnode.itk_bold_to_t1',
                                          'inputnode.itk_bold_to_t1')]),
        (bold_t1_trans_wf, outputnode,
         [('outputnode.bold_t1', 'bold_t1'),
          ('outputnode.bold_t1_ref', 'bold_t1_ref'),
          ('outputnode.bold_aseg_t1', 'bold_aseg_t1'),
          ('outputnode.bold_aparc_t1', 'bold_aparc_t1')]),
        (bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
        # SDC (or pass-through workflow)
        (inputnode, bold_sdc_wf, [('joint_template', 'inputnode.templates'),
                                  ('joint_std2anat_xfm',
                                   'inputnode.std2anat_xfm')]),
        (inputnode, bold_sdc_wf, [('t1_brain', 'inputnode.t1_brain')]),
        (bold_reference_wf, bold_sdc_wf,
         [('outputnode.ref_image', 'inputnode.bold_ref'),
          ('outputnode.ref_image_brain', 'inputnode.bold_ref_brain'),
          ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        # For t2s_coreg, replace EPI-to-T1w registration inputs
        (bold_sdc_wf if not t2s_coreg else bold_t2s_wf, bold_reg_wf,
         [('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain')]),
        (bold_sdc_wf if not t2s_coreg else bold_t2s_wf, bold_t1_trans_wf,
         [('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain'),
          ('outputnode.bold_mask', 'inputnode.ref_bold_mask')]),
        (bold_sdc_wf, bold_t1_trans_wf, [('outputnode.out_warp',
                                          'inputnode.fieldwarp')]),
        (bold_sdc_wf, bold_bold_trans_wf,
         [('outputnode.out_warp', 'inputnode.fieldwarp'),
          ('outputnode.bold_mask', 'inputnode.bold_mask')]),
        (bold_sdc_wf, summary, [('outputnode.method', 'distortion_correction')
                                ]),
        # Connect bold_confounds_wf
        (inputnode, bold_confounds_wf, [('t1_tpms', 'inputnode.t1_tpms'),
                                        ('t1_mask', 'inputnode.t1_mask')]),
        (bold_hmc_wf, bold_confounds_wf, [('outputnode.movpar_file',
                                           'inputnode.movpar_file')]),
        (bold_reg_wf, bold_confounds_wf, [('outputnode.itk_t1_to_bold',
                                           'inputnode.t1_bold_xform')]),
        (bold_reference_wf, bold_confounds_wf, [('outputnode.skip_vols',
                                                 'inputnode.skip_vols')]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_file', 'confounds'),
        ]),
        (bold_confounds_wf, outputnode, [
            ('outputnode.confounds_metadata', 'confounds_metadata'),
        ]),
        # Connect bold_bold_trans_wf
        (bold_split, bold_bold_trans_wf, [('out_files', 'inputnode.bold_file')]
         ),
        (bold_hmc_wf, bold_bold_trans_wf, [('outputnode.xforms',
                                            'inputnode.hmc_xforms')]),
        # Summary
        (outputnode, summary, [('confounds', 'confounds_file')]),
    ])

    # for standard EPI data, pass along correct file
    if not multiecho:
        workflow.connect([
            (inputnode, func_derivatives_wf, [('bold_file',
                                               'inputnode.source_file')]),
            (bold_bold_trans_wf, bold_confounds_wf,
             [('outputnode.bold', 'inputnode.bold'),
              ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_split, bold_t1_trans_wf, [('out_files',
                                             'inputnode.bold_split')]),
        ])
    else:  # for meepi, create and use optimal combination
        workflow.connect([
            # update name source for optimal combination
            (inputnode, func_derivatives_wf,
             [(('bold_file', combine_meepi_source), 'inputnode.source_file')]),
            (bold_bold_trans_wf, skullstrip_bold_wf, [('outputnode.bold',
                                                       'inputnode.in_file')]),
            (bold_t2s_wf, bold_confounds_wf,
             [('outputnode.bold', 'inputnode.bold'),
              ('outputnode.bold_mask', 'inputnode.bold_mask')]),
            (bold_t2s_wf, bold_t1_trans_wf, [('outputnode.bold',
                                              'inputnode.bold_split')]),
        ])

    if fmaps:
        from ..fieldmap.unwarp import init_fmap_unwarp_report_wf
        # Report on BOLD correction
        fmap_unwarp_report_wf = init_fmap_unwarp_report_wf()
        workflow.connect([
            (inputnode, fmap_unwarp_report_wf, [('t1_seg', 'inputnode.in_seg')
                                                ]),
            (bold_reference_wf, fmap_unwarp_report_wf,
             [('outputnode.ref_image', 'inputnode.in_pre')]),
            (bold_reg_wf, fmap_unwarp_report_wf, [('outputnode.itk_t1_to_bold',
                                                   'inputnode.in_xfm')]),
            (bold_sdc_wf, fmap_unwarp_report_wf, [('outputnode.bold_ref',
                                                   'inputnode.in_post')]),
        ])

        # Overwrite ``out_path_base`` of unwarping DataSinks
        for node in fmap_unwarp_report_wf.list_node_names():
            if node.split('.')[-1].startswith('ds_'):
                fmap_unwarp_report_wf.get_node(
                    node).interface.out_path_base = 'fmriprep'

        if force_syn and fmaps[0]['suffix'] != 'syn':
            syn_unwarp_report_wf = init_fmap_unwarp_report_wf(
                name='syn_unwarp_report_wf', forcedsyn=True)
            workflow.connect([
                (inputnode, syn_unwarp_report_wf, [('t1_seg',
                                                    'inputnode.in_seg')]),
                (bold_reference_wf, syn_unwarp_report_wf,
                 [('outputnode.ref_image', 'inputnode.in_pre')]),
                (bold_reg_wf, syn_unwarp_report_wf,
                 [('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
                (bold_sdc_wf, syn_unwarp_report_wf,
                 [('outputnode.syn_bold_ref', 'inputnode.in_post')]),
            ])

            # Overwrite ``out_path_base`` of unwarping DataSinks
            for node in syn_unwarp_report_wf.list_node_names():
                if node.split('.')[-1].startswith('ds_'):
                    syn_unwarp_report_wf.get_node(
                        node).interface.out_path_base = 'fmriprep'

    # Map final BOLD mask into T1w space (if required)
    if 'T1w' in output_spaces or 'anat' in output_spaces:
        from niworkflows.interfaces.fixes import (FixHeaderApplyTransforms as
                                                  ApplyTransforms)

        boldmask_to_t1w = pe.Node(ApplyTransforms(interpolation='MultiLabel',
                                                  float=True),
                                  name='boldmask_to_t1w',
                                  mem_gb=0.1)
        workflow.connect([
            (bold_reg_wf, boldmask_to_t1w, [('outputnode.itk_bold_to_t1',
                                             'transforms')]),
            (bold_t1_trans_wf, boldmask_to_t1w, [('outputnode.bold_mask_t1',
                                                  'reference_image')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             boldmask_to_t1w, [('outputnode.bold_mask', 'input_image')]),
            (boldmask_to_t1w, outputnode, [('output_image', 'bold_mask_t1')]),
        ])

    if set(['func', 'run', 'bold', 'boldref',
            'sbref']).intersection(output_spaces):
        workflow.connect([
            (bold_bold_trans_wf, outputnode, [('outputnode.bold',
                                               'bold_native')]),
            (bold_bold_trans_wf, func_derivatives_wf,
             [('outputnode.bold_ref', 'inputnode.bold_native_ref'),
              ('outputnode.bold_mask', 'inputnode.bold_mask_native')]),
        ])

    if volume_std_spaces:
        # Apply transforms in 1 shot
        # Only use uncompressed output if AROMA is to be run
        bold_std_trans_wf = init_bold_std_trans_wf(
            freesurfer=freesurfer,
            mem_gb=mem_gb['resampled'],
            omp_nthreads=omp_nthreads,
            standard_spaces=volume_std_spaces,
            name='bold_std_trans_wf',
            use_compression=not low_mem,
            use_fieldwarp=fmaps is not None,
        )
        workflow.connect([
            (inputnode, bold_std_trans_wf,
             [('joint_template', 'inputnode.templates'),
              ('joint_anat2std_xfm', 'inputnode.anat2std_xfm'),
              ('bold_file', 'inputnode.name_source'),
              ('t1_aseg', 'inputnode.bold_aseg'),
              ('t1_aparc', 'inputnode.bold_aparc')]),
            (bold_hmc_wf, bold_std_trans_wf, [('outputnode.xforms',
                                               'inputnode.hmc_xforms')]),
            (bold_reg_wf, bold_std_trans_wf, [('outputnode.itk_bold_to_t1',
                                               'inputnode.itk_bold_to_t1')]),
            (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
             bold_std_trans_wf, [('outputnode.bold_mask',
                                  'inputnode.bold_mask')]),
            (bold_sdc_wf, bold_std_trans_wf, [('outputnode.out_warp',
                                               'inputnode.fieldwarp')]),
            (bold_std_trans_wf, outputnode,
             [('outputnode.bold_std', 'bold_std'),
              ('outputnode.bold_std_ref', 'bold_std_ref'),
              ('outputnode.bold_mask_std', 'bold_mask_std')]),
        ])

        if freesurfer:
            workflow.connect([
                (bold_std_trans_wf, func_derivatives_wf, [
                    ('poutputnode.bold_aseg_std', 'inputnode.bold_aseg_std'),
                    ('poutputnode.bold_aparc_std', 'inputnode.bold_aparc_std'),
                ]),
                (bold_std_trans_wf, outputnode,
                 [('outputnode.bold_aseg_std', 'bold_aseg_std'),
                  ('outputnode.bold_aparc_std', 'bold_aparc_std')]),
            ])

        if 'MNI152NLin2009cAsym' in std_spaces:
            carpetplot_wf = init_carpetplot_wf(standard_spaces=std_spaces,
                                               mem_gb=mem_gb['resampled'],
                                               metadata=metadata,
                                               name='carpetplot_wf')
            workflow.connect([
                (inputnode, carpetplot_wf, [('joint_std2anat_xfm',
                                             'inputnode.std2anat_xfm')]),
                (bold_bold_trans_wf if not multiecho else bold_t2s_wf,
                 carpetplot_wf, [('outputnode.bold', 'inputnode.bold'),
                                 ('outputnode.bold_mask',
                                  'inputnode.bold_mask')]),
                (bold_reg_wf, carpetplot_wf, [('outputnode.itk_t1_to_bold',
                                               'inputnode.t1_bold_xform')]),
                (bold_confounds_wf, carpetplot_wf,
                 [('outputnode.confounds_file', 'inputnode.confounds_file')]),
            ])

        if not multiecho:
            workflow.connect([(bold_split, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])
        else:
            split_opt_comb = bold_split.clone(name='split_opt_comb')
            workflow.connect([(bold_t2s_wf, split_opt_comb,
                               [('outputnode.bold', 'in_file')]),
                              (split_opt_comb, bold_std_trans_wf,
                               [('out_files', 'inputnode.bold_split')])])

        # Artifacts resampled in MNI space can only be sinked if they
        # were actually generated. See #1348.
        # Uses the parameterized outputnode to generate all outputs
        workflow.connect([
            (bold_std_trans_wf, func_derivatives_wf, [
                ('poutputnode.templates', 'inputnode.template'),
                ('poutputnode.bold_std_ref', 'inputnode.bold_std_ref'),
                ('poutputnode.bold_std', 'inputnode.bold_std'),
                ('poutputnode.bold_mask_std', 'inputnode.bold_mask_std'),
            ]),
        ])

        if use_aroma and 'MNI152NLin6Asym' in std_spaces:  # ICA-AROMA workflow
            from .confounds import init_ica_aroma_wf

            ica_aroma_wf = init_ica_aroma_wf(
                metadata=metadata,
                mem_gb=mem_gb['resampled'],
                omp_nthreads=omp_nthreads,
                use_fieldwarp=fmaps is not None,
                err_on_aroma_warn=err_on_aroma_warn,
                aroma_melodic_dim=aroma_melodic_dim,
                name='ica_aroma_wf')

            join = pe.Node(niu.Function(output_names=["out_file"],
                                        function=_to_join),
                           name='aroma_confounds')

            mrg_conf_metadata = pe.Node(niu.Merge(2),
                                        name='merge_confound_metadata',
                                        run_without_submitting=True)
            mrg_conf_metadata2 = pe.Node(DictMerge(),
                                         name='merge_confound_metadata2',
                                         run_without_submitting=True)
            workflow.disconnect([
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_file', 'confounds'),
                ]),
                (bold_confounds_wf, outputnode, [
                    ('outputnode.confounds_metadata', 'confounds_metadata'),
                ]),
            ])
            workflow.connect([
                (bold_std_trans_wf, ica_aroma_wf,
                 [('outputnode.bold_std', 'inputnode.bold_std'),
                  ('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
                  ('outputnode.templates', 'inputnode.templates')]),
                (inputnode, ica_aroma_wf, [('bold_file',
                                            'inputnode.name_source')]),
                (bold_hmc_wf, ica_aroma_wf, [('outputnode.movpar_file',
                                              'inputnode.movpar_file')]),
                (bold_reference_wf, ica_aroma_wf, [('outputnode.skip_vols',
                                                    'inputnode.skip_vols')]),
                (bold_confounds_wf, join, [('outputnode.confounds_file',
                                            'in_file')]),
                (bold_confounds_wf, mrg_conf_metadata,
                 [('outputnode.confounds_metadata', 'in1')]),
                (ica_aroma_wf, join, [('outputnode.aroma_confounds',
                                       'join_file')]),
                (ica_aroma_wf, mrg_conf_metadata,
                 [('outputnode.aroma_metadata', 'in2')]),
                (mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
                (ica_aroma_wf, outputnode,
                 [('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
                  ('outputnode.melodic_mix', 'melodic_mix'),
                  ('outputnode.nonaggr_denoised_file', 'nonaggr_denoised_file')
                  ]),
                (join, outputnode, [('out_file', 'confounds')]),
                (mrg_conf_metadata2, outputnode, [('out_dict',
                                                   'confounds_metadata')]),
            ])

    # SURFACES ##################################################################################
    surface_spaces = [
        space for space in output_spaces.keys() if space.startswith('fs')
    ]
    if freesurfer and surface_spaces:
        LOGGER.log(25, 'Creating BOLD surface-sampling workflow.')
        bold_surf_wf = init_bold_surf_wf(mem_gb=mem_gb['resampled'],
                                         output_spaces=surface_spaces,
                                         medial_surface_nan=medial_surface_nan,
                                         name='bold_surf_wf')
        workflow.connect([
            (inputnode, bold_surf_wf,
             [('t1_preproc', 'inputnode.t1_preproc'),
              ('subjects_dir', 'inputnode.subjects_dir'),
              ('subject_id', 'inputnode.subject_id'),
              ('t1_2_fsnative_forward_transform',
               'inputnode.t1_2_fsnative_forward_transform')]),
            (bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1',
                                               'inputnode.source_file')]),
            (bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
        ])

        if cifti_output:
            from niworkflows.interfaces.utility import KeySelect
            bold_surf_wf.__desc__ += """\
*Grayordinates* files [@hcppipelines], which combine surface-sampled
data and volume-sampled data, were also generated.
"""
            select_std = pe.Node(KeySelect(fields=['bold_std']),
                                 name='select_std',
                                 run_without_submitting=True)
            select_std.inputs.key = 'MNI152NLin2009cAsym'

            gen_cifti = pe.MapNode(GenerateCifti(),
                                   iterfield=["surface_target", "gifti_files"],
                                   name="gen_cifti")
            gen_cifti.inputs.TR = metadata.get("RepetitionTime")
            gen_cifti.inputs.surface_target = list(cifti_spaces)

            workflow.connect([
                (bold_std_trans_wf, select_std,
                 [('outputnode.templates', 'keys'),
                  ('outputnode.bold_std', 'bold_std')]),
                (bold_surf_wf, gen_cifti, [('outputnode.surfaces',
                                            'gifti_files')]),
                (inputnode, gen_cifti, [('subjects_dir', 'subjects_dir')]),
                (select_std, gen_cifti, [('bold_std', 'bold_file')]),
                (gen_cifti, outputnode, [('out_file', 'bold_cifti'),
                                         ('variant', 'cifti_variant'),
                                         ('variant_key', 'cifti_variant_key')
                                         ]),
            ])

    # REPORTING ############################################################
    ds_report_summary = pe.Node(DerivativesDataSink(desc='summary',
                                                    keep_dtype=True),
                                name='ds_report_summary',
                                run_without_submitting=True,
                                mem_gb=DEFAULT_MEMORY_MIN_GB)

    ds_report_validation = pe.Node(DerivativesDataSink(
        base_directory=reportlets_dir, desc='validation', keep_dtype=True),
                                   name='ds_report_validation',
                                   run_without_submitting=True,
                                   mem_gb=DEFAULT_MEMORY_MIN_GB)

    workflow.connect([
        (summary, ds_report_summary, [('out_report', 'in_file')]),
        (bold_reference_wf, ds_report_validation,
         [('outputnode.validation_report', 'in_file')]),
    ])

    # Fill-in datasinks of reportlets seen so far
    for node in workflow.list_node_names():
        if node.split('.')[-1].startswith('ds_report'):
            workflow.get_node(node).inputs.base_directory = reportlets_dir
            workflow.get_node(node).inputs.source_file = ref_file

    return workflow
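
# One detail worth isolating from the workflow above: the short-circuit idiom
# used for `run_stc` evaluates to True, False, or the string 'TooShort', and
# bool('TooShort') is True, hence the explicit `run_stc is True` check before
# building the STC workflow. A standalone sketch with made-up metadata:
metadata = {'SliceTiming': [0.0, 0.4, 0.8]}  # made-up example metadata
ignore = []
series_len = 3  # shorter than the 4-volume minimum

run_stc = ("SliceTiming" in metadata and 'slicetiming' not in ignore
           and (series_len > 4 or "TooShort"))

assert run_stc == "TooShort"  # truthy, but not the boolean True
assert run_stc is not True    # so `run_stc is True` correctly bypasses STC here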
Esempio n. 52
0
with open(ABI_FILEPATH, 'r', encoding='utf-8') as abi_file:
    abi_json = json.loads(abi_file.read())
    contract_address = web3.Web3.toChecksumAddress(
        abi_json["contractAddress"].lower())
    contract_abi = abi_json['abi']

w3 = web3.Web3(
    WebsocketProvider(ROPSTEN_ENDPOINT, websocket_timeout=WEBSOCKET_TIMEOUT))
contract = w3.eth.contract(address=contract_address, abi=contract_abi)

tests = OrderedDict()  # {from_block: message}
tests[6807400] = ('works when toBlock-fromBlock <= 112, '
                  'timeout when toBlock-fromBlock > 112')

for from_block, msg in tests.items():
    print(f'fromBlock={from_block}, {msg}', flush=True)

    print('when toBlock-fromBlock <= 112')
    event_filter = contract.events.EthUpdate.createFilter(fromBlock=from_block,
                                                          toBlock=from_block +
                                                          112)
    all_entries = event_filter.get_all_entries()
    for event in all_entries[:2]:
        print(f'EthUpdate event transactionHash {event.transactionHash.hex()}',
              flush=True)
    print()

    print('when toBlock-fromBlock > 112')
    event_filter = contract.events.EthUpdate.createFilter(fromBlock=from_block,
                                                          toBlock=from_block +
                                                          113)
    # 113 is the smallest span exceeding 112 blocks; per the test message this
    # call is expected to time out
    all_entries = event_filter.get_all_entries()
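
# Given the timeout behaviour demonstrated above, the usual workaround is to
# scan the log range in windows of at most 112 blocks. A minimal sketch reusing
# the contract object from this snippet; the window size comes from the test
# message, everything else is an assumption:
def get_entries_chunked(contract, from_block, to_block, window=112):
    """Collect EthUpdate events over [from_block, to_block] in small windows."""
    entries = []
    start = from_block
    while start <= to_block:
        end = min(start + window, to_block)
        event_filter = contract.events.EthUpdate.createFilter(fromBlock=start,
                                                              toBlock=end)
        entries.extend(event_filter.get_all_entries())
        start = end + 1
    return entries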
Esempio n. 53
0
class View(BaseCanvas):
    """High-level plotting canvas."""
    _default_box_index = (0,)

    def __init__(self, layout=None, shape=None, n_plots=None, origin=None,
                 box_bounds=None, box_pos=None, box_size=None,
                 enable_lasso=False,
                 **kwargs):
        if not kwargs.get('keys', None):
            kwargs['keys'] = None
        super(View, self).__init__(**kwargs)
        self.layout = layout

        if layout == 'grid':
            self._default_box_index = (0, 0)
            self.grid = Grid(shape)
            self.grid.attach(self)
            self.interact = self.grid

        elif layout == 'boxed':
            self.n_plots = (len(box_bounds)
                            if box_bounds is not None else len(box_pos))
            self.boxed = Boxed(box_bounds=box_bounds,
                               box_pos=box_pos,
                               box_size=box_size)
            self.boxed.attach(self)
            self.interact = self.boxed

        elif layout == 'stacked':
            self.n_plots = n_plots
            self.stacked = Stacked(n_plots, margin=.1, origin=origin)
            self.stacked.attach(self)
            self.interact = self.stacked

        else:
            self.interact = None

        self.panzoom = PanZoom(aspect=None,
                               constrain_bounds=[-2, -2, +2, +2])
        self.panzoom.attach(self)

        if enable_lasso:
            self.lasso = Lasso()
            self.lasso.attach(self)
        else:
            self.lasso = None

        self.clear()

    def clear(self):
        """Reset the view."""
        self._items = OrderedDict()
        self.visuals = []
        self.update()

    def _add_item(self, cls, *args, **kwargs):
        """Add a plot item."""
        box_index = kwargs.pop('box_index', self._default_box_index)

        data = cls.validate(*args, **kwargs)
        n = cls.vertex_count(**data)

        if not isinstance(box_index, np.ndarray):
            k = len(self._default_box_index)
            box_index = _get_array(box_index, (n, k))
        data['box_index'] = box_index

        if cls not in self._items:
            self._items[cls] = []
        self._items[cls].append(data)
        return data

    def uplot(self, *args, **kwargs):
        """Add a uniform-color line plot."""
        cls = _make_class(UniformPlotVisual,
                          _default_color=kwargs.pop('color', None),
                          )
        return self._add_item(cls, *args, **kwargs)

    def plot(self, *args, **kwargs):
        """Add a line plot."""
        return self._add_item(PlotVisual, *args, **kwargs)

    def uscatter(self, *args, **kwargs):
        """Add a uniform scatter plot (one marker, size, and color for all points)."""
        cls = _make_class(UniformScatterVisual,
                          _default_marker=kwargs.pop('marker', None),
                          _default_marker_size=kwargs.pop('size', None),
                          _default_color=kwargs.pop('color', None),
                          )
        return self._add_item(cls, *args, **kwargs)

    def scatter(self, *args, **kwargs):
        """Add a scatter plot."""
        cls = _make_class(ScatterVisual,
                          _default_marker=kwargs.pop('marker', None),
                          )
        return self._add_item(cls, *args, **kwargs)

    def hist(self, *args, **kwargs):
        """Add some histograms."""
        return self._add_item(HistogramVisual, *args, **kwargs)

    def text(self, *args, **kwargs):
        """Add text."""
        return self._add_item(TextVisual, *args, **kwargs)

    def lines(self, *args, **kwargs):
        """Add some lines."""
        return self._add_item(LineVisual, *args, **kwargs)

    def __getitem__(self, box_index):
        self._default_box_index = _as_tuple(box_index)
        return self

    def build(self):
        """Build all added items.

        Visuals are created, added, and built. The `set_data()` methods can
        be called afterwards.

        """
        for cls, data_list in self._items.items():
            # Some variables are not concatenated. They are specified
            # in `allow_list`.
            data = _accumulate(data_list, cls.allow_list)
            box_index = data.pop('box_index')
            visual = cls()
            self.add_visual(visual)
            visual.set_data(**data)
            # NOTE: visual.program.__contains__ is implemented in vispy master
            # so we can replace this with `if 'a_box_index' in visual.program`
            # after the next VisPy release.
            if 'a_box_index' in visual.program._code_variables:
                visual.program['a_box_index'] = box_index.astype(np.float32)
        # TODO: refactor this when there is the possibility to update existing
        # visuals without recreating the whole scene.
        if self.lasso:
            self.lasso.create_visual()
        self.update()

    def get_pos_from_mouse(self, pos, box):
        # From window coordinates to NDC (pan & zoom taken into account).
        pos = self.panzoom.get_mouse_pos(pos)
        # From NDC to data coordinates.
        pos = self.interact.imap(pos, box) if self.interact else pos
        return pos

    @contextmanager
    def building(self):
        """Context manager to specify the plots."""
        self.clear()
        yield
        self.build()
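
A hedged usage sketch for the View class above: a 'grid' layout with one row
and two columns, filled inside the building() context manager. The data and
layout parameters are illustrative, and the plotting stack the example relies
on (Grid, PanZoom, the visual classes) is assumed to be importable.

import numpy as np

view = View(layout='grid', shape=(1, 2))
with view.building():
    x = np.linspace(-1.0, 1.0, 100)
    # __getitem__ selects the target box for the following plot calls
    view[0, 0].plot(x, np.sin(3 * x))
    view[0, 1].scatter(np.random.randn(50), np.random.randn(50))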
Esempio n. 54
0
    for index in sortedDf.index:
        text = sortedDf.iloc[index]["text"]
        text = textrip(text)
        if text == "":
            pass
        elif len(text) > 100:
            pass
        elif text.lower() in sortedSumDictKeyDict:
            sortedSumDict[sortedSumDictKeyDict[
                text.lower()]] += sortedDf.iloc[index]["probability"]
        else:
            sortedSumDictKeyDict[text.lower()] = text
            sortedSumDict[sortedSumDictKeyDict[
                text.lower()]] = sortedDf.iloc[index]["probability"]
    finalSorted = sorted(
        sortedSumDict.items(), key=operator.itemgetter(1), reverse=True
    )  # for python 2, use sortedSumDict.iteritems() instead of sortedSumDict.items()

    entry = {
        u"type": "factoid",
        #u"body":qas,
        u"id": qid,  # must be 24 char
        u"ideal_answer": ["Dummy"],
        u"exact_answer": [[ans[0]] for ans in finalSorted[:5]],
        # five answers should be enough
    }
    entryList.append(entry)

    entryWithProb = {
        u"type": "factoid",
        u"id": qid,  # must be 24 char
Esempio n. 55
0
class DQN_FCNet(nn.Module):
    """
    Simple Fully-Connected Network (FCN) for Deep Q Learning Network (DQN)
    """
    def __init__(self, run_id, num_layers, hidden_units_per_layer, state_size,
                 num_actions, loss_fn, activation_fn, learning_rate, beta1,
                 beta2, weight_decay, device):
        # General housekeeping
        super().__init__()
        self.run_id = run_id
        self.epoch = 0
        self.device = device

        # RL parameters
        self.state_size = state_size
        self.num_actions = num_actions

        # Layers
        self.num_layers = num_layers
        self.hidden_units_per_layer = hidden_units_per_layer
        self.architecture = OrderedDict()
        self.assemble_architecture()
        self.assemble_modules()

        # Training parameters
        self.act_fn = activation_fn
        self.loss_fn = loss_fn
        self.opt = optim.Adam(self.parameters(),
                              lr=learning_rate,
                              betas=(beta1, beta2),
                              weight_decay=weight_decay)

        # Initialize weights
        self.weights_init()

        # Track history of network in separate object
        self.history = NetHistory(_modules=self._modules)

    def forward(self, states):
        for i, (name, layer) in enumerate(self.architecture.items()):
            if i == 0:
                x = self.act_fn(layer(states))
            elif i < (len(self.architecture) - 2):
                x = self.act_fn(layer(x))
            else:
                break

        return self.architecture['fc_out_trick_head'](
            x), self.architecture['fc_out_meld_head'](x)

    def assemble_architecture(self):
        """
        Assembles architecture of linear layers based on entries in init into an OrderedDict titled self.architecture
        """
        self.architecture['fc_1'] = nn.Linear(
            in_features=self.state_size,
            out_features=self.hidden_units_per_layer,
            bias=True)

        for i in range(1, self.num_layers - 1):
            self.architecture['fc_' + str(i + 1)] = nn.Linear(
                in_features=self.hidden_units_per_layer,
                out_features=self.hidden_units_per_layer,
                bias=True)

        self.architecture['fc_out_trick_head'] = nn.Linear(
            in_features=self.hidden_units_per_layer,
            out_features=self.num_actions,
            bias=True)
        self.architecture['fc_out_meld_head'] = nn.Linear(
            in_features=self.hidden_units_per_layer,
            out_features=self.num_actions,
            bias=True)

    def assemble_modules(self):
        for name, layer in self.architecture.items():
            self.add_module(name=name, module=layer)

    def weights_init(self):
        """
        Custom weights initialization for subnets
        Should only be run when first creating net. Will reset effects of training if run after training.
        """
        for layer_name in self._modules:
            m = self._modules[layer_name]
            classname = m.__class__.__name__

            if classname.find('Linear') != -1:
                nn.init.normal_(m.weight.data, 0.0, 0.02)
                nn.init.constant_(m.bias.data, 0)
            elif classname.find('BatchNorm') != -1:
                nn.init.normal_(m.weight.data, 1.0, 0.02)
                nn.init.constant_(m.bias.data, 0)

    def next_epoch(self):
        """Resets internal storage of detailed training history to stream next epoch"""
        self.epoch += 1
        self.history.next_epoch()

    def store_history(self):
        """Stores and resets all training history"""
        self.history.save(title=self.run_id)
        self.history = NetHistory(_modules=self._modules)
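
The two-head output in forward() above (one linear head for trick actions,
one for meld actions, fed by shared hidden layers) can be illustrated
standalone. A minimal sketch with assumed sizes, independent of NetHistory
and the rest of the training machinery:

import torch
import torch.nn as nn

state_size, hidden, num_actions = 8, 32, 4
# shared body: two hidden layers with ReLU, mirroring the fc_1..fc_n loop
body = nn.Sequential(nn.Linear(state_size, hidden), nn.ReLU(),
                     nn.Linear(hidden, hidden), nn.ReLU())
trick_head = nn.Linear(hidden, num_actions)
meld_head = nn.Linear(hidden, num_actions)

states = torch.randn(2, state_size)  # batch of two states
x = body(states)
print(trick_head(x).shape, meld_head(x).shape)  # torch.Size([2, 4]) twice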
Esempio n. 56
0
    def items(self, *args, **kwargs):
        return UnsortableList(OrderedDict.items(self, *args, **kwargs))
Esempio n. 57
0
class ComponentPage(object):
    """
    Describes a component with one or more gallery groups, each with one or
    more galleries, and a list of "quick links" to the gallery groups

    Attributes
    ----------
    config : ``MpasAnalysisConfigParser``
        Config options

    controlConfig : ``MpasAnalysisConfigParser``
        Config options for a control run

    name : str
        The name of the component as it should appear in the list of
        components, at the top of the component webpage and in the page
        title (e.g "Sea Ice" as opposed to "sea_ice" or "seaIce")

    subdirectory : str
        The subdirectory for the component's webpage

    templates : OrderedDict of str
        The contents of templates used to construct the page

    groups : tree of OrderedDict
        A tree of information describing the gallery groups in the page,
        the galleries in each group and the images in each gallery.
    """
    # Authors
    # -------
    # Xylar Asay-Davis

    def __init__(self, config, name, subdirectory, controlConfig=None):
        """
        Create a ComponentPage object, reading in the templates

        Parameters
        ----------
        config : ``MpasAnalysisConfigParser``
            Config options

        name : str
            The name of the component as it should appear in the list of
            components, at the top of the component webpage and in the page
            title (e.g "Sea Ice" as opposed to "sea_ice" or "seaIce")

        subdirectory : str
            The subdirectory for the component's webpage

        controlConfig : ``MpasAnalysisConfigParser``, optional
            Config options for a control run
        """
        # Authors
        # -------
        # Xylar Asay-Davis

        self.config = config
        self.controlConfig = controlConfig
        self.name = name
        self.subdirectory = subdirectory

        htmlBaseDirectory = build_config_full_path(self.config, 'output',
                                                   'htmlSubdirectory')
        self.directory = '{}/{}'.format(htmlBaseDirectory, self.subdirectory)

        self.templates = OrderedDict()

        for templateName in ['page', 'quicklink', 'group', 'gallery', 'image',
                             'subtitle']:

            # get template text
            fileName = pkg_resources.resource_filename(
                __name__,
                "templates/component_{}.html".format(templateName))

            with open(fileName, 'r') as templateFile:
                self.templates[templateName] = templateFile.read()

        # start with no groups
        self.groups = OrderedDict()

    @staticmethod
    def add_image(xmlFileName, config, components, controlConfig=None):
        """
        Add the image to the appropriate component.  Note: this is a static
        method because we do not know which component to add the image to
        until we have read the XML file.

        Parameters
        ----------
        xmlFileName : str
            The full path to the XML file describing the image to be added

        config : ``MpasAnalysisConfigParser`` object
            contains config options

        components : OrderedDict of dict
            A dictionary of components to which the image will be added.  If
            the appropriate component is not yet in the dictionary, it will
            be added. ``components`` should be viewed as an input and output
            parameter, since it is modified by this function.

        controlConfig : ``MpasAnalysisConfigParser``, optional
            Config options for a control run
        """
        # Authors
        # -------
        # Xylar Asay-Davis

        xmlRoot = etree.parse(xmlFileName).getroot()

        componentName = ComponentPage._get_required_xml_text(xmlRoot,
                                                             'componentName',
                                                             xmlFileName)
        componentSubdirectory = ComponentPage._get_required_xml_text(
            xmlRoot, 'componentSubdirectory', xmlFileName)

        imageFileName = ComponentPage._get_required_xml_text(xmlRoot,
                                                             'imageFileName',
                                                             xmlFileName)
        groupName = ComponentPage._get_required_xml_text(xmlRoot,
                                                         'galleryGroup',
                                                         xmlFileName)
        groupLink = ComponentPage._get_required_xml_text(xmlRoot,
                                                         'groupLink',
                                                         xmlFileName)

        if componentName not in components:
            components[componentName] = ComponentPage(config, componentName,
                                                      componentSubdirectory,
                                                      controlConfig)

        component = components[componentName]

        if groupName not in component.groups:
            component.groups[groupName] = {'galleries': OrderedDict(),
                                           'link': groupLink}
            group = component.groups[groupName]
            node = xmlRoot.find('groupSubtitle')
            if node is not None:
                group['subtitle'] = node.text

        node = xmlRoot.find('gallery')
        if node is None:
            galleryName = 'None'
        else:
            galleryName = node.text

        galleries = component.groups[groupName]['galleries']
        if galleryName not in galleries:
            galleries[galleryName] = {'images': OrderedDict()}

        images = galleries[galleryName]['images']
        if imageFileName in images:
            raise ValueError('image {} already added to component page '
                             '{}'.format(imageFileName, componentName))

        images[imageFileName] = OrderedDict()
        image = images[imageFileName]
        for tag in ['thumbnailDescription', 'imageDescription',
                    'imageCaption', 'imageSize', 'thumbnailWidth',
                    'thumbnailHeight', 'orientation']:
            node = xmlRoot.find(tag)
            if node is None or node.text is None:
                image[tag] = ''
            else:
                image[tag] = node.text
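
        # A hedged example of the XML descriptor that add_image() parses
        # (tag names are taken from the parsing code above; the root tag
        # name and all values are illustrative, since the real files are
        # not shown in this excerpt):
        #
        #   <data>
        #     <componentName>Sea Ice</componentName>
        #     <componentSubdirectory>sea_ice</componentSubdirectory>
        #     <imageFileName>conc_nh.png</imageFileName>
        #     <galleryGroup>Concentration</galleryGroup>
        #     <groupLink>conc</groupLink>
        #     <groupSubtitle>Sea-ice concentration</groupSubtitle>
        #     <gallery>NH</gallery>
        #     <thumbnailDescription>NH concentration</thumbnailDescription>
        #   </data>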

    def generate(self):
        """
        Generate the webpage from templates and groups, and write it out to
        the HTML directory.
        """
        # Authors
        # -------
        # Xylar Asay-Davis

        runName = self.config.get('runs', 'mainRunName')

        if self.controlConfig is None:
            controlRunText = ''
        else:
            controlRunText = '<br> Control: {}'.format(
                self.controlConfig.get('runs', 'mainRunName'))

        quickLinkText = ''
        galleriesText = ''
        for groupName, groupDict in self.groups.items():
            quickLinkText = quickLinkText + \
                self._generate_quick_link_text(groupName, groupDict)

            galleriesText = galleriesText + \
                self._generate_group_text(groupName, groupDict)

        replacements = {'@runName': runName,
                        '@controlRunText': controlRunText,
                        '@componentName': self.name,
                        '@quickLinks': quickLinkText,
                        '@galleries': galleriesText}

        pageText = _replace_tempate_text(self.templates['page'], replacements)

        outFileName = '{}/index.html'.format(self.directory)

        with open(outFileName, mode='w') as componentFile:
            componentFile.write(
                pageText.encode('ascii',
                                'xmlcharrefreplace').decode('ascii'))

    def get_first_image(self):
        """
        Find the first image in the first gallery in this component (typically
        to use it as a thumbnail)

        Returns
        -------
        firstImageFilename : str
            The name (without path) of the first image in the first gallery
        """
        # Authors
        # -------
        # Xylar Asay-Davis

        # get the first image name
        firstGroup = next(iter(self.groups.values()))
        firstGallery = next(iter(firstGroup['galleries'].values()))
        firstImageFileName = next(iter(firstGallery['images']))
        return firstImageFileName

    @staticmethod
    def _get_required_xml_text(root, tag, fileName):
        """read the value associated with a required tag from the XML root"""
        node = root.find(tag)
        if node is None:
            raise IOError('image descriptor file {} is missing a required '
                          '{} entry'.format(fileName, tag))
        return node.text

    def _generate_image_text(self, imageFileName, imageDict):
        """fill in the template for a given image with the desired content"""
        replacements = {'@imageFileName': imageFileName}
        for tag in ['imageSize', 'imageDescription', 'imageCaption',
                    'thumbnailDescription', 'orientation', 'thumbnailWidth',
                    'thumbnailHeight']:
            replacements['@{}'.format(tag)] = imageDict[tag]

        imageText = _replace_tempate_text(self.templates['image'],
                                          replacements)
        return imageText

    def _generate_gallery_text(self, galleryName, images):
        """fill in the template for a given gallery with the desired content"""
        imagesText = ''
        for imageFileName, imageDict in images.items():
            imagesText = imagesText + \
                self._generate_image_text(imageFileName, imageDict)

        if galleryName == 'None':
            galleryTitle = ''
        else:
            galleryTitle = self._generate_subtitle_text(galleryName)

        replacements = {'@galleryTitle': galleryTitle,
                        '@galleryImages': imagesText}
        galleryText = _replace_tempate_text(self.templates['gallery'],
                                            replacements)
        return galleryText

    def _generate_subtitle_text(self, subtitle):
        """
        fill in the template for a given gallery subtitle with the desired
        content
        """
        replacements = {'@subtitle': subtitle}

        subtitleText = _replace_tempate_text(self.templates['subtitle'],
                                             replacements)
        return subtitleText

    def _generate_group_text(self, groupName, groupDict):
        """
        fill in the template for a given gallery group with the desired
        content
        """
        galleriesText = ''
        for galleryName, galleryDict in groupDict['galleries'].items():
            galleriesText = galleriesText + \
                self._generate_gallery_text(galleryName, galleryDict['images'])

        replacements = {'@analysisGroupName': groupName,
                        '@analysisGroupLink': groupDict['link'],
                        '@groupGalleries': galleriesText}

        if 'subtitle' in groupDict:
            subtitleText = self._generate_subtitle_text(groupDict['subtitle'])
        else:
            subtitleText = ''

        replacements['@groupSubtitle'] = subtitleText

        groupText = _replace_tempate_text(self.templates['group'],
                                          replacements)
        return groupText

    def _generate_quick_link_text(self, groupName, groupDict):
        """
        fill in the template for a given quick link with the desired
        content
        """

        firstGallery = next(iter(groupDict['galleries'].values()))
        firstImageFileName = next(iter(firstGallery['images']))

        replacements = {'@analysisGroupName': groupName,
                        '@analysisGroupLink': groupDict['link'],
                        '@imageFileName': firstImageFileName}

        quickLinkText = _replace_tempate_text(self.templates['quicklink'],
                                              replacements)

        return quickLinkText
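
The _replace_tempate_text helper (the source's own spelling) is called
throughout this class but is not shown in the excerpt. A minimal sketch
consistent with how it is used here, where every key in replacements already
carries its '@' prefix; this is an assumption, not the repo's actual
implementation:

def _replace_tempate_text(template, replacements):
    """Return template with each '@placeholder' key replaced by its value."""
    for placeholder, value in replacements.items():
        template = template.replace(placeholder, value)
    return template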
Esempio n. 58
0
class MainWindow(QtGui.QMainWindow):

    deletePressed = pyqtSignal(bool)
    quadrotorStateChanged = pyqtSignal(object)
    motorSpeedChanged = pyqtSignal(object)
    quadrotorStateReseted = pyqtSignal(bool)
    SCALE_FACTOR = 100

    def __init__(self):
        """
        Frame of GUI
        =========================
        |_MenuBar_______________|
        |    |                  |
        |plot|     graph1       |
        |list|------------------|
        |----|                  |
        |data|     graph2       |
        |list|                  |
        =========================
        """
        super(MainWindow, self).__init__()

        self.log_data = None
        self.log_file_name = None
        self.data_dict = None

        self.main_widget = QtGui.QWidget(self)
        self.mainlayout = QtGui.QHBoxLayout()
        self.main_widget.setLayout(self.mainlayout)
        # ToolBar
        self.toolbar = self.addToolBar('FileManager')
        loadfile_action = QtGui.QAction(
            QtGui.QIcon(get_source_name('icons/open.gif')), 'Open log file',
            self)
        loadfile_action.setShortcut('Ctrl+O')
        loadfile_action.triggered.connect(self.callback_open_log_file)
        self.toolbar.addAction(loadfile_action)
        self.show_quadrotor_3d = QtGui.QAction(
            QtGui.QIcon(get_source_name('icons/quadrotor.gif')),
            'show 3d viewer', self)
        self.show_quadrotor_3d.setShortcut('Ctrl+Shift+Q')
        self.show_quadrotor_3d.triggered.connect(self.callback_show_quadrotor)
        self.toolbar.addAction(self.show_quadrotor_3d)

        # Left plot item widget
        self.plot_data_frame = QtGui.QFrame(self)
        self.plot_data_frame.setFrameShape(QtGui.QFrame.StyledPanel)
        self.plot_data_layout = QtGui.QVBoxLayout(self.plot_data_frame)

        ## Data Plotting
        self.data_plotting = []
        ### There exists a default graph
        self.line_ID = 0
        lbl_ploting_data = QtGui.QLabel('Data Plotting')
        self.plotting_data_tableView = TableView(self.plot_data_frame)
        self.plotting_data_tableView.setEditTriggers(
            QtGui.QAbstractItemView.DoubleClicked
            | QtGui.QAbstractItemView.SelectedClicked)
        self.plotting_data_tableView.setSortingEnabled(False)
        self.plotting_data_tableView.horizontalHeader().setStretchLastSection(
            True)
        self.plotting_data_tableView.resizeColumnsToContents()
        self.plotting_data_tableView.setColumnCount(3)
        self.plotting_data_tableView.setHorizontalHeaderLabels(
            ['Label', 'Color', 'Visible'])
        self.id = 0
        lbl_ploting_data.setBuddy(self.plotting_data_tableView)
        self.plot_data_layout.addWidget(lbl_ploting_data)
        self.plot_data_layout.addWidget(self.plotting_data_tableView)

        edit_layout = QtGui.QHBoxLayout()
        self.delete_btn = QtGui.QPushButton('Delete')
        self.delete_btn.clicked.connect(self.callback_del_plotting_data)
        self.clear_btn = QtGui.QPushButton('Clear')
        self.clear_btn.clicked.connect(self.callback_clear_plotting_data)
        edit_layout.addWidget(self.delete_btn)
        edit_layout.addWidget(self.clear_btn)
        self.plot_data_layout.addLayout(edit_layout)

        ## Data in the log file
        self.list_data_frame = QtGui.QFrame(self)
        self.list_data_frame.setMinimumWidth(300)
        self.list_data_frame.setMaximumWidth(600)
        self.list_data_frame.resize(200, 500)
        self.list_data_frame.setFrameShape(QtGui.QFrame.StyledPanel)
        self.list_data_layout = QtGui.QVBoxLayout(self.list_data_frame)
        ### line to search item
        self.choose_item_lineEdit = QtGui.QLineEdit(self.list_data_frame)
        self.choose_item_lineEdit.setPlaceholderText('filter by data name')
        self.choose_item_lineEdit.textChanged.connect(self.callback_filter)
        ### tree to show data to plot
        self.item_list_treeWidget = QtGui.QTreeWidget(self.list_data_frame)
        self.item_list_treeWidget.clear()
        self.item_list_treeWidget.setColumnCount(3)
        self.item_list_treeWidget.setHeaderLabels(
            ['Flight Data', 'Type', 'Length'])
        self.item_list_treeWidget.itemDoubleClicked.connect(
            self.callback_tree_double_clicked)
        self.item_list_treeWidget.resizeColumnToContents(2)
        self.list_data_layout.addWidget(self.choose_item_lineEdit)
        self.list_data_layout.addWidget(self.item_list_treeWidget)

        # Right plot item
        self.graph_frame = QtGui.QFrame(self)
        self.graph_frame.setFrameShape(QtGui.QFrame.StyledPanel)
        self.animation_layout = QtGui.QVBoxLayout(self.graph_frame)

        ## quadrotor 3d
        self.quadrotor_win = QuadrotorWin(self)
        self.quadrotor_win.closed.connect(self.quadrotor_win_closed_event)
        self.quadrotor_win.hide()
        self.first_load = True
        self.quadrotor_widget_isshow = False

        ## default plot
        self.default_graph_widget = pg.GraphicsLayoutWidget()
        ### a hideable ROI region
        self.detail_graph = self.default_graph_widget.addPlot(row=0, col=0)
        self.detail_graph.setAutoVisible(True)
        self.detail_graph.hide()
        ### main graph to plot curves
        self.main_graph = self.default_graph_widget.addPlot(row=1, col=0)
        self.main_graph.keyPressEvent = self.keyPressed
        self.deletePressed.connect(self.callback_del_plotting_data)
        self.main_graph.scene().sigMouseClicked.connect(
            self.callback_graph_clicked)
        self.main_graph.addLegend()
        ROI_action = QtGui.QAction('show/hide ROI graph', self.main_graph)
        ROI_action.triggered.connect(self.callback_ROI_triggered)
        self.main_graph.scene().contextMenu.append(ROI_action)
        self.ROI_region = pg.LinearRegionItem()
        self.ROI_region.setZValue(10)
        self.ROI_region.hide()
        self.ROI_showed = False

        def update():
            self.ROI_region.setZValue(10)
            minX, maxX = self.ROI_region.getRegion()
            self.detail_graph.setXRange(minX, maxX, padding=0)

        self.ROI_region.sigRegionChanged.connect(update)

        def updateRegion(window, viewRange):
            rgn = viewRange[0]
            self.ROI_region.setRegion(rgn)

        self.detail_graph.sigRangeChanged.connect(updateRegion)

        self.main_graph.addItem(self.ROI_region, ignoreBounds=True)

        ## vertical line
        self.vLine = pg.InfiniteLine(angle=90, movable=False)
        self.vLine.hide()
        self.main_graph.addItem(self.vLine, ignoreBounds=True)
        self.vLine_detail = pg.InfiniteLine(angle=90, movable=False)
        self.vLine_detail.hide()
        self.detail_graph.addItem(self.vLine_detail, ignoreBounds=True)

        ## flag indicating whether a curve was clicked since the last click event
        self.curve_clicked = False
        self.curve_highlighted = []
        self.animation_layout.addWidget(self.default_graph_widget)
        ## time line
        self.time_line_frame = QtGui.QFrame(self)
        self.time_line_frame.setMaximumHeight(45)
        self.time_line_frame.setMinimumHeight(45)
        self.time_line_layout = QtGui.QHBoxLayout(self.time_line_frame)
        time_line_lbl = QtGui.QLabel('x')
        time_line_lbl.setToolTip('set play speed')
        speed_combo = QtGui.QComboBox()
        speed_combo.addItems(['1', '2', '4', '8'])
        self.speed_factor = 500
        self.time_line_layout.addWidget(time_line_lbl)
        self.time_line_layout.addWidget(speed_combo)
        speed_combo.currentIndexChanged.connect(
            self.callback_speed_combo_indexChanged)
        self.current_factor = 500 / 1
        self.time_line_button_play = QtGui.QPushButton(self.time_line_frame)
        self.time_line_button_play.setEnabled(False)
        self.time_line_button_play.setIcon(
            QtGui.QIcon(get_source_name("icons/play.jpg")))
        self.time_line_play = False
        self.time_line_button_play.clicked.connect(self.callback_play_clicked)
        self.time_line_button_stop = QtGui.QPushButton(self.time_line_frame)
        self.time_line_button_stop.setEnabled(False)
        self.time_line_button_stop.setIcon(
            QtGui.QIcon(get_source_name("icons/stop.jpg")))
        self.time_line_button_stop.clicked.connect(self.callback_stop_clicked)
        self.time_line_layout.addWidget(self.time_line_button_play)
        self.time_line_layout.addWidget(self.time_line_button_stop)
        self.time_slider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.time_slider.setRange(0, 100)
        #### index for time_stamp
        self.time_line_layout.addWidget(self.time_slider)
        ## timer
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.animation_update)
        self.current_time = 0
        self.dt = 50

        self.splitter1 = QtGui.QSplitter(QtCore.Qt.Vertical)
        self.splitter1.addWidget(self.plot_data_frame)
        self.splitter1.addWidget(self.list_data_frame)

        self.splitter2 = QtGui.QSplitter(QtCore.Qt.Vertical)
        self.splitter2.addWidget(self.graph_frame)
        self.splitter2.addWidget(self.time_line_frame)

        self.splitter3 = QtGui.QSplitter(QtCore.Qt.Horizontal)
        self.splitter3.addWidget(self.splitter1)
        self.splitter3.addWidget(self.splitter2)

        self.mainlayout.addWidget(self.splitter3)

        self.setCentralWidget(self.main_widget)
        self.setGeometry(200, 200, 800, 800)
        self.setWindowTitle("pyFlightAnalysis")

        self.quadrotorStateChanged.connect(
            self.quadrotor_win.callback_update_quadrotor_pos)
        self.quadrotorStateReseted.connect(
            self.quadrotor_win.callback_quadrotor_state_reset)

    def keyPressed(self, event):
        """Key Pressed function for graph"""
        if event.key() == QtCore.Qt.Key_Delete:
            self.deletePressed.emit(True)
        elif event.key() == QtCore.Qt.Key_R:
            # the ROI graph can also be toggled by pressing 'r'
            self.callback_ROI_triggered()

    @staticmethod
    def getIndex(data, item):
        for ind, d in enumerate(data):
            if d > item:
                return ind

        return len(data) - 1

    @staticmethod
    # ref:https://github.com/PX4/Firmware/blob/master/src/lib/mathlib/math/Quaternion.hpp
    def quat_to_euler(q0, q1, q2, q3):
        # 3-2-1 rotation sequence
        angles = []
        for i in range(len(q0)):
            yaw = 180 / np.pi * np.arctan2(
                2.0 * (q0[i] * q1[i] + q2[i] * q3[i]), 1.0 - 2.0 *
                (q1[i]**2 + q2[i]**2))
            roll = 180 / np.pi * np.arcsin(2.0 *
                                           (q0[i] * q2[i] - q3[i] * q1[i]))
            pitch = 180 / np.pi * np.arctan2(
                2.0 * (q0[i] * q3[i] + q1[i] * q2[i]), 1.0 - 2.0 *
                (q2[i]**2 + q3[i]**2))
            angles.append([yaw, roll, pitch])
        return angles

    def callback_open_log_file(self):
        from os.path import expanduser
        home_path = expanduser('~')
        filename = QtGui.QFileDialog.getOpenFileName(self, 'Open Log File',
                                                     home_path,
                                                     'Log Files (*.ulg)')

        if filename:
            try:
                self.log_file_name = filename
                self.load_data()
                self.load_data_tree()
                self.time_line_button_play.setEnabled(True)
            except Exception as ex:
                print(ex)

    def callback_play_clicked(self):
        """Time line play"""
        self.time_line_play = not self.time_line_play
        if self.log_file_name is not None:
            if self.time_line_play:
                self.time_line_button_play.setIcon(
                    QtGui.QIcon(get_source_name("icons/pause.jpg")))
                self.time_line_button_stop.setEnabled(True)
                if self.ROI_showed:
                    region = self.ROI_region.getRegion()
                    self.vLine.setPos(region[0])
                    self.vLine_detail.setPos(region[0])
                else:
                    self.vLine.setPos(self.time_range[0])
                    self.vLine_detail.setPos(self.time_range[0])
                self.vLine.show()
                self.vLine_detail.show()
                # start timer
                self.timer.start(self.dt)
            else:
                self.time_line_button_play.setIcon(
                    QtGui.QIcon(get_source_name("icons/play.jpg")))
                self.time_line_button_stop.setEnabled(False)
                self.timer.stop()

    def callback_stop_clicked(self):
        self.time_line_play = False
        self.timer.stop()
        self.time_line_button_play.setIcon(
            QtGui.QIcon(get_source_name("icons/play.jpg")))
        self.time_line_button_stop.setEnabled(False)
        self.time_slider.setValue(0)
        self.time_index = 0
        self.vLine.hide()
        self.vLine_detail.hide()
        self.quadrotorStateReseted.emit(True)

    def animation_update(self):
        """update the quadrotor state"""
        dV = 100.0 / (self.time_range[1] - self.time_range[0])

        if self.ROI_showed:
            start, end = self.ROI_region.getRegion()
            t = self.current_time + start
            # emit data
            indexes = list(map(self.getIndex, [
                self.time_stamp_position, self.time_stamp_attitude,
                self.time_stamp_output
            ], [t, t, t]))  # list() so the result is indexable on Python 3
            state_data = [
                self.position_history[indexes[0]],
                self.attitude_history[indexes[1]],
                self.output_history[indexes[2]]
            ]
            self.quadrotorStateChanged.emit(state_data)
            # update slider
            self.time_slider.setValue(
                int(dV * (self.current_time + start - self.time_range[0])))
            # update vLine pos
            self.vLine.setPos(t)
            self.vLine_detail.setPos(t)
            if self.current_time > (end - start):
                self.current_time = 0
                self.quadrotorStateReseted.emit(True)
        else:
            t = self.current_time + self.time_range[0]
            self.time_slider.setValue(int(dV * self.current_time))
            # update quadrotor position and attitude and motor speed
            indexes = list(map(self.getIndex, [
                self.time_stamp_position, self.time_stamp_attitude,
                self.time_stamp_output
            ], [t, t, t]))  # list() so the result is indexable on Python 3
            state_data = [
                self.position_history[indexes[0]],
                self.attitude_history[indexes[1]],
                self.output_history[indexes[2]]
            ]
            self.quadrotorStateChanged.emit(state_data)
            # update vLine pos
            self.vLine.setPos(t)
            self.vLine_detail.setPos(t)
            # if arrive end just replay
            if self.current_time > (self.time_range[1] - self.time_range[0]):
                self.current_time = 0
                self.quadrotorStateReseted.emit(True)

        self.current_time += self.dt / self.current_factor

    def callback_show_quadrotor(self):
        if self.quadrotor_widget_isshow:
            self.show_quadrotor_3d.setIcon(
                QtGui.QIcon(get_source_name('icons/quadrotor.gif')))
            self.quadrotor_widget_isshow = not self.quadrotor_widget_isshow
            self.quadrotor_win.hide()
            self.update()
        else:
            self.quadrotor_widget_isshow = not self.quadrotor_widget_isshow
            self.show_quadrotor_3d.setIcon(
                QtGui.QIcon(get_source_name('icons/quadrotor_pressed.gif')))
            splash = ThreadQDialog(self.quadrotor_win.quadrotor_widget,
                                   self.quadrotor_win)
            splash.run()
            self.quadrotor_win.show()
            self.update()

    def callback_speed_combo_indexChanged(self, index):
        self.current_factor = self.speed_factor / 2**index

    def callback_filter(self, filtertext):
        """Accept filter and update the tree widget"""
        filtertext = str(filtertext)
        if self.data_dict is not None:
            if filtertext == '':
                self.load_data_tree()
            else:
                self.item_list_treeWidget.clear()
                for key, values_name in self.data_dict.items():
                    values_satisfied = []
                    for value in values_name:
                        if filtertext in value[0]:
                            values_satisfied.append(value)
                    if values_satisfied:
                        param_name = QtGui.QTreeWidgetItem(
                            self.item_list_treeWidget, [key])
                        self.item_list_treeWidget.expandItem(param_name)
                        for data_name in values_satisfied:
                            self.item_list_treeWidget.expandItem(
                                QtGui.QTreeWidgetItem(
                                    param_name,
                                    [data_name[0], data_name[1], data_name[2]
                                     ]))

    def callback_graph_clicked(self, event):
        """ set the curve highlighted to be normal """
        if self.curve_clicked:
            if event.modifiers() == QtCore.Qt.ControlModifier:
                pass
            else:
                for curve in self.curve_highlighted[:-1]:
                    curve.setShadowPen(
                        pg.mkPen((200, 200, 200), width=1, cosmetic=True))
                self.curve_highlighted = self.curve_highlighted[-1:]

        if len(self.curve_highlighted) > 0 and not self.curve_clicked:
            for curve in self.curve_highlighted:
                curve.setShadowPen(
                    pg.mkPen((120, 120, 120), width=1, cosmetic=True))
                self.curve_highlighted = []
                self.plotting_data_tableView.setCurrentCell(0, 0)

        self.curve_clicked = False

    def callback_tree_double_clicked(self, item, col):
        """Add clicked item to Data plotting area"""
        def expand_name(item):
            if item.parent() is None:
                return str(item.text(0))
            else:
                return expand_name(item.parent()) + '->' + str(item.text(0))

        # When a top-level label is clicked, do nothing
        if item.parent() is None:
            return
        item_label = expand_name(item)
        row = len(self.data_plotting)
        self.plotting_data_tableView.insertRow(row)

        # Label
        self.plotting_data_tableView.setCellWidget(row, 0,
                                                   QtGui.QLabel(item_label))

        # Curve Color
        ## rgb + a
        color = [random.randint(0, 255) for _ in range(3)]
        btn = ColorPushButton(self.id, self.plotting_data_tableView, color)
        btn.sigColorChanged.connect(self.callback_color_changed)
        self.plotting_data_tableView.setCellWidget(row, 1, btn)
        # Curve Visible
        chk = Checkbox(self.id, '')
        chk.setChecked(True)
        chk.sigStateChanged.connect(self.callback_visible_changed)
        self.plotting_data_tableView.setCellWidget(row, 2, chk)
        data_index = list(self.data_dict.keys()).index(
            item_label.split('->')[0])  # list() for Python 3 dict views
        data_name = item_label.split('->')[-1]

        ## convert timestamp from µs to s
        t = self.log_data[data_index].data['timestamp'] / 10**6
        data = self.log_data[data_index].data[data_name]
        curve = self.main_graph.plot(t,
                                     data,
                                     pen=color,
                                     clickable=True,
                                     name=item_label)
        curve.sigClicked.connect(self.callback_curve_clicked)
        curve.curve.setClickable(True)
        # whether show the curve
        showed = True
        self.data_plotting.append(
            [item_label, color, curve, showed, (t, data), self.id])
        # increase the id
        self.id += 1
        self.update_ROI_graph()

    def callback_curve_clicked(self, curve):
        """"""
        self.curve_clicked = True
        curves = [data[2] for data in self.data_plotting]
        ind = curves.index(curve)
        curve.setShadowPen(pg.mkPen((70, 70, 70), width=5, cosmetic=True))
        self.curve_highlighted.append(curve)
        self.plotting_data_tableView.setCurrentCell(ind, 0)

    def callback_del_plotting_data(self):
        """"""
        indexes = self.plotting_data_tableView.selectedIndexes()
        rows_del = set([ind.row() for ind in indexes])
        rows_all = set(range(len(self.data_plotting)))
        rows_reserved = list(rows_all - rows_del)
        data_plotting = []
        for row in rows_reserved:
            data_plotting.append(self.data_plotting[row])
        self.data_plotting = data_plotting
        self.update_graph()

    def callback_visible_changed(self, chk):
        """"""
        state = True if chk.checkState() == QtCore.Qt.Checked else False
        ids = [item[5] for item in self.data_plotting]
        self.data_plotting[ids.index(chk.id)][3] = state
        self.update_graph()

    def callback_color_changed(self, btn):
        color = [c * 255 for c in btn.color('float')[:-1]]
        ids = [item[5] for item in self.data_plotting]
        self.data_plotting[ids.index(btn.id)][1] = color
        self.update_graph()

    def update_graph(self):
        self.plotting_data_tableView.setRowCount(0)
        for ind, item in enumerate(self.data_plotting):
            self.plotting_data_tableView.insertRow(ind)
            self.plotting_data_tableView.setCellWidget(ind, 0,
                                                       QtGui.QLabel(item[0]))
            btn = ColorPushButton(self.id, self.plotting_data_tableView,
                                  item[1])
            btn.sigColorChanged.connect(self.callback_color_changed)
            self.plotting_data_tableView.setCellWidget(ind, 1, btn)
            chkbox = Checkbox(self.id, '')
            chkbox.setChecked(self.data_plotting[ind][3])
            chkbox.sigStateChanged.connect(self.callback_visible_changed)
            self.plotting_data_tableView.setCellWidget(ind, 2, chkbox)
            self.data_plotting[ind][5] = self.id
            self.id += 1
        # remove curves in graph
        items_to_be_removed = []
        for item in self.main_graph.items:
            if isinstance(item, pg.PlotDataItem):
                items_to_be_removed.append(item)
        for item in items_to_be_removed:
            self.main_graph.removeItem(item)

        self.main_graph.legend.scene().removeItem(self.main_graph.legend)
        self.main_graph.addLegend()
        # redraw curves
        for ind, item in enumerate(self.data_plotting):
            label, color, _, showed, data, _ = item
            if showed:
                curve = self.main_graph.plot(data[0],
                                             data[1],
                                             pen=color,
                                             name=label)
                self.data_plotting[ind][2] = curve
        self.update_ROI_graph()

    def callback_clear_plotting_data(self):
        """"""
        self.data_plotting = []
        self.curve_highlighted = []
        self.update_graph()

    def callback_graph_index_combobox_changed(self, index):
        """Add clicked item to Data plotting area"""
        if index == self.graph_number:
            # choose new
            self.graph_number += 1
            # add a graph
            graph_widget = pg.GraphicsLayoutWidget()
            graph_widget.addPlot(row=0, col=0)
            self.graph_lines_dict.setdefault(graph_widget, 0)
            for data in self.data_plotting:
                data[1].clear()
                for i in range(1, self.graph_number + 1):
                    data[1].addItem(str(i))
                data[1].addItem('New')
        else:
            # change current curve's graph
            pass

    def callback_visible_checkBox(self, checked):
        """Set the curve visible or invisible"""
        if checked:
            pass
        else:
            pass

    def callback_ROI_triggered(self):
        """Show the graph"""
        if self.ROI_showed:
            self.detail_graph.hide()
            self.ROI_region.hide()
            self.ROI_showed = not self.ROI_showed
        else:
            self.update_ROI_graph()
            self.detail_graph.show()
            self.ROI_region.show()
            self.ROI_showed = not self.ROI_showed

    def update_ROI_graph(self):
        items_to_be_removed = []
        for item in self.detail_graph.items:
            if isinstance(item, pg.PlotDataItem):
                items_to_be_removed.append(item)

        for item in items_to_be_removed:
            self.detail_graph.removeItem(item)

        items = self.main_graph.items
        for item in items:
            if isinstance(item, pg.PlotDataItem):
                self.detail_graph.plot(item.xData,
                                       item.yData,
                                       pen=item.opts['pen'])

    def load_data(self):
        self.log_data = ULog(str(self.log_file_name)).data_list
        self.data_dict = OrderedDict()
        for d in self.log_data:
            data_items_list = [f.field_name for f in d.field_data]
            data_items_list.remove('timestamp')
            data_items_list.insert(0, 'timestamp')
            data_items = [(item, str(d.data[item].dtype),
                           str(len(d.data[item]))) for item in data_items_list]
            self.data_dict.setdefault(d.name, data_items[1:])

        # attitude
        index = list(self.data_dict.keys()).index('vehicle_attitude')
        self.time_stamp_attitude = self.log_data[index].data[
            'timestamp'] / 10**6
        q0 = self.log_data[index].data['q[0]']
        q1 = self.log_data[index].data['q[1]']
        q2 = self.log_data[index].data['q[2]']
        q3 = self.log_data[index].data['q[3]']
        self.attitude_history = self.quat_to_euler(q0, q1, q2, q3)
        # position
        index = list(self.data_dict.keys()).index('vehicle_local_position')
        self.time_stamp_position = self.log_data[index].data[
            'timestamp'] / 10**6
        x = self.log_data[index].data['x']
        y = self.log_data[index].data['y']
        z = self.log_data[index].data['z']
        self.position_history = [
            (x[i] * self.SCALE_FACTOR, y[i] * self.SCALE_FACTOR,
             z[i] * self.SCALE_FACTOR) for i in range(len(x))
        ]
        # motor rotation
        index = list(self.data_dict.keys()).index('actuator_outputs')
        self.time_stamp_output = self.log_data[index].data['timestamp'] / 10**6
        output0 = self.log_data[index].data['output[0]']
        output1 = self.log_data[index].data['output[1]']
        output2 = self.log_data[index].data['output[2]']
        output3 = self.log_data[index].data['output[3]']
        self.output_history = [(output0[i], output1[i], output2[i], output3[i])
                               for i in range(len(output0))]

        # get common time range
        self.time_range = (
            max(self.time_stamp_attitude[0], self.time_stamp_output[0],
                self.time_stamp_position[0]),
            min(self.time_stamp_attitude[-1], self.time_stamp_output[-1],
                self.time_stamp_position[-1]))

    def load_data_tree(self):
        # update the tree list table
        self.item_list_treeWidget.clear()
        for key, values in self.data_dict.items():
            param_name = QtGui.QTreeWidgetItem(self.item_list_treeWidget,
                                               [key])
            self.item_list_treeWidget.expandItem(param_name)
            for data_name in values:
                self.item_list_treeWidget.expandItem(
                    QtGui.QTreeWidgetItem(
                        param_name,
                        [data_name[0], data_name[1], data_name[2]]))
            param_name.setExpanded(False)

    def quadrotor_win_closed_event(self, closed):
        if closed:
            self.quadrotor_widget_isshow = not self.quadrotor_widget_isshow
            self.show_quadrotor_3d.setIcon(
                QtGui.QIcon(get_source_name('icons/quadrotor.gif')))
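
The 3-2-1 quaternion-to-Euler conversion in quat_to_euler() above can be
checked in isolation. A self-contained sketch of the same formulas for a
single quaternion, assuming only numpy; it keeps the example's own
(yaw, roll, pitch) naming:

import numpy as np

def quat_to_euler_321(q0, q1, q2, q3):
    """Same 3-2-1 conversion as quat_to_euler above, for one quaternion."""
    yaw = np.degrees(np.arctan2(2.0 * (q0 * q1 + q2 * q3),
                                1.0 - 2.0 * (q1 ** 2 + q2 ** 2)))
    roll = np.degrees(np.arcsin(2.0 * (q0 * q2 - q3 * q1)))
    pitch = np.degrees(np.arctan2(2.0 * (q0 * q3 + q1 * q2),
                                  1.0 - 2.0 * (q2 ** 2 + q3 ** 2)))
    return yaw, roll, pitch

print(quat_to_euler_321(1.0, 0.0, 0.0, 0.0))  # identity -> (0.0, 0.0, 0.0)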
Esempio n. 59
0
def generate_html(config, analyses, controlConfig, customConfigFiles):
    # {{{
    """
    Generates webpages for displaying the plots from each analysis task

    Parameters
    ----------
    config : ``MpasAnalysisConfigParser``
        Config options

    analyses : ``OrderedDict`` of ``AnalysisTask`` objects
        the analysis tasks that generated the plots to include in the
        webpages.  Each task's ``xmlFileNames`` attribute supplies the list
        of files to include on the webpage for the associated component.

    controlConfig : ``MpasAnalysisConfigParser``
        Config options for a control run

    customConfigFiles : list of str
        The names of custom config files to be copied to the HTML directory for
        provenance
    """
    # Authors
    # -------
    # Xylar Asay-Davis

    generateHTML = config.getboolean('html', 'generate')
    if not generateHTML:
        return

    print("Generating webpage for viewing results...")

    page = MainPage(config, controlConfig, customConfigFiles)

    components = OrderedDict()

    # add images from each analysis task, creating a dictionary of components
    missingCount = 0
    for analysisTask in analyses.values():
        for fileName in analysisTask.xmlFileNames:
            try:
                ComponentPage.add_image(fileName, config, components,
                                        controlConfig)
            except IOError:
                print('  missing file {}'.format(fileName))
                missingCount += 1

    if missingCount > 0:
        print('Warning: {} XML files were missing and the analysis website'
              ' will be incomplete.'.format(missingCount))
    # generate the page for each component and add the component to the main
    # page
    for componentName, component in components.items():
        component.generate()

        firstImageFileName = component.get_first_image()

        page.add_component(componentName, component.subdirectory,
                           firstImageFileName)

    page.generate()

    print("Done.")
Esempio n. 60
0
class MainPage(object):
    """
    Describes a main webpage containing one or more pages for components

    Attributes
    ----------
    config : ``MpasAnalysisConfigParser``
        Config options

    controlConfig : ``MpasAnalysisConfigParser``
        Config options for a control run

    customConfigFiles : list of str
        The names of custom config files to be copied to the HTML directory
        for provenance

    pageTemplate, componentTemplate, configTemplate : str
        The contents of templates used to construct the page

    components : OrderedDict of dict
        Each component has a name, subdirectory and image name used to find
        the appropriate thumbnail.
    """
    # Authors
    # -------
    # Xylar Asay-Davis

    def __init__(self, config, controlConfig, customConfigFiles):
        """
        Create a MainPage object, reading in the templates

        Parameters
        ----------
        config : ``MpasAnalysisConfigParser``
            Config options

        controlConfig : ``MpasAnalysisConfigParser``
            Config options for a control run

        customConfigFiles : list of str
            The names of custom config files to be copied to the HTML directory
            for provenance
       """
        # Authors
        # -------
        # Xylar Asay-Davis

        self.config = config
        self.controlConfig = controlConfig
        self.customConfigFiles = customConfigFiles

        # get template text
        fileName = \
            pkg_resources.resource_filename(__name__,
                                            "templates/main_page.html")

        with open(fileName, 'r') as templateFile:
            self.pageTemplate = templateFile.read()

        fileName = \
            pkg_resources.resource_filename(__name__,
                                            "templates/main_component.html")
        with open(fileName, 'r') as templateFile:
            self.componentTemplate = templateFile.read()

        fileName = \
            pkg_resources.resource_filename(__name__,
                                            "templates/config.html")
        with open(fileName, 'r') as templateFile:
            self.configTemplate = templateFile.read()

        # start with no components
        self.components = OrderedDict()

    def add_component(self, name, subdirectory, imageFileName):
        """
        Add a component to the list that will appear on the main page

        Parameters
        ----------
        name : str
            The name of the component as it should appear in the list of
            components, at the top of the component webpage and in the page
            title (e.g "Sea Ice" as opposed to "sea_ice" or "seaIce")

        subdirectory : str
            The subdirectory for the component's webpage

        imageFileName : str
            The name of an image file (without path) that will be used as the
            thumbnail for the gallery.  Typically, this is the first image
            from the first gallery.
        """
        # Authors
        # -------
        # Xylar Asay-Davis

        self.components[name] = {'subdirectory': subdirectory,
                                 'imageFileName': imageFileName}

    def generate(self):
        """
        Generate the webpage from templates and components, and write it out to
        the HTML directory.
        """
        # Authors
        # -------
        # Xylar Asay-Davis

        runName = self.config.get('runs', 'mainRunName')

        if self.controlConfig is None:
            controlRunText = ''
        else:
            controlRunText = '<br> Control: {}'.format(
                self.controlConfig.get('runs', 'mainRunName'))

        componentsText = ''

        for componentName, componentDict in self.components.items():
            subdirectory = componentDict['subdirectory']
            imageFileName = componentDict['imageFileName']
            replacements = {'@componentDir': subdirectory,
                            '@componentName': componentName,
                            '@firstImage': imageFileName}

            # substitute entries in the template and add the component to
            # the text describing all components
            componentsText = componentsText + \
                _replace_tempate_text(self.componentTemplate, replacements)

        githash = _get_git_hash()
        if githash is None:
            githash = ''
        else:
            githash = 'Git Hash: {}'.format(githash)

        command = ' '.join(sys.argv[:])

        configsText = ''
        for configFileName in self.customConfigFiles:
            shortName = os.path.basename(configFileName)
            if len(shortName) > 30:
                shortName = shortName[0:30]

            replacements = {'@configName': os.path.basename(configFileName),
                            '@configDesc': shortName}

            configsText = configsText + \
                _replace_tempate_text(self.configTemplate, replacements)

        replacements = {'@configName': 'config.complete.{}'.format(runName),
                        '@configDesc': 'Complete Configuration File'}

        configsText = configsText + \
            _replace_tempate_text(self.configTemplate, replacements)

        replacements = {'@runName': runName,
                        '@controlRunText': controlRunText,
                        '@components': componentsText,
                        '@version': mpas_analysis.__version__,
                        '@gitHash': githash,
                        '@command': command,
                        '@configs': configsText}

        pageText = _replace_tempate_text(self.pageTemplate, replacements)

        htmlBaseDirectory = build_config_full_path(self.config, 'output',
                                                   'htmlSubdirectory')

        for subdir in ['css', 'js']:
            try:
                makedirs('{}/{}'.format(htmlBaseDirectory, subdir))
            except OSError:
                pass

        outFileName = '{}/index.html'.format(htmlBaseDirectory)

        with open(outFileName, mode='w') as mainFile:
            mainFile.write(
                pageText.encode('ascii',
                                'xmlcharrefreplace').decode('ascii'))

        # copy the css and js files as well as general images
        fileName = \
            pkg_resources.resource_filename(__name__,
                                            "templates/style.css")
        copyfile(fileName, '{}/css/style.css'.format(htmlBaseDirectory))

        fileName = \
            pkg_resources.resource_filename(__name__,
                                            "templates/index.js")
        copyfile(fileName, '{}/js/index.js'.format(htmlBaseDirectory))

        fileName = \
            pkg_resources.resource_filename(__name__,
                                            "templates/mpas_logo.png")
        copyfile(fileName, '{}/mpas_logo.png'.format(htmlBaseDirectory))

        fileName = \
            pkg_resources.resource_filename(__name__,
                                            "templates/config.png")
        copyfile(fileName, '{}/config.png'.format(htmlBaseDirectory))

        with open('{}/config.complete.{}'.format(htmlBaseDirectory,
                                                 runName), 'w') as configFile:
            self.config.write(configFile)

        for configFileName in self.customConfigFiles:
            copyfile(configFileName, '{}/{}'.format(
                htmlBaseDirectory, os.path.basename(configFileName)))