Example No. 1
def _get_archive_filelist(filename):
    # type: (str) -> List[str]
    """Extract the list of files from a tar or zip archive.

    Args:
        filename: name of the archive

    Returns:
        Sorted list of files in the archive, excluding './'

    Raises:
        ValueError: when the file is neither a zip nor a tar archive
        FileNotFoundError: when the provided file does not exist (for Python 3)
        IOError: when the provided file does not exist (for Python 2)
    """
    names = []  # type: List[str]
    if tarfile.is_tarfile(filename):
        with tarfile.open(filename) as tar_file:
            names = sorted(tar_file.getnames())
    elif zipfile.is_zipfile(filename):
        with zipfile.ZipFile(filename) as zip_file:
            names = sorted(zip_file.namelist())
    else:
        raise ValueError("Can not get filenames from '{!s}'. "
                         "Not a tar or zip file".format(filename))
    if "./" in names:
        names.remove("./")
    return names
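
A minimal usage sketch (hypothetical archive name; assumes the snippet's tarfile/zipfile imports and typing.List are in scope in the enclosing module):

import zipfile

# build a tiny zip archive and list its contents with the helper above
with zipfile.ZipFile("demo.zip", "w") as zf:
    zf.writestr("b.txt", "hello")
    zf.writestr("a.txt", "world")
print(_get_archive_filelist("demo.zip"))  # ['a.txt', 'b.txt']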
Example No. 2
def test_format(obj, precision=6):
    tf = lambda o: test_format(o, precision)
    delimit = lambda o: ', '.join(o)
    otype = type(obj)
    if otype is str:
        return "'%s'" % obj
    elif otype is float or otype is int:
        if otype is int:
            obj = float(obj)
        fstr = '%%.%df' % precision
        return fstr % obj
    elif otype is set:
        if len(obj) == 0:
            return 'set()'
        return '{%s}' % delimit(sorted(map(tf, obj)))
    elif otype is dict:
        return '{%s}' % delimit(sorted(tf(k)+': '+tf(v) for k,v in obj.items()))
    elif otype is list:
        return '[%s]' % delimit(map(tf, obj))
    elif otype is tuple:
        return '(%s%s)' % (delimit(map(tf, obj)), ',' if len(obj) == 1 else '')
    elif otype.__name__ in ['Vec','Mat']:
        entries = tf({x:obj.f[x] for x in obj.f if obj.f[x] != 0})
        return '%s(%s, %s)' % (otype.__name__, test_format(obj.D), entries)
    else:
        return str(obj)
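
A short usage sketch showing the deterministic formatting this helper produces (values chosen purely for illustration):

# dict keys are rendered sorted, numbers padded to the requested precision
print(test_format({'b': 2, 'a': 1}, precision=2))  # {'a': 1.00, 'b': 2.00}
# one-element tuples keep their trailing comma
print(test_format((3,)))                           # (3.000000,)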
Example No. 3
def combine(**kwargs):
  """Generate combinations based on its keyword arguments.

  Two sets of returned combinations can be concatenated using +.  Their product
  can be computed using `times()`.

  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]`
         or `option=the_only_possibility`.

  Returns:
    a list of dictionaries for each combination. Keys in the dictionaries are
    the keyword argument names.  Each key has one value - one of the
    corresponding keyword argument values.
  """
  if not kwargs:
    return [OrderedDict()]

  sort_by_key = lambda k: k[0][0]
  kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
  first = list(kwargs.items())[0]

  rest = dict(list(kwargs.items())[1:])
  rest_combined = combine(**rest)

  key = first[0]
  values = first[1]
  if not isinstance(values, list):
    values = [values]

  return [
      OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
      for v in values
      for combined in rest_combined
  ]
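
A usage sketch, assuming OrderedDict is imported from collections in the enclosing module (the option names below are made up for illustration):

# one list-valued option and one scalar option expand to two combinations
for c in combine(mode=["eager", "graph"], distribution="mirrored"):
    print(dict(c))
# {'distribution': 'mirrored', 'mode': 'eager'}
# {'distribution': 'mirrored', 'mode': 'graph'}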
Example No. 4
    def __init__(self, **names_vals):
        for name, val in names_vals.iteritems():
            if name.startswith(('_', 'is_valid_')):
                raise NameError('The parameter name %s is not acceptable'
                                % name)
            try:
                convert = self.__class__.params[name]
            except KeyError:
                logging.warn('The parameter %r is unknown, ignoring' % name)
                continue
            try:
                value = convert(val)
            except:
                raise ValueError('Could not convert to %s: %s=%s'
                                 % (convert.__name__, name, val))
            setattr(self, name, value)

        valids = sorted(getattr(self, valid)
                        for valid in dir(self.__class__)
                        if valid.startswith('is_valid_'))
        for is_valid in valids:
            if not is_valid():
                dump = '\n'.join('%s=%s' % (n, v)
                                 for n, v in sorted(self.__dict__.items()))
                raise ValueError(is_valid.__doc__ + 'Got:\n' + dump)
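
A self-contained, hypothetical sketch of the same pattern for Python 3: a params table maps names to converters, and is_valid_* methods (whose docstrings double as error messages) validate the resulting attributes. All names below are invented for illustration.

import logging

class SimpleParams(object):
    params = {'sites': int, 'truncation_level': float}

    def __init__(self, **names_vals):
        for name, val in names_vals.items():
            try:
                convert = self.__class__.params[name]
            except KeyError:
                logging.warning('The parameter %r is unknown, ignoring', name)
                continue
            setattr(self, name, convert(val))
        # run every is_valid_* check in a deterministic order
        for check_name in sorted(n for n in dir(self) if n.startswith('is_valid_')):
            check = getattr(self, check_name)
            if not check():
                raise ValueError(check.__doc__)

    def is_valid_sites(self):
        """The number of sites must be positive. """
        return getattr(self, 'sites', 1) > 0

p = SimpleParams(sites='10', truncation_level='3.0')
print(p.sites, p.truncation_level)  # 10 3.0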
Example No. 5
def knapsack_unbounded_dp(items, C):
    # order by max value per item size
    items = sorted(items, key=lambda item: item[VALUE]/float(item[SIZE]), reverse=True)
 
    # Sack keeps track of max value so far as well as the count of each item in the sack
    sack = [(0, [0 for i in items]) for i in range(0, C+1)]   # value, [item counts]
    for i,item in enumerate(items): 
        name, size, value = item
        for c in range(size, C+1):
            sackwithout = sack[c-size]  # previous max sack to try adding this item to
            trial = sackwithout[0] + value
            used = sackwithout[1][i]
            if sack[c][0] < trial:
                # old max sack with this added item is better
                sack[c] = (trial, sackwithout[1][:])
                sack[c][1][i] +=1   # use one more
 
    value, bagged = sack[C]
    numbagged = sum(bagged)
    size = sum(items[i][1]*n for i,n in enumerate(bagged))
    # convert to (item, count) pairs in name order
    bagged = sorted((items[i][NAME], n) for i,n in enumerate(bagged) if n)
 
    return value, size, numbagged, bagged
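
A usage sketch, assuming the module defines the tuple index constants as NAME, SIZE, VALUE = 0, 1, 2 (the item data below is made up):

NAME, SIZE, VALUE = 0, 1, 2
items = [("gold", 6, 30), ("silver", 3, 14), ("bronze", 4, 16)]
value, size, numbagged, bagged = knapsack_unbounded_dp(items, 10)
print(value, size, numbagged, bagged)
# expected: 46 10 2 [('bronze', 1), ('gold', 1)]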
Example No. 6
    def print_ratings(self, names, channel, game_type):
        balance = self.plugins["balance"]

        not_cached = balance.not_cached(game_type, names)
        if not_cached:
            with balance.rlock:
                for lookup in balance.lookups:
                    for n in balance.lookups[lookup][1]:
                        if n in not_cached:
                            not_cached.remove(n)
                if not_cached:
                    balance.fetch_player_ratings(not_cached, channel, game_type)
                if (self.print_ratings, (names, channel, game_type)) not in balance.pending:
                    balance.pending.append((self.print_ratings, (names, channel, game_type)))
                return False

        teams = self.teams()
        red_sorted = sorted(teams["red"], key=lambda x: balance.cache[x.clean_name.lower()][game_type]["elo"], reverse=True)
        blue_sorted = sorted(teams["blue"], key=lambda x: balance.cache[x.clean_name.lower()][game_type]["elo"], reverse=True)
        red = "^7" + ", ".join(["{}: ^1{}^7".format(p, balance.cache[p.clean_name.lower()][game_type]["elo"]) for p in red_sorted])
        blue = "^7" + ", ".join(["{}: ^4{}^7".format(p, balance.cache[p.clean_name.lower()][game_type]["elo"]) for p in blue_sorted])

        channel.reply(red)
        channel.reply(blue)
        return True
Example No. 7
 def test_create_defaults(self):
     user = User("test_user")
     user.state_from_dict({"key": "1", "key2": "2"})
     user.state_public_keys = ["key2"]
     assert repr(user) == "<User:test_user, connections:0>"
     assert sorted(user.state.items()) == sorted({"key": "1", "key2": "2"}.items())
     assert user.public_state == {"key2": "2"}
Example No. 8
  def Get(self, user, domain=None):  # pylint: disable=g-bad-name
    """Produces the map listing page."""
    title = 'Maps for all domains'
    if domain:
      title = 'Maps for %s' % domain

    # Get ITEMS_PER_PAGE + 1 items so we know whether there is a next page.
    skip = int(self.request.get('skip', '0'))
    maps = list(itertools.islice(
        model.Map.GetViewable(user, domain), skip, skip + ITEMS_PER_PAGE + 1))
    more_items = len(maps) > ITEMS_PER_PAGE
    maps = maps[:ITEMS_PER_PAGE]

    # Attach to each Map a 'catalog_entries' attribute with a list of the
    # CatalogEntry objects that link to that Map.
    published = {}
    for entry in model.CatalogEntry.GetAll():
      published.setdefault(entry.map_id, []).append(entry)
    for m in maps:
      m.catalog_entries = sorted(
          published.get(m.id, []), key=lambda e: (e.domain, e.label))

    self.response.out.write(self.RenderTemplate('map_list.html', {
        'title': title,
        'maps': maps,
        'first': skip + 1,
        'last': skip + len(maps),
        'more_items': more_items,
        'prev_page_url':
            self.request.path_url + '?skip=%d' % max(0, skip - ITEMS_PER_PAGE),
        'next_page_url':
            self.request.path_url + '?skip=%d' % (skip + ITEMS_PER_PAGE),
        'catalog_domains': sorted(
            perms.GetAccessibleDomains(user, perms.Role.CATALOG_EDITOR))
    }))
Example No. 9
    def _fill_usm3d_case(self, cases, bcs, mapbc, bcmap_to_bc_name, loads):
        self.scalarBar.VisibilityOff()

        ID = 1
        if bcs is not None and self.is_centroidal:
            cases[(ID, 'Region', 1, 'centroid', '%.0f')] = bcs

            mapbc_print = defaultdict(list)
            for region, bcnum in sorted(mapbc.iteritems()):
                mapbc_print[bcnum].append(region)
                try:
                    name = bcmap_to_bc_name[bcnum]
                except KeyError:
                    name = '???'
                #self.log.info('Region=%i BC=%s name=%r' % (region, bcnum, name))

            for bcnum, regions in sorted(mapbc_print.iteritems()):
                try:
                    name = bcmap_to_bc_name[bcnum]
                except KeyError:
                    name = '???'
                self.log.info('BC=%s Regions=%s name=%r' % (bcnum, regions, name))
            self.scalarBar.VisibilityOn()

        #==============================
        ID = 2
        if self.is_nodal and len(loads):
            for key, load in loads.iteritems():
                cases[(ID, key, 1, 'nodal', '%.3f')] = load
            self.scalarBar.VisibilityOn()
        return cases
Example No. 10
 def isAnagram(self, s, t):
     """
     :type s: str
     :type t: str
     :rtype: bool
     """
     return "".join(sorted(s)) == "".join(sorted(t))
Example No. 11
def calculateSparseDictCOO(data_set, data_label_hash, jump=1, valid_flag=False):
	row = []
	col = []
	data = []
	row_valid = []
	col_valid = []
	data_valid = []

	doc_ids = set(sorted(map(lambda row:int(row[0]), data_set)))
	base_ids_list = filter(lambda ids: ids % jump == 0, doc_ids)
	train_ids = base_ids_list
	valid_ids = set()
	if valid_flag:
		valid_index = filter(lambda ids: ids % validation_perc == 0, range(len(base_ids_list)))
		valid_ids = [base_ids_list[i] for i in valid_index]
		base_ids = set(base_ids_list)
		train_ids = sorted(base_ids - set(valid_ids))

	labels = map(lambda trid: int(data_label_hash[trid]), train_ids)
	labels_valid = map(lambda vlid: int(data_label_hash[vlid]), valid_ids)
	for i in range(len(data_set)):
		if int(data_set[i][0]) in train_ids:
			row.append(int(data_set[i][0]))
			col.append(int(data_set[i][1])-1)
			data.append(int(data_set[i][2]))
			# labels.append(int(data_label_hash[int(data_set[i][0])]))
		elif int(data_set[i][0]) in valid_ids:
			row_valid.append(int(data_set[i][0]))
			col_valid.append(int(data_set[i][1])-1)
			data_valid.append(int(data_set[i][2]))
			# labels_valid.append(int(data_label_hash[int(data_set[i][0])]))

	train = translate(row), col, data, labels
	valid = translate(row_valid), col_valid, data_valid, labels_valid
	return train, valid
Example No. 12
def same_keys(a, b):
    def key(k):
        if isinstance(k, str):
            return (k, -1, -1, -1)
        else:
            return k
    return sorted(a.dask, key=key) == sorted(b.dask, key=key)
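
A sketch with stand-in objects (real callers would pass dask collections, whose .dask mapping holds the task-graph keys being compared):

class FakeCollection:
    def __init__(self, dask):
        self.dask = dask

x = FakeCollection({("add", 0): None, "source": None})
y = FakeCollection({"source": None, ("add", 0): None})
print(same_keys(x, y))  # True: same keys, regardless of ordering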
Example No. 13
    def distractors(self,count):
        results = []

        results.append(([self.initial_value, self.initial_value + self.integral_value], r'You have exchanged the roles of \(f(' + str(self.initial) + ')\) and the integral.'))

        results.append(([self.integral_value - self.initial_value, self.integral_value], r'You are subtracting \(f(' + str(self.initial) + ')\) when you should be adding it.'))

        results.append((sorted([self.integral_value, self.initial_value]), r'You should add \(f(' + str(self.initial) + ')\) to the value of the integral.'))

        true_value = n(sum(self.term_x, var('x'), self.initial, oo))

        nearby = RR(true_value).nearby_rational(max_denominator=2 * QQ(self.integral_value).denominator())
        results.append((sorted([self.integral_value, nearby]), r'You should add \(f(' + str(self.initial) + ')\) to the value of the integral.'))

        nearby = RR(true_value).nearby_rational(max_denominator=2 * QQ(self.integral_value).denominator())
        results.append((sorted([self.integral_value + self.initial_value, self.integral_value + self.initial_value*2]), r'You should add \(f(' + str(self.initial) + ')\) to the value of the integral.'))

        results = [r for r in results if not ((r[0][0] <= true_value) and (true_value <= r[0][1]))]

        real_results = []
        for r in results:
            if not any([x[0] == r[0] for x in real_results]):
                real_results.append(r)

        return real_results
Example No. 14
    def assertMetric(self, metric_name, value=None, tags=None, count=None,
                     at_least=1, hostname=None, device_name=None, metric_type=None):
        candidates = []
        for m_name, ts, val, mdata in self.metrics:
            if m_name == metric_name:
                if value is not None and val != value:
                    continue
                if tags is not None and sorted(tags) != sorted(mdata.get("tags", [])):
                    continue
                if hostname is not None and mdata['hostname'] != hostname:
                    continue
                if device_name is not None and mdata['device_name'] != device_name:
                    continue
                if metric_type is not None and mdata['type'] != metric_type:
                    continue

                candidates.append((m_name, ts, val, mdata))

        try:
            self._candidates_size_assert(candidates, count=count, at_least=at_least)
        except AssertionError:
            log.error("Candidates size assertion for {0} (value: {1}, tags: {2}, "
                      "count: {3}, at_least: {4}, hostname: {5}) failed"
                      .format(metric_name, value, tags, count, at_least, hostname))
            raise

        for mtuple in self.metrics:
            for cmtuple in candidates:
                if mtuple == cmtuple:
                    mtuple[3]['tested'] = True
        log.debug("{0} FOUND !".format(metric_name))
Example No. 15
    def assertServiceMetadata(self, meta_keys, count=None, at_least=1):
        log.debug("Looking for service metadata with keys {0}".format(meta_keys))
        if count is not None:
            log.debug(" * should be defined for exactly {0} instances".format(count))
        elif at_least is not None:
            log.debug(" * should be defined for at least {0} instances".format(at_least))

        candidates = []
        for sm in self.service_metadata:
            if sorted(sm.keys()) != sorted(meta_keys):
                continue

            candidates.append(sm)

        try:
            self._candidates_size_assert(candidates, count=count, at_least=at_least)
        except AssertionError:
            log.error("Candidates size assertion for service metadata with keys {0}"
                      " (count: {1}, at_least: {2}) failed".format(meta_keys, count, at_least))
            raise

        for sm in self.service_metadata:
            for csm in candidates:
                if sm == csm:
                    sm['tested'] = True
        log.debug("Service metadata FOUND !")
Example No. 16
def canonical_string(req):
    """
    Canonicalize a request to a token that can be signed.
    """
    amz_headers = {}

    buf = "%s\n%s\n%s\n" % (req.method, req.headers.get('Content-MD5', ''),
                            req.headers.get('Content-Type') or '')

    for amz_header in sorted((key.lower() for key in req.headers
                              if key.lower().startswith('x-amz-'))):
        amz_headers[amz_header] = req.headers[amz_header]

    if 'x-amz-date' in amz_headers:
        buf += "\n"
    elif 'Date' in req.headers:
        buf += "%s\n" % req.headers['Date']

    for k in sorted(key.lower() for key in amz_headers):
        buf += "%s:%s\n" % (k, amz_headers[k])

    # RAW_PATH_INFO is only available in eventlet versions newer than 0.9.17.
    # With older versions, swift3 falls back to swob's req.path instead.
    path = req.environ.get('RAW_PATH_INFO', req.path)
    if req.query_string:
        path += '?' + req.query_string
    if '?' in path:
        path, args = path.split('?', 1)
        for key in urlparse.parse_qs(args, keep_blank_values=True):
            if key in ('acl', 'logging', 'torrent', 'location',
                       'requestPayment', 'versioning', 'delete'):
                return "%s%s?%s" % (buf, path, key)
    return buf + path
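
A rough usage sketch with a bare-bones stand-in for the swob request object (all header values are invented):

class FakeReq:
    method = "GET"
    headers = {"Content-Type": "text/plain",
               "Date": "Tue, 27 Mar 2007 19:36:42 +0000",
               "x-amz-meta-user": "alice"}
    environ = {}
    path = "/bucket/key"
    query_string = ""

print(canonical_string(FakeReq()))
# prints the method, blank MD5, content type, date, x-amz headers and path, joined by newlines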
Example No. 17
def extendedReportOn(fileName):
    fileFilter = open(fileName)
    file = open(os.path.join(baseDir, 'processing/filter.txt'), 'w')
    filterSet = set()
    for s in fileFilter:
        filterSet.add(s.strip())

    sortedAll = sorted(allNumbers, key=sortingFunctionOfAllNumbers)

    result = list()
    
    # sortedAll is a list of allNumbers keys, sorted by sortingFunctionOfAllNumbers
    for key in sortedAll:
        dossier = allNumbers[key]
        fixedNumber = dossier.number.replace('XX', 'EP', 1)
        if fixedNumber in filterSet:
            result.append(dossier)
            '''file.write(dossier.toString()+'\n')'''

    resultSorted = sorted(result, key=sortingFunctionOfExtendedReport)
    for dossier in resultSorted:
        file.write(dossier.toString()+'\n')
    
    fileFilter.close()
    file.close()
Example No. 18
            def insert(edit):
                if not any(l.errors for l in linters):
                    return

                filename = os.path.basename(linters[0].filename or 'untitled')
                out = '\n{}:\n'.format(filename)

                for lint in sorted(linters, key=lambda lint: lint.name):
                    if lint.errors:
                        out += '\n  {}:\n'.format(lint.name)
                        items = sorted(lint.errors.items())

                        # Get the highest line number so we know how much padding numbers need
                        highest_line = items[-1][0]
                        width = 1

                        while highest_line >= 10:
                            highest_line /= 10
                            width += 1

                        for line, messages in items:
                            for col, message in messages:
                                out += '    {:>{width}}: {}\n'.format(line + 1, message, width=width)

                output.insert(edit, output.size(), out)
Example No. 19
def tell(path):
    entries = []

    base = os.path.join(config["juno-base"], "root")

    path = os.path.join(base, safepath(path))

    dirpath, dirnames, filenames = os.walk(path).next()
    for i in sorted(dirnames):
        subpath = "/" + os.path.relpath(os.path.join(path, i), base)
        entries.append({ "text": i,
                         "id": subpath,
                         "can_write_parent": auth.can_write_parent(subpath[1:]),
                         "can_write": auth.can_write(subpath[1:]) })
    for i in sorted(filenames):
        if i[0] == ".": continue

        subpath = "/" + os.path.relpath(os.path.join(path, i), base)
        entries.append({ "text": i,
                         "id": subpath,
                         "can_write_parent": auth.can_write_parent(subpath[1:]),
                         "can_write": auth.can_write(subpath[1:]),
                         "leaf": "true" })

    return json.dumps(entries)
Example No. 20
 def testMove(self):
   co = self._get_co(None)
   self._check_move(co)
   out = subprocess2.check_output(
       ['svn', 'status'], cwd=co.project_path)
   out = sorted(out.splitlines())
   expected = sorted(
     [
       'A  +    chromeos/views/webui_menu_widget.h',
       'D       chromeos/views/DOMui_menu_widget.h',
     ])
   self.assertEquals(expected, out)
   # Make sure ancestry is what is expected;
   env = os.environ.copy()
   env['LANGUAGE'] = 'en_US.UTF-8'
   out = subprocess2.check_output(
       ['svn', 'info', 'chromeos/views/webui_menu_widget.h'],
       cwd=co.project_path,
       env=env)
   values = dict(l.split(': ', 1) for l in out.splitlines() if l)
   expected = {
     'Checksum': '65837bb3da662c8fa88a4a50940ea7c6',
     'Copied From Rev': '2',
     'Copied From URL':
         '%strunk/chromeos/views/DOMui_menu_widget.h' % self.svn_base,
     'Name': 'webui_menu_widget.h',
     'Node Kind': 'file',
     'Path': 'chromeos/views/webui_menu_widget.h',
     'Repository Root': '%s' % self.svn_base.rstrip('/'),
     'Revision': '2',
     'Schedule': 'add',
     'URL': '%strunk/chromeos/views/webui_menu_widget.h' % self.svn_base,
   }
   self.assertEquals(expected, values)
Example No. 21
def getPythonFunc(text):
	module_url = "https://docs.python.org/3/library/%s.html" % text

	try:
		class_link = str(urlopen(module_url).read())
	except: return

	try:
		class_link = class_link.replace("\\n", " ").replace("\\t", "").replace("\\r", "").replace("&nbsp;", " ").replace("\'", "'").replace("&quot;", "\"").replace("\\;", "").replace("\\'", "'").replace("&lt;", "<").replace("&gt;", ">").replace("&trade;", "(TM)").replace("&#8212;", "--").replace("&#8220;", "\"").replace("&#8221;", "\"").replace("\n", " ").replace("\t", "").replace("\r", "").replace("&#8217;", "'")

		value = (re.findall(r'[\w\_]+\.</code><code class="descname">(.*?)<a class=' , class_link))


		pattern = re.compile(r'\\x[\w\d]\d|<.*?>')
		# description = pattern.sub(" ", "".join(result))
		for x in value:
			y = pattern.sub("", x).replace(")", ")#").split("#")[0]
			value[value.index(x)] = y

		# print(value)
		main = []; others = [];

		for x in value:
			if x[0].islower() and x.endswith(")"):
				main.append(x)
			else:
				others.append(x)

		meth_main = "; ".join(sorted(set(main)))
		other_main = "; ".join(sorted(set(others)))

		return meth_main, other_main, module_url
	except : return
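
A hedged usage sketch; this performs a live HTTP request to docs.python.org, so it returns None when offline and may return empty strings if the page markup no longer matches the regexes above:

result = getPythonFunc("json")
if result:
    methods, others, url = result
    print(url)
    print(methods)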
Example No. 22
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('taxonomy', help='name of taxonomy to download (%s)' % 
                        ', '.join(sorted(taxonomies.keys())))
    parser.add_argument('-o', '--output', help='path to save tree output', 
                        nargs='?', default=None)
    parser.add_argument('-f', '--format', help='tree format (%s)' %
                        ', '.join(sorted(bp._io.supported_formats.keys())),
                        nargs='?', default='newick')
    parser.add_argument('-i', '--id', help='Use NCBI ids as label',
                    action='store_true')

    args = parser.parse_args()

    if args.taxonomy == 'ALL':
        classes = [x for x in taxonomies.values() if not x is None]
        args.filename = None
    else:
        classes = [taxonomies[args.taxonomy]]
        
    for c in classes:
        taxonomy = c()
        print '** %s **' % taxonomy.name
        filename = ((args.filename if hasattr(args, 'filename') else None) 
                    or ('%s_taxonomy.%s' % (taxonomy.name, args.format)))
        taxonomy.main(filename, tree_format=args.format, ids=args.id)
Example No. 23
File: run.py Project: dbrgn/jedi
def run_related_name_test(script, correct, line_nr):
    """
    Runs tests for gotos.
    Tests look like this:
    >>> abc = 1
    >>> #< abc@1,0 abc@3,0
    >>> abc

    Returns 1 if the test failed and 0 if it succeeded.
    """
    result = script.related_names()
    correct = correct.strip()
    compare = sorted((r.module_name, r.start_pos[0], r.start_pos[1])
                                                            for r in result)
    wanted = []
    if not correct:
        positions = []
    else:
        positions = literal_eval(correct)
    for pos_tup in positions:
        if type(pos_tup[0]) == str:
            # this means that there is a module specified
            wanted.append(pos_tup)
        else:
            wanted.append(('renaming', line_nr + pos_tup[0], pos_tup[1]))

    wanted = sorted(wanted)
    if compare != wanted:
        print('Solution @%s not right, received %s, wanted %s'\
                    % (line_nr - 1, compare, wanted))
        return 1
    return 0
Example No. 24
def expand_view(request):
  "View for expanding a pattern into matching metric paths"
  local_only    = int( request.REQUEST.get('local', 0) )
  group_by_expr = int( request.REQUEST.get('groupByExpr', 0) )
  leaves_only   = int( request.REQUEST.get('leavesOnly', 0) )
  jsonp = request.REQUEST.get('jsonp', False)

  results = {}
  for query in request.REQUEST.getlist('query'):
    results[query] = set()
    for node in STORE.find(query, local=local_only):
      if node.is_leaf or not leaves_only:
        results[query].add( node.path )

  # Convert our results to sorted lists because sets aren't json-friendly
  if group_by_expr:
    for query, matches in results.items():
      results[query] = sorted(matches)
  else:
    results = sorted( reduce(set.union, results.values(), set()) )

  result = {
    'results' : results
  }

  response = json_response_for(request, result, jsonp=jsonp)
  response['Pragma'] = 'no-cache'
  response['Cache-Control'] = 'no-cache'
  return response
Example No. 25
    def test_select_competitive_companies(self):

        # create two industries
        industry_id1 = ensure_id(insert_test_industry())
        industry_id2 = ensure_id(insert_test_industry())

        # create three companies
        company_id1 = ensure_id(insert_test_company(workflow_status = "published"))
        company_id2 = ensure_id(insert_test_company(workflow_status = "published"))
        company_id3 = ensure_id(insert_test_company(workflow_status = "published"))

        # add primary industries to all three companies.  Company 3 gets a different industry.
        self.main_access.mds.call_add_link("company", company_id1, 'primary_industry_classification', 'industry', industry_id1, "primary_industry", "industry_classification", self.context)
        self.main_access.mds.call_add_link("company", company_id2, 'primary_industry_classification', 'industry', industry_id1, "primary_industry", "industry_classification", self.context)
        self.main_access.mds.call_add_link("company", company_id3, 'primary_industry_classification', 'industry', industry_id2, "primary_industry", "industry_classification", self.context)

        # make industries 1 and 2 compete with each other
        link_interval = [datetime.datetime(2012, 1, 1), datetime.datetime(2013, 2, 2)]
        link_data = {"home_to_away": {"weight": .7}, "away_to_home": {"weight": .7}}
        self.main_access.mds.call_add_link("industry", industry_id1, 'competitor', 'industry', industry_id2, "competitor", "industry_competition", self.context, link_interval = link_interval,
                                           link_data = link_data)

        # query the competitions of company
        competitive_companies = select_competitive_companies(company_id1)

        # sort both the expected and real array so that the order doesn't matter
        expected_competitive_companies = sorted([
            { "_id": str(company_id1), "interval": None, "competition_strength": 1 },
            { "_id": str(company_id2), "interval": None, "competition_strength": 1 },
            { "_id": str(company_id3), "interval": [datetime.datetime(2012, 1, 1), datetime.datetime(2013, 2, 2)], "competition_strength": .7 }
        ])
        competitive_companies = sorted(competitive_companies)

        # make sure the competitions are correct
        self.test_case.assertEqual(competitive_companies, expected_competitive_companies)
Example No. 26
def test_onset_functions():
    # Load in all files in the same order
    ref_files = sorted(glob.glob(REF_GLOB))
    est_files = sorted(glob.glob(EST_GLOB))
    sco_files = sorted(glob.glob(SCORES_GLOB))

    assert len(ref_files) == len(est_files) == len(sco_files) > 0

    # Unit tests
    for metric in [mir_eval.onset.f_measure]:
        yield (__unit_test_onset_function, metric)
    # Regression tests
    for ref_f, est_f, sco_f in zip(ref_files, est_files, sco_files):
        with open(sco_f, 'r') as f:
            expected_scores = json.load(f)
        # Load in an example onset annotation
        reference_onsets = mir_eval.io.load_events(ref_f)
        # Load in an example onset tracker output
        estimated_onsets = mir_eval.io.load_events(est_f)
        # Compute scores
        scores = mir_eval.onset.evaluate(reference_onsets, estimated_onsets)
        # Compare them
        for metric in scores:
            # This is a simple hack to make nosetest's messages more useful
            yield (__check_score, sco_f, metric, scores[metric],
                   expected_scores[metric])
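
For reference, a direct call to the metric exercised above (assumes mir_eval and numpy are installed; the onset times are invented):

import numpy as np
import mir_eval

reference_onsets = np.array([0.10, 0.50, 1.00])
estimated_onsets = np.array([0.11, 0.52, 1.30])
print(mir_eval.onset.evaluate(reference_onsets, estimated_onsets))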
Example No. 27
def test_add_group(app, db, json_groups):
    group = json_groups
    old_groups = db.get_group_list()
    app.group.create(group)
    new_groups = db.get_group_list()
    old_groups.append(group)
    assert sorted(old_groups, key=group.id_or_max) == sorted(new_groups, key=group.id_or_max)
Example No. 28
  def testIndexCreate(self):
    with aff4.FACTORY.Create("aff4:/sequential_collection/testIndexCreate",
                             TestIndexedSequentialCollection,
                             token=self.token) as collection:
      for i in range(10 * 1024):
        collection.Add(rdfvalue.RDFInteger(i))

      # It is too soon to build an index, check that we don't.
      self.assertEqual(collection._index, None)
      self.assertEqual(collection.CalculateLength(), 10 * 1024)
      self.assertEqual(sorted(collection._index.keys()), [0])

      # Push the clock forward 10m, and we should build an index on access.
      with test_lib.FakeTime(rdfvalue.RDFDatetime().Now() + rdfvalue.Duration(
          "10m")):
        # Read from start doesn't rebuild index (lazy rebuild)
        _ = collection[0]
        self.assertEqual(sorted(collection._index.keys()), [0])

        self.assertEqual(collection.CalculateLength(), 10 * 1024)
        self.assertEqual(
            sorted(collection._index.keys()), [0, 1024, 2048, 3072, 4096, 5120,
                                               6144, 7168, 8192, 9216])

    # Now check that the index was persisted to aff4 by re-opening and checking
    # that a read from head does load full index (optimistic load):

    with aff4.FACTORY.Create("aff4:/sequential_collection/testIndexCreate",
                             TestIndexedSequentialCollection,
                             token=self.token) as collection:
      self.assertEqual(collection._index, None)
      _ = collection[0]
      self.assertEqual(
          sorted(collection._index.keys()), [0, 1024, 2048, 3072, 4096, 5120,
                                             6144, 7168, 8192, 9216])
Example No. 29
    def testFull(self, num_best=None, shardsize=100):
        if self.cls == similarities.Similarity:
            index = self.cls(None, corpus, num_features=len(dictionary), shardsize=shardsize)
        else:
            index = self.cls(corpus, num_features=len(dictionary))
        if isinstance(index, similarities.MatrixSimilarity):
            expected = numpy.array([
                [0.57735026, 0.57735026, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.40824831, 0.0, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.0, 0.0, 0.0, 0.0],
                [0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.40824831, 0.0, 0.0, 0.0, 0.81649661, 0.0, 0.40824831, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1., 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.70710677, 0.70710677, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.57735026],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026],
                ], dtype=numpy.float32)
            # HACK: dictionary can be in different order, so compare in sorted order
            self.assertTrue(numpy.allclose(sorted(expected.flat), sorted(index.index.flat)))
        index.num_best = num_best
        query = corpus[0]
        sims = index[query]
        expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)][ : num_best]

        # convert sims to full numpy arrays, so we can use allclose() and ignore
        # ordering of items with the same similarity value
        expected = matutils.sparse2full(expected, len(index))
        if num_best is not None: # when num_best is None, sims is already a numpy array
            sims = matutils.sparse2full(sims, len(index))
        self.assertTrue(numpy.allclose(expected, sims))
        if self.cls == similarities.Similarity:
            index.destroy()
Example No. 30
def cleanupFiles():
    # First get rid of modified files
    for l in ["l1", "l2", "l3"]:
        arcpy.Delete_management(l)

    for f in glob.glob("C:\\Arctmp\\*"):
        try:
            shutil.rmtree(f)
        except:
            print "UNABLE TO REMOVE:", f
    # Now remove the old directory
    for i in xrange(0, 1000000):
        new_workspace = "C:\\Arctmp\\workspace." + str(i)
        if not os.path.exists(new_workspace):
            break
    print "TESTING USING WORKSPACE", new_workspace
    # Now move in fresh copies
    shutil.copytree("C:\\Arcbase", new_workspace)
    print "CONTENTS:"
    arcpy.env.workspace = new_workspace
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.shp")):
        print f
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.lyr")):
        print f
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.gdb")):
        print f
Example No. 31
def run_data_analysis(job, config, tcc_matrix_id, pwise_dist_l1_id,
                      nonzero_ec_id, kallisto_matrix_id, matrix_tsv_id,
                      matrix_cells_id):
    """
    Generates graphs and plots of results.  Uploads images to savedir location.
    :param job: toil job
    :param config: toil job configuration
    :param tcc_matrix_id: jobstore location of TCC matrix (.dat)
    :param pwise_dist_l1_id: jobstore location of L1 pairwise distance (.dat)
    :param nonzero_ec_id: jobstore location of nonzero ec (.dat)
    :param kallisto_matrix_id: id of kallisto output matrix (.ec)
    :param matrix_tsv_id: id of kallisto output matrix (.tsv)
    :param matrix_cells_id: id of kallisto output matrix (.cells)
    """
    # source: https://github.com/pachterlab/scRNA-Seq-TCC-prep (/blob/master/notebooks/10xResults.ipynb)
    # extract output
    job.fileStore.logToMaster('Performing data analysis')
    # read files
    work_dir = job.fileStore.getLocalTempDir()
    tcc_matrix = job.fileStore.readGlobalFile(
        tcc_matrix_id, os.path.join(work_dir, "TCC_matrix.dat"))
    pwise_dist_l1 = job.fileStore.readGlobalFile(
        pwise_dist_l1_id, os.path.join(work_dir, "pwise_dist_L1.dat"))
    nonzero_ec = job.fileStore.readGlobalFile(
        nonzero_ec_id, os.path.join(work_dir, "nonzero_ec.dat"))
    kallisto_matrix = job.fileStore.readGlobalFile(
        kallisto_matrix_id, os.path.join(work_dir, 'kallisto_matrix.ec'))
    matrix_tsv = job.fileStore.readGlobalFile(
        matrix_tsv_id, os.path.join(work_dir, MATRIX_TSV_FILENAME))
    matrix_cells = job.fileStore.readGlobalFile(
        matrix_cells_id, os.path.join(work_dir, MATRIX_CELLS_FILENAME))
    ##############################################################
    # load dataset
    with open(os.path.join(work_dir, "TCC_matrix.dat"), 'rb') as f:
        tcc_matrix = pickle.load(f)
    with open(os.path.join(work_dir, "pwise_dist_L1.dat"), 'rb') as f:
        pwise_dist_l1 = pickle.load(f)
    with open(os.path.join(work_dir, "nonzero_ec.dat"), 'rb') as f:
        nonzero_ec = pickle.load(f)

    ecfile_dir = os.path.join(work_dir, 'kallisto_matrix.ec')
    eclist = np.loadtxt(ecfile_dir, dtype=str)

    tcc = tcc_matrix.T
    T_norm = normalize(tcc_matrix, norm='l1', axis=0)
    t_normt = T_norm.transpose()

    num_of_cells = np.shape(tcc_matrix)[1]
    print("NUM_OF_CELLS =", num_of_cells)
    print("NUM_OF_nonzero_EC =", np.shape(tcc_matrix)[0])

    #################################

    EC_dict = {}
    for i in range(np.shape(eclist)[0]):
        EC_dict[i] = [int(x) for x in eclist[i, 1].split(',')]

    union = set()
    for i in nonzero_ec:
        new = [tx for tx in EC_dict[i]
               if tx not in union]  # filter out previously seen transcripts
        union.update(new)
    NUM_OF_TX_inTCC = len(union)
    print("NUM_OF_Transcripts =", NUM_OF_TX_inTCC
          )  # number of distinct transcripts in nonzero eq. classes

    ##############################################################
    # inspect

    # sort eq. classes based on size
    size_of_ec = [len(EC_dict[i]) for i in nonzero_ec]
    ec_idx = [i[0] for i in sorted(enumerate(size_of_ec), key=lambda x: x[1])]
    index_ec = np.array(ec_idx)

    ec_sort_map = {}
    nonzero_ec_srt = []  # init
    for i in range(len(nonzero_ec)):
        nonzero_ec_srt += [nonzero_ec[index_ec[i]]]
        ec_sort_map[nonzero_ec[index_ec[i]]] = i

    sumi = np.array(tcc_matrix.sum(axis=1))
    sumi_sorted = sumi[index_ec]
    total_num_of_umis = int(sumi_sorted.sum())
    total_num_of_umis_per_cell = np.array(tcc_matrix.sum(axis=0))[0, :]

    print("Total number of UMIs =", total_num_of_umis)

    #################################

    fig, ax1 = plt.subplots()
    ax1.plot(sorted(total_num_of_umis_per_cell)[::-1], 'b-', linewidth=2.0)
    ax1.set_title('UMI counts per cell')
    ax1.set_xlabel('cells (sorted by UMI counts)')
    ax1.set_ylabel('UMI counts')
    ax1.set_yscale("log", nonposy='clip')
    ax1.grid(True)
    ax1.grid(True, 'minor')
    umi_counts_per_cell = os.path.join(work_dir, "UMI_counts_per_cell.png")
    plt.savefig(umi_counts_per_cell, format='png')

    fig, ax1 = plt.subplots()
    ax1.plot(sorted(sumi.reshape(np.shape(sumi)[0]))[::-1],
             'r-',
             linewidth=2.0)
    ax1.set_title('UMI counts per eq. class')
    ax1.set_xlabel('ECs (sorted by UMI counts)')
    ax1.set_ylabel('UMI counts')
    ax1.set_yscale("log", nonposy='clip')
    ax1.grid(True)
    ax1.grid(True, 'minor')
    umi_counts_per_class = os.path.join(work_dir, "UMI_counts_per_class.png")
    plt.savefig(umi_counts_per_class, format='png')

    cell_nonzeros = np.array(((T_norm != 0)).sum(axis=0))[0]

    fig, ax1 = plt.subplots()
    ax1.plot(total_num_of_umis_per_cell, cell_nonzeros, '.g', linewidth=2.0)
    ax1.set_title('UMI counts vs nonzero ECs')
    ax1.set_xlabel('total num of umis per cell')
    ax1.set_ylabel('total num of nonzero ecs per cell')
    ax1.set_yscale("log", nonposy='clip')
    ax1.set_xscale("log", nonposy='clip')
    ax1.grid(True)
    ax1.grid(True, 'minor')
    umi_counts_vs_nonzero_ecs = os.path.join(work_dir,
                                             "UMI_counts_vs_nonzero_ECs.png")
    plt.savefig(umi_counts_vs_nonzero_ecs, format='png')

    # TCC MEAN-VARIANCE
    #todo verify this works
    TCC_var = np.var(tcc.todense(), axis=0)
    TCC_mean = np.mean(tcc.todense(), axis=0)
    TCC_mean = np.array(TCC_mean)[0]
    TCC_var = np.array(TCC_var)[0]
    fig = plt.figure()
    N = tcc.sum()
    C = tcc.shape[0]
    ax = plt.gca()
    ax.plot(TCC_mean,
            TCC_var,
            '.',
            c='blue',
            alpha=0.5,
            markeredgecolor='none')
    xlims = [0.0001, 10 * TCC_mean.max()]
    ax.set_xlim(xlims)
    ax.set_ylim([0.0001, 10 * TCC_var.max()])
    ax.set_yscale('symlog')
    ax.set_xscale('symlog')
    ax.plot(xlims, [(C - 1) * (xlims[0])**2, (C - 1) * (xlims[1])**2],
            color='g',
            linestyle='-',
            linewidth=2)
    ax.plot(xlims, [(xlims[0]), (xlims[1])],
            color='k',
            linestyle='--',
            linewidth=1)
    ax.set_title("TCC Mean-Variance [" + str(tcc.shape[1]) + " TCCs in " +
                 str(C) + " Cells]")
    ax.set_xlabel("mean(TCC)")
    ax.set_ylabel("var(TCC)")
    tcc_mean_variance = os.path.join(work_dir, "TCC_mean_variance.png")
    plt.savefig(tcc_mean_variance, format='png')

    ##############################################################
    # clustering

    #################################
    # t-SNE
    x_tsne = tSNE_pairwise(2, pwise_dist_l1)

    #################################
    # spectral clustering
    n_clusters = config.n_clusters
    similarity_mat = pwise_dist_l1.max() - pwise_dist_l1
    labels_spectral = spectral(n_clusters, similarity_mat)

    spectral_clustering = stain_plot(x_tsne,
                                     labels_spectral, [],
                                     "TCC -- tSNE, spectral clustering with " +
                                     str(n_clusters) + " n_clusters",
                                     work_dir=work_dir,
                                     filename="spectral_clustering_tSNE")

    #################################
    # affinity propagation
    pref = -np.median(pwise_dist_l1) * np.ones(num_of_cells)
    labels_aff = AffinityProp(-pwise_dist_l1, pref, 0.5)
    np.unique(labels_aff)

    affinity_propagation_tsne = stain_plot(
        x_tsne, labels_aff, [], "TCC -- tSNE, affinity propagation", work_dir,
        "affinity_propagation_tSNE")

    #################################
    # pca
    pca = PCA(n_components=2)
    x_pca = pca.fit_transform(t_normt.todense())

    affinity_propagation_pca = stain_plot(x_pca, labels_aff, [],
                                          "TCC -- PCA, affinity propagation",
                                          work_dir, "affinity_propagation_PCA")

    # SC3
    outfilePath = job.fileStore.getLocalTempFile()
    SC3OutputPath = os.path.join(work_dir, SC3_OUTPUT_DIRECTORY)
    os.mkdir(SC3OutputPath)
    shouldUseSC3Output = True
    with open(outfilePath, "r+") as outfile:

        def dockerPathTo(resource):
            return os.path.join(DOCKER_WORK_DIR, resource)

        def boolForR(aBool):
            return "TRUE" if aBool else "FALSE"

        try:
            dockerCall(job,
                       tool='rscript',
                       workDir=work_dir,
                       parameters=map(str, [
                           config.min_k, config.max_k,
                           dockerPathTo(MATRIX_TSV_FILENAME),
                           dockerPathTo(MATRIX_CELLS_FILENAME),
                           dockerPathTo(SC3_OUTPUT_DIRECTORY),
                           boolForR(config.use_estimated_k),
                           boolForR(config.debug)
                       ]),
                       outfile=outfile)
            pass
        except CalledProcessError:
            outfile.seek(0, 0)
            job.fileStore.logToMaster(
                "Docker failed with the following log:  " +
                str(outfile.read()))
            shouldUseSC3Output = False
    # build tarfile of output plots
    output_files = [
        umi_counts_per_cell, umi_counts_per_class, umi_counts_vs_nonzero_ecs,
        tcc_mean_variance, spectral_clustering, affinity_propagation_tsne,
        affinity_propagation_pca, outfilePath
    ] + ([
        os.path.join(work_dir, SC3_OUTPUT_DIRECTORY, x)
        for x in os.listdir(SC3OutputPath)
    ] if shouldUseSC3Output else [])
    tarball_files(tar_name='single_cell_plots.tar.gz',
                  file_paths=output_files,
                  output_dir=work_dir)
    # return file id for consolidation
    return job.fileStore.writeGlobalFile(
        os.path.join(work_dir, 'single_cell_plots.tar.gz'))
Example No. 32
 def handler(self, connection):
     try:
         # Create a hello message
         connmark = connection.connmark
         hello = common.ofp_hello.new()
         hello.header.version = max(self.allowedversions)
         versionbitmap = common.ofp_hello_elem_versionbitmap.new()
         versionStart = 0
         thisBitmap = 0
         for v in sorted(self.allowedversions):
             while v > versionStart + 32:
                 versionbitmap.bitmaps.append(thisBitmap)
                 thisBitmap = 0
                 versionStart += 32
             thisBitmap = thisBitmap | (1 << (v - versionStart))
         versionbitmap.bitmaps.append(thisBitmap)
         hello.elements.append(versionbitmap)
         write = self.formatrequest(hello, connection)
         for m in connection.write(write, False):
             yield m
         # Wait for a hello
         hellomatcher = OpenflowPresetupMessageEvent.createMatcher(
             connection=connection)
         for m in connection.waitWithTimeout(self.hellotimeout,
                                             hellomatcher):
             yield m
         if connection.timeout:
             # Drop the connection
             raise OpenflowProtocolException(
                 'Did not receive hello message before timeout')
         else:
             msg = connection.event.message
             if msg.header.type != common.OFPT_HELLO:
                 raise OpenflowProtocolException(
                     'The first packet on this connection is not OFPT_HELLO'
                 )
             else:
                 helloversion = None
                 usebitmap = False
                 for e in msg.elements:
                     if e.type == OFPHET_VERSIONBITMAP:
                         # There is a bitmap
                         for v in reversed(sorted(self.allowedversions)):
                             bitmapIndex = v // 32
                             bitmapPos = (v & 31)
                             if len(e.bitmaps) < bitmapIndex:
                                 continue
                             if e.bitmaps[bitmapIndex] & (1 << bitmapPos):
                                 helloversion = v
                                 break
                         usebitmap = True
                         break
                 if not usebitmap:
                     helloversion = min(max(self.allowedversions),
                                        msg.header.version)
                 if helloversion is None or helloversion not in self.allowedversions:
                     self._logger.warning(
                         'Remote switch openflow protocol version is not compatible. Their hello message: %r, we expect version: %r. Connection = %r',
                         common.dump(msg), self.allowedversions, connection)
                     # Hello fail
                     hellofail = common.ofp_error_msg.new()
                     hellofail.header.version = max(self.allowedversions)
                     hellofail.type = common.OFPET_HELLO_FAILED
                     hellofail.code = common.OFPHFC_INCOMPATIBLE
                     if helloversion is None:
                         hellofail.data = b'A common version is not found from the bitmap\x00'
                     else:
                         hellofail.data = (
                             'Openflow version %s is not supported\x00' %
                             (common.ofp_version.getName(
                                 helloversion,
                                 str(helloversion)), )).encode()
                     write = self.formatreply(hellofail, msg, connection)
                     for m in connection.write(write):
                         yield m
                     for m in connection.reset(False, connmark):
                         yield m
                     raise GeneratorExit
                 else:
                     # Still we may receive a hello fail from the other side, we should expect that.
                     # The error message may come before feature request is sent.
                     err_matcher = OpenflowPresetupMessageEvent.createMatcher(
                         connection=connection, type=common.OFPT_ERROR)
                     # Send a feature request message
                     connection.openflowversion = helloversion
                     currdef = definations[helloversion]
                     connection.openflowdef = currdef
                     # Feature request message has no body
                     featurereq = currdef.ofp_msg.new()
                     featurereq.header.type = currdef.OFPT_FEATURES_REQUEST
                     write = self.formatrequest(featurereq, connection)
                     try:
                         for m in connection.withException(
                                 connection.write(write, False),
                                 err_matcher):
                             yield m
                         featurereply_matcher = OpenflowPresetupMessageEvent.createMatcher(
                             connection=connection,
                             type=currdef.OFPT_FEATURES_REPLY)
                         for m in connection.waitWithTimeout(
                                 self.featurerequesttimeout,
                                 featurereply_matcher, err_matcher):
                             yield m
                         if connection.timeout:
                             raise OpenflowProtocolException(
                                 'Remote switch did not response to feature request.'
                             )
                         elif connection.matcher is err_matcher:
                             self._logger.warning(
                                 'Error while request feature: %r Connection = %r',
                                 connection.event.message, connection)
                             raise OpenflowProtocolException(
                                 'Error while request feature: %r' %
                                 (connection.event.message, ))
                         else:
                             msg = connection.event.message
                             connection.openflow_featuresreply = msg
                             connection.openflow_datapathid = msg.datapath_id
                             connection.openflow_auxiliaryid = getattr(
                                 msg, 'auxiliary_id', 0)
                             connection.openflow_capabilities = msg.capabilities
                             connection.openflow_n_buffers = msg.n_buffers
                             connection.openflow_n_tables = msg.n_tables
                             statechange = OpenflowConnectionStateEvent(
                                 connection.openflow_datapathid,
                                 connection.openflow_auxiliaryid,
                                 OpenflowConnectionStateEvent.
                                 CONNECTION_SETUP, connection,
                                 connection.connmark, self)
                             for m in connection.waitForSend(statechange):
                                 yield m
                             for msg in connection.openflow_msgbuffer:
                                 e = self._createevent(connection, msg)
                                 if e is not None:
                                     for m in connection.waitForSend(e):
                                         yield m
                     except RoutineException as exc:
                         self._logger.warning(
                             'Remote report hello fail: %r Connection = %r',
                             common.dump(exc.arguments[1].message),
                             connection)
                         for m in connection.reset(True, connmark):
                             yield m
                         raise GeneratorExit
     except QuitException:
         pass
     except GeneratorExit:
         pass
     except:
         self._logger.exception(
             'Unexpected exception on processing openflow protocols, Connection = %r',
             connection)
         for m in connection.reset(True, connmark):
             yield m
Example No. 33
 X[0][:] = nodes
 X[1][:] = RMS
 print(str(len(X[0])) + ' ' + str(len(X[1])))
 print(X[0][0:100])
 print(X[1][0:100])
 Y = np.zeros((2, len(nodes)))
 Y[0][:] = nodes
 Y[1][:] = RMS
 print(str(len(Y[0])) + ' ' + str(len(Y[1])))
 print(Y[0][0:100])
 print(Y[1][0:100])
 Z = np.zeros((2, len(nodes)))
 Z[0][:] = degrees
 Z[1][:] = nodes
 print(str(len(Z[0])) + ' ' + str(len(Z[1])))
 print(Z[0][0:100])
 print(Z[1][0:100])
 data = sorted(data, key=lambda x: (x[0]))
 # list(map(print, data))
 # degree_function = config["reservoir"]["degree_function"].split(':')[1]
 end = len(nodes)
 # plt.text(2000, 1.5, degree_function, size=10, ha="center")
 fig = plt.figure()
 ax = fig.add_subplot(111, projection='3d')
 # plt.title('D = ' + degree_function)
 ax.scatter(nodes, degrees, RMS, c='b')
 # plt.plot(nodes[0:end], RMS[0:end])
 plt.xlabel('Nodes')
 plt.ylabel('Degree')
 plt.tight_layout()
 plt.show()
Example No. 34
list_of_place = ['hangzhou','shanghai','meizhou','xian','beijing']
#1
print(list_of_place)
print(sorted(list_of_place))
print(list_of_place)

print(sorted(list_of_place,reverse = True))
print(list_of_place)

list_of_place.reverse()
print(list_of_place)

list_of_place.sort()
print(list_of_place)

list_of_place.sort(reverse = True)
print(list_of_place)
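
A short follow-up sketch on the difference demonstrated above: sorted() returns a new list and leaves the original untouched, while list.sort() reorders it in place and returns None.

places = ['hangzhou', 'shanghai', 'meizhou', 'xian', 'beijing']
assert sorted(places) == ['beijing', 'hangzhou', 'meizhou', 'shanghai', 'xian']
assert places == ['hangzhou', 'shanghai', 'meizhou', 'xian', 'beijing']  # unchanged
assert places.sort() is None and places[0] == 'beijing'                  # now mutated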
Example No. 35
def handle(msg):
    content_type, chat_type, chat_id = telepot.glance(msg)

    file_id = ""
    chat_id = msg['chat']['id']
    command = ""
    user = ""

    if (content_type == "text"):
        chat_id = msg['chat']['id']
        command = msg['text']
        print("in text mode")
        user = msg['from']['first_name']
        print("Got command: %s" % command)

        if command == 'time':
            bot.sendMessage(chat_id, str(datetime.datetime.now()))

        # determine the reply message based on the hour of the day
        elif command == 'Hi' or command == 'hi' or command == 'HI' or command == 'hI':  #Hi Query
            replyMessage = "Hi " + user + " "
            greeting = "It is sleeping time you still awake"
            hour = int(
                datetime.datetime.strftime(datetime.datetime.now(), '%H'))
            #print(hour)
            if (hour >= 4 and hour < 12):
                greeting = "Good Morning"
            elif (hour >= 12 and hour < 16):
                greeting = "Good Afternoon"
            elif (hour >= 16 and hour < 20):
                greeting = "Good Evening"

            replyMessage = replyMessage + greeting
            bot.sendMessage(chat_id, replyMessage)

        #gives various details of raspberry pi
        elif command.lower() == "How are you".lower():  #Health Query
            print("In Health Query")
            cpu_temparature = get_cpu_temparature()
            cpu_usage = psutil.cpu_percent()
            ram = psutil.virtual_memory()
            ram_total = ram.total / 2**20  # MiB.
            ram_used = ram.used / 2**20
            ram_free = ram.free / 2**20
            ram_percent_used = ram.percent

            disk = psutil.disk_usage('/')
            disk_total = disk.total / 2**30  # GiB.
            disk_used = disk.used / 2**30
            disk_free = disk.free / 2**30
            disk_percent_used = disk.percent

            message = "I am doing as \nCPU Temparature " + str(
                cpu_temparature) + "C \nCPU Usage " + str(
                    cpu_usage) + " \nRam Percent Used " + str(
                        ram_percent_used) + " \nFree Disk Space " + str(
                            disk_free) + "Gb"
            bot.sendMessage(chat_id, message)

        # send the local IP address and the name of the WiFi network it is connected to
        elif command.lower() == "Where are you".lower():
            print("telling where am I")
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect((ConfigParams.google_domain, 80))
            ipaddr = s.getsockname()[0]
            wifi = Wireless('wlan0')
            wifiname = wifi.getEssid()

            message = "I am connected on " + ipaddr + " \nto WiFi " + wifiname
            bot.sendMessage(chat_id, message)
        elif command.lower() == "coming up cricket":
            print("fetching upcoming matches")

        # if the command is "cricket", fetch scores from the Cricbuzz page
        elif command.lower() == "cricket".lower():
            print("Fetching Cricket Scores ...")
            page = requests.get(ConfigParams.crc_buzz_url)
            tree = html.fromstring(page.content)
            #searching for required data
            allscoreslist = tree.xpath(ConfigParams.cric_buzz_path)
            allscores = []
            #remove duplicate score entries (note: this may also drop
            #legitimately repeated scores)
            for score in allscoreslist:
                if score not in allscores:
                    allscores.append(score)
            message = ""
            teamscores = []
            #format the de-duplicated data for readability
            for score in allscores:
                if score[0].isdigit():
                    message = message + (score + "\n")
                else:
                    if len(score) > 6:
                        score = score + "\n"
                        message = message + score
                        message = message + "**************" + "\n"
                        if message not in teamscores:
                            teamscores.append(message)
                            message = ""
                        else:
                            print("Met matching values")
                            message = ""
                    else:
                        message = message + (score + "\t")

            bot.sendMessage(chat_id, "".join(teamscores))

        #used for downloading files previously uploaded to this bot
        elif command.lower().find("download") != -1:

            document = None
            if command.split(".")[1] in ("jpg", "jpeg", "png"):
                try:
                    filename = '/home/pi/Scripts/photos/' + command.split(
                        " ")[1]
                    document = open(filename, 'rb')
                except IOError:
                    bot.sendMessage(chat_id, "File not found")
            else:
                try:
                    filename = '/home/pi/Scripts/documents/' + command.split(
                        " ")[1]
                    document = open(filename, 'rb')
                except IOError:
                    bot.sendMessage(chat_id, "File not found")

            if document:
                bot.sendDocument(chat_id, document)

        #if the message starts with the 'stocks' keyword, look up the company's NSE code and fetch its current price
        elif command.lower().split(":")[0] == "stocks":
            #company name requested by the user
            company = command.split(":")[1]
            nse = Nse()
            all_codes = readCodesFile('StockCodes.csv', company)
            if bool(all_codes):
                codes = sorted(all_codes.keys())
                message = " "
                for code in codes:
                    message = message + code + " : " + str(
                        nse.get_quote(all_codes[code])['lastPrice']) + "\n"
            else:
                message = "Stock not found"
            bot.sendMessage(chat_id, message)
        else:
            message = "My Boss asked me to stay silent rather giving false information"
            bot.sendMessage(chat_id, message)

    #if the user sent a document, photo or video, store it on the Raspberry Pi so it can be downloaded later
    elif (content_type == "document" or content_type == "photo"
          or content_type == "video"):
        if content_type == "document":
            file_id = msg['document']['file_id']

            file_name = msg['document']['file_name']

        elif content_type == "photo":
            file_id = msg['photo'][-1]['file_id']

        elif content_type == "video":
            file_id = msg['video']['file_id']

        bot.getUpdates()
        filereceived = bot.getFile(file_id)

        filepath = filereceived['file_path']

        file_name, file_extension = os.path.splitext(filepath)

        if content_type == "document":
            bot.download_file(file_id,
                              "/home/pi/Scripts/" + file_name + file_extension)
            bot.sendMessage(chat_id,
                            "Received and stored your file " + file_name)
        elif content_type == "photo":
            bot.download_file(file_id,
                              "/home/pi/Scripts/" + file_name + file_extension)
            bot.sendMessage(chat_id,
                            "Received and stored your photo " + file_name)
        elif content_type == "video":
            bot.download_file(file_id,
                              "/home/pi/Scripts/" + file_name + file_extension)
            bot.sendMessage(chat_id,
                            "Received and stored your video " + file_name)

    #if the user sent a location, reply with weather and reverse-geocoded address details
    elif content_type == 'location':
        location = msg['location']

        lat = location['latitude']
        lon = location['longitude']

        owm = pyowm.OWM(ConfigParams.open_weather_key)
        observation = owm.weather_at_coords(lat, lon)
        weather = observation.get_weather()
        location = observation.get_location()

        gmaps = googlemaps.Client(key=ConfigParams.google_key)
        geo_loc = str(lat), str(lon)
        g = geocoder.google(geo_loc, method='reverse')

        message = "***Weather&Location Statistics***"
        message = message + "\nCity : " + location.get_name(
        ) + "\nState : " + g.state + "\nPostalCode : " + g.postal + "\nTemp Max : " + str(
            weather.get_temperature('celsius')
            ['temp_max']) + "\nTemp Min : " + str(
                weather.get_temperature('celsius')
                ['temp_min']) + " \nStatus : " + weather.get_detailed_status(
                ) + "\nSunRise : " + weather.get_sunrise_time('iso')
        message = message + "\nSunSetTime : " + weather.get_sunset_time(
            'iso') + "\n"

        bot.sendMessage(chat_id, message)
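The handler above assumes a module-level bot object plus helpers such as get_cpu_temparature() and ConfigParams defined elsewhere. A hedged sketch of how handle() might be wired up with telepot (the token is a placeholder; telepot >= 10 is assumed, older versions used bot.message_loop(handle) instead):

import time
import telepot
from telepot.loop import MessageLoop

bot = telepot.Bot('PLACEHOLDER_TOKEN')    # assumed: the real token would come from config
MessageLoop(bot, handle).run_as_thread()  # dispatches every incoming message to handle()
print('Listening ...')

while True:          # keep the main thread alive
    time.sleep(10)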
Exemplo n.º 36
0
def getCommentCategory(text):
    category_text = "cat"
    #0
    category_thank = [
        "grac", "graz", "ringraz", "grrrazzz", "thank", "thnks", "thak",
        "thanks", "much", "tnx", "merci"
    ]
    #category_thank = ["grac", "graz","thank"]
    #1
    category_congratulation = [
        "augur", "complean", " felic", " tanti", "congrat"
    ]
    #category_congratulation = ["augur", "complean"," felic", " tanti ","congrat"]
    #2
    category_agreement = [
        "cert", "concordi", "convenir", "accord", " si ", "esattamente",
        " vero", "conferm", "anch'io", "sii", "anche io", "d'accordissimo"
        "agree", "certain", " ok ", " right", " sure", " yes", "of course",
        "true", "me too", "yeah"
    ]
    #category_agreement = ["cert", "concordi", "convenir", "accord", " si ", "esattamente", "conferm",
    #"agree", "certain", " ok ", " right", " sure", " yes", "of course", "true"]
    #3
    category_positive = [
        "amar", "amor", "bac", "bacion", "bei", "bel", "ben", "bont",
        "bravissim", "buon", "carin", "carissm", "compl", "cuor", "dolc",
        "fantas", "favol", "felic", "gentil", "grand", "happi", "meravigl",
        "merce", "miglior", "molt", "onor", "ottim", "perfett", "piac",
        "riusc grand", "spettacol", "splendid", "stup", "tesor", "ador",
        "gioia", "preferit", "geni", "unic", "simpatic", "gosto", "bom dia",
        "buen", "bom", "forte", "soddisfaz", "prego", "paradis", "megli",
        "oddio", "divin", "rider", "magia", "bona", "amo", "arte", "valeva",
        "sognante", "onirica", "boni", "dio", "vorrei", "fata", "unic",
        "notevole", "sogno", "eccellent", "cara", "meritat", "magna", "buoono",
        "whoop", "romantic", "viva", "forza", "mamma mia", "capolavor",
        "perfect", "braavaa", "interessant", "wee", "caro", "apprezzi", "hope",
        "abaw", "luminos", "bone", "important", "perfeit", "maravi", "precios",
        "lind", "eleganz", "cuti", "fatt", "fiori", "eco", "belle", "touchant",
        "magnifique", "tesoro", "adoro", "amaz", "awesom", "beaut", "brav",
        "cool", "cute", "darling", "dear", "enjoy", "excellent", "fabulous",
        "good", "gorgeous", "great", "kind", "like", "lov", "magnif", "nice",
        "prett", "spectacul", "super", "sweet", "well", "wonder", "wow",
        "interest", "favorite", "special", "unique", "brilliant", "best",
        "big", "satisfa", "better", "welcome", "anytime", "fav", "heaven",
        "wish", "god", "artist", "laugh", "aha", "hehe", "you too", "magic",
        "win", "a l o h a", "happy", "worth", "dream", "fan", "merry",
        "christmas", "xmas", "yee", "top", "wow", "woow", "yup", "mmm", "care",
        "fairy", "remark", "strong", "excellent", "deserve", "masterpiece",
        "clean", "flawless", "lob", "miss", "need", "niccee", "jaja", "aww",
        "f4f", "muah", "thumb", "cheer", "sexy", "ohh", "ohmamma", "coool",
        "yee", "tempt", "stunn", "ahh", "heart", "mm", "any time", "whao",
        "lol", "lool", "hum", "woah", "wau", "beaitiful", "uau", "yeeh",
        "ahmaz", "ehe", "euu", "aww", "ooh", "epic", "bono", "<3", "keren",
        "kereen", ";)", ":)", ":-)", ":d", "(:", ":p", ":-d", ":*", ";-)",
        "-)", "xd", "xx"
    ]
    #category_positive = ["amar", "amor", "bac", "bacion", "bei", "bel", "ben", "bont", "bravissim",
    #"buon", "carin", "carissm", "compl", "cuor", "dolc", "fantas", "favol", "felic",
    #"gentil", "grand", "happi", "meravigl", "merce", "miglior", "molt", "onor", "ottim",
    #"perfett", "piac", "riusc grand", "spettacol", "splendid", "stup", "tesor", "ador",
    #"gioia", "preferit", "geni", "unic", "simpatic", "gosto", "bom dia", "buen",
    #"bom", "forte", "soddisfaz", "prego", "paradis", "megli", "oddio", "divin", "rider",
    #"magia", "bona", "amo", "arte", "valeva", "sognante", "onirica", "boni", "dio",
    #"vorrei", "fata", "unic", "notevole", "sogno", "eccellent", "cara", "meritat",
    #"magna", "buoono", "whoop", "romantic", "viva",
    #"amaz", "awesom", "beaut", "brav", "cool", "cute", "darling", "dear", "enjoy",
    #"excellent", "fabulous", "good", "gorgeous", "great", "kind", "like", "lov",
    #"magnif", "nice", "prett", "spectacul", "super", "sweet", "well", "wonder", "wow",
    #"interest","favorite", "special", "unique",]
    #4
    category_invitation = [
        "canali", "invit", "pagin", "segui", "sito", "venir", " blog", "check",
        "clic", "follow", "http", "invite", "link", "mail", "page", "http",
        ".com ", "likes4likes", "like4like", "likeforfollow", "likeforlike"
        "likesforlikes", "love4love", " site", "visit", "web", "direct",
        "bussiness card", "look at", "my bio"
    ]
    #category_invitation = ["canali", "invit", "pagin", "segui", "sito", "venir",
    #" blog", "check", "clic", "follow", "http", "invite", "link", "mail", "page",
    #" site", "visit", "web"]
    #5
    category_food = [
        "acqua", "acquolin", "aperitiv", "appet", "aranc", "assagg", "bevand",
        "birr", "biscott", "braciol", "broccol", "bruschett", "caff", "calzon",
        "cannell", "cappuccin", "capres", "beve", "carne", "carot", "cavol",
        "cena", "cibo", "ciocco", "colazion", "cottur", "crem", "croccant",
        "cucchi", " cucin", "cuoc", "delica", "deliz", "diet", "dolc", "dolci",
        "espresso", "fagiol", "fame", "farin", "formagg", "fragol", "fresc",
        "frig", "fritt", "froll", "frutt", "gluten", "gnam", "gnocc",
        "gourmet", "grano", "gust", "hungri", "impast", "lasagn", "latt",
        "lievit", "limon", "mangi", "marmell", "mascarpon", "melanz", "mele",
        "mensa", "merenda", "noc", "nutell", "nutrit", "olio", "oliv", "pane",
        "panna", "parmigian", "pasticcer", "peper", "pesc", "ghiacci", "pizz",
        "polpett", "pomodor", "pranz", "pranz", "prosciut", "raviol", "ricett",
        "ricott", "risott", "ristor", "salam", "salat", "salato", "salsa",
        "saltimbocc", "sapor", "spaghet", "spuntin", "squis", "tiramis",
        "tonn", "uov", "vanigl", "vegan", "verdur", " vino", "vitamin",
        "yogurt", "zaffer", "zuccher", "zupp", "carbonara", "carbo", "perra",
        "cott", "sfam", "mozzarel", "orto", "panci", "merend", " gelat",
        "sfoglia", "menta", "piatt", "affamata", "mozarel", "composto",
        "sfornat", " bake", "banan", " basil", "beer", "bread", "breakfast",
        "cake", "calorie", "caramel", "carrot", "chees", "chef", "chocol",
        "cinamon", "coffe", "cook", "crisp", "crunch", "cuisin", "dairy",
        "delici", "delish", "dessert", "diet", "dinner", "dish", "drink",
        " eat ", "egg", "feed", "fish", "food", "fresh", "fried", "fruit",
        "fry", "fung", "homemade", "ingredient", "jam", "kitchen", "lunch",
        "meal", "meat", "muffin", " nom", "nourish", "omellet", "orange",
        "pancake", "pastr", "pepper", "pudding", "recip", "restaur", "salad",
        "salmon", "salt ", "seafood", "snack", "squid", "strawberr", "sugar",
        " sweet", "tart", " tast", "tea", "tomato", "tuna", " vanill", "veg",
        "wine", "yast", "yum", "pastry", "stomach", "pear ", "pears ", "apple",
        "orange", "avocado", "mint", "yamm", "dish", "cherr", "hungry",
        "donuts", "tort", "magar", "lecker", "miam", "health"
    ]
    #category_food = ["acqua", "acquolin", "aperitiv", "appet", "aranc", "assagg", "bevand", "birr", "biscott",
    #"braciol", "broccol", "bruschett", "caff", "calzon", "cannell", "cappuccin", "capres", "beve",
    #"carne", "carot", "cavol", "cena", "cibo", "ciocco", "colazion", "cottur", "crem", "croccant",
    #"cucchi", " cucin", "cuoc", "delica", "deliz", "diet", "dolc", "dolci", "espresso", "fagiol",
    #"fame", "farin", "formagg", "fragol", "fresc", "frig", "fritt", "froll", "frutt", "gluten",
    #"gnam", "gnocc", "gourmet", "grano", "gust", "hungri", "impast", "lasagn", "latt", "lievit",
    #"limon", "mangi", "marmell", "mascarpon", "melanz", "mele", "mensa", "merenda", "noc", "nutell",
    #"nutrit", "olio", "oliv", "pane", "panna", "parmigian", "pasticcer", "peper", "pesc","ghiacci",
    #"pizz", "polpett", "pomodor", "pranz", "pranz", "prosciut", "raviol", "ricett", "ricott", "risott",
    #"ristor", "salam", "salat", "salato", "salsa", "saltimbocc", "sapor", "spaghet", "spuntin", "squis",
    #"tiramis", "tonn", "uov", "vanigl", "vegan", "verdur", " vino", "vitamin", "yogurt", "zaffer",
    #"zuccher", "zupp",
    #" bake", "banan", " basil", "beer", "bread", "breakfast", "cake", "calorie", "caramel", "carrot",
    #"chees", "chef", "chocol", "cinamon", "coffe", "cook", "crisp", "crunch", "cuisin", "dairy", "delici",
    #"delish", "dessert", "diet", "dinner", "dish", "drink", " eat ", "egg", "feed", "fish", "food",
    #"fresh", "fried", "fruit", "fry", "fung", "homemade", "ingredient", "jam", "kitchen", "lunch", "meal",
    #"meat", "muffin", " nom", "nourish", "omellet", "orange", "pancake", "pastr", "pepper", "pudding",
    #"recip", "restaur", "salad", "salmon", "salt ", "seafood", "snack", "squid", "strawberr", "sugar",
    #" sweet", "tart", " tast", "tea", "tomato", "tuna", " vanill", "veg", "wine"]
    #6
    category_greeting = [
        "arriv", "buon", "sera", "buongiorn", "ciao", "giorn", "mattin",
        "nott", "salv", "buonjiorn", "ciaio", "moin", "hey", "morning",
        "night", "morning", "afternoon", "hello", "good", "bonjour", "hola",
        "coucou", "bonne", "bounjour", "ohayo", "bonsoir"
    ]
    #category_greeting = ["arriv", "buon", "sera", "buongiorn", "ciao", "giorn", "mattin", "nott",
    #"salv",
    #"hey", "morning", "night", "morning", "afternoon", "hello", "good"]

    category = []
    category.append({"name": "thank", "keywords": category_thank, "count": []})
    category.append({
        "name": "congratulation",
        "keywords": category_congratulation,
        "count": []
    })
    category.append({
        "name": "agreement",
        "keywords": category_agreement,
        "count": []
    })
    category.append({
        "name": "positive",
        "keywords": category_positive,
        "count": []
    })
    category.append({
        "name": "invitation",
        "keywords": category_invitation,
        "count": []
    })
    category.append({"name": "food", "keywords": category_food, "count": []})
    category.append({
        "name": "greeting",
        "keywords": category_greeting,
        "count": []
    })

    i = 0
    score_category = []
    for cat in category:
        for word in cat["keywords"]:
            count = text.count(word)
            cat["count"].append(count)
        score_category.append(sum(cat["count"]))
    score_category[3] = 0.5 * score_category[3]

    score_category.append(0)
    score_category.append(0.0001)
    word_token = word_tokenize(text)
    for w in word_token:
        if len(w) > 0 and w[0] == "#":
            score_category[7] += 0.001
    #print score_category

    rank = [
        x for _, x in sorted(zip(score_category, category_name), reverse=True)
    ]

    category_text = rank[0]

    return category_text
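getCommentCategory() depends on word_tokenize (presumably from NLTK) and a global category_name list that is not shown here. A hedged sketch of what those globals might look like and how the function could be called; the label names and their order are assumptions inferred from the score indices:

from nltk.tokenize import word_tokenize  # assumed source of word_tokenize

# assumed: one label per score slot, including the two appended fallback slots
category_name = ["thank", "congratulation", "agreement", "positive",
                 "invitation", "food", "greeting", "hashtag", "other"]

print(getCommentCategory("grazie mille, sei un tesoro!"))  # likely "thank"
print(getCommentCategory("#tbt #nofilter"))                # likely "hashtag"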
Exemplo n.º 37
0
    def test_component_attribs(self):
        with io.open(self.baselineProfile, 'r', encoding='utf-8-sig') as f:
            target = f.read()
        targetLines = target.splitlines()
        targetTag = {}
        for line in targetLines:
            try:
                t, val = line.split(':',1)
                targetTag[t] = val
            except ValueError:
                # need more than one value to unpack; this is a weak way to
                # handle multi-line default values, eg TextComponent.text.default
                targetTag[t] += '\n' + line  # previous t value

        param = experiment.Param('', '')  # want its namespace
        ignore = ['__doc__', '__init__', '__module__', '__str__', 'next',
                  '__unicode__', '__native__', '__nonzero__', '__long__']

        # these are for display only (cosmetic) and can end up being localized
        # so typically do not want to check during automated testing, at least
        # not when things are still new-ish and subject to change:
        ignore += ['hint',
                   'label',  # comment-out to compare labels when checking
                   'categ',
                   'next',
                   'dollarSyntax',
                   ]
        for field in dir(param):
            if field.startswith("__"):
                ignore.append(field)
        fields = set(dir(param)).difference(ignore)

        mismatched = []
        for compName in sorted(self.allComp):
            comp = self.allComp[compName](parentName='x', exp=self.exp)
            order = '%s.order:%s' % (compName, eval("comp.order"))

            if order+'\n' not in target:
                tag = order.split(':',1)[0]
                try:
                    mismatch = order + ' <== ' + targetTag[tag]
                except (IndexError, KeyError): # missing
                    mismatch = order + ' <==> NEW (no matching param in the reference profile)'
                print(mismatch.encode('utf8'))

                if not ignoreOrder:
                    mismatched.append(mismatch)

            for parName in comp.params:
                # default is what you get from param.__str__, which returns its value
                if not constants.PY3:
                    if isinstance(comp.params[parName].val, unicode):
                        comp.params[parName].val = comp.params[parName].val.encode('utf8')
                default = '%s.%s.default:%s' % (compName, parName, comp.params[parName])
                lineFields = []
                for field in fields:
                    if parName == 'name' and field == 'updates':
                        continue
                        # ignore b/c never want to change the name *during a running experiment*
                        # the default name.updates varies across components: need to ignore or standardize
                    f = '%s.%s.%s:%s' % (compName, parName, field, eval("comp.params[parName].%s" % field))
                    lineFields.append(f)

                for line in [default] + lineFields:
                    # some attributes vary by machine so don't check those
                    if line.startswith('ParallelOutComponent.address') and ignoreParallelOutAddresses:
                        continue
                    elif line.startswith('SettingsComponent.OSF Project ID.allowedVals'):
                        continue
                    elif ('SettingsComponent.Use version.allowedVals' in line or
                        'SettingsComponent.Use version.__dict__' in line):
                        # versions available on travis-ci are only local
                        continue
                    origMatch = line+'\n' in target
                    lineAlt = (line.replace(":\'", ":u'")
                                    .replace("\\\\","\\")
                                    .replace("\\'", "'"))
                    # start checking params
                    if not (line+'\n' in target
                            or lineAlt+'\n' in target):
                        # mismatch, so report on the tag from orig file
                        # match checks tag + multi-line, because line is multi-line and target is whole file
                        tag = line.split(':',1)[0]
                        try:
                            mismatch = line + ' <== ' + targetTag[tag]
                        except KeyError: # missing
                            mismatch = line + ' <==> NEW (no matching param in the reference profile)'

                        # ignore attributes that inherit from object:

                        if ignoreObjectAttribs:
                            for item in ignoreList:
                                if item in mismatch:
                                    break
                            else:
                                mismatched.append(mismatch)
                        else:
                            mismatched.append(mismatch)

        for mismatch in mismatched:
            warnings.warn("Non-identical Builder Param: {}".format(mismatch))
Exemplo n.º 38
0
 def __combine(nl_dict, code_dict):
     ret = []
     for key in sorted([int(key) for key in nl_dict.keys()]):
         ret.append((nl_dict[str(key)], code_dict[str(key)], str(key)))
     return ret
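A short usage sketch for __combine with hypothetical inputs; it assumes the helper is reachable as a plain function rather than only through an enclosing class. It illustrates that keys are compared as integers, so "10" sorts after "2":

nl = {"0": "add two numbers", "10": "reverse a string", "2": "square a number"}
code = {"0": "a + b", "10": "s[::-1]", "2": "n ** 2"}

print(__combine(nl, code))
# [('add two numbers', 'a + b', '0'),
#  ('square a number', 'n ** 2', '2'),
#  ('reverse a string', 's[::-1]', '10')]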
Exemplo n.º 39
0
def extract(directory):
    """extract all the hs & to data from uprite sensor"""
    global coordinates
    global orienation
    global pace

    start_time = clocktime.time()  # record clocktime
    save_data = {}

    # Extract Patient Number
    patient_name = directory[-6:]
    print("Extracting data for patient:", patient_name)

    # Open data_file, gravity_window, data_window
    data, data_wdw, grav_wdw = open_files(directory)

    # Take out acceleration and gyroscope data from tailbone
    accel_all = data['UR']['sensorData']['tailBone']['accel']['data']
    gyro_all = data['UR']['sensorData']['tailBone']['gyro']['data']

    # Round all window coordinates
    for p in pace:
        for i in range(0, 2):
            data_wdw[p][i] = my_round(data_wdw[p][i])
            grav_wdw[i] = my_round(grav_wdw[i])

    print('Interval for motion data:', data_wdw)
    print('Interval for gravity data:', grav_wdw)

    # Check if not enough data
    if (data_wdw['flag']['F'] == 0):
        print('Not enough accel-data recorded')
        return
    elif data_wdw['F'][1] > len(gyro_all['x']):
        print("Not enough gyro data", data_wdw['F'][1], len(gyro_all['x']))
        return

    ### MAY BE UNNEC
    mean_accel = dict()
    for w in coordinates:  #gravity vector
        mean_accel[w] = stats.mean(accel_all[w][grav_wdw[0]:grav_wdw[1]])

    # Initialize variables
    accel = dict()
    gyro = dict()

    # Iterate through slow, calm, fast paces
    for p in pace:

        # cut data with windows
        for w in coordinates:
            accel[w] = accel_all[w][data_wdw[p][0]:data_wdw[p][1]]
            gyro[w] = gyro_all[w][data_wdw[p][0]:data_wdw[p][1]]
        gyro['sec'] = gyro_all['seconds'][data_wdw[p][0]:data_wdw[p][1]]
        accel['sec'] = accel_all['seconds'][data_wdw[p][0]:data_wdw[p][1]]

        ####

        # rename angular_pos variable
        angular_pos = {}

        # Find high-pass angular position for the z direction
        fs = 100
        cut = 0.5
        gyro2_hpf = {}
        gyro2_hpf['z'] = highpass(gyro['z'], fs, cut)
        angular_pos[1] = {}
        angular_pos[1]['z'] = integrate.IMU(gyro['sec'],
                                            gyro2_hpf['z'],
                                            units='rad')

        # Find low-pass angular position for the z direction
        fs = 100
        Ny = fs / 2
        cut = [5 / Ny, 6 / Ny]
        angular_pos[2] = {}
        angular_pos[2]['z'] = lowpass(angular_pos[1]['z'], cut, fs, ripple_tol)

        neg_ang_pos = {}
        neg_ang_pos['z'] = angular_pos[2]['z']
        neg_ang_pos['z'] = [0 if x > 0 else x for x in neg_ang_pos['z']]
        neg_ang_pos['z'] = list(map(abs, neg_ang_pos['z']))

        # Search for gyroscope peaks/troughs z-direction
        # Initializing approximate step ranges
        search_size = 40
        min_dist = my_round(1 / 3 * 100)
        max_dist = my_round(1 / 0.5 * 100)
        fs = 100

        #search for all the troughs
        troughs,_,_,_ = find_peaks.forward(neg_ang_pos['z'], search_size, min_dist, \
          max_dist, fs)
        temp = list(reversed(neg_ang_pos['z']))
        backward_troughs,_,_,_ = find_peaks.forward(temp, search_size, min_dist, \
          max_dist, fs)
        temp = len(neg_ang_pos['z']) - 1  # used positive again GC?
        backward_troughs = list(reversed([temp - x for x in backward_troughs]))

        all_troughs = sorted(list(set(troughs + backward_troughs)))

        plt.plot(gyro['sec'], angular_pos[2]['z'])
        plt.plot([gyro['sec'][x] for x in all_troughs], [angular_pos[2]['z'][x] for x \
         in all_troughs], 'c^')

        plt.show()

        ####

        quit()

        #############################################################################################################################
        plt.savefig('../../docs/' + patient_name + p + '.pdf')

    with open(os.path.join(directory, 'uprite_hs_to.pkl'), 'wb') as afile:
        pickle.dump(save_data, afile)

    print("Completed HS & TO for patient: ", patient_name)
    print('Successful run!')
    print('-----------RUNTIME: %s second ----' %
          (clocktime.time() - start_time))
Exemplo n.º 40
0
x = sorted([int(_) for _ in input().split()], reverse=True)
print(10*x[0]+x[1]+x[2])
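For example, with the input "3 7 5" the descending sort gives x = [7, 5, 3], so the program prints 10*7 + 5 + 3 = 78: the largest of the three values is placed in the tens position and the other two are simply added.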
Exemplo n.º 41
0
 def get_zones(self):
     return sorted(set(list(self._zones.keys()) + \
                       list(self._builtin_zones.keys())))
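The same result can be obtained without materializing intermediate lists, since in Python 3 dict key views already behave as sets; a small equivalent sketch (Python 3 only):

 def get_zones(self):
     # dict key views support set union directly
     return sorted(self._zones.keys() | self._builtin_zones.keys())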
Exemplo n.º 42
0
def check_groups(new_groups, old_groups):
    return sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
Exemplo n.º 43
0
 def get_icmptypes(self):
     return sorted(set(list(self._icmptypes.keys()) + \
                       list(self._builtin_icmptypes.keys())))
Exemplo n.º 44
0
 def get_helpers(self):
     return sorted(set(list(self._helpers.keys()) + \
                       list(self._builtin_helpers.keys())))
Exemplo n.º 45
0
    def tell_query(self, query, start=None, end=None):
        '''List entries that match a query.

        Note that an end time without a start time will be ignored.'''
        LOG.info('tell_query("{0}", start={1}, end={2})'.format(
            query, start, end))
        if not start:
            end = None

        needs_refresh = False
        query = query.strip()

        if self.cache.get('disable_cache', False):
            LOG.debug('cache is disabled')
            needs_refresh = True
        elif self.cache.get('time') and self.cache.get('time_entries'):
            last_load_time = self.cache.get('time')
            LOG.debug('last load was %s', last_load_time)
            import time
            now = int(time.time())
            if now - last_load_time > CACHE_LIFETIME:
                LOG.debug('automatic refresh')
                needs_refresh = True
        else:
            LOG.debug('cache is missing timestamp or data')
            needs_refresh = True

        if needs_refresh:
            LOG.debug('refreshing cache')

            try:
                all_entries = toggl.TimeEntry.all()
            except Exception:
                LOG.exception('Error getting time entries')
                raise Exception('Problem talking to toggl.com')

            import time
            self.cache['time'] = int(time.time())
            self.cache['time_entries'] = serialize_entries(all_entries)
        else:
            LOG.debug('using cached data')
            all_entries = deserialize_entries(self.cache['time_entries'])

        LOG.debug('%d entries', len(all_entries))

        if start:
            LOG.debug('filtering on start time %s', start)
            if end:
                LOG.debug('filtering on end time %s', end)
                all_entries = [
                    e for e in all_entries
                    if e.start_time < end and e.stop_time > start
                ]
            else:
                all_entries = [e for e in all_entries if e.stop_time > start]

        efforts = {}

        # group entries with the same description into efforts (so as not to
        # be confused with Toggl tasks)
        for entry in all_entries:
            if entry.description not in efforts:
                efforts[entry.description] = Effort(entry.description, start,
                                                    end)
            efforts[entry.description].add(entry)

        efforts = efforts.values()
        efforts = sorted(efforts,
                         reverse=True,
                         key=lambda e: e.newest_entry.start_time)

        items = []

        if start:
            if len(efforts) > 0:
                hours = sum(to_hours(e.seconds)[0] for e in efforts)
                LOG.debug('total hours: %s', hours)
                total_time = "{0}".format(hours)

                if end:
                    item = Item('{0} hours on {1}'.format(
                        total_time,
                        start.date().strftime(DATE_FORMAT)),
                                subtitle=Item.LINE)
                else:
                    item = Item('{0} hours from {1}'.format(
                        total_time,
                        start.date().strftime(DATE_FORMAT)),
                                subtitle=Item.LINE)
            else:
                item = Item('Nothing to report')

            items.append(item)

        for effort in efforts:
            item = Item(effort.description, valid=True)
            now = LOCALTZ.localize(datetime.datetime.now())

            newest_entry = effort.newest_entry
            if newest_entry.is_running:
                item.icon = 'running.png'
                started = newest_entry.start_time
                delta = to_approximate_time(now - started)

                seconds = effort.seconds
                LOG.debug('total seconds for {0}: {1}'.format(effort, seconds))
                total = ''
                if seconds > 0:
                    hours, exact_hours = to_hours(seconds)
                    total = ' ({0} ({1:.2f}) hours total)'.format(
                        hours, exact_hours)
                item.subtitle = 'Running for {0}{1}'.format(delta, total)
                item.arg = 'stop|{0}|{1}'.format(newest_entry.id,
                                                 effort.description)
            else:
                seconds = effort.seconds
                hours, exact_hours = to_hours(seconds)

                if start:
                    item.subtitle = ('{0} ({1:.2f}) hours'.format(
                        hours, exact_hours))
                else:
                    oldest = effort.oldest_entry
                    since = oldest.start_time
                    since = since.strftime('%m/%d')
                    item.subtitle = ('{0} ({1:.2f}) hours since {2}'.format(
                        hours, exact_hours, since))

                pid = newest_entry.pid or ''
                item.arg = 'continue|{0}|{1}'.format(pid, effort.description)

            items.append(item)

        if len(query.strip()) > 1:
            # there's a filter
            test = query[1:].strip()
            items = self.fuzzy_match_list(test, items, key=lambda t: t.title)

        if len(items) == 0:
            items.append(Item("Nothing found"))

        return items
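The refresh decision above is a common timestamp-based cache pattern; a minimal standalone sketch of the same check (the CACHE_LIFETIME value and names are illustrative):

import time

CACHE_LIFETIME = 300  # seconds; illustrative value


def needs_refresh(cache):
    """Return True when the cache is disabled, empty, or older than the lifetime."""
    if cache.get('disable_cache', False):
        return True
    if not (cache.get('time') and cache.get('time_entries')):
        return True
    return int(time.time()) - cache['time'] > CACHE_LIFETIME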
Exemplo n.º 46
0
 def get_services(self):
     return sorted(set(list(self._services.keys()) + \
                       list(self._builtin_services.keys())))
Exemplo n.º 47
0
import cv2
import numpy as np
from sklearn.cluster import KMeans


def find_histogram(clt):
    numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
    hist, _ = np.histogram(clt.labels_, bins=numLabels)

    hist = hist.astype("float")
    hist /= hist.sum()

    return hist


img = cv2.imread("notH_1.jpg")  #change filename as you want
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.reshape(
    (img.shape[0] * img.shape[1], 3))  #represent as row*column,channel number
clt = KMeans(n_clusters=5)  #cluster number
clt.fit(img)
hist = find_histogram(clt)
print(hist)
print("                           ")
print(clt.cluster_centers_)
print("                           ")
F_dms = [x for _, x in sorted(zip(hist, clt.cluster_centers_))]
F_dms.reverse()
print(F_dms)
print("                           ")
list_hex = []
for i in F_dms:
    list_hex.append('#%02x%02x%02x' % tuple([int(j) for j in i]))
    #list_rgbs.append(tuple(i))
print("Hex codes are: ", list_hex)
print("                           ")
Exemplo n.º 48
0
 def get_ipsets(self):
     return sorted(set(list(self._ipsets.keys()) + \
                       list(self._builtin_ipsets.keys())))
Exemplo n.º 49
0
            letters += a[i]
        check = id[4:len(id) - 1]
        id = int(id[:3])

        count = {}
        for char in letters:
            if count.has_key(char):
                count[char] += 1
            else:
                count[char] = 1
        count2 = {}
        for i in count:
            if count2.has_key(count[i]):
                count2[count[i]] += i
            else:
                count2[count[i]] = i
        for i in count2:
            count2[i] = "".join(sorted(count2[i]))
        sort = ""
        sort2 = []
        for i in count2:
            sort2.append(i)
        for i in reversed(sort2):
            sort += count2[i]

        if check == sort[:5]:
            rooms.append(uncr(name, id))

for r in rooms:
    if "north" in r[0]:
        print r
Exemplo n.º 50
0
 def oldest_entry(self):
     return sorted(self.time_entries, key=lambda e: e.start_time)[0]
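Sorting the whole list only to take its first element works, but min() with the same key finds the same entry in a single pass; a small runnable sketch of the alternative (entry objects are illustrative):

import datetime
from types import SimpleNamespace

entries = [SimpleNamespace(start_time=datetime.datetime(2020, 1, 3)),
           SimpleNamespace(start_time=datetime.datetime(2020, 1, 1))]

# equivalent to sorted(entries, key=...)[0] without building a sorted copy
oldest = min(entries, key=lambda e: e.start_time)
print(oldest.start_time)  # 2020-01-01 00:00:00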
Exemplo n.º 51
0
    def docclass(self, object, name=None, mod=None, *ignored):
        """Produce text documentation for a given class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__

        def makename(c, m=object.__module__):
            return classname(c, m)

        if name == realname:
            title = 'class ' + self.bold(realname)
        else:
            title = self.bold(name) + ' = class ' + realname
        if bases:
            parents = map(makename, bases)
            title = title + '(%s)' % ', '.join(parents)

        contents = []
        push = contents.append

        try:
            signature = inspect.signature(object)
        except (ValueError, TypeError):
            signature = None
        if signature:
            argspec = str(signature)
            if argspec and argspec != '()':
                push(name + argspec)

        doc = getdoc(object)
        if doc:
            push(self.indent(doc.splitlines()[0]))

        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            push("Method resolution order:")
            for base in mro:
                push('    ' + makename(base))

        # List the built-in subclasses, if any:
        subclasses = sorted((str(cls.__name__)
                             for cls in type.__subclasses__(object)
                             if not cls.__name__.startswith("_")
                             and cls.__module__ == "builtins"),
                            key=str.lower)
        no_of_subclasses = len(subclasses)
        MAX_SUBCLASSES_TO_DISPLAY = 4
        if subclasses:
            push("Built-in subclasses:")
            for subclassname in subclasses[:MAX_SUBCLASSES_TO_DISPLAY]:
                push('    ' + subclassname)
            if no_of_subclasses > MAX_SUBCLASSES_TO_DISPLAY:
                push('    ... and ' +
                     str(no_of_subclasses - MAX_SUBCLASSES_TO_DISPLAY) +
                     ' other subclasses')
            push('')

        def header(msg):
            push(f"\n{msg}\n" + ("-" * len(msg)))

        def spill(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                header(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self.docdata(value, name, mod))
                    else:
                        push(self.document(value, name, mod, object))
            return attrs

        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                header(msg)
                for name, kind, homecls, value in ok:
                    push(self.docdata(value, name, mod))
            return attrs

        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                header(msg)
                for name, kind, homecls, value in ok:
                    if callable(value) or inspect.isdatadescriptor(value):
                        doc = getdoc(value)
                    else:
                        doc = None
                    try:
                        obj = getattr(object, name)
                    except AttributeError:
                        obj = homecls.__dict__[name]
                    push(self.docother(obj, name, mod, maxlen=70, doc=doc))
            return attrs

        attrs = [(name, kind, cls, value)
                 for name, kind, cls, value in classify_class_attrs(object)
                 if visiblename(name, obj=object)]

        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)

            if object is not builtins.object and thisclass is builtins.object:
                attrs = inherited
                continue
            elif thisclass is object:
                tag = "defined here"
            else:
                tag = "inherited from %s" % classname(thisclass,
                                                      object.__module__)

            sort_attributes(attrs, object)

            # Pump out the attrs, segregated by kind.
            attrs = spill("Methods %s" % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill("Class methods %s" % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill("Static methods %s" % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors("Readonly properties %s" % tag, attrs,
                                     lambda t: t[1] == 'readonly property')
            attrs = spilldescriptors("Data descriptors %s" % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata("Data and other attributes %s" % tag, attrs,
                              lambda t: t[1] == 'data')

            assert attrs == []
            attrs = inherited

        contents = '\n'.join(contents)
        if not contents:
            return title + '\n'
        return title + '\n' + self.indent(contents.rstrip(), ' |  ') + '\n'
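This method is the class renderer from pydoc's text documenter; a hedged usage sketch, assuming it is exposed on pydoc.plaintext as in the CPython standard library:

import pydoc
from collections import OrderedDict

# plaintext is the TextDoc variant that skips terminal bold/underline escapes
print(pydoc.plaintext.docclass(OrderedDict))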
Exemplo n.º 52
0
 def sorted_by_date(self):
     return sorted(self._summaries.items())
Exemplo n.º 53
0
def generate_dot(app_labels, **kwargs):
    cli_options = kwargs.get('cli_options', None)
    disable_fields = kwargs.get('disable_fields', False)
    include_models = parse_file_or_list(kwargs.get('include_models', ""))
    all_applications = kwargs.get('all_applications', False)
    use_subgraph = kwargs.get('group_models', False)
    verbose_names = kwargs.get('verbose_names', False)
    inheritance = kwargs.get('inheritance', True)
    relations_as_fields = kwargs.get("relations_as_fields", True)
    sort_fields = kwargs.get("sort_fields", True)
    language = kwargs.get('language', None)
    if language is not None:
        activate_language(language)
    exclude_columns = parse_file_or_list(kwargs.get('exclude_columns', ""))
    exclude_models = parse_file_or_list(kwargs.get('exclude_models', ""))

    def skip_field(field):
        if exclude_columns:
            if verbose_names and field.verbose_name:
                if field.verbose_name in exclude_columns:
                    return True
            if field.name in exclude_columns:
                return True
        return False

    if all_applications:
        app_labels = list_app_labels()

    graphs = []
    for app_label in app_labels:
        app = get_app(app_label)
        if not app:
            continue
        graph = Context({
            'name': '"%s"' % app.__name__,
            'app_name': "%s" % '.'.join(app.__name__.split('.')[:-1]),
            'cluster_app_name': "cluster_%s" % app.__name__.replace(".", "_"),
            'models': []
        })

        appmodels = list(get_models_compat(app_label))
        abstract_models = []
        for appmodel in appmodels:
            abstract_models = abstract_models + [abstract_model for abstract_model in appmodel.__bases__ if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract]
        abstract_models = list(set(abstract_models))  # remove duplicates
        appmodels = abstract_models + appmodels

        for appmodel in appmodels:
            appmodel_abstracts = [abstract_model.__name__ for abstract_model in appmodel.__bases__ if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract]

            # collect all attribs of abstract superclasses
            def getBasesAbstractFields(c):
                _abstract_fields = []
                for e in c.__bases__:
                    if hasattr(e, '_meta') and e._meta.abstract:
                        _abstract_fields.extend(e._meta.fields)
                        _abstract_fields.extend(getBasesAbstractFields(e))
                return _abstract_fields
            abstract_fields = getBasesAbstractFields(appmodel)

            model = {
                'app_name': appmodel.__module__.replace(".", "_"),
                'name': appmodel.__name__,
                'abstracts': appmodel_abstracts,
                'fields': [],
                'relations': []
            }

            # consider given model name ?
            def consider(model_name):
                if exclude_models and model_name in exclude_models:
                    return False
                elif include_models and model_name not in include_models:
                    return False
                return not include_models or model_name in include_models

            if not consider(appmodel._meta.object_name):
                continue

            if verbose_names and appmodel._meta.verbose_name:
                model['label'] = force_bytes(appmodel._meta.verbose_name)
            else:
                model['label'] = model['name']

            # model attributes
            def add_attributes(field):
                if verbose_names and field.verbose_name:
                    label = force_bytes(field.verbose_name)
                    if label.islower():
                        label = label.capitalize()
                else:
                    label = field.name

                t = type(field).__name__
                if isinstance(field, (OneToOneField, ForeignKey)):
                    t += " ({0})".format(field.rel.field_name)
                # TODO: ManyToManyField, GenericRelation

                model['fields'].append({
                    'name': field.name,
                    'label': label,
                    'type': t,
                    'blank': field.blank,
                    'abstract': field in abstract_fields,
                    'relation': isinstance(field, RelatedField),
                    'primary_key': field.primary_key,
                })

            attributes = [field for field in appmodel._meta.local_fields]
            if not relations_as_fields:
                # Find all the 'real' attributes. Relations are depicted as graph edges instead of attributes
                attributes = [field for field in attributes if not isinstance(field, RelatedField)]

            # find primary key and print it first, ignoring implicit id if other pk exists
            pk = appmodel._meta.pk
            if pk and not appmodel._meta.abstract and pk in attributes:
                add_attributes(pk)

            for field in attributes:
                if skip_field(field):
                    continue
                if pk and field == pk:
                    continue
                add_attributes(field)

            if sort_fields:
                model['fields'] = sorted(model['fields'], key=lambda field: (not field['primary_key'], not field['relation'], field['label']))

            # FIXME: actually many_to_many fields aren't saved in this model's db table, so why should we add an attribute-line for them in the resulting graph?
            # if appmodel._meta.many_to_many:
            #    for field in appmodel._meta.many_to_many:
            #        if skip_field(field):
            #            continue
            #        add_attributes(field)

            # relations
            def add_relation(field, extras=""):
                if verbose_names and field.verbose_name:
                    label = force_bytes(field.verbose_name)
                    if label.islower():
                        label = label.capitalize()
                else:
                    label = field.name

                # show related field name
                if hasattr(field, 'related_query_name'):
                    related_query_name = field.related_query_name()
                    if verbose_names and related_query_name.islower():
                        related_query_name = related_query_name.replace('_', ' ').capitalize()
                    label += ' (%s)' % related_query_name

                # handle self-relationships and lazy-relationships
                if isinstance(field.rel.to, six.string_types):
                    if field.rel.to == 'self':
                        target_model = field.model
                    else:
                        raise Exception("Lazy relationship for model (%s) must be explicit for field (%s)" % (field.model.__name__, field.name))
                else:
                    target_model = field.rel.to

                _rel = {
                    'target_app': target_model.__module__.replace('.', '_'),
                    'target': target_model.__name__,
                    'type': type(field).__name__,
                    'name': field.name,
                    'label': label,
                    'arrows': extras,
                    'needs_node': True
                }
                if _rel not in model['relations'] and consider(_rel['target']):
                    model['relations'].append(_rel)

            for field in appmodel._meta.local_fields:
                if field.attname.endswith('_ptr_id'):  # excluding field redundant with inheritance relation
                    continue
                if field in abstract_fields:  # excluding fields inherited from abstract classes. they too show as local_fields
                    continue
                if skip_field(field):
                    continue
                if isinstance(field, OneToOneField):
                    add_relation(field, '[arrowhead=none, arrowtail=none, dir=both]')
                elif isinstance(field, ForeignKey):
                    add_relation(field, '[arrowhead=none, arrowtail=dot, dir=both]')

            for field in appmodel._meta.local_many_to_many:
                if skip_field(field):
                    continue
                if isinstance(field, ManyToManyField):
                    if (getattr(field, 'creates_table', False) or  # django 1.1.
                            (hasattr(field.rel.through, '_meta') and field.rel.through._meta.auto_created)):  # django 1.2
                        add_relation(field, '[arrowhead=dot arrowtail=dot, dir=both]')
                elif isinstance(field, GenericRelation):
                    add_relation(field, mark_safe('[style="dotted", arrowhead=normal, arrowtail=normal, dir=both]'))

            if inheritance:
                # add inheritance arrows
                for parent in appmodel.__bases__:
                    if hasattr(parent, "_meta"):  # parent is a model
                        l = "multi-table"
                        if parent._meta.abstract:
                            l = "abstract"
                        if appmodel._meta.proxy:
                            l = "proxy"
                        l += r"\ninheritance"
                        _rel = {
                            'target_app': parent.__module__.replace(".", "_"),
                            'target': parent.__name__,
                            'type': "inheritance",
                            'name': "inheritance",
                            'label': l,
                            'arrows': '[arrowhead=empty, arrowtail=none, dir=both]',
                            'needs_node': True,
                        }
                        # TODO: seems as if abstract models aren't part of models.getModels, which is why they are printed by this without any attributes.
                        if _rel not in model['relations'] and consider(_rel['target']):
                            model['relations'].append(_rel)

            graph['models'].append(model)
        if graph['models']:
            graphs.append(graph)

    nodes = []
    for graph in graphs:
        nodes.extend([e['name'] for e in graph['models']])

    for graph in graphs:
        for model in graph['models']:
            for relation in model['relations']:
                if relation['target'] in nodes:
                    relation['needs_node'] = False

    now = datetime.datetime.now()
    t = loader.get_template('django_extensions/graph_models/digraph.dot')

    if not isinstance(t, Template) and not (hasattr(t, 'template') and isinstance(t.template, Template)):
        raise Exception("Default Django template loader isn't used. "
                        "This can lead to the incorrect template rendering. "
                        "Please, check the settings.")

    c = Context({
        'created_at': now.strftime("%Y-%m-%d %H:%M"),
        'cli_options': cli_options,
        'disable_fields': disable_fields,
        'use_subgraph': use_subgraph,
        'graphs': graphs,
    })
    if django.VERSION >= (1, 8):
        c = c.flatten()
    dot = t.render(c)

    return dot
Exemplo n.º 54
0
args = parser.parse_args()
parts_file = args.parts
do_spreading = args.spread

import gv
import os
import os.path
import glob
import numpy as np
import amitgroup as ag

parts_descriptor = gv.BinaryDescriptor.getclass('parts').load(parts_file)

#path = os.path.join(os.environ['UIUC_DIR'], 'TestImages/test-*.pgm')
path = os.path.join(os.environ['VOC_DIR'], 'JPEGImages/*.jpg')  
files = sorted(glob.glob(path))[:40]

pi = np.zeros(parts_descriptor.num_parts)

tot = 0
cut = 4

intensities = np.array([])

if do_spreading:
    # TODO: Should be specified
    radii = (2, 2)
else:
    radii = (0, 0)

e_count = 0
Exemplo n.º 55
0
#solver_file_path = opjh("kzpy3/caf6/z2_color/solver_"+str(gpu)+"_a.prototxt")
solver_file_path = opjh("kzpy3/caf6/z2_color_deeper_multiscale2/solver.prototxt")
version = 'version 1b'
weights_file_mode = None #'most recent' #'this one' #None #'most recent' #'this one'  #None #'most recent'
weights_file_path = None #opjD('z2_color_deeper_multiscale2') #None #opjh('kzpy3/caf6/z2_color/z2_color.caffemodel') #None #'/home/karlzipser/Desktop/z2_color' # None #opjD('z2_color')

caffe_net = Caffe_Net(solver_file_path,version,weights_file_mode,weights_file_path,False)





runs_folder = '/media/karlzipser/ExtraDrive1/runs'
assert(len(gg(opj(runs_folder,'*'))) > 0)
run_names = sorted(gg(opj(runs_folder,'*.hdf5')),key=natural_keys)
solver_inputs_dic = {}
keys = {}
k_ctr = 0
for hdf5_filename in run_names:
	try:
		solver_inputs_dic[hdf5_filename] = h5py.File(hdf5_filename,'r')
		print hdf5_filename
		kk = solver_inputs_dic[hdf5_filename].keys()
		for k in kk:
			keys[k] = hdf5_filename
			k_ctr += 1
	except Exception as e:
		cprint("********** Exception ***********************",'red')
		print(e.message, e.args)
Exemplo n.º 56
0
def generate_datasets_json(barcodeId, barcodeInfo, library, runID, notes,
                           site_name, platform, instrumentName, chipType,
                           datasets_json_path):

    # TS-6135: ignore optional LB field, TODO: track library in database

    if not site_name:
        site_name = ""
    if not notes:
        notes = ""

    datasets = {
        "meta": {
            "format_name": "Dataset Map",
            "format_version": "1.0",
            "generated_by": "basecaller.py",
            "creation_date": dateutil.parser.parse(time.asctime()).isoformat()
        },
        "sequencing_center":
        "%s/%s" %
        (''.join(ch for ch in site_name if ch.isalnum()), instrumentName),
        "datasets": [],
        "read_groups": {}
    }

    # get no barcode sample name and reference
    sample = barcodeInfo['no_barcode']['sample']
    reference = barcodeInfo['no_barcode']['referenceName']

    # Scenario 1. No barcodes.
    if len(barcodeInfo) == 1:
        datasets["datasets"].append({
            "dataset_name": sample,
            "file_prefix": "rawlib",
            "read_groups": [
                runID,
            ]
        })
        datasets["read_groups"][runID] = {
            "index": 0,
            "sample": sample,
            #"library"           : library,
            "reference": reference,
            "description":
            ''.join(ch for ch in notes if ch.isalnum() or ch == " "),
            "platform_unit": "%s/%s" % (platform, chipType.replace('"', ""))
        }

    # Scenario 2. Barcodes present
    else:
        datasets["barcode_config"] = {}
        # TODO: not needed for calibration
        datasets["datasets"].append({
            "dataset_name": sample + "/No_barcode_match",
            "file_prefix": "nomatch_rawlib",
            "read_groups": [
                runID + ".nomatch",
            ]
        })

        datasets["read_groups"][runID + ".nomatch"] = {
            "index":
            0,
            "sample":
            sample,
            #"library"           : library,
            #"reference"         : reference,
            "reference":
            "",
            "description":
            ''.join(ch for ch in notes if ch.isalnum() or ch == " "),
            "platform_unit":
            "%s/%s/%s" % (platform, chipType.replace('"', ""), "nomatch")
        }
        datasets["barcode_config"]["barcode_id"] = barcodeId

        try:
            for barcode_name, barcode_info in sorted(barcodeInfo.iteritems()):

                if barcode_name == 'no_barcode':
                    continue

                datasets["read_groups"][runID + "." + barcode_name] = {
                    "barcode_name":
                    barcode_name,
                    "barcode_sequence":
                    barcode_info['sequence'],
                    "barcode_adapter":
                    barcode_info['adapter'],
                    "index":
                    barcode_info['index'],
                    "sample":
                    barcode_info['sample'],
                    #"library"           : library,
                    "reference":
                    barcode_info['referenceName'],
                    "description":
                    ''.join(ch for ch in notes if ch.isalnum() or ch == " "),
                    "platform_unit":
                    "%s/%s/%s" %
                    (platform, chipType.replace('"', ""), barcode_name)
                }

        except Exception:
            print traceback.format_exc()
            datasets["read_groups"] = {}

        try:

            if 'calibration' in datasets_json_path:

                # create groups of barcodes with same references

                referencedict = defaultdict(list)

                for barcode_name, barcode_info in sorted(
                        barcodeInfo.iteritems()):

                    if barcode_name == 'no_barcode':
                        continue

                    if barcode_info['referenceName']:
                        if barcode_info['calibrate']:
                            referencedict[barcode_info['referenceName']].append(barcode_name)
                        else:
                            referencedict['no_calibration'].append(barcode_name)
                    else:
                        # TODO: not needed for calibration
                        referencedict['no_reference'].append(barcode_name)

                for reference, bclist in referencedict.iteritems():
                    datasets["datasets"].append({
                        "dataset_name":
                        reference,
                        "file_prefix":
                        '%s_rawlib' % reference,
                        "read_groups": [
                            runID + "." + barcode_name
                            for barcode_name in bclist
                        ]
                    })

                print referencedict

            else:

                for barcode_name, barcode_info in sorted(
                        barcodeInfo.iteritems()):

                    if barcode_name == 'no_barcode':
                        continue

                    datasets["datasets"].append({
                        "dataset_name":
                        barcode_info['sample'] + "/" + barcode_name,
                        "file_prefix":
                        '%s_rawlib' % barcode_name,
                        "read_groups": [runID + "." + barcode_name]
                    })

        except Exception:
            print traceback.format_exc()
            datasets["datasets"] = []

    with open(datasets_json_path, "w") as f:
        json.dump(datasets, f, indent=4)
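
A minimal, editor-added sketch of how generate_datasets_json might be invoked, assuming the function definition above is in scope and its module provides the json / time / dateutil / traceback / defaultdict imports it relies on. The barcodeInfo keys ('sample', 'referenceName', 'sequence', 'adapter', 'index', 'calibrate') mirror the fields the function reads; the concrete values, barcode name and output path are illustrative assumptions only.

# Hypothetical inputs -- names and values are assumptions, not pipeline data.
barcodeInfo = {
    "no_barcode": {"sample": "none", "referenceName": ""},
    "IonXpress_001": {
        "sample": "sampleA",
        "referenceName": "hg19",
        "sequence": "CTAAGGTAAC",
        "adapter": "GAT",
        "index": 1,
        "calibrate": False,
    },
}

# With one real barcode plus 'no_barcode', the function takes the barcoded
# branch: it emits a no-match dataset/read group plus one dataset and one
# read group per barcode, then writes everything to datasets_json_path.
generate_datasets_json(
    barcodeId="IonXpress",
    barcodeInfo=barcodeInfo,
    library="lib1",
    runID="R1",
    notes="test run",
    site_name="My Site",
    platform="iontorrent",
    instrumentName="PGM_1",
    chipType="318",
    datasets_json_path="datasets_basecaller.json",
)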
Exemplo n.º 57
0
    def _get_paths_from_nodes(self, nodes):
        return sorted([node.path for node in nodes])
Exemplo n.º 58
0
    def volumes(self):
        return sorted(set(self.index))
Exemplo n.º 59
0
	theEPIs = []
	theAnat = None
	outdir = vars(args)['Results Directory']

	#make links to the desired start directory and create links to the source files there

	#Parse TE string
	if ',' not in args.TEs: 
		print 'ERROR: Please check the syntax of the comma-separated list of TEs'
		print
	args_TEs = args.TEs
	theTEs = args_TEs.replace(' ','').replace('\t','').split(',')
	#print theTEs

	#Parse file string
	theEPIs_in = sorted(vars(args)['ME-EPI Datasets'])
	theEPIs = sorted(list(set([os.path.basename(epi) for epi in theEPIs_in])))
	#print 'the datasets are:', theEPIs_in
	if len(theEPIs) < 3:  
		print 'ERROR: Please select a ME-EPI dataset with >=3 image time series with unique filenames using the file chooser'
		print

	if len(theTEs) != len(theEPIs) and len(theEPIs)<3:
		#raise proper errors here
		print 'ERROR: The number of TEs and the number of ME-EPI datasets do not match!'
		print

	if not os.path.exists(outdir): os.mkdir(outdir)
	print "PREPARING INPUT DATASETS..."
	
	if theAnat is not None:  
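
The checks above only print an error and fall through, which the in-line comment itself flags ("raise proper errors here"). Below is a minimal, editor-added sketch of how the TE parsing and the count checks could raise instead of printing; the helper name and the choice of ValueError are assumptions, not part of the original script.

import os

def parse_and_check_tes(te_string, epi_paths):
    """Hypothetical helper: parse a comma-separated TE list and validate it
    against the ME-EPI datasets, raising instead of printing."""
    if ',' not in te_string:
        raise ValueError('Please check the syntax of the comma-separated list of TEs')
    tes = te_string.replace(' ', '').replace('\t', '').split(',')
    epis = sorted(set(os.path.basename(epi) for epi in epi_paths))
    if len(epis) < 3:
        raise ValueError('Please select a ME-EPI dataset with >=3 image time '
                         'series with unique filenames')
    if len(tes) != len(epis):
        raise ValueError('The number of TEs and the number of ME-EPI datasets do not match')
    return tes, epis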
Exemplo n.º 60
0
def run_iuga(input_g, k_value, time_limit, lowest_acceptable_similarity,
             dataset, *args, **kwargs):
    # parameters
    k = k_value

    filtered_points = kwargs.get('filtered_points', [])
    clusters = kwargs.get('clusters', [])

    # indexing file
    # should the algorithm stop if it reaches the end of the index (i.e.,
    # scanning all records once)
    STOP_VISITING_ONCE = False

    # Note that in case of user group analysis, each group is a record. In
    # case of spatiotemporal data, each geo point is a record.

    # variables
    # the IDs of the current k records will be kept in this dict.
    current_records = {}

    # the IDs of the next candidate k records will be kept in this dict.
    new_records = {}

    # total execution time
    total_time = 0.0

    # read input data frame
    start = time.time()
    if USE_SQL:
        similarities, distances, idr_scores = read_input_from_sql(
            dataset, input_g, filtered_points, clusters)
    else:
        similarities, distances, proximities = read_input_from_hdf(
            dataset, input_g, filtered_points, clusters)
        # read_input_from_hdf does not return idr scores; fall back to an
        # empty mapping so the idr-based sort below degrades to similarity order.
        idr_scores = {}
    if DEBUG:
        logging.info('[IUGA] {} seconds'.format(time.time() - start))

    # sorting similarities and distances in descending order
    similarities_sorted = sorted(similarities.items(),
                                 key=lambda x: x[1],
                                 reverse=True)
    distances_sorted = sorted(distances.items(),
                              key=lambda x: x[1],
                              reverse=True)

    # begin - prepare lists for easy retrieval
    records = {}
    similarity_by_id = defaultdict(float)
    distance_by_id = defaultdict(float)
    idr_score_by_id = defaultdict(float)

    # cnt = 0
    for value in similarities_sorted:
        # records[cnt] = value[0]
        similarity_by_id[value[0]] = value[1]
        # cnt += 1

    for value in distances_sorted:
        distance_by_id[value[0]] = value[1]

    for value in idr_scores.items():
        idr_score_by_id[value[0]] = value[1]

    records_sorted = sorted(similarities.items(),
                            key=lambda x: (-idr_score_by_id[x[0]], -x[1]))
    print('[records_sorted]', records_sorted)
    cnt = 0
    for key, _ in records_sorted:
        records[cnt] = key
        cnt += 1

    # end - prepare lists for easy retrieval

    # print(len(records), "records retrieved and indexed.")

    # begin - retrieval functions

    # end - retrieval functions

    # initialization by k most similar records
    for i in range(k):
        current_records[i] = records[i]

    # print("begin:", show(current_records, k, similarity_by_id, distance_by_id))

    # greedy algorithm
    pointer = k - 1
    nb_iterations = 0
    pointer_limit = len(records) - 1
    while total_time < time_limit and pointer < pointer_limit:
        nb_iterations += 1
        pointer += 1
        redundancy_flag = False
        for i in range(k):
            if current_records[i] == records[pointer]:
                redundancy_flag = True
                break
        if redundancy_flag:
            continue
        begin_time = datetime.datetime.now(datetime.timezone.utc)

        current_distances = get_distances_of(current_records, k,
                                             distance_by_id)
        current_diversity = diversity.diversity(current_distances)
        # current_proximities = get_proximities_of(current_records, k, proximity_by_id)
        # current_clustering_mean = mean(current_proximities)

        new_records = make_new_records(current_records, pointer, k, records)

        new_distances = get_distances_of(new_records, k, distance_by_id)
        new_diversity = diversity.diversity(new_distances)
        # new_proximities = get_proximities_of(new_records, k, proximity_by_id)
        # new_clustering_mean = mean(new_proximities)

        if new_diversity > current_diversity:
            if DEBUG:
                print((current_diversity, new_diversity))
            current_records = new_records

        end_time = datetime.datetime.now(datetime.timezone.utc)
        # total_seconds() captures the full elapsed time; .microseconds only
        # holds the sub-second component and would wrap for long iterations.
        duration = (end_time - begin_time).total_seconds() * 1000.0  # milliseconds
        total_time += duration
        if similarity_by_id[records[pointer]] < lowest_acceptable_similarity:
            if STOP_VISITING_ONCE:
                break
            else:
                pointer = k

    # print("end:", show(current_records, k, similarity_by_id, distance_by_id))
    # print("execution time (ms)", total_time)
    # print("# iterations", nb_iterations)

    min_similarity = 1
    dicToArray = []
    for i in range(k):
        if similarity_by_id[current_records[i]] < min_similarity:
            min_similarity = similarity_by_id[current_records[i]]
        dicToArray.append(current_records[i])
    my_distances = get_distances_of(current_records, k, distance_by_id)
    my_diversity = diversity.diversity(my_distances)
    return [min_similarity, round(my_diversity, 3), sorted(dicToArray)]
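
run_iuga leans on helpers that are not shown in this listing (read_input_from_sql / read_input_from_hdf, make_new_records, get_distances_of and the diversity module). The toy below is an editor-added sketch of just the core greedy step the main loop performs, swapping a candidate into the current set and keeping the change only when the diversity objective improves; the min-pairwise-distance objective and the try-every-slot swap rule are assumptions for illustration, not the module's actual implementations.

import itertools

def pairwise_min_distance(points):
    """Toy diversity objective (assumption): the smallest pairwise distance."""
    return min(abs(a - b) for a, b in itertools.combinations(points, 2))

def greedy_diversify(values, k):
    """Keep k values; walk the remaining candidates in order and accept a swap
    only when it raises the diversity objective (mirrors the loop above)."""
    current = list(values[:k])
    for candidate in values[k:]:
        if candidate in current:
            continue  # redundancy check, as in the main loop
        best = pairwise_min_distance(current)
        for i in range(k):
            trial = current[:i] + [candidate] + current[i + 1:]
            score = pairwise_min_distance(trial)
            if score > best:
                current, best = trial, score
    return current

# Example: start from the 3 "most similar" values and diversify.
print(greedy_diversify([10, 11, 12, 1, 20, 35, 2], k=3))   # -> [35, 1, 12]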