コード例 #1
1
ファイル: storage.py プロジェクト: Tha-Robert/kademlia
class ForgetfulStorage(object):
    implements(IStorage)

    def __init__(self, ttl=604800):
        """
        Ordered key/value store whose entries expire after `ttl` seconds.
        By default, max age is a week (604800 seconds).
        """
        self.data = OrderedDict()  # key -> (insert time, value), oldest first
        self.ttl = ttl

    def __setitem__(self, key, value):
        # Delete first so the key is re-inserted at the newest position.
        if key in self.data:
            del self.data[key]
        self.data[key] = (time.time(), value)
        self.cull()

    def cull(self):
        """Evict every entry older than self.ttl."""
        # Materialize the expired items before popping: popping while the
        # lazy iterator is still open mutates the dict mid-iteration.
        for _ in list(self.iteritemsOlderThan(self.ttl)):
            self.data.popitem(last=False)

    def get(self, key, default=None):
        """Return (found, value) where `found` is True on a hit.

        Bug fix: the original returned False in both branches, so callers
        could not tell a hit from a miss.
        """
        self.cull()
        if key in self.data:
            return True, self[key]
        return False, default

    def __getitem__(self, key):
        self.cull()
        return self.data[key][1]

    def __iter__(self):
        self.cull()
        return iter(self.data)

    def __repr__(self):
        self.cull()
        return repr(self.data)

    def iteritemsOlderThan(self, secondsOld):
        """Yield (key, value) pairs at least `secondsOld` seconds old."""
        minBirthday = time.time() - secondsOld
        zipped = self._tripleIterable()
        # Entries are insertion-ordered, so takewhile stops at the first
        # entry young enough to keep.
        matches = takewhile(lambda r: minBirthday >= r[1], zipped)
        return map(operator.itemgetter(0, 2), matches)

    def _tripleIterable(self):
        # (key, birthday, value) triples; plain iter/map/zip work on both
        # Python 2 and 3 (the original iterkeys/imap/izip are Py2-only).
        ikeys = iter(self.data)
        ibirthday = map(operator.itemgetter(0), self.data.values())
        ivalues = map(operator.itemgetter(1), self.data.values())
        return zip(ikeys, ibirthday, ivalues)

    def iteritems(self):
        """Iterate (key, value) pairs of live entries."""
        self.cull()
        ikeys = iter(self.data)
        ivalues = map(operator.itemgetter(1), self.data.values())
        return zip(ikeys, ivalues)
コード例 #2
0
def solve():
    """Problem solution implementation."""
    def next_split(length):
        d = (length - 1) / 2
        return [int(round(d)), int(d)]

    n, k = [int(x) for x in raw_input().split()]

    # optimization
    if k == n:
        return '0 0'

    c = OrderedDict({n: 1})
    while c.viewkeys() and k > 1:
        k -= 1
        length = c.iterkeys().next()  # First key == largest key
        new_keys = filter(lambda x: x > 0, next_split(length))
        for nk in new_keys:
            if nk in c:
                c[nk] += 1
            else:
                c[nk] = 1
        # Delete obsolete keys to save memory
        c[length] -= 1
        if c[length] == 0:
            del c[length]

    l_r = next_split(c.iterkeys().next())
    return '{} {}'.format(max(l_r), min(l_r))
コード例 #3
0
ファイル: uniform_grid.py プロジェクト: bataeves/dpa
class UniformGrid(BaseGrid):
    """Regular grid: one evenly spaced axis per series, values in an ndarray."""

    def __init__(self, **kwargs):
        """Build axes from kwargs["series"] = [(name, start, end, cnt), ...]."""
        super(UniformGrid, self).__init__()
        self.series = OrderedDict()  # axis name -> linspace of coordinates
        self.dtype = kwargs.get("dtype", float)
        shape = []
        for serie, start, end, cnt in kwargs.get("series"):
            # linspace includes both endpoints, one entry per grid point.
            self.series[serie] = np.linspace(start, end, cnt)
            shape.append(cnt)

        self.values = np.zeros(tuple(shape))

    def gi(self, **kwargs):
        """Get the value at the per-series indices in kwargs (default 0)."""
        val = self.values
        # Iterating the dict directly is Py2/Py3 compatible; the original
        # iterkeys() call does not exist on Python 3.
        for serie in self.series:
            val = val[kwargs.get(serie, 0)]
        return val

    def si(self, value, **kwargs):
        """Set the value at the per-series indices in kwargs (default 0)."""
        prev_val = val = self.values
        prev_index = 0
        for serie in self.series:
            prev_val = val
            prev_index = kwargs.get(serie, 0)
            val = val[kwargs.get(serie, 0)]
        # Assign through the parent view so the write lands in self.values.
        prev_val[prev_index] = value
        return
コード例 #4
0
class JobProperties(object):
    """Common class for holding job properties much like a struct."""

    def __init__(self, jobsdb, hashid):
        self.jobsdb = jobsdb        # path to the jobs sqlite database
        self.props = OrderedDict()  # column name -> value for this job
        self.hashid = hashid        # HASHID identifying the job row
        self.createHashTable()

    def createHashTable(self):
        """Load the Jobs row matching self.hashid into self.props."""
        try:
            con = lite.connect(self.jobsdb)
            cur = con.cursor()
        except lite.Error:
            # Narrowed from a bare except; the second print was a Py2 print
            # statement (a syntax error under Python 3).
            print("Unable to find the database.  Please be in the sim_dir and "
                  "populate the db folder with the proper databases\n")
            print("Tried to open jobs database: %s" % self.jobsdb)
            sys.exit()
        with con:
            # Getting table names for jobs
            cmd = 'PRAGMA table_info(Jobs)'
            pragma_data = cur.execute(cmd).fetchall()
            columns = [p[1] for p in pragma_data]

            # Parameterized query instead of string interpolation (SQL safety).
            cmd = 'SELECT * FROM Jobs WHERE HASHID = ?'
            job_info = cur.execute(cmd, (self.hashid,)).fetchall()[0]

        # Creating the hash table
        for i, column in enumerate(columns):
            self.props[column] = job_info[i]

    def getProp(self, prop):
        """Return the value stored for `prop` (raises KeyError if missing)."""
        return self.props[prop]

    def setProp(self, prop, newValue):
        self.props[prop] = newValue

    def keys(self):
        """Return all property names.

        Bug fix: the original returned inside the loop, so only the first
        key ever came back.
        """
        return list(self.props)

    def isIn(self, line):
        """Check whether any '<prop>' placeholder appears in `line`.

        Returns (found, kref.lower(), key) where kref/key refer to the
        matching placeholder, or the last one probed when nothing matched.
        """
        isIn = False
        # Initialized so empty self.props cannot raise NameError below.
        kref = ''
        k = ''
        words = line.split()
        for k in self.props:
            kref = '<' + k + '>'   # example in code would look like <soil_pav>
            for w in words:
                if kref.upper() == w.upper():
                    isIn = True
                    break
            if isIn:
                break
        return isIn, kref.lower(), k

    def __str__(self):
        # items() replaces the Py2-only iteritems().
        return "".join("key: %s\tvalue: %s\n" % (k, v)
                       for k, v in self.props.items())
コード例 #5
0
ファイル: uniform_grid.py プロジェクト: bataeves/dpa
class UniformGrid(BaseGrid):
  """Regular grid: one evenly spaced axis per series, values in an ndarray."""

  def __init__(self, **kwargs):
    """Build axes from kwargs["series"] = [(name, start, end, cnt), ...]."""
    super(UniformGrid, self).__init__()
    self.series = OrderedDict()  # axis name -> linspace of coordinates
    self.dtype = kwargs.get("dtype", float)
    shape = []
    for serie, start, end, cnt in kwargs.get("series"):
      # linspace includes both endpoints, one entry per grid point.
      self.series[serie] = np.linspace(start, end, cnt)
      shape.append(cnt)

    self.values = np.zeros(tuple(shape))

  def gi(self, **kwargs):
    """Get the value at the per-series indices in kwargs (default 0)."""
    val = self.values
    # Iterating the dict directly is Py2/Py3 compatible; the original
    # iterkeys() call does not exist on Python 3.
    for serie in self.series:
      val = val[kwargs.get(serie, 0)]
    return val

  def si(self, value, **kwargs):
    """Set the value at the per-series indices in kwargs (default 0)."""
    prev_val = val = self.values
    prev_index = 0
    for serie in self.series:
      prev_val = val
      prev_index = kwargs.get(serie, 0)
      val = val[kwargs.get(serie, 0)]
    # Assign through the parent view so the write lands in self.values.
    prev_val[prev_index] = value
    return
コード例 #6
0
ファイル: devices.py プロジェクト: vertrex/DFF
    def __tablesStore(self, store):
        """Build (translated column names, device rows) for the table view.

        Only properties that are non-empty for at least one device in
        `store` become columns.
        """
        translator = self.DevicesRegTranslator()
        # Candidate property -> number of devices reporting a value for it.
        properties = OrderedDict()
        for prop in ('friendlyName', 'deviceDesc', 'klass', 'serialNumber',
                     'users', 'mountPoints'):
            properties[prop] = 0

        for device in store:
            # Iterating the dict directly is Py2/Py3 compatible (the
            # original iterkeys() is Py2-only).
            for prop in properties:
                if getattr(device, prop)():
                    properties[prop] += 1

        # Keep only the properties that occurred at least once.
        finalProperty = [prop for prop in properties if properties[prop] != 0]

        # One tuple of property values per device; the set removes duplicates.
        finalTable = set()
        for device in store:
            devprop = tuple(getattr(device, prop)() for prop in finalProperty)
            finalTable.add(devprop)

        translateProperty = [translator.translate(prop)
                             for prop in finalProperty]

        return (translateProperty, finalTable)
コード例 #7
0
ファイル: plot_vorticity.py プロジェクト: tsupinie/research
def main():
    """Plot mean/max vorticity with reflectivity overlays for each experiment."""
    base_path = "/caps2/tsupinie/"
    # experiment id -> display label
    experiments = OrderedDict([('1kmf-sndr0h=25km', 'CTRL'), ('1kmf-zs25-no-05XP', 'NO_MWR'), ('1kmf-z-no-snd', 'NO_SND'), ('1kmf-zs25-no-mm', 'NO_MM'), ('1kmf-zs25-no-mm-05XP', 'NO_MWR_MM'), ('1kmf-z-no-v2', 'NO_V2')])

    domain_bounds = (slice(110, 135), slice(118, 143))
    grid = goshen_1km_grid(bounds=domain_bounds)
    domain_bounds = grid.getBounds()
    temp = goshen_1km_temporal(start=14400, end=14400)

    xs, ys = grid.getXY()
    levels = np.arange(-0.030, 0.033, 0.003)

    exp_vort = []
    exp_refl = []

    # Plain iteration replaces the Py2-only iterkeys().
    for exp in experiments:
        vort = cPickle.load(open("vort_pkl/vorticity_%s.pkl" % exp, 'r'))
        # Bug fix: the original loaded vort but never appended it, so
        # exp_vort stayed empty and the plotting loop produced no subplots.
        exp_vort.append(vort)

        refl = []
        for time_sec in temp:
            try:
                mo = ARPSModelObsFile("%s/%s/KCYSan%06d" % (base_path, exp, time_sec))
            except AssertionError:
                # Retry with the MPI-decomposed file layout.
                mo = ARPSModelObsFile("%s/%s/KCYSan%06d" % (base_path, exp, time_sec), mpi_config=(2, 12))
            except:
                # Best-effort fallback: an all-zero reflectivity field.
                mo = {'Z': np.zeros((1, 255, 255), dtype=np.float32)}
            refl.append(mo)

        exp_refl.append(np.array(refl))

    def subplotFactory(exp, exp_vort, exp_refl):
        # Build one subplot closure per (experiment, time) pair.
        def doSubplot(multiplier=1.0, layout=(-1, -1)):

            pylab.quiver(xs, ys, exp_vort['u'].mean(axis=0)[domain_bounds], exp_vort['v'].mean(axis=0)[domain_bounds])
            pylab.contour(xs, ys, exp_refl['Z'][0][domain_bounds], colors='#666666', levels=np.arange(20, 80, 20))

            pylab.contour(xs, ys, exp_vort['vort'].mean(axis=0)[domain_bounds], colors='k', linestyles='--', linewidths=1.5, levels=[ 0.015 ])
            pylab.contour(xs, ys, exp_vort['vort'].max(axis=0)[domain_bounds], colors='k', linestyles='-', linewidths=1.5, levels=[ 0.015 ])

            pylab.contourf(xs, ys, exp_vort['vort'].mean(axis=0)[domain_bounds], cmap=matplotlib.cm.get_cmap('RdBu_r'), levels=levels, zorder=-10)
            grid.drawPolitical()

            pylab.text(0.05, 0.95, experiments[exp], ha='left', va='top', transform=pylab.gca().transAxes, size=14 * multiplier)
            return

        return doSubplot

    for wdt, time_sec in enumerate(temp):
        # print() replaces the Py2-only print statement.
        print(time_sec)
        subplots = []
        for exp_name, vort, refl in zip(experiments, exp_vort, exp_refl):
            print(vort.shape)
            subplots.append(subplotFactory(exp_name, vort[:, wdt], refl[wdt]))

        pylab.figure(figsize=(12, 8))
        pylab.subplots_adjust(left=0.05, bottom=0.1, right=0.875, top=0.975, hspace=0.1, wspace=0.1)
        publicationFigure(subplots, (2, 3), corner='ur', colorbar=(r'Vorticity ($\times$ 10$^3$ s$^{-1}$)', "%d", levels, np.round(1000 * levels)))
        pylab.savefig("vorticity_%d.png" % time_sec)
    return
コード例 #8
0
ファイル: lru.py プロジェクト: arovit/python-LRU
class LRUCacheDict(object):
    """ A dictionary-like object, supporting LRU caching semantics.
    """
    def __init__(self, max_size=1024, expiration=15*60):
        self.max_size = max_size        # entry-count cap
        self.expiration = expiration    # seconds before an entry expires

        self.__values = {}
        self.__expire_times = OrderedDict()   # key -> absolute expiry time
        self.__access_times = OrderedDict()   # key -> last access (LRU order)

    def size(self):
        """Number of live entries."""
        return len(self.__values)

    def clear(self):
        """Drop every entry."""
        self.__values.clear()
        self.__expire_times.clear()
        self.__access_times.clear()

    def has_key(self, key):
        # `in` works on Python 2 and 3; dict.has_key() is Py2-only.
        return key in self.__values

    def __setitem__(self, key, value):
        t = int(time())
        self.__delete__(key)  # re-insert so ordering reflects this write
        self.__values[key] = value
        self.__access_times[key] = t
        self.__expire_times[key] = t + self.expiration
        self.cleanup()

    def __getitem__(self, key):
        # Move the key to the most-recently-used end before returning it.
        t = int(time())
        del self.__access_times[key]
        self.__access_times[key] = t
        self.cleanup()
        return self.__values[key]

    def __delete__(self, key):
        if key in self.__values:
            del self.__values[key]
            del self.__expire_times[key]
            del self.__access_times[key]

    def cleanup(self):
        """Evict expired entries, then enforce max_size (LRU first)."""
        if self.expiration is None:
            return None
        t = int(time())
        # Bug fix: iterate over a snapshot -- __delete__ mutates the dict,
        # and deleting during iteration raises RuntimeError.
        for k in list(self.__expire_times):
            if self.__expire_times[k] < t:
                self.__delete__(k)
            else:
                # Expiry times are insertion-ordered; the rest are newer.
                break

        # If we have more than self.max_size items, delete the least
        # recently used (first key of __access_times).
        while len(self.__values) > self.max_size:
            oldest = next(iter(self.__access_times))
            self.__delete__(oldest)
コード例 #9
0
ファイル: collector.py プロジェクト: mlonf/magritte
class Collector(object):
    """Collects (name, version) package pairs, keeping one version per package."""

    def __init__(self):
        # (name, version) -> (name, version), in insertion order.
        self.collection = OrderedDict()

    def get_other_versions(self, package_name, package_version):
        """Return collected pairs for the same package with another version."""
        other_versions = []
        # Keys are (name, version) tuples; iterating the dict directly is
        # Py2/Py3 compatible (the original iterkeys() is Py2-only).
        for other_package_name, other_package_version in self.collection:
            if other_package_name == package_name\
            and not other_package_version == package_version:
                other_versions.append((other_package_name, other_package_version))
        return other_versions

    def append_to_packages_list(self, package_name, package_version):
        """Record a package version, resolving conflicts with earlier entries."""
        other_versions = dict(self.get_other_versions(package_name, package_version)).values()
        if other_versions:
            lowest = min(other_versions + [package_version])
            highest = max(other_versions + [package_version])
            # NOTE(review): this branch is unreachable (max >= min by
            # definition); kept to preserve the original contract.
            if highest < lowest:
                raise ImpossibleVersionComparisonException("version %s of %s cannot be higher than %s" % (lowest, package_name, highest))
            if package_version < highest:
                logger.warning("downgrading %s from version %s to version %s", package_name, highest, package_version)
                del self.collection[(package_name, highest)]
            elif package_version > lowest:
                logger.warning("version %s of %s is higher than %s, ignored", package_version, package_name, lowest)
                return
        self.collection[package_name, package_version] = package_name, package_version

    def collect_dependencies(self, package_dependencies):
        """Record every (name, version) dependency pair."""
        for dependency in package_dependencies:
            self.append_to_packages_list(dependency[0], dependency[1])

    def traverse_dependencies(self, package):
        """Record a package (and its optional dependency list) after validation."""
        package_name = package[0][0]
        package_version = package[0][1]
        if not package_name:
            raise RequiredAttributeMissingError("package %s requires a name" % package)
        if not package_version:
            raise RequiredAttributeMissingError("package %s requires a version" % package)
        package_dependencies = None
        if len(package) == 2:
            package_dependencies = package[1]
        # Dependencies are recorded before the package itself.
        if package_dependencies:
            self.collect_dependencies(package_dependencies)
        self.append_to_packages_list(package_name, package_version)

    def traverse(self, structure):
        for package in structure:
            self.traverse_dependencies(package)

    def format_requirements(self):
        """Return 'name-version' strings in collection order."""
        return ["%s-%s" % (name, version) for name, version in self.collection]
コード例 #10
0
ファイル: dht.py プロジェクト: pombredanne/Uroko
class ForgetfulStorage(object):
    def __init__(self, ttl=604800):
        """
        Implements a storage class for distributed log messages.
        Default max age is a week (604800 seconds).
        """
        self.data = OrderedDict()  # key -> (insert time, value), oldest first
        self.ttl = ttl

    def __setitem__(self, key, value):
        # An existing key keeps its original position but gets a fresh
        # timestamp (the commented-out refresh in the original was left out
        # on purpose; this preserves that behavior).
        self.data[key] = (time.time(), value)
        self.cull()

    def __getitem__(self, key):
        self.cull()
        return self.data[key][1]

    def __iter__(self):
        self.cull()
        return iter(self.data)

    def __repr__(self):
        self.cull()
        return repr(self.data)

    def get(self, key, default=None):
        """Return the live value for `key`, or `default` when absent/expired."""
        self.cull()
        if key in self.data:
            return self[key]
        return default

    def cull(self):
        """
        Evict expired entries.
        Note that it may be useful to track what we evict.
        """
        # Materialize the expired items first: popping while the lazy
        # iterator is open mutates the dict mid-iteration (RuntimeError).
        for _ in list(self.iteritems_older_than(self.ttl)):
            self.data.popitem(last=False)

    def iteritems_older_than(self, seconds_old):
        """Yield (key, value) pairs at least `seconds_old` seconds old."""
        min_birthday = time.time() - seconds_old
        zipped = self._triple_iterable()
        # Insertion order implies age order, so takewhile stops at the first
        # entry young enough to keep.
        matches = takewhile(lambda r: min_birthday >= r[1], zipped)
        return map(operator.itemgetter(0, 2), matches)

    def _triple_iterable(self):
        # (key, birthday, value) triples; iter/map/zip work on Python 2 and
        # 3 (the original iterkeys/imap/izip are Py2-only).
        ikeys = iter(self.data)
        ibirthday = map(operator.itemgetter(0), self.data.values())
        ivalues = map(operator.itemgetter(1), self.data.values())
        return zip(ikeys, ibirthday, ivalues)

    def iteritems(self):
        """Iterate (key, value) pairs of live entries."""
        self.cull()
        ikeys = iter(self.data)
        ivalues = map(operator.itemgetter(1), self.data.values())
        return zip(ikeys, ivalues)
コード例 #11
0
ファイル: lru.py プロジェクト: liangtaos/lru_python
class LRUCacheDict(object):
    """Dictionary-like object with LRU eviction and per-entry expiration."""

    def __init__(self, max_size=1024, expiration=15 * 60):
        self.max_size = max_size        # entry-count cap
        self.expiration = expiration    # seconds before an entry expires

        self.__values = {}
        self.__expire_times = OrderedDict()   # key -> absolute expiry time
        self.__access_times = OrderedDict()   # key -> last access (LRU order)

    def size(self):
        """Number of live entries."""
        return len(self.__values)

    def clear(self):
        """Drop every entry."""
        self.__values.clear()
        self.__expire_times.clear()
        self.__access_times.clear()

    def has_key(self, key):
        # `in` works on Python 2 and 3; dict.has_key() is Py2-only.
        return key in self.__values

    def __setitem__(self, key, value):
        t = int(time())
        self.__delete__(key)  # re-insert so ordering reflects this write
        self.__values[key] = value
        self.__access_times[key] = t
        self.__expire_times[key] = t + self.expiration
        self.cleanup()

    def __getitem__(self, key):
        # Move the key to the most-recently-used end before returning it.
        t = int(time())
        del self.__access_times[key]
        self.__access_times[key] = t
        self.cleanup()
        return self.__values[key]

    def __delete__(self, key):
        if key in self.__values:
            del self.__values[key]
            del self.__expire_times[key]
            del self.__access_times[key]

    def cleanup(self):
        """Evict expired entries, then enforce max_size (LRU first)."""
        if self.expiration is None:
            return None
        t = int(time())
        # Bug fix: iterate over a snapshot -- __delete__ mutates the dict,
        # and deleting during iteration raises RuntimeError.
        for k in list(self.__expire_times):
            if self.__expire_times[k] < t:
                self.__delete__(k)
            else:
                # Expiry times are insertion-ordered; the rest are newer.
                break

        # If we have more than self.max_size items, delete the least
        # recently used (first key of __access_times).
        while len(self.__values) > self.max_size:
            oldest = next(iter(self.__access_times))
            self.__delete__(oldest)
コード例 #12
0
ファイル: storage.py プロジェクト: fgadaleta/kademlia
class ForgetfulStorage(object):
    #implements(IStorage)

    def __init__(self, ttl=604800):
        """
        Ordered key/value store whose entries expire after `ttl` seconds.
        By default, max age is a week (604800 seconds).
        """
        self.data = OrderedDict()  # key -> (insert time, value), oldest first
        self.ttl = ttl

    def __setitem__(self, key, value):
        # Delete first so the key is re-inserted at the newest position.
        if key in self.data:
            del self.data[key]
        self.data[key] = (time.time(), value)
        self.cull()

    def cull(self):
        """Evict every entry older than self.ttl."""
        # Materialize the expired items first: popping while the lazy
        # iterator is open mutates the dict mid-iteration (RuntimeError).
        for _ in list(self.iteritemsOlderThan(self.ttl)):
            self.data.popitem(last=False)

    def get(self, key, default=None):
        """Return the live value for `key`, or `default` when absent/expired."""
        self.cull()
        if key in self.data:
            return self[key]
        return default

    def __getitem__(self, key):
        self.cull()
        return self.data[key][1]

    def __iter__(self):
        self.cull()
        return iter(self.data)

    def __repr__(self):
        self.cull()
        return repr(self.data)

    def iteritemsOlderThan(self, secondsOld):
        """Yield (key, value) pairs at least `secondsOld` seconds old."""
        minBirthday = time.time() - secondsOld
        zipped = self._tripleIterable()
        # Insertion order implies age order, so takewhile stops at the first
        # entry young enough to keep.
        matches = takewhile(lambda r: minBirthday >= r[1], zipped)
        return map(operator.itemgetter(0, 2), matches)

    def _tripleIterable(self):
        # (key, birthday, value) triples; iter/map/zip work on Python 2 and
        # 3 (the original iterkeys/imap/izip are Py2-only).
        ikeys = iter(self.data)
        ibirthday = map(operator.itemgetter(0), self.data.values())
        ivalues = map(operator.itemgetter(1), self.data.values())
        return zip(ikeys, ibirthday, ivalues)

    def iteritems(self):
        """Iterate (key, value) pairs of live entries."""
        self.cull()
        ikeys = iter(self.data)
        ivalues = map(operator.itemgetter(1), self.data.values())
        return zip(ikeys, ivalues)
コード例 #13
0
ファイル: merge_3way.py プロジェクト: netsia/voltha_doc
 class AnalyzeChanges(object):
     def __init__(self, lst1, lst2, keyname):
         self.keymap1 = OrderedDict((getattr(rev._config._data, keyname), i)
                                    for i, rev in enumerate(lst1))
         self.keymap2 = OrderedDict((getattr(rev._config._data, keyname), i)
                                    for i, rev in enumerate(lst2))
         self.added_keys = [
             k for k in self.keymap2.iterkeys() if k not in self.keymap1]
         self.removed_keys = [
             k for k in self.keymap1.iterkeys() if k not in self.keymap2]
         self.changed_keys = [
             k for k in self.keymap1.iterkeys()
             if k in self.keymap2 and
                 lst1[self.keymap1[k]]._hash != lst2[self.keymap2[k]]._hash
         ]
コード例 #14
0
def get_docs(queries, filename):
    """Collect qrel docs for `queries` and write "doc id" lines to `filename`.

    NOTE(review): relies on module-level doc_def, label_dict, doc_list and
    doc_dict defined elsewhere in this file -- confirm against the full module.
    """

    with open(
            "/Users/snehagaikwad/Documents/IR/IR_data/AP_DATA/HW5/qrels.adhoc.51-100.AP89.txt"
    ) as qrel_lines:
        for line in qrel_lines:
            fields = line.split()
            query_num = fields[0]
            if query_num in queries:
                doc_def[query_num].add(fields[2])
                label_dict[(query_num, fields[2])] = fields[3]

    # Plain dict iteration replaces the Py2-only iterkeys().
    for q_no in doc_def:
        for doc in doc_def[q_no]:
            doc_list.add(doc)

    # Assign each doc a sequential id in iteration order.
    for count, doc in enumerate(doc_list, start=1):
        doc_dict[doc] = count

    # Highest-numbered docs first.
    sorted_docs = OrderedDict(
        sorted(doc_dict.items(), key=lambda x: x[1], reverse=True))

    with open(filename, "w") as fw:
        for key in sorted_docs:
            fw.write(str(key) + " " + str(doc_dict[key]) + "\n")
コード例 #15
0
ファイル: tunit_cache.py プロジェクト: FooBarrior/yavide
class FifoCache():
    """Bounded mapping that evicts the oldest insertion once max_capacity
    entries are held (FIFO)."""

    def __init__(self, max_capacity):
        self.max_capacity = max_capacity
        self.store = OrderedDict()

    def iterkeys(self):
        # iter()/values()/items() work on Python 2 and 3; delegating to
        # OrderedDict.iterkeys()/itervalues()/iteritems() is Py2-only.
        return iter(self.store)

    def itervalues(self):
        return iter(self.store.values())

    def iteritems(self):
        return iter(self.store.items())

    def __getitem__(self, key):
        return self.store[key]

    def __setitem__(self, key, value):
        """Insert or overwrite; an overwrite re-inserts the key at the end."""
        if key in self.store:
            del self.store[key]
        elif len(self.store) == self.max_capacity:
            self.store.popitem(last=False)  # last=False --> FIFO, last=True --> LIFO
        self.store[key] = value

    def __delitem__(self, key):
        del self.store[key]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)
コード例 #16
0
class LineGroup(object):
    """A named, ordered group of Line objects sharing an x-axis."""

    def __init__(self, group_name, line_names, increasing=True):
        self.group_name = group_name
        # name -> Line, in the order the names were given
        self.lines = OrderedDict([(name, Line()) for name in line_names])
        self.increasing = increasing
        self.xlim = (None, None)

    def get_line_names(self):
        # list(dict) is Py2/Py3 compatible; iterkeys() is Py2-only.
        return list(self.lines)

    def get_line_xs(self):
        """x arrays of every line, in declaration order."""
        return [line.get_xs() for line in self.lines.values()]

    def get_line_ys(self):
        """y arrays of every line, in declaration order."""
        return [line.get_ys() for line in self.lines.values()]

    def get_max_x(self):
        """Largest x over all non-empty lines (0 for empty ones).

        Bug fix: the original called np.maximum(arr) with one argument,
        which is a TypeError -- np.maximum is the elementwise binary ufunc;
        np.max reduces a single array.
        """
        return max([np.max(line.get_xs()) if line.last_index > -1 else 0
                    for line in self.lines.values()])
コード例 #17
0
ファイル: main.py プロジェクト: khazamov/hackerrankchallenge
def delete_duplicates(input_file, my_file):
    """Merge name variants sharing an SSN; write one "name:SSN" line per SSN.

    The longest (ties broken alphabetically) name variant is kept; names in
    "Last,First[ Middle]" form are rewritten as "First [Middle ]Last".
    """
    last_first = re.compile("\w+,\w+")
    last_first_middle = re.compile("\w+,\w+ \w+")

    # SSN -> list of name variants, in first-seen order.  (The original
    # shadowed the builtin `dict` and leaked the input file handle.)
    names_by_ssn = OrderedDict()
    with open(input_file) as fid:
        for line in fid:
            parts = re.split(":", line)
            if len(parts) == 1:
                continue
            name, SSN = parts
            SSN = SSN.strip("\n")
            # setdefault replaces the Py2-only has_key() check.
            names_by_ssn.setdefault(SSN, []).append(name)

    out_lines = []
    for SSN, name_array in names_by_ssn.items():
        # Longest name first; ties broken alphabetically.
        name_array.sort(key=lambda item: (-len(item), item))
        best = name_array[0]
        if re.match(last_first_middle, best) or re.match(last_first, best):
            split_name = re.findall(r"[\w']+", best)
            if re.match(last_first_middle, best):
                normal_form = split_name[1] + " " + split_name[2] + " " + split_name[0]
            else:
                normal_form = split_name[1] + " " + split_name[0]
        else:
            normal_form = best
        out_lines.append(normal_form + ":" + SSN)

    # `with` guarantees the output file is flushed and closed.
    with open(my_file, "w+") as fid_wrt:
        fid_wrt.write("\n".join(out_lines))
コード例 #18
0
def build(master_dict, path, l, fh):
    """Write one CSV row of bigram counts for file `path`, labeled `l`.

    Columns are the sorted bigrams of `master_dict`; counts come from the
    file's own bigram frequency distribution (0 when absent).
    """
    # One column per master bigram, initialized to 0, in sorted order.
    m_dict = OrderedDict((item, 0) for item in sorted(master_dict))

    # `with` closes the handle even on error (the original closed manually).
    with open(path, "r") as fhl:
        tokens = fhl.read().split()
    # bigrams/FreqDist come from nltk, imported elsewhere in this file.
    temp = FreqDist(bigrams(tokens))

    # Plain iteration and `in` replace the Py2-only iterkeys()/has_key().
    for key in m_dict:
        m_dict[key] = temp[key] if key in temp else 0

    values = [str(val) for val in m_dict.values()]
    fh.write(l + "," + ",".join(values))
    fh.write('\n')
def build(master_dict, path, l, fh):
    """Write one libsvm-style line ("label index:count ...") for `path`.

    `master_dict` maps each bigram to its feature index; `l` is 'pos' for
    the positive class, anything else for the negative class.
    """
    # get_bigram_file is defined elsewhere in this file.
    temp_dict = OrderedDict(sorted(get_bigram_file(path).items()))
    # libsvm labels: +1 for positive samples, -1 otherwise.
    lab = '+1' if l == 'pos' else '-1'
    parts = [lab]
    # items() replaces the Py2-only iterkeys(); the commented-out
    # master_dict.keys().index(key) lookup was O(n) per key, the direct
    # dict lookup is O(1).  The bare Py2 `print "12"`... progress
    # statements (syntax errors on Python 3) were removed.
    for key, count in temp_dict.items():
        parts.append(str(master_dict[key]) + ":" + str(count))
    fh.write(" ".join(parts) + "\n")
コード例 #20
0
ファイル: creator.py プロジェクト: chinjieh/mugenplcfg
    def getDeviceShortNames(self, devicepaths, pciids):
        """Map each device path to a unique short class name.

        Returns an OrderedDict of devicepath -> numbered name; left empty
        when the pci.ids database cannot be loaded.
        """
        shortnames = OrderedDict()
        try:
        # Initialise PciIdsParser, throws customExceptions.PciIdsFileNotFound
        # if fail
            pciIdsParser = parseutil.PciIdsParser(pciids)
        except customExceptions.PciIdsFileNotFound:
            message.addError("pci.ids file could not be located in tool "
                             "directory: %s. " % paths.CURRENTDIR +
                             "Device "
                             "names could not be obtained. Please ensure that "
                             "the file is in the directory.",
                             False)
        else:

            for devicepath in devicepaths:
                # Add entry to dictionary shortnames
                shortnames[devicepath] = self.getClassName(
                    devicepath, pciIdsParser)

            # Disambiguate duplicate class names by numbering them.
            # values()/plain iteration are Py2/Py3 compatible (the original
            # itervalues()/iterkeys() are Py2-only).
            namelist = list(shortnames.values())
            listnumberer = util.ListNumberer(namelist)
            for devicepath in shortnames:
                shortnames[devicepath] = listnumberer.getName(
                    shortnames[devicepath])

        return shortnames
コード例 #21
0
ファイル: plot_AUC.py プロジェクト: tsupinie/research
def main():
    """Plot AUC time series per experiment for each reflectivity threshold."""
    temp = goshen_1km_temporal(start=14400)
    # experiment id -> display label
    experiments = OrderedDict([('1kmf-sndr0h=25km', 'CTRL'), ('1kmf-zs25-no-05XP', 'NO_MWR'), ('1kmf-z-no-snd', 'NO_SND'), ('1kmf-zs25-no-mm', 'NO_MM'), ('1kmf-zs25-no-mm-05XP', 'NO_MWR_MM'), ('1kmf-z-no-v2', 'NO_V2')])
    refl_thresh = [ 25, 45 ]

    all_AUCs = []
    for thresh in refl_thresh:
        rocs = []
        # Plain iteration replaces the Py2-only iterkeys().
        for exp in experiments:
            pkl_name = "roc_pkl/%s_%02ddBZ_roc.pkl" % (exp, thresh)
            roc = cPickle.load(open(pkl_name, 'r'))
            rocs.append(roc)

        # Regroup from per-experiment lists to per-time tuples; list() keeps
        # the result subscriptable/re-iterable on Python 3 as well.
        rocs = list(zip(*rocs))
        AUCs = []
        for time, roc_group in zip(temp, rocs):
            AUC = [ computeAUC(r[0]) for r in roc_group ]
            AUCs.append(AUC)
        all_AUCs.append(AUCs)

    # Transpose each threshold's table to per-experiment AUC series.
    all_AUCs = [ list(zip(*a)) for a in all_AUCs ]

    def subplotFactory(AUC_group, thresh):
        colors = dict(zip(experiments, ['k', 'r', 'g', 'b', 'c', 'm']))
        def doSubplot(multiplier=1.0, layout=(-1, -1)):
            # items() replaces the Py2-only iteritems().
            for idx, (exp, exp_name) in enumerate(experiments.items()):
                pylab.plot(temp.getTimes(), AUC_group[idx], label=exp_name, color=colors[exp])

            # Reference line: AUC of 0.5 (no skill).
            pylab.axhline(y=0.5, color='k', linestyle=':')

            n_row, n_col = layout
            if n_row == 2:
                # Bottom row: draw time labels and the legend.
                pylab.xlabel("Time (UTC)", size='large')
                pylab.xlim(temp.getTimes()[0], temp.getTimes()[-1])
                pylab.xticks(temp.getTimes(), temp.getStrings("%H%M", aslist=True), rotation=30, size='x-large')

                pylab.legend(loc=3)

            else:
                pylab.xlim(temp.getTimes()[0], temp.getTimes()[-1])
                pylab.xticks(temp.getTimes(), [ "" for t in temp ])

            pylab.ylabel(r"AUC (Reflectivity $\geq$ %d dBZ)" % thresh, size='large')
            pylab.ylim(0.4, 1.0)
            pylab.yticks(size='x-large')
            return

        return doSubplot

    subplots = []
    for AUC_group, thresh in zip(all_AUCs, refl_thresh):
        subplots.append(subplotFactory(AUC_group, thresh))

    pylab.figure(figsize=(8, 9.5))
    pylab.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.1, hspace=0.05)
    publicationFigure(subplots, (2, 1), corner='ur')

    pylab.savefig("roc_AUC.png")
    pylab.close()
    return
コード例 #22
0
def get_docs(queries, filename):
    """Collect qrel docs for `queries` and write "doc id" lines to `filename`.

    NOTE(review): relies on module-level doc_def, label_dict, doc_list and
    doc_dict defined elsewhere in this file -- confirm against the full module.
    """

    with open("/Users/snehagaikwad/Documents/IR/IR_data/AP_DATA/HW5/qrels.adhoc.51-100.AP89.txt") as qrel_lines:
        for line in qrel_lines:
            fields = line.split()
            query_num = fields[0]
            if query_num in queries:
                doc_def[query_num].add(fields[2])
                label_dict[(query_num, fields[2])] = fields[3]

    # Plain dict iteration replaces the Py2-only iterkeys().
    for q_no in doc_def:
        for doc in doc_def[q_no]:
            doc_list.add(doc)

    # Assign each doc a sequential id in iteration order.
    for count, doc in enumerate(doc_list, start=1):
        doc_dict[doc] = count

    # Highest-numbered docs first.
    sorted_docs = OrderedDict(sorted(doc_dict.items(), key=lambda x: x[1], reverse=True))

    with open(filename, "w") as fw:
        for key in sorted_docs:
            fw.write(str(key) + " " + str(doc_dict[key]) + "\n")
コード例 #23
0
class OrderedSet(object):
    """Set that remembers insertion order, backed by an OrderedDict."""

    def __init__(self, contents=()):
        # Keys carry the membership; values are ignored placeholders.
        self.set = OrderedDict((c, None) for c in contents)

    def __contains__(self, item):
        return item in self.set

    def __iter__(self):
        # iter(dict) yields keys on Python 2 and 3; the original
        # iterkeys() is Py2-only.
        return iter(self.set)

    def __len__(self):
        return len(self.set)

    def add(self, item):
        """Insert item; re-adding an existing item keeps its position."""
        self.set[item] = None

    def clear(self):
        self.set.clear()

    def pop(self):
        """Remove and return the oldest item (raises StopIteration if empty)."""
        item = next(iter(self.set))
        del self.set[item]
        return item

    def remove(self, item):
        """Remove item; raises KeyError if absent."""
        del self.set[item]

    def to_list(self):
        return list(self.set)
コード例 #24
0
    def build_sprite_list(self):
        """
        Builds a lookup table of sprite lumps, and loads a PLAYPAL palette
        from the current WAD list.
        """

        sprite_lumps = OrderedDict()
        for wad in self.wads:
            sprite_lumps.update(wad.get_sprite_lumps())

        # Create a list of sprite names (first 4 characters of the lump
        # name).  Plain iteration / items() replace the Py2-only
        # iterkeys() / iteritems() used originally.
        for name in sprite_lumps:
            sprite_name = name[0:4]
            self.sprites[sprite_name] = SpriteEntry(sprite_name)

        # Add sprite lumps for each sprite.
        for sprite_key, sprite_entry in self.sprites.items():

            for lump_name, lump in sprite_lumps.items():
                if lump_name[0:4] != sprite_key:
                    continue

                # Characters 4 and 5 of the lump name encode frame/rotation.
                frame = lump_name[4]
                rotation = lump_name[5]
                sprite_entry.add_frame(frame, rotation, lump, False)

                # An 8-character name also encodes a mirrored sprite lump.
                if len(lump_name) == 8:
                    frame = lump_name[6]
                    rotation = lump_name[7]
                    sprite_entry.add_frame(frame, rotation, lump, True)

        # Find a PLAYPAL lump to use as palette.
        playpal = self.get_lump('PLAYPAL')
        if playpal is not None:
            self.palette = graphics.Palette(playpal.get_data())
コード例 #25
0
def do_eisa_targetscan_gene_ranks_plot(df, ts_score_col, intron_col, exon_col,
                                       diff_col):
    '''Produces plot comparing gene rankings from different EISA data types (from EISA paper).

    For each data column, genes are ranked ascending (most negative first)
    and the plot shows, for the top-N genes, the percentage that have a
    TargetScan seed match (ts_score_col < 0).  Returns the matplotlib axis.
    '''
    plot_labels = OrderedDict()
    plot_labels[intron_col] = r'$\Delta$intron'
    plot_labels[exon_col] = r'$\Delta$exon'
    plot_labels[diff_col] = r'$\Delta$exon-$\Delta$intron'

    # A negative TargetScan score marks a gene as having a target in its UTR.
    eisa_plot_df = pd.DataFrame(data=None, index=df.index)
    eisa_plot_df['target_in_utr'] = False
    eisa_plot_df.loc[df[ts_score_col] < 0, 'target_in_utr'] = True
    print "Genes with targets in UTR: %r" % Counter(
        eisa_plot_df['target_in_utr'])

    # NOTE(review): iterkeys() is Python-2-only.
    for col in plot_labels.iterkeys():
        sorted_data_for_type = df[col].dropna().sort_values(inplace=False)
        eisa_plot_df['%s_ranks' % col] = sorted_data_for_type.rank(
            ascending=True)  #Most negative have highest ranking

    # Evaluate the cumulative percentage for the top 1..1000 ranked genes.
    x_range = range(1, 1000 + 1)
    sns.set(font_scale=2.0)
    sns.set_style("white")
    pl.figure(figsize=(6, 6))
    for data_type in plot_labels.keys():

        data_type_percentages = pd.Series(index=x_range)
        for i in x_range:
            gene_mask = eisa_plot_df["%s_ranks" % data_type] <= i
            data_for_genes = eisa_plot_df.loc[gene_mask, ['target_in_utr']]
            # Percentage of the top-i genes whose target_in_utr flag is True.
            percent_of_genes_with_target = float(sum(
                data_for_genes.values)) / float(len(data_for_genes)) * 100

            data_type_percentages[i] = percent_of_genes_with_target

        pl.plot(x_range,
                data_type_percentages,
                label=plot_labels[data_type],
                linewidth=4)

    ax = pl.gca()

    # Styling: thicker black spines, fixed ticks, legend placement.
    [i.set_linewidth(1.5) for i in ax.spines.itervalues()]
    [i.set_edgecolor('black') for i in ax.spines.itervalues()]
    box = ax.get_position()
    # ax.set_position([box.x0, box.y0 + 0.05 * box.height, box.width * 0.7, box.height])
    xticks = [0, 500, 1000]
    ax.xaxis.set_ticks(xticks)
    ax.set_xticklabels(xticks)  #, fontsize=20)
    yticks = [0, 50, 100]
    ax.yaxis.set_ticks(yticks)
    ax.set_yticklabels(yticks)  #, fontsize=20)
    pl.xlim(xmin=0)  #, xmax=num_genes_on_plot)
    pl.xlabel('Top-ranked genes')
    pl.ylabel('% with seed match')
    ax.legend(loc='upper center', bbox_to_anchor=(0.68, 1.0))
    #     pl.text(910, 92, "3'UTR", horizontalalignment='right', verticalalignment='top', fontsize=35) #Title
    #     ax.legend(loc='upper center', bbox_to_anchor=(0.67, 0.80))
    ax.set_position(
        [box.x0 + 0.05, box.y0 + 0.04, box.width * 0.98, box.height])
    return ax
コード例 #26
0
ファイル: lru-cache.py プロジェクト: yuankui/leetcode
class LRUCache:
    """Least-recently-used cache backed by an OrderedDict.

    The most recently used key sits at the back of the dict; the head
    (first) entry is always the eviction candidate.
    """

    # @param capacity, an integer
    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of entries
        self.map = OrderedDict()  # key -> value, oldest first
        self.size = 0             # current number of entries

    # @return an integer
    def get(self, key):
        """Return the value for *key*, refreshing its recency, or -1."""
        if key in self.map:
            # Delete and re-insert to move the key to the newest position.
            value = self.map[key]
            del self.map[key]
            self.map[key] = value
            return value
        return -1

    # @param key, an integer
    # @param value, an integer
    # @return nothing
    def set(self, key, value):
        """Insert or update *key*, evicting the oldest entry when full."""
        if key in self.map:
            self.map[key] = value
            self.get(key)  # refresh recency
            return

        if self.size >= self.capacity:
            self.removeOldest()
        self.map[key] = value
        self.size += 1

    def removeOldest(self):
        """Evict the least recently used (head) entry."""
        # popitem(last=False) pops the head directly; this replaces the
        # Python-2-only ``for k in self.map.iterkeys(): break`` idiom and
        # works on both Python 2 and 3.
        self.map.popitem(last=False)
        self.size -= 1
コード例 #27
0
 def levelOrderBottom(self, root):
     """
     Bottom-up level-order traversal: deepest level first.

     :type root: TreeNode
     :rtype: List[List[int]]
     """
     if root is None:
         return []
     from collections import deque
     from collections import OrderedDict
     # Carry the depth alongside each queued node instead of stamping a
     # ``level`` attribute onto the caller's tree nodes (the original
     # mutated the input tree).
     queue = deque([(root, 1)])
     levels = OrderedDict()  # depth -> node values in visit order
     while queue:
         node, depth = queue.popleft()
         if node.left is not None:
             queue.append((node.left, depth + 1))
         if node.right is not None:
             queue.append((node.right, depth + 1))
         levels.setdefault(depth, []).append(node.val)
     # Iterate the dict directly (works on Python 2 and 3; the original
     # iterkeys() is Python-2-only), then reverse for bottom-up order.
     result = [levels[depth] for depth in levels]
     result.reverse()
     return result
コード例 #28
0
def get_dict(tax_ass):
    """Parse a taxonomy-assignment file into colour and phylum lookups.

    Each line is ``<id>\\t<tax_line>`` where *tax_line* is a ``;``-separated
    list of ranks such as ``p__Firmicutes`` or ``g__Bacillus``; spaces in
    names are replaced by underscores.

    Returns ``(color_dict, genus_in_phylum)``:
      - ``color_dict``: phylum -> colour from COLS_BREWER (cycled mod 20)
      - ``genus_in_phylum``: genus -> phylum
    """
    phylum_reg = re.compile('p__(.+)')
    genus_reg = re.compile('g__(.+)')
    genus_in_phylum = OrderedDict()
    phylum_genus = OrderedDict()
    with open(tax_ass) as fp:
        for line in fp:
            tabs = line.rstrip().split('\t')
            tax_line = tabs[1]
            taxes = tax_line.split(';')
            p = None
            g = None
            for tax in taxes:
                # Ranks that do not match simply leave p/g unchanged.
                m = phylum_reg.search(tax)
                if m is not None:
                    p = m.group(1).replace(' ', '_')
                m = genus_reg.search(tax)
                if m is not None:
                    g = m.group(1).replace(' ', '_')
            # Record the pair only when both ranks were present on the line.
            if g is not None and p is not None:
                genus_in_phylum[g] = p
                phylum_genus[p] = g
    color_dict = OrderedDict()
    # Iterate the dict directly (Python 2 and 3) instead of the
    # Python-2-only iterkeys(); OrderedDict preserves insertion order.
    for ind, p in enumerate(phylum_genus):
        color_dict[p] = COLS_BREWER[ind % 20]
    return color_dict, genus_in_phylum
コード例 #29
0
ファイル: connect_116.py プロジェクト: wshcdr/leetcode-1
    def connect(self, root):
        """
        Wire each node's ``next`` pointer to its right-hand neighbour on
        the same level (None for the rightmost node of each level).

        :type root: TreeLinkNode
        :rtype: nothing
        """
        if root is None:
            return
        from collections import deque
        from collections import OrderedDict
        # Carry the depth with each queued node instead of stamping a
        # ``level`` attribute onto the caller's tree nodes.
        queue = deque([(root, 1)])
        levels = OrderedDict()  # depth -> nodes in left-to-right order
        while queue:
            node, depth = queue.popleft()
            if node.left is not None:
                queue.append((node.left, depth + 1))
            if node.right is not None:
                queue.append((node.right, depth + 1))
            levels.setdefault(depth, []).append(node)
        # Iterating values() directly works on Python 2 and 3 (the
        # original iterkeys() is Python-2-only).
        for nodes in levels.values():
            for left, right in zip(nodes, nodes[1:]):
                left.next = right
            nodes[-1].next = None
コード例 #30
0
def do_single_plot(output_fpath, alg_data, plot_title, an_pred, nk_pred):
    """Render one log-log timing plot and save it to *output_fpath*.

    alg_data maps algorithm name -> {(n, k): timings}; an_pred filters
    algorithm names and nk_pred filters (n, k) pairs.  The plotted value
    is the median timing (t[2] of the sorted samples).
    """
    import matplotlib.pyplot as plt
    from collections import OrderedDict

    # gets the data to be used
    # NOTE(review): iteritems()/itervalues()/iterkeys() are Python-2-only.
    plot_alg_data = dict((an, dict((n, t)
                                   for (n, k), t in ad.iteritems()
                                   if nk_pred(n, k)))
                         for an, ad in alg_data.iteritems()
                         if an_pred(an))

    # makes the data vectors
    # n values are taken from an arbitrary algorithm's dict; presumably all
    # algorithms share the same n keys — TODO confirm.
    n_vec = sorted(n for n in plot_alg_data.itervalues().next().iterkeys())
    data_by_alg = dict((an, [sorted(av[n]) for n in n_vec])
                       for an, av in plot_alg_data.iteritems())
    # For each algorithm: (medians, upper offsets, lower offsets), assuming
    # five sorted samples per point (indices 0, 2 and 4 are used).
    data_vecs = OrderedDict((an, zip(*[(t[2], t[4] - t[2], t[2] - t[0])
                                       for t in av]))
                            for an, av in\
                            sorted(data_by_alg.iteritems(),
                                   key = lambda ai: AN_ORDER[ai[0]]))

    plt.figure()
    plt.xscale('log')
    plt.yscale('log')
    for an, ad in data_vecs.iteritems():
        #plt.errorbar(n_vec, ad[0], yerr=ad[1:]) -- Error bars are too small
        plt.errorbar(n_vec, ad[0], fmt=FMT_BY_AN[an])
    plt.legend([FULL_NAME_BY_AN[an] for an in data_vecs.iterkeys()],
               'lower right')
    plt.title(plot_title)
    
    plt.xlabel('$n$')
    plt.ylabel('Execution time (seconds)')

    plt.savefig(output_fpath)
コード例 #31
0
ファイル: boardconfig.py プロジェクト: daamien/kansha
class BoardConfig(object):

    """Board configuration component"""

    def __init__(self, board):
        """Initialization

        In:
            - ``board`` -- the board object will want to configure
        """
        self.board = board
        # Menu entries keep their declaration order (OrderedDict); the
        # first one inserted becomes the default selection below.
        self.menu = OrderedDict()
        self.menu['profile'] = MenuEntry(_(u'Profile'), 'icon-profile', BoardProfile)
        if security.has_permissions('manage', self.board):
            self.menu['labels'] = MenuEntry(_(u'Card labels'), 'icon-tag', BoardLabels)
            self.menu['weights'] = MenuEntry(_(u'Card weights'), 'icon-meter', BoardWeights)
            self.menu['background'] = MenuEntry(_(u'Background'), 'icon-paint-format', BoardBackground)
        self.selected = None
        self.content = component.Component(None)
        # next(iter(...)) selects the first menu key; it replaces the
        # Python-2-only iterkeys().next() and works on Python 2 and 3.
        self.select(next(iter(self.menu)))

    def select(self, v):
        """Select a configuration menu item

        In:
            - ``v`` -- the id_ of the menu item we want to show
        """
        self.selected = v
        self.content.becomes(self.menu[v].content(self.board))
コード例 #32
0
ファイル: connect_117.py プロジェクト: Firkraag/leetcode
 def connect(self, root):
     """
     Wire each node's ``next`` pointer to its right-hand neighbour on
     the same level (None for the rightmost node of each level).

     :type root: TreeLinkNode
     :rtype: nothing
     """
     if root is None:
         return
     from collections import deque
     from collections import OrderedDict
     # Carry the depth with each queued node instead of stamping a
     # ``level`` attribute onto the caller's tree nodes.
     queue = deque([(root, 1)])
     levels = OrderedDict()  # depth -> nodes in left-to-right order
     while queue:
         node, depth = queue.popleft()
         if node.left is not None:
             queue.append((node.left, depth + 1))
         if node.right is not None:
             queue.append((node.right, depth + 1))
         levels.setdefault(depth, []).append(node)
     # Iterating values() directly works on Python 2 and 3 (the
     # original iterkeys() is Python-2-only).
     for nodes in levels.values():
         for left, right in zip(nodes, nodes[1:]):
             left.next = right
         nodes[-1].next = None
コード例 #33
0
ファイル: layup.py プロジェクト: BecMax/fusedwind-dev
class Region(object):
    """ Holds a region's layers along the blade.
    
    :param layers: Dictionary of Layer3D objects
    :type layers: dict
    """
    def __init__(self):
        self.layers = OrderedDict()

    def add_layer(self, name):
        ''' Inserts a layer into layers dict.

        When *name* already occurs as a substring of an existing layer
        key, a two-digit duplicate counter is appended (e.g. ``glass01``);
        otherwise the plain name is used.

        :param name: Name of the material
        :return: The layer added to the region
        '''
        dubl = 0
        # Iterate the dict directly (Python 2 and 3 compatible) instead
        # of the Python-2-only iterkeys().
        for existing in self.layers:
            if name in existing:
                dubl += 1

        if dubl > 0:
            lname = '%s%02d' % (name, dubl)
        else:
            lname = name

        layer = Layer()
        self.layers[lname] = layer
        return layer
コード例 #34
0
    def _receive_packet_callback(self, packet):
        """Dispatch a timestamped EIEIO packet to registered callbacks.

        Groups the packet's (key, payload-time) elements by time and
        label, then invokes each callback registered for that label with
        (label, time, [atom_ids]).  Errors are printed, not raised, so the
        receive loop stays alive.
        """
        try:
            header = packet.eieio_header
            if not header.is_time:
                raise Exception(
                    "Only packets with a timestamp are currently considered")

            # time -> {label_id -> [atom_id, ...]} in arrival order
            key_times_labels = OrderedDict()
            while packet.is_next_element:
                element = packet.next_element
                time = element.payload
                key = element.key
                # Keys that are not registered are silently skipped.
                if key in self._key_to_atom_id_and_label:
                    (atom_id, label_id) = \
                        self._key_to_atom_id_and_label[key]
                    if time not in key_times_labels:
                        key_times_labels[time] = dict()
                    if label_id not in key_times_labels[time]:
                        key_times_labels[time][label_id] = list()
                    key_times_labels[time][label_id].append(atom_id)

            # Direct dict iteration replaces the Python-2-only iterkeys().
            for time in key_times_labels:
                for label_id in key_times_labels[time]:
                    label = self._receive_labels[label_id]
                    for callback in self._live_event_callbacks[label_id]:
                        callback(label, time, key_times_labels[time][label_id])
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            traceback.print_exc()
コード例 #35
0
def get_dict(tax_ass):
    """Parse a taxonomy-assignment file into colour and phylum lookups.

    Each line is ``<id>\\t<tax_line>`` where *tax_line* is a ``;``-separated
    list of ranks such as ``p__Firmicutes`` or ``g__Bacillus``; spaces in
    names are replaced by underscores.

    Returns ``(color_dict, genus_in_phylum)``:
      - ``color_dict``: phylum -> colour from COLS_BREWER (cycled mod 20)
      - ``genus_in_phylum``: genus -> phylum
    """
    phylum_reg = re.compile("p__(.+)")
    genus_reg = re.compile("g__(.+)")
    genus_in_phylum = OrderedDict()
    phylum_genus = OrderedDict()
    with open(tax_ass) as fp:
        for line in fp:
            tabs = line.rstrip().split("\t")
            tax_line = tabs[1]
            taxes = tax_line.split(";")
            p = None
            g = None
            for tax in taxes:
                # Ranks that do not match simply leave p/g unchanged.
                m = phylum_reg.search(tax)
                if m is not None:
                    p = m.group(1).replace(" ", "_")
                m = genus_reg.search(tax)
                if m is not None:
                    g = m.group(1).replace(" ", "_")
            # Record the pair only when both ranks were present on the line.
            if g is not None and p is not None:
                genus_in_phylum[g] = p
                phylum_genus[p] = g
    color_dict = OrderedDict()
    # Iterate the dict directly (Python 2 and 3) instead of the
    # Python-2-only iterkeys(); OrderedDict preserves insertion order.
    for ind, p in enumerate(phylum_genus):
        color_dict[p] = COLS_BREWER[ind % 20]
    return color_dict, genus_in_phylum
コード例 #36
0
def from_files(filenames):
    """Return an iterator that provides a sequence of Histograms for
the histograms defined in filenames.

    Raises DefinitionException on duplicate histogram names or when the
    USE_COUNTER2_* histograms are not a contiguous block.
    """
    all_histograms = OrderedDict()
    for filename in filenames:
        parser = FILENAME_PARSERS[os.path.basename(filename)]
        histograms = parser(filename)

        # OrderedDicts are important, because then the iteration order over
        # the parsed histograms is stable, which makes the insertion into
        # all_histograms stable, which makes ordering in generated files
        # stable, which makes builds more deterministic.
        if not isinstance(histograms, OrderedDict):
            # raise X(msg) is valid on Python 2 and 3; the original
            # ``raise X, msg`` form is a Python 3 syntax error.
            raise BaseException("histogram parser didn't provide an OrderedDict")

        for (name, definition) in histograms.items():
            # ``in`` replaces the removed-in-Python-3 has_key().
            if name in all_histograms:
                raise DefinitionException("duplicate histogram name %s" % name)
            all_histograms[name] = definition

    # We require that all USE_COUNTER2_* histograms be defined in a contiguous
    # block.  A list comprehension replaces filter(): on Python 3 filter()
    # returns an iterator, which made the truthiness test below always true.
    use_counter_indices = [
        (i, name) for (i, name) in enumerate(all_histograms)
        if name.startswith("USE_COUNTER2_")]
    if use_counter_indices:
        lower_bound = use_counter_indices[0][0]
        upper_bound = use_counter_indices[-1][0]
        n_counters = upper_bound - lower_bound + 1
        if n_counters != len(use_counter_indices):
            raise DefinitionException(
                "use counter histograms must be defined in a contiguous block")

    for (name, definition) in all_histograms.items():
        yield Histogram(name, definition)
コード例 #37
0
 def next_airport(self, date):
     """Return the airport code with the smallest value on *date*.

     Entries are sorted ascending by value (t[1] of each item); ties keep
     their original key order because sorted() is stable.
     """
     od = OrderedDict(sorted(self.data[date].items(),
                             key=lambda t: t[1],  # sort by value, not key
                             reverse=False))      # ascending order
     # The first key of the sorted dict has the smallest value;
     # next(iter(...)) replaces the Python-2-only
     # ``for ... in od.iterkeys(): break`` idiom.
     return next(iter(od))
コード例 #38
0
def check(new_data):
    """Diff *new_data* against the ``key#value`` pairs stored in ``file``.

    Returns an OrderedDict of the *new_data* entries whose keys are absent
    from the file; when any are found, the file is rewritten with the full
    contents of *new_data*.
    """
    file_data = OrderedDict()
    add_items = OrderedDict()

    # Resolve the data file relative to this module's directory.
    BASE_DIR = os.path.dirname(__file__)
    file_path = os.path.join(BASE_DIR, 'file')

    with open(file_path, 'r') as f:
        lines = (line.strip() for line in f)
        for line in lines:
            if len(line) > 0:
                # Each non-empty line is ``key#value``.
                key = line.split('#')[0]
                value = line.split('#')[1]
                file_data[key] = value

    # .items() and ``key in file_data`` replace the Python-2-only
    # iteritems()/iterkeys(); the ``in`` test is also an O(1) hash lookup.
    for key, value in new_data.items():
        if key not in file_data:
            add_items[key] = value

    if len(add_items) > 0:
            with open(file_path, 'w') as f:
                for key, value in new_data.items():
                    # NOTE(review): value.encode('utf-8') produces bytes,
                    # which a text-mode file rejects on Python 3 — this
                    # assumes Python 2 unicode values; confirm when porting.
                    f.write(key + '#' + value.encode('utf-8'))
                    f.write('\n')
    return add_items
コード例 #39
0
class BoardConfig(object):
    """Board configuration component"""
    def __init__(self, board):
        """Initialization

        In:
            - ``board`` -- the board object will want to configure
        """
        self.board = board
        # Menu entries keep their declaration order (OrderedDict); the
        # first one inserted becomes the default selection below.
        self.menu = OrderedDict()
        self.menu['profile'] = MenuEntry(_(u'Profile'), 'icon-profile',
                                         BoardProfile)
        if security.has_permissions('manage', self.board):
            self.menu['labels'] = MenuEntry(_(u'Card labels'),
                                            'icon-price-tag', BoardLabels)
            self.menu['weights'] = MenuEntry(_(u'Card weights'), 'icon-meter',
                                             BoardWeights)
            self.menu['background'] = MenuEntry(_(u'Background'),
                                                'icon-paint-format',
                                                BoardBackground)
        self.selected = None
        self.content = component.Component(None)
        # next(iter(...)) selects the first menu key; it replaces the
        # Python-2-only iterkeys().next() and works on Python 2 and 3.
        self.select(next(iter(self.menu)))

    def select(self, v):
        """Select a configuration menu item

        In:
            - ``v`` -- the id_ of the menu item we want to show
        """
        self.selected = v
        self.content.becomes(self.menu[v].content(self.board))
コード例 #40
0
ファイル: tunit_cache.py プロジェクト: xizhuanhe/yavide
class FifoCache():
    """Bounded mapping that evicts its oldest entry once full (FIFO).

    Re-assigning an existing key moves it to the newest position.
    """

    def __init__(self, max_capacity):
        self.max_capacity = max_capacity
        self.store = OrderedDict()  # insertion-ordered key -> value

    def iterkeys(self):
        # iter(...) over the dict / its views replaces the Python-2-only
        # OrderedDict.iterkeys()/itervalues()/iteritems() delegates while
        # still returning iterators on both Python 2 and 3.
        return iter(self.store)

    def itervalues(self):
        return iter(self.store.values())

    def iteritems(self):
        return iter(self.store.items())

    def __getitem__(self, key):
        return self.store[key]

    def __setitem__(self, key, value):
        if key in self.store:
            # Delete first so re-insertion moves the key to the back.
            del self.store[key]
        else:
            if len(self.store) == self.max_capacity:
                self.store.popitem(
                    last=False)  # last=False --> FIFO, last=True --> LIFO
        self.store[key] = value

    def __delitem__(self, key):
        del self.store[key]

    def __iter__(self):
        return self.store.__iter__()

    def __len__(self):
        return len(self.store)
コード例 #41
0
def logging(results, f=None):
    """ logging the monitoring result for each interval

    Aggregates per-process *results* into fixed CPU-*/Mem-* buckets and
    appends one CSV-ish line to *f* (header written first), or prints the
    data when no file is given.
    """
    #TODO find the way to organize the result and print 
    time.sleep(LOGGING_INTERVAL)

    #TODO this is the easy way to organize data, buy not neat
    compact_results = OrderedDict()
    for name in CAPTURE_NAME:
        compact_results['CPU-' + name] = 0
    for name in CAPTURE_NAME:
        compact_results['Mem-' + name] = 0

    # .items() / direct dict iteration replace the Python-2-only
    # iteritems()/iterkeys(); print() works on Python 2 and 3.
    for k, v in results.items():
        for key in compact_results:
            if key in k:
                compact_results[key] += v
                break
        else:
            # for/else: no bucket name matched this result key.
            print('some problems')

    # printing to a file if applicable.  list(...) is required because
    # zip() returns a lazy iterator on Python 3 and list_a is indexed.
    list_a = list(zip(*compact_results.items()))
    header = str(list_a[0]).strip("()").replace("'", "") + "\n"
    content = str(list_a[1]).strip("()").replace("'", "") + "\n"
    if f:
        # Only write the header when the file is still empty.
        if f.tell() == 0:
            f.write(header)
        f.write(content)
    else:
        print(list_a)
コード例 #42
0
 def get_image_urls(self, title, author, log, abort, timeout):
     """Return cover-image URLs from a Google Images search for title/author.

     Deduplicated in result order via an OrderedDict keyed by URL.
     """
     from calibre.utils.cleantext import clean_ascii_chars
     from urllib import urlencode
     import html5lib
     import json
     from collections import OrderedDict
     ans = OrderedDict()
     br = self.browser
     q = urlencode({
         'as_q': ('%s %s' % (title, author)).encode('utf-8')
     }).decode('utf-8')
     # self.prefs['size'] selects the minimum image size filter.
     sz = self.prefs['size']
     if sz == 'any':
         sz = ''
     elif sz == 'l':
         sz = 'isz:l,'
     else:
         sz = 'isz:lt,islt:%s,' % sz
     # See https://www.google.com/advanced_image_search to understand this
     # URL scheme
     url = 'https://www.google.com/search?as_st=y&tbm=isch&{}&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs={}iar:t,ift:jpg'.format(
         q, sz)
     log('Search URL: ' + url)
     raw = br.open(url).read().decode('utf-8')
     root = html5lib.parse(clean_ascii_chars(raw),
                           treebuilder='lxml',
                           namespaceHTMLElements=False)
     # Each result div carries JSON metadata; 'ou' is the original URL.
     for div in root.xpath('//div[@class="rg_meta"]'):
         try:
             data = json.loads(div.text)
         except Exception:
             continue
         if 'ou' in data:
             ans[data['ou']] = True
     # NOTE(review): iterkeys() is Python-2-only.
     return list(ans.iterkeys())
コード例 #43
0
ファイル: histogram_tools.py プロジェクト: of99/gecko-dev
def from_files(filenames):
    """Return an iterator that provides a sequence of Histograms for
the histograms defined in filenames.

    Raises DefinitionException on duplicate histogram names or when the
    USE_COUNTER_* histograms are not a contiguous block.
    """
    all_histograms = OrderedDict()
    for filename in filenames:
        parser = FILENAME_PARSERS[os.path.basename(filename)]
        histograms = parser(filename)

        # OrderedDicts are important, because then the iteration order over
        # the parsed histograms is stable, which makes the insertion into
        # all_histograms stable, which makes ordering in generated files
        # stable, which makes builds more deterministic.
        if not isinstance(histograms, OrderedDict):
            # raise X(msg) is valid on Python 2 and 3; the original
            # ``raise X, msg`` form is a Python 3 syntax error.
            raise BaseException("histogram parser didn't provide an OrderedDict")

        for (name, definition) in histograms.items():
            # ``in`` replaces the removed-in-Python-3 has_key().
            if name in all_histograms:
                raise DefinitionException("duplicate histogram name %s" % name)
            all_histograms[name] = definition

    # We require that all USE_COUNTER_* histograms be defined in a contiguous
    # block.  A list comprehension replaces filter(): on Python 3 filter()
    # returns an iterator, which made the truthiness test below always true.
    use_counter_indices = [
        (i, name) for (i, name) in enumerate(all_histograms)
        if name.startswith("USE_COUNTER_")]
    if use_counter_indices:
        lower_bound = use_counter_indices[0][0]
        upper_bound = use_counter_indices[-1][0]
        n_counters = upper_bound - lower_bound + 1
        if n_counters != len(use_counter_indices):
            raise DefinitionException(
                "use counter histograms must be defined in a contiguous block")

    for (name, definition) in all_histograms.items():
        yield Histogram(name, definition)
コード例 #44
0
ファイル: OrderedDict.py プロジェクト: ivorblockley/cylc
 def iterkeys(self):
     """Include default keys

     Generator over the explicitly-set keys followed by any keys that
     exist only in the ``defaults_`` mapping.
     """
     # Explicit keys first, via the (Python 2) OrderedDict base class.
     for key in OrderedDict.iterkeys(self):
         yield key
     # Then default-only keys; ``defaults_`` may be absent (getattr with
     # [] fallback), and keys already yielded above are skipped.
     for key in getattr(self, 'defaults_', []):
         if not OrderedDict.__contains__(self, key):
             yield key
コード例 #45
0
ファイル: dependency.py プロジェクト: odooku/odooku-data
    def sort(self):
        """Yield nodes in dependency order (dependencies before dependants).

        Generator performing a topological sort over self (a mapping of
        node -> iterable of dependency nodes), preferring the original
        insertion order.  Raises DependencyError on a cycle.
        """

        # Snapshot node -> list of dependencies.
        # NOTE(review): iteritems()/iterkeys() are Python-2-only.
        incomming = OrderedDict([(node, list(edges))
                                 for node, edges in self.iteritems()])

        # Try to output nodes in initial order
        nodes = [node for node in incomming.iterkeys()]
        # Keep a stack in order to detect cyclic dependencies

        stack = []
        while nodes:
            # Get first node
            n = nodes[0]
            # See if this node has dependencies which haven't yet been
            # outputted.
            remaining = [
                node for node in reversed(incomming[n]) if node in nodes
            ]
            if remaining:
                # Revisiting a node already on the stack means a cycle.
                if n not in stack:
                    stack.append(n)
                else:
                    raise DependencyError(stack + [n])
                for m in remaining:
                    # Place dependency at front
                    nodes.remove(m)
                    nodes.insert(0, m)
            else:
                # No dependencies left, output
                yield nodes.pop(0)
コード例 #46
0
ファイル: layup.py プロジェクト: fzahle/fusedwind-dev
class Region(object):
    """ Holds a region's layers along the blade.
    
    :param layers: Dictionary of Layer3D objects
    :type layers: dict
    """
    def __init__(self):
        self.layers = OrderedDict()

    def add_layer(self, name):
        ''' Inserts a layer into layers dict.

        The stored key is *name* plus a two-digit duplicate counter,
        counting existing keys that contain *name* as a substring
        (so the first layer becomes e.g. ``glass00``).

        :param name: Name of the material
        :return: The layer added to the region
        '''
        dubl = 0
        # Iterate the dict directly (Python 2 and 3 compatible) instead
        # of the Python-2-only iterkeys().
        for existing in self.layers:
            if name in existing:
                dubl += 1

        lname = '%s%02d' % (name, dubl)

        layer = Layer()
        self.layers[lname] = layer
        return layer
コード例 #47
0
    def levelOrder(self, root):
        """
        Level-order (BFS) traversal, top level first.

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if root is None:
            return []
        from collections import deque
        from collections import OrderedDict
        # Carry the depth alongside each queued node instead of stamping
        # a ``level`` attribute onto the caller's tree nodes (the
        # original mutated the input tree).
        queue = deque([(root, 1)])
        levels = OrderedDict()  # depth -> node values in visit order
        while queue:
            node, depth = queue.popleft()
            if node.left is not None:
                queue.append((node.left, depth + 1))
            if node.right is not None:
                queue.append((node.right, depth + 1))
            levels.setdefault(depth, []).append(node.val)
        # Iterating the dict directly works on Python 2 and 3 (the
        # original iterkeys() is Python-2-only).
        return [levels[depth] for depth in levels]
コード例 #48
0
ファイル: google_images.py プロジェクト: Riva3000/calibre
 def get_image_urls(self, title, author, log, abort, timeout):
     """Return cover-image URLs from a Google Images search for title/author.

     Deduplicated in result order via an OrderedDict keyed by URL.
     """
     from calibre.utils.cleantext import clean_ascii_chars
     from urllib import urlencode
     import html5lib
     import json
     from collections import OrderedDict
     ans = OrderedDict()
     br = self.browser
     q = urlencode({'as_q': ('%s %s'%(title, author)).encode('utf-8')}).decode('utf-8')
     # self.prefs['size'] selects the minimum image size filter.
     sz = self.prefs['size']
     if sz == 'any':
         sz = ''
     elif sz == 'l':
         sz = 'isz:l,'
     else:
         sz = 'isz:lt,islt:%s,' % sz
     # See https://www.google.com/advanced_image_search to understand this
     # URL scheme
     url = 'https://www.google.com/search?as_st=y&tbm=isch&{}&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs={}iar:t,ift:jpg'.format(q, sz)
     log('Search URL: ' + url)
     raw = br.open(url).read().decode('utf-8')
     root = html5lib.parse(clean_ascii_chars(raw), treebuilder='lxml', namespaceHTMLElements=False)
     # Each result div carries JSON metadata; 'ou' is the original URL.
     for div in root.xpath('//div[@class="rg_meta"]'):
         try:
             data = json.loads(div.text)
         except Exception:
             continue
         if 'ou' in data:
             ans[data['ou']] = True
     # NOTE(review): iterkeys() is Python-2-only.
     return list(ans.iterkeys())
コード例 #49
0
ファイル: handlers.py プロジェクト: EvilDmitri/jasper-webapp
    def get(self):
        """Render the dashboard of monitored sites, mailing a summary of
        any price changes detected since the previous crawl."""
        if self.logged_in:
            sites = SitesModel.query().order().fetch()
            # One entry per configured URL; ' ' marks "no changes".
            changed_sites = OrderedDict([[x, ' '] for x in URLS])
            for site in sites:
                results = site.results
                lasts = results.split('/')
                if len(lasts) < 2:
                    # Only one result
                    continue

                last = lasts[-1].split('|')[0]  # [1] - timestamp
                last_result = self.get_data(last)

                # Walk backwards until a previous result with data is found.
                i = -2
                while True:
                    prev = lasts[i].split('|')[0]
                    prev_result = self.get_data(prev)
                    if prev_result:
                        break
                    i -= 1
                # Now we have IDs

                changes = self.compare_data(last_result, prev_result)
                if len(changes) > 0:
                    changed_sites[site.site_name] = changes

            # Mail results
            from mailer.mail_send import SendStatistics
            stat = False
            # .values() replaces the Python-2-only itervalues(); ``!=``
            # replaces ``is not``, which identity-compared a string
            # literal and only worked through CPython interning.
            for val in changed_sites.values():
                if val != ' ':
                    stat = True
                    break

            if stat:
                result = ''
                # Direct dict iteration replaces the Python-2-only iterkeys().
                for k in changed_sites:
                    changes = changed_sites[k]
                    changed_cost = ''
                    for change in changes:
                        change = ' '.join(get_data_from_html(change).split('/$'))
                        if change:
                            changed_cost = '; '.join([change, changed_cost])
                    if changed_cost:
                        line = ' '.join([k, changed_cost])
                        result = '\n'.join([result, line])

                stats = SendStatistics()
                stats.post(data=result)
            values = {'user': self.current_user,
                      'site_names': URLS,
                      'sites': changed_sites,
                      'site': ''
                      }
            # self.session.add_flash('Some message', level='error')
            self.render('index.html', values)
        else:
            self.redirect('/login')
コード例 #50
0
ファイル: reservations.py プロジェクト: NIIF/indico
    def get_with_data(*args, **kwargs):
        """Fetch reservations plus optional related data.

        *args may contain 'vc_equipment' and/or 'occurrences' to attach
        that data to each reservation.  Keyword options: filters, limit,
        offset, order, limit_per_room, occurs_on.  Returns a list of
        {'reservation': ..., [extra keys]} dicts in query order.
        """
        filters = kwargs.pop('filters', None)
        limit = kwargs.pop('limit', None)
        offset = kwargs.pop('offset', 0)
        order = kwargs.pop('order', Reservation.start_dt)
        limit_per_room = kwargs.pop('limit_per_room', False)
        occurs_on = kwargs.pop('occurs_on')
        if kwargs:
            raise ValueError('Unexpected kwargs: {}'.format(kwargs))

        query = Reservation.query.options(joinedload(Reservation.room))
        if filters:
            query = query.filter(*filters)
        if occurs_on:
            query = query.filter(
                Reservation.id.in_(db.session.query(ReservationOccurrence.reservation_id)
                                   .filter(ReservationOccurrence.date.in_(occurs_on),
                                           ReservationOccurrence.is_valid))
            )
        if limit_per_room and (limit or offset):
            query = limit_groups(query, Reservation, Reservation.room_id, order, limit, offset)

        query = query.order_by(order, Reservation.created_dt)

        if not limit_per_room:
            if limit:
                query = query.limit(limit)
            if offset:
                query = query.offset(offset)

        result = OrderedDict((r.id, {'reservation': r}) for r in query)

        # list(result) / .items() replace the Python-2-only
        # iterkeys()/iteritems() throughout.
        if 'vc_equipment' in args:
            vc_id_subquery = db.session.query(EquipmentType.id) \
                .correlate(Reservation) \
                .filter_by(name='Video conference') \
                .join(RoomEquipmentAssociation) \
                .filter(RoomEquipmentAssociation.c.room_id == Reservation.room_id) \
                .as_scalar()

            # noinspection PyTypeChecker
            vc_equipment_data = dict(db.session.query(Reservation.id, static_array.array_agg(EquipmentType.name))
                                     .join(ReservationEquipmentAssociation, EquipmentType)
                                     .filter(Reservation.id.in_(list(result)))
                                     .filter(EquipmentType.parent_id == vc_id_subquery)
                                     .group_by(Reservation.id))

            for id_, data in result.items():
                data['vc_equipment'] = vc_equipment_data.get(id_, ())

        if 'occurrences' in args:
            occurrence_data = OrderedMultiDict(db.session.query(ReservationOccurrence.reservation_id,
                                                                ReservationOccurrence)
                                               .filter(ReservationOccurrence.reservation_id.in_(list(result)))
                                               .order_by(ReservationOccurrence.start_dt))
            for id_, data in result.items():
                data['occurrences'] = occurrence_data.getlist(id_)

        # list(...) so callers get a list on Python 3 as they did on Python 2.
        return list(result.values())
コード例 #51
0
    def get_with_data(*args, **kwargs):
        """Fetch reservations plus optional related data.

        *args may contain 'vc_equipment' and/or 'occurrences' to attach
        that data to each reservation.  Keyword options: filters, limit,
        offset, order, limit_per_room, occurs_on.  Returns a list of
        {'reservation': ..., [extra keys]} dicts in query order.
        """
        filters = kwargs.pop('filters', None)
        limit = kwargs.pop('limit', None)
        offset = kwargs.pop('offset', 0)
        order = kwargs.pop('order', Reservation.start_dt)
        limit_per_room = kwargs.pop('limit_per_room', False)
        occurs_on = kwargs.pop('occurs_on')
        if kwargs:
            raise ValueError('Unexpected kwargs: {}'.format(kwargs))

        query = Reservation.query.options(joinedload(Reservation.room))
        if filters:
            query = query.filter(*filters)
        if occurs_on:
            query = query.filter(
                Reservation.id.in_(db.session.query(ReservationOccurrence.reservation_id)
                                   .filter(ReservationOccurrence.date.in_(occurs_on),
                                           ReservationOccurrence.is_valid))
            )
        if limit_per_room and (limit or offset):
            query = limit_groups(query, Reservation, Reservation.room_id, order, limit, offset)

        query = query.order_by(order, Reservation.created_dt)

        if not limit_per_room:
            if limit:
                query = query.limit(limit)
            if offset:
                query = query.offset(offset)

        result = OrderedDict((r.id, {'reservation': r}) for r in query)

        # list(result) / .items() replace the Python-2-only
        # iterkeys()/iteritems() throughout.
        if 'vc_equipment' in args:
            vc_id_subquery = db.session.query(EquipmentType.id) \
                .correlate(Reservation) \
                .filter_by(name='Video conference') \
                .join(RoomEquipmentAssociation) \
                .filter(RoomEquipmentAssociation.c.room_id == Reservation.room_id) \
                .as_scalar()

            # noinspection PyTypeChecker
            vc_equipment_data = dict(db.session.query(Reservation.id, static_array.array_agg(EquipmentType.name))
                                     .join(ReservationEquipmentAssociation, EquipmentType)
                                     .filter(Reservation.id.in_(list(result)))
                                     .filter(EquipmentType.parent_id == vc_id_subquery)
                                     .group_by(Reservation.id))

            for id_, data in result.items():
                data['vc_equipment'] = vc_equipment_data.get(id_, ())

        if 'occurrences' in args:
            occurrence_data = OrderedMultiDict(db.session.query(ReservationOccurrence.reservation_id,
                                                                ReservationOccurrence)
                                               .filter(ReservationOccurrence.reservation_id.in_(list(result)))
                                               .order_by(ReservationOccurrence.start_dt))
            for id_, data in result.items():
                data['occurrences'] = occurrence_data.getlist(id_)

        # list(...) so callers get a list on Python 3 as they did on Python 2.
        return list(result.values())
コード例 #52
0
class Allergy():
    """Minimum set cover via backtracking: choose the fewest foods so that
    every person can eat at least one chosen food.

    NOTE(review): relies on a module-level `rl()` helper -- presumably it
    reads one line from stdin; confirm against the rest of the file.
    """

    # Sentinel "infinity" for the best (minimum) food count found so far.
    best = 987654321

    def __init__(self):
        # person name -> list of food indices that person can eat
        self.dictionary = defaultdict(list)
        # cleared in clear() but otherwise unused in this class
        self.eater = {}

    def clear(self):
        # Reset all per-test-case state. Assigning self.best creates an
        # instance attribute shadowing the class attribute.
        self.dictionary.clear()
        self.eater.clear()
        self.best = 987654321

    def start(self):
        """Read test cases from input and print the answer for each."""
        testCase = int(rl())
        for tc in xrange(testCase):
            people_num, food_num = map(int, rl().strip().split())
            people = rl().strip().split()
            for food_index in xrange(food_num):
                # First token is a count; the remaining tokens are the
                # people who can eat this food.
                can_eat_people = rl().split()
                self.set_food_list(food_index, can_eat_people[1:])
            # Order people by how few foods they can eat, so the search
            # branches on the most constrained person first.
            self.dictionary = OrderedDict(
                sorted(self.dictionary.items(), key=lambda x: len(x[1])))

            self.check(0)
            print self.best
            self.clear()

    def set_food_list(self, food_idx, people):
        # Record food_idx as edible for each listed person.
        for person in people:
            self.dictionary.setdefault(person, []).append(food_idx)

    def check(self, cnt):
        """Backtracking search; cnt = number of foods chosen so far."""
        # Prune: this branch cannot beat the best complete solution.
        if cnt >= self.best: return

        # Every person is covered -- record the new best and stop.
        if not bool(self.dictionary):
            self.best = cnt
            return

        # Most constrained remaining person (dict is sorted by list length).
        key = next(self.dictionary.iterkeys())

        # Try each food this person can eat.
        for food_idx in self.dictionary[key]:
            # Everyone who can eat food_idx. NOTE: in Python 2 the list
            # comprehension leaks and rebinds `key`; the outer for-loop is
            # unaffected because it already holds the original list.
            back = OrderedDict([(key, values)
                                for key, values in self.dictionary.items()
                                if food_idx in values])
            for key in back.keys():
                self.dictionary.pop(key)
            cnt += 1
            self.check(cnt)
            cnt -= 1
            # Restore the removed people before trying the next food.
            self.dictionary.update(back)
コード例 #53
0
ファイル: unpack.py プロジェクト: lsq/grub2-filemanager
class Struct(object):
    """Ordered collection of named fields, each paired with a formatter.

    Field values live as plain attributes; ``self.fields`` maps each
    field name to the callable used to render it in ``__repr__``.
    """

    def __init__(self):
        # Insertion order of this dict defines the field order.
        self.fields = OrderedDict()

    @classmethod
    def unpack(cls, u):
        """Build an instance by consuming field specs from the unpacker *u*."""
        inst = cls()
        for spec in cls._unpack(u):
            inst.add_field(*spec)
        return inst

    def add_field(self, name, value, fmt=None):
        """Attach *value* as attribute *name* and remember its formatter.

        *fmt* may be None (pick a default by type), a format string, or a
        callable taking the value and returning a string.
        """
        if hasattr(self, name):
            raise StructError("Internal error: Duplicate Struct field name {}".format(name))
        if fmt is None:
            # Integers (excluding bools) default to hex; everything else to repr.
            is_int = isinstance(value, (int, long)) and not isinstance(value, bool)
            fmt = "{:#x}".format if is_int else "{!r}".format
        elif isinstance(fmt, str):
            fmt = fmt.format
        elif not callable(fmt):
            raise StructError("Internal error: Expected a format string or callable, but got: {}".format(fmt))
        setattr(self, name, value)
        self.fields[name] = fmt

    def format_field(self, name):
        """Render field *name* with its registered formatter."""
        formatter = self.fields[name]
        return formatter(getattr(self, name))

    def __repr__(self):
        rendered = ("{}={}".format(name, self.format_field(name))
                    for name in self.fields.iterkeys())
        return "{}({})".format(self.__class__.__name__, ", ".join(rendered))

    def __iter__(self):
        # Yield field values in declaration order.
        for name in self.fields.iterkeys():
            yield getattr(self, name)

    def __eq__(self, other):
        # Equal only for the exact same class with identical names and values.
        if type(self) is not type(other):
            return NotImplemented
        return (self.fields.keys() == other.fields.keys() and
                all(getattr(self, name) == getattr(other, name)
                    for name in self.fields.iterkeys()))

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(tuple((name, getattr(self, name))
                          for name in self.fields.iterkeys()))
コード例 #54
0
def from_files(filenames, strict_type_checks=True):
    """Yield a Histogram for each histogram defined across *filenames*.

    Definitions are accumulated in an OrderedDict so the iteration order
    (and therefore any generated output) is deterministic.
    """
    if strict_type_checks:
        load_allowlist()

    all_histograms = OrderedDict()
    for filename in filenames:
        # Pick the first registered parser that recognizes this basename.
        chosen_parser = None
        for candidate in FILENAME_PARSERS:
            chosen_parser = candidate(os.path.basename(filename))
            if chosen_parser is not None:
                break

        if chosen_parser is None:
            ParserError("Don't know how to parse %s." % filename).handle_now()

        parsed = chosen_parser(filename, strict_type_checks)

        # A stable parse order keeps insertion into all_histograms stable,
        # which keeps generated files stable and builds deterministic.
        if not isinstance(parsed, OrderedDict):
            ParserError("Histogram parser did not provide an OrderedDict.").handle_now()

        for (name, definition) in parsed.iteritems():
            if name in all_histograms:
                ParserError('Duplicate histogram name "%s".' % name).handle_later()
            all_histograms[name] = definition

    # All USE_COUNTER2_* histograms must occupy one contiguous index range.
    use_counter_indices = [pair for pair in enumerate(all_histograms.iterkeys())
                           if pair[1].startswith("USE_COUNTER2_")]
    if use_counter_indices:
        first_index = use_counter_indices[0][0]
        last_index = use_counter_indices[-1][0]
        if last_index - first_index + 1 != len(use_counter_indices):
            ParserError("Use counter histograms must be defined in a contiguous block."
                        ).handle_later()

    # Histograms removed from Histograms.json etc. must also be removed
    # from the allowlists; report any stale entries.
    if allowlists is not None:
        every_entry = itertools.chain.from_iterable(allowlists.itervalues())
        orphaned = set(every_entry) - set(all_histograms.keys())
        if orphaned:
            msg = ('The following entries are orphaned and should be removed from '
                   'histogram-allowlists.json:\n%s')
            ParserError(msg % (', '.join(sorted(orphaned)))).handle_later()

    for (name, definition) in all_histograms.iteritems():
        yield Histogram(name, definition, strict_type_checks=strict_type_checks)
コード例 #55
0
ファイル: dataset.py プロジェクト: johndpope/sempar-codgen
class Vocab(object):
    """Token-to-id vocabulary with reserved special symbols.

    Ids are assigned by insertion order; '<pad>', '<unk>' and '<eos>'
    always occupy ids 0, 1 and 2.
    """

    def __init__(self):
        self.token_id_map = OrderedDict()
        # Reserve the first three ids for the special symbols.
        for special in ('<pad>', '<unk>', '<eos>'):
            self.insert_token(special)

    @property
    def unk(self):
        """Id used for out-of-vocabulary tokens."""
        return self.token_id_map['<unk>']

    @property
    def eos(self):
        """Id of the end-of-sequence marker."""
        return self.token_id_map['<eos>']

    def __getitem__(self, item):
        # Unknown tokens map to <unk>.
        try:
            return self.token_id_map[item]
        except KeyError:
            logging.debug('encounter one unknown word [%s]' % item)
            return self.token_id_map['<unk>']

    def __contains__(self, item):
        return item in self.token_id_map

    @property
    def size(self):
        """Number of distinct tokens, including the reserved ones."""
        return len(self.token_id_map)

    def __setitem__(self, key, value):
        self.token_id_map[key] = value

    def __len__(self):
        return len(self.token_id_map)

    def __iter__(self):
        return self.token_id_map.iterkeys()

    def iteritems(self):
        return self.token_id_map.iteritems()

    def complete(self):
        # Build the reverse id -> token lookup; call once insertion is done.
        self.id_token_map = dict(
            (v, k) for (k, v) in self.token_id_map.iteritems())

    def get_token(self, token_id):
        """Reverse lookup; requires complete() to have been called."""
        return self.id_token_map[token_id]

    def insert_token(self, token):
        """Add *token* if new; return its id either way."""
        if token not in self.token_id_map:
            self.token_id_map[token] = len(self.token_id_map)
        return self.token_id_map[token]
def sieve(n):
    """Return a list of all primes <= n, in increasing order.

    Sieve of Eratosthenes over an OrderedDict keyed 2..n whose values mean
    "still considered prime". Returns [] for n < 2.
    """
    nums = OrderedDict((i, True) for i in range(2, n + 1))

    for i in nums:
        # Every composite <= n has a prime factor <= sqrt(n), so once
        # i*i exceeds n the remaining True entries are all prime.
        if i * i > n:
            break
        if nums[i]:
            # Start at i*i: smaller multiples of i were already struck
            # out by smaller prime factors.
            for j in range(i * i, n + 1, i):
                nums[j] = False

    return [key for key, val in nums.items() if val]