Example #1
def downloadCourse(session, c, sem):
    global files
    global sections
    files = itertools.count()
    sections = itertools.count()
    name = c['key'].replace('/', '-') + u'/'
    path = root + sem.replace('/', '-') + u'/' + name
    path = urllib.url2pathname(path.encode('utf-8')).replace(':', '-').replace('"', '')
    if not os.path.exists(path):
        os.makedirs(path)
    print '       +--' + colors.BOLD + name + colors.ENDC
    r = session.get(c['url'])
    if(r.status_code == 200):
        soup = BeautifulSoup(r.text, 'html.parser')
        if not os.path.exists(path + '.dump'):
            os.makedirs(path + '.dump')

        dst = path + '.dump/' + c['key'].replace('/', '-').encode('utf-8') + '-' + c['type'] + '-' + str(datetime.date.today()) + '-full.html'
        dst = dst.replace(':', '-').replace('"', '')
        
        with open(dst, 'wb') as f:
            f.write(soup.encode('utf-8'))
        for s in soup.find_all(class_='section main clearfix'):
            downloadSection(session, s, path)
        #print 'Saved ' + str(files.next()) + ' Files in ' + str(sections.next()) + ' Sections'
    else:
        print 'ERROR: ' + str(r.status_code) + ' ' + r.reason
        sys.exit()
Example #2
    def __init__(self, graph=None, encoding='utf-8', prettyprint=True,
                 version='1.1draft'):
        try:
            import xml.etree.ElementTree
        except ImportError:
            raise ImportError('GEXF writer requires '
                              'xml.etree.ElementTree')
        self.prettyprint = prettyprint
        self.encoding = encoding
        self.set_version(version)
        self.xml = Element('gexf',
                           {'xmlns': self.NS_GEXF,
                            'xmlns:xsi': self.NS_XSI,
                            'xmlns:viz': self.NS_VIZ,
                            'xsi:schemaLocation': self.SCHEMALOCATION,
                            'version': self.VERSION})

        # counters for edge and attribute identifiers
        self.edge_id = itertools.count()
        self.attr_id = itertools.count()
        # default attributes are stored in dictionaries
        self.attr = {}
        self.attr['node'] = {}
        self.attr['edge'] = {}
        self.attr['node']['dynamic'] = {}
        self.attr['node']['static'] = {}
        self.attr['edge']['dynamic'] = {}
        self.attr['edge']['static'] = {}

        if graph is not None:
            self.add_graph(graph)
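
Note (not part of the snippet above, and not the actual GEXFWriter code): a minimal sketch of how counters like edge_id are typically consumed; each next() call hands out the next integer id.

import itertools

edge_id = itertools.count()  # 0, 1, 2, ...
for source, target in [('a', 'b'), ('b', 'c'), ('a', 'c')]:
    # Hypothetical illustration: assign a fresh id to every edge element written.
    edge = {'source': source, 'target': target, 'id': str(next(edge_id))}
    print(edge)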
Example #3
def test_infinite_iterator():
    expected = pa.array((0, 1, 2))
    arr1 = pa.array(itertools.count(0), size=3)
    assert arr1.equals(expected)
    # Same with explicit type
    arr1 = pa.array(itertools.count(0), type=pa.int64(), size=3)
    assert arr1.equals(expected)
Example #4
    def raw_fields(self):
        list_fields = [self.type, self.eid]

        for (i, gn, cn) in zip(count(), self.Gni, self.Cni):
            #print('i=%r gn=%r cn=%r' % (i, gn, cn))
            list_fields += [gn, cn]
            if i > 0 and i % 3 == 0:
                #print('adding blank')
                list_fields += [None]

        nSpaces = 8 - (len(list_fields) - 1) % 8  # puts UM/ALPHA onto next line
        if nSpaces < 8:
            list_fields += [None] * nSpaces

        # overly complicated loop to print the UM section
        list_fields += ['UM']
        j = 1
        for (i, gm, cm) in zip(count(), self.Gmi, self.Cmi):
            #print "j=%s gmi=%s cmi=%s" %(j,gm,cm)
            list_fields += [gm, cm]
            if i > 0 and j % 3 == 0:
                list_fields += [None, None]
                #print "---"
                j -= 3
            j += 1

        if self.alpha > 0.:  # handles default alpha value
            nSpaces = 8 - (len(list_fields) - 1) % 8  # puts ALPHA onto next line
            if nSpaces == 1:
                list_fields += [None, None]
            list_fields += [self.alpha]
        return list_fields
Example #5
 def __init__(self):
     self.groups = {}
     self.policies = {}
     self.webhooks = {}
     self.group_counter = itertools.count()
     self.policy_counter = itertools.count()
     self.webhook_counter = itertools.count()
Example #6
    def __init__(self, symbol_dirs=None):
        """ Constuct a gEDA parser object. Specifying a list of symbol
            directories in *symbol_dir* will provide a symbol file
            lookup in the specified directories. The lookup will be
            generated instantly examining each directory (if it exists).

            Kwargs:
                symbol_dirs (list): List of directories containing .sym
                    files
        """
        self.offset = shape.Point(40000, 40000)
        ## Initialise frame size with largest possible size
        self.frame_width = 0
        self.frame_height = 0

        # initialise PIN counter
        self.pin_counter = itertools.count(0)
        # initialise  PATH counter
        self.path_counter = itertools.count(0)

        ## add flag to allow for auto inclusion
        if symbol_dirs is None:
            symbol_dirs = []

        symbol_dirs = symbol_dirs + \
            [os.path.join(os.path.dirname(__file__), '..', 'library', 'geda')]

        self.known_symbols = find_symbols(symbol_dirs)

        self.design = None
        self.segments = None
        self.net_points = None
        self.net_names = None
        self.geda_zip = None
Example #7
def nsmallest(n, iterable, key=None):
    """Find the n smallest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key)[:n]
    """
    # Short-cut for n==1 is to use min() when len(iterable)>0
    if n == 1:
        it = iter(iterable)
        head = list(islice(it, 1))
        if not head:
            return []
        if key is None:
            return [min(chain(head, it))]
        return [min(chain(head, it), key=key)]

    # When n>=size, it's faster to use sorted()
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key)[:n]

    # When key is none, use simpler decoration
    if key is None:
        it = izip(iterable, count())                        # decorate
        result = _nsmallest(n, it)
        return map(itemgetter(0), result)                   # undecorate

    # General case, slowest method
    in1, in2 = tee(iterable)
    it = izip(imap(key, in1), count(), in2)                 # decorate
    result = _nsmallest(n, it)
    return map(itemgetter(2), result)                       # undecorate
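
Note (not part of the snippet): the count() values in the decorated tuples act as tie-breakers, so items with equal keys are ordered by position and the payload objects themselves are never compared. A minimal Python 3 sketch of the same decorate/undecorate idea (izip/imap above are plain zip/map in Python 3):

from heapq import nsmallest
from itertools import count

records = [{'name': 'b', 'price': 3}, {'name': 'a', 'price': 3}, {'name': 'c', 'price': 1}]
# Decorate with (key, position, record); ties on price fall back to the counter,
# so the dicts themselves never have to support '<'.
decorated = zip((r['price'] for r in records), count(), records)
cheapest = [rec for _, _, rec in nsmallest(2, decorated)]
print([r['name'] for r in cheapest])  # ['c', 'b']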
Example #8
    def test_deque(self):
        d = collections.deque()
        self.assertEqual(pprint.pformat(d, width=1), "deque([])")
        d = collections.deque(maxlen=7)
        self.assertEqual(pprint.pformat(d, width=1), "deque([], maxlen=7)")
        words = 'the quick brown fox jumped over a lazy dog'.split()
        d = collections.deque(zip(words, itertools.count()))
        self.assertEqual(pprint.pformat(d),
"""\
deque([('the', 0),
       ('quick', 1),
       ('brown', 2),
       ('fox', 3),
       ('jumped', 4),
       ('over', 5),
       ('a', 6),
       ('lazy', 7),
       ('dog', 8)])""")
        d = collections.deque(zip(words, itertools.count()), maxlen=7)
        self.assertEqual(pprint.pformat(d),
"""\
deque([('brown', 2),
       ('fox', 3),
       ('jumped', 4),
       ('over', 5),
       ('a', 6),
       ('lazy', 7),
       ('dog', 8)],
      maxlen=7)""")
Example #9
 def __init__(self, loadfromfile=None):
     self._counter = Counter()
     # Original source: (1.31) http://sahandsaba.com/thirty-python-language-features-and-tricks-you-may-not-know.html
     self._nameToNo = defaultdict(count().__next__)
     self.noToName = {}  # This is built only upon reading back from file
     if loadfromfile is not None:
         self._nameToNo.default_factory = count(start=self.load(loadfromfile)).__next__
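
Note (not from the project above): a standalone sketch of the defaultdict(count().__next__) idiom used here; every name seen for the first time is assigned the next integer id automatically.

from collections import defaultdict
from itertools import count

name_to_no = defaultdict(count().__next__)
for word in ['red', 'green', 'red', 'blue']:
    print(word, name_to_no[word])  # red 0, green 1, red 0, blue 2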
Example #10
 def __init__(self, host, port=80, timeout=None, maxsize=10):
     self.pool = Queue(maxsize)
     self.host = host
     self.port = int(port)
     self.timeout = timeout
     self.num_connections = count()
     self.num_requests = count()
Example #11
    def test_mapping_proxy(self):
        words = 'the quick brown fox jumped over a lazy dog'.split()
        d = dict(zip(words, itertools.count()))
        m = types.MappingProxyType(d)
        self.assertEqual(pprint.pformat(m), """\
mappingproxy({'a': 6,
              'brown': 2,
              'dog': 8,
              'fox': 3,
              'jumped': 4,
              'lazy': 7,
              'over': 5,
              'quick': 1,
              'the': 0})""")
        d = collections.OrderedDict(zip(words, itertools.count()))
        m = types.MappingProxyType(d)
        self.assertEqual(pprint.pformat(m), """\
mappingproxy(OrderedDict([('the', 0),
                          ('quick', 1),
                          ('brown', 2),
                          ('fox', 3),
                          ('jumped', 4),
                          ('over', 5),
                          ('a', 6),
                          ('lazy', 7),
                          ('dog', 8)]))""")
Example #12
def CalculateMatDis(tabintersectfilepos,tabintersectfileneg,loc,rep,intervalsize,binsize):
    path = os.getcwd()
    filepos = np.genfromtxt(open(tabintersectfilepos),dtype="str",usecols=(3,6),delimiter='\t')
    nelemts = 2*int(intervalsize)/int(binsize)
    nrows = filepos.shape[0]/nelemts
    ncols=(4*intervalsize)/binsize
    matrix = np.arange(nrows*ncols).reshape(nrows,ncols)
    hashevents ={}
    c=itr.count(ncols/2)
    r=itr.count(0)
    for i in xrange(filepos.shape[0]):
        ccurrent=c.next()
        if ccurrent == ncols/2:
            rcurrent=r.next()
            hashevents.update({filepos[i,0]:rcurrent})
            c=itr.count(0)
            ccurrent = c.next()
            matrix[rcurrent,ccurrent]=filepos[i,1]
        else:
            matrix[rcurrent,ccurrent]=filepos[i,1]
        print rcurrent,ccurrent
    print "ppos done"
    fileneg = np.genfromtxt(open(tabintersectfileneg),dtype="str",usecols=(3,6),delimiter='\t')
    ccurrent=-1*ncols/2
    for j in xrange(fileneg.shape[0]):
        ccurrent=ccurrent-1
        if ccurrent == (-1*ncols/2-1):
            ccurrent=-1
        matrix[int(hashevents[fileneg[j,0]]),ccurrent]=fileneg[j,1]
        print hashevents[fileneg[j,0]],ccurrent
    np.savetxt(path+"/"+loc+"/"+rep+"_"+str(intervalsize)+"_"+str(binsize)+"_distribution.mat",matrix, fmt="%0.1f", delimiter="\t")
    filehashout=open(path+"/"+loc+"/"+rep+"_"+str(intervalsize)+"_"+str(binsize)+"_matrix_row_annotation.tab","w")
    for j in hashevents.keys():
        filehashout.write(str(j)+"\t"+str(hashevents[j])+"\n")
    return None
Example #13
def compute():
	partitions = [1]
	for i in itertools.count(len(partitions)):
		# We calculate partitions[i] mod 10^6 using a formula based on generalized pentagonal numbers:
		#   partitions(i) =   partitions(i - pentagonal(1)) + partitions(i - pentagonal(-1))
		#                   - partitions(i - pentagonal(2)) - partitions(i - pentagonal(-2))
		#                   + partitions(i - pentagonal(3)) + partitions(i - pentagonal(-3))
		#                   - partitions(i - pentagonal(4)) - partitions(i - pentagonal(-4))
		#                   + ...,
		#   where pentagonal(j) = (3*n^2 - n) / 2, and
		#   we stop the sum when i - pentagonal(+/-j) < 0.
		# Note that for j > 0, pentagonal(j) < pentagonal(-j) < pentagonal(j+1).
		# 
		# (The formula is used without mathematical justification;
		# see https://en.wikipedia.org/wiki/Partition_(number_theory)#Generating_function .)
		item = 0
		for j in itertools.count(1):
			sign = -1 if j % 2 == 0 else +1
			index = (j * j * 3 - j) // 2
			if index > i:
				break
			item += partitions[i - index] * sign
			index += j  # index == (j * j * 3 + j) // 2
			if index > i:
				break
			item += partitions[i - index] * sign
			item %= MODULUS
		
		# Check or memoize the number
		if item == 0:
			return str(i)
		partitions.append(item)
Example #14
    def initBroker(self):

        # tracking Referenceables
        # sending side uses these
        self.nextCLID = count(1).next # 0 is for the broker
        self.myReferenceByPUID = {} # maps ref.processUniqueID to a tracker
        self.myReferenceByCLID = {} # maps CLID to a tracker
        # receiving side uses these
        self.yourReferenceByCLID = {}
        self.yourReferenceByURL = {}

        # tracking Gifts
        self.nextGiftID = count(1).next
        self.myGifts = {} # maps (broker,clid) to (rref, giftID, count)
        self.myGiftsByGiftID = {} # maps giftID to (broker,clid)

        # remote calls
        # sending side uses these
        self.nextReqID = count(1).next # 0 means "we don't want a response"
        self.waitingForAnswers = {} # we wait for the other side to answer
        self.disconnectWatchers = []
        # receiving side uses these
        self.inboundDeliveryQueue = []
        self._waiting_for_call_to_be_ready = False
        self.activeLocalCalls = {} # the other side wants an answer from us
Example #15
    def _canopyOverlap(self,
                       tfidf_predicates,
                       record_pairs) :

        tfidf_fields = defaultdict(list)

        for threshold, field in tfidf_predicates :
            tfidf_fields[field].append(threshold)

        blocker = DedupeBlocker()
        blocker.tfidf_fields = tfidf_fields

        docs = list(set(itertools.chain(*record_pairs)))
        record_ids = dict(itertools.izip(docs, itertools.count()))
 
        for field in blocker.tfidf_fields :
            id_records = zip(itertools.count(), 
                             (record[field] for record in docs))

            self.stop_words[field] = stopWords(id_records)
            blocker.stop_words[field] = self.stop_words[field]
                                        
            # uniquify records

            blocker.tfIdfBlock(id_records, field)

        self._calculateOverlap(blocker, record_pairs, record_ids)
Example #16
def runAlgorithm(data, d, k):
    W = None

    alphas = 10.0 ** linspace(-4, 1, 6)
    #alphas = 10.0 ** array([-2,-1,0,-2,-3,-2])
    expNum = len(alphas)
    allStates = []
    for (alpha, i) in zip(alphas, count(1)):
        states = jointlyPenalizedMultiplePCA(data, d, alpha, W, k=k)
        allStates.append(states)
        weightVectors = [s.weights for s in states]
        W = array(weightVectors)

    figure()
    for (states, alpha, i) in zip(allStates, alphas, count(1)):
        subplot(expNum, 1, i)
        weightVectors = [s.weights for s in states]
        W = array(weightVectors)
        plot(W.T)
        title(r'Run with $\alpha $ = %f' % alpha)

    figure()
    for (states, alpha, i) in zip(allStates, alphas, count(1)):
        subplot(expNum, 1, i)
        lossVectors = [s.squaredLosses() for s in states]
        L = array(lossVectors)
        semilogy(L.T)
        title(r'Run with $\alpha $ = %f' % alpha)

    show()
Example #17
def nlargest(n, iterable, key = None):
    """Find the n largest elements in a dataset.
    
    Equivalent to:  sorted(iterable, key=key, reverse=True)[:n]
    """
    if n == 1:
        it = iter(iterable)
        head = list(islice(it, 1))
        if not head:
            return []
        if key is None:
            return [max(chain(head, it))]
        return [max(chain(head, it), key=key)]
    else:
        try:
            size = len(iterable)
        except (TypeError, AttributeError):
            pass
        else:
            if n >= size:
                return sorted(iterable, key=key, reverse=True)[:n]

        if key is None:
            it = izip(iterable, count(0, -1))
            result = _nlargest(n, it)
            return map(itemgetter(0), result)
        in1, in2 = tee(iterable)
        it = izip(imap(key, in1), count(0, -1), in2)
        result = _nlargest(n, it)
        return map(itemgetter(2), result)
Example #18
def main(limit_num, limit_denom):
	R = lambda d: (prime.phi(d), d - 1)
	def good(d):
		num, denom = R(d)
		return num * limit_denom < limit_num * denom

	d = 1

	for n in count(2):
		d *= n
		if good(d): break

	min_d = d
	kernel = d

	for m in range(n, 1, -1):
		kernel //= m
		print("kernel: %d" % kernel)
		for k in count(2):
			d = kernel * k
			if d >= min_d: break
			if good(d):
				min_d = d
				print(d)
				break
Example #19
    def __init__(self, queues, file_name):
        """Create a profiler for command *queues* and output file
        *file_name*.
        """
        Thread.__init__(self)
        self._events = Queue()
        self._event_next = itertools.count().next
        self._clqeue_next = itertools.count().next
        self._cldevice_next = itertools.count().next
        self._clqueues = {}             # {queue: id}
        self._cldevices = {}            # {device: id}
        self.daemon = True
        self.finish = False
        self._file_name = None
        self._profile_file = None
        self._file_name = file_name

        self._profile_file = open(file_name, "w")
        self._profile_file.write("# units\nns\n")
        self._write_time_shift(queues)

        self._profile_file.write("# " +
                                 Profiler.format_string.replace("%d", "%s")
                                 % ("event_id", "command_queue_id",
                                    "device_id", "state", "func_name",
                                    "time") + "\n")
Example #20
    def connect(self):
        if self._socket:
            raise common.AlreadyConnected("Client is already connected.")

        infos = socket.getaddrinfo(self.hostname, self.port, 0, 0, socket.SOL_TCP)
        (family,_,_,_, sockaddr) = infos[0]
        # Stage socket on a local variable first
        s = self.build_socket(family)
        if self.use_ssl:
            s = self.wrap_secure_socket(s, ssl.PROTOCOL_TLSv1_2)

        s.settimeout(self.connect_timeout)
        if self.socket_address:
            LOG.debug("Client local port address bound to " + self.socket_address)
            s.bind((self.socket_address, self.socket_port))
        # if connect fails, there is nothing to clean up
        s.connect(sockaddr) # use first
        s.setsockopt(ss.IPPROTO_TCP, ss.TCP_NODELAY, 1)

        # We are connected now, update attributes
        self._socket = s
        try:
            self._handshake()
            self._socket.settimeout(self.socket_timeout)

            self._sequence = itertools.count()
            self._batch_id = itertools.count()
            self._closed = False
        except:
            self._socket = None
            raise
Example #21
def blastall_v_regions(myFastq1,myFastq2,myRef,outputfile,eVal,blastallDir):
    fns={}
    chunk=10**4
    with open(myFastq1, 'r') as datafile1:
        groups = groupby(datafile1, key=lambda k, line=count(): next(line) // chunk)
        for k, group in groups:
            with tempfile.NamedTemporaryFile(delete=False,
                           dir=tempfile.mkdtemp(),prefix='{}_'.format(str(k))) as outfile:
                outfile.write(''.join(group))
                fns[k]=outfile.name   
            blastn_cline = blastallDir+"blastall -p blastn -o "+str(outfile.name)+".blast.out -i "+str(outfile.name)+" -d "+myRef+" -e "+str(eVal)+" -m 8 -b 1"    
            os.system(blastn_cline+" > /dev/null 2>&1")
            os.system("cat "+str(outfile.name)+".blast.out >> "+outputfile)
            os.remove(str(outfile.name)+".blast.out")
            os.remove(str(outfile.name))
            testvar=commands.getstatusoutput("dirname "+str(outfile.name))
            os.system("rm -r "+testvar[1])
    fns={}
    with open(myFastq2, 'r') as datafile2:
        groups = groupby(datafile2, key=lambda k, line=count(): next(line) // chunk)
        for k, group in groups:
            with tempfile.NamedTemporaryFile(delete=False,
                           dir=tempfile.mkdtemp(),prefix='{}_'.format(str(k))) as outfile:
                outfile.write(''.join(group))
                fns[k]=outfile.name   
            blastn_cline = blastallDir+"blastall -p blastn -o "+str(outfile.name)+".blast.out -i "+str(outfile.name)+" -d "+myRef+" -e "+str(eVal)+" -m 8 -b 1"    
            os.system(blastn_cline+" > /dev/null 2>&1")
            os.system("cat "+str(outfile.name)+".blast.out >> "+outputfile)
            os.remove(str(outfile.name)+".blast.out")
            os.remove(str(outfile.name))
            testvar=commands.getstatusoutput("dirname "+str(outfile.name))
            os.system("rm -r "+testvar[1])
Example #22
    def __init__(self, **kwargs):
        self._cp = cp.Cplex()

        # Set up output to go to logging streams
        log_stream = LoggerFile(logger, logging.DEBUG)
        warning_stream = LoggerFile(logger, logging.WARNING)
        error_stream = LoggerFile(logger, logging.ERROR)

        self._cp.set_log_stream(log_stream)
        self._cp.set_results_stream(log_stream)
        self._cp.set_warning_stream(warning_stream)
        self._cp.set_error_stream(error_stream)

        # Set feasibility tolerance. By default, we decrease it to 1e-9.
        feasibility_tolerance = kwargs.get('feasibility_tolerance', 1e-9)
        logger.info('Setting feasibility tolerance to {!r}'.format(
            feasibility_tolerance))
        self._cp.parameters.simplex.tolerances.feasibility.set(
            feasibility_tolerance)

        # Set number of threads
        if 'threads' in kwargs:
            logger.info('Setting threads to {!r}'.format(kwargs['threads']))
            self._cp.parameters.threads.set(kwargs['threads'])

        self._cp.parameters.emphasis.numerical.set(True)

        self._variables = {}
        self._var_names = (i for i in count(0))
        self._constr_names = ('c'+str(i) for i in count(1))

        # Keep track of the objective variables that are non-zero
        self._non_zero_objective = set()

        self._result = None
Example #23
    def initBroker(self):
        self.rootSlicer.broker = self
        self.rootUnslicer.broker = self

        # tracking Referenceables
        # sending side uses these
        self.nextCLID = count(1).next # 0 is for the broker
        self.myReferenceByPUID = {} # maps ref.processUniqueID to a tracker
        self.myReferenceByCLID = {} # maps CLID to a tracker
        # receiving side uses these
        self.yourReferenceByCLID = {}
        self.yourReferenceByURL = {}

        # tracking Gifts
        self.nextGiftID = count().next
        self.myGifts = {} # maps (broker,clid) to (rref, giftID, count)
        self.myGiftsByGiftID = {} # maps giftID to (broker,clid)

        # remote calls
        # sending side uses these
        self.nextReqID = count().next
        self.waitingForAnswers = {} # we wait for the other side to answer
        self.disconnectWatchers = []
        # receiving side uses these
        self.activeLocalCalls = {} # the other side wants an answer from us
Example #24
def get_linear_equations_solution_numerically(eqs, dt):
    # Otherwise assumes it is given in functional form
    n = len(eqs._diffeq_names) # number of state variables
    dynamicvars = eqs._diffeq_names
    # Calculate B
    AB = zeros((n, 1))
    d = dict.fromkeys(dynamicvars)
    for j in range(n):
        d[dynamicvars[j]] = 0. * eqs._units[dynamicvars[j]]
    for var, i in zip(dynamicvars, count()):
        AB[i] = -eqs.apply(var, d)
    # Calculate A
    M = zeros((n, n))
    for i in range(n):
        for j in range(n):
            d[dynamicvars[j]] = 0. * eqs._units[dynamicvars[j]]
        if isinstance(eqs._units[dynamicvars[i]], Quantity):
            d[dynamicvars[i]] = Quantity.with_dimensions(1., eqs._units[dynamicvars[i]].get_dimensions())
        else:
            d[dynamicvars[i]] = 1.
        for var, j in zip(dynamicvars, count()):
            M[j, i] = eqs.apply(var, d) + AB[j]
    #B=linalg.solve(M,AB)
    numeulersteps = 100
    deltat = dt / numeulersteps
    E = eye(n) + deltat * M
    C = eye(n)
    D = zeros((n, 1))
    for step in xrange(numeulersteps):
        C, D = dot(E, C), dot(E, D) - AB * deltat
    return C, D
Example #25
def targz_dir(source_dir, target_archive, dereference_ext_symlinks=True):
  norm_source_dir = os.path.normpath(source_dir)
  total = itertools.count()
  symlinks = itertools.count()
  symlinks_followed = itertools.count()

  def tarinfo_filter(tarinfo):
    total.next()
    log.debug("adding: %s", tarinfo.__dict__)
    if tarinfo.linkname:
      symlinks.next()
      target = followlink(os.path.join(norm_source_dir, tarinfo.name))
      # If it's a relative symlink, and its target is inside our source dir, leave it as is.
      # If it's absolute or outside our source dir, resolve it or error.
      if os.path.isabs(target) \
              or not os.path.normpath(os.path.join(norm_source_dir, target)).startswith(norm_source_dir):
        if dereference_ext_symlinks:
          if not os.path.exists(target):
            raise ArchiveError("Symlink target not found: %r -> %r" % (tarinfo.name, target))
          tarinfo = tarinfo.tarfile.gettarinfo(target)
          symlinks_followed.next()
        else:
          raise ArchiveError("Absolute path in symlink target not supported: %r -> %r" % (tarinfo.name, target))
    return tarinfo

  with tarfile.open(target_archive, "w:gz") as tf:
    log.info("creating archive: %s -> %s", source_dir, target_archive)
    tf.add(source_dir, arcname=".", filter=tarinfo_filter)

  log.info("added %s items to archive (%s were symlinks, %s followed)",
           total.next(), symlinks.next(), symlinks_followed.next())
Example #26
def weightedLoad(infile,weightthresh=None):
	
	myAlign = simformat.read(infile)
	myHeader = myAlign.header
	
	if weightthresh is not None:
		try:
			weightsindex = myHeader.cutoffs.index(weightthresh)
		except:
			raise Exception("No such weighting cutoff, valid cutoffs are: " + repr(myHeader.cutoffs))
		simformat.annotateAlignment(myAlign)
		weights = S.array(simformat.getinvnormsim(myAlign,weightsindex))
	else:
		weights = S.ones(len(myAlign))
	
	N = len(myAlign)
	Width = len(myAlign[0])

	Matrix = sp.lil_matrix((N,Q*Width)) #LiL is better to populate, csc might be even better, but we'd have to write more complex code.
	
	for seqRec,one_weight,i in izip(myAlign,weights,count()):
		seq_as_ints = intConv(seqRec.seq.tostring())
		for residue,j in izip(seq_as_ints,count()):
			Matrix[i,j*Q + residue] = one_weight

	return Matrix.tocsc()
Example #27
def get_linear_equations(eqs):
    '''
    Returns the matrices M and B for the linear model dX/dt = M(X-B),
    where eqs is an Equations object. 
    '''
    # Otherwise assumes it is given in functional form
    n = len(eqs._diffeq_names) # number of state variables
    dynamicvars = eqs._diffeq_names
    # Calculate B
    AB = zeros((n, 1))
    d = dict.fromkeys(dynamicvars)
    for j in range(n):
        d[dynamicvars[j]] = 0. * eqs._units[dynamicvars[j]]
    for var, i in zip(dynamicvars, count()):
        AB[i] = -eqs.apply(var, d)
    # Calculate A
    M = zeros((n, n))
    for i in range(n):
        for j in range(n):
            d[dynamicvars[j]] = 0. * eqs._units[dynamicvars[j]]
        if isinstance(eqs._units[dynamicvars[i]], Quantity):
            d[dynamicvars[i]] = Quantity.with_dimensions(1., eqs._units[dynamicvars[i]].get_dimensions())
        else:
            d[dynamicvars[i]] = 1.
        for var, j in zip(dynamicvars, count()):
            M[j, i] = eqs.apply(var, d) + AB[j]
    #M-=eye(n)*1e-10 # quick dirty fix for problem of constant derivatives; dimension = Hz
    #B=linalg.lstsq(M,AB)[0] # We use this instead of solve in case M is degenerate
    B = linalg.solve(M, AB)  # linalg.lstsq (commented out above) could be used instead if M is degenerate
    return M, B
Example #28
def solution():
    target = 2*10**6

    # Upper bound on the width and/or length dimensions
    maximum = next(dropwhile(lambda n: rectangles(n, 1) < target, count(1)))

    candidates = {}

    for a in count(1):
        lower = 1
        upper = maximum

        while upper - lower > 1:
            b = (upper + lower) / 2

            if rectangles(a, b) > target:
                upper = b
            else:
                lower = b

        candidates[a, lower] = target - rectangles(a, lower)
        candidates[a, upper] = rectangles(a, upper) - target

        if lower <= a:
            return min((v, l*w) for (l, w), v in candidates.items())[1]
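
Note: rectangles() is not shown in the snippet; presumably it counts the axis-aligned sub-rectangles of an a-by-b grid, for which the standard formula is a(a+1)/2 * b(b+1)/2. A possible definition, stated here only as an assumption about the missing helper:

def rectangles(a, b):
    # Choose 2 of the a+1 vertical grid lines and 2 of the b+1 horizontal ones.
    return a * (a + 1) * b * (b + 1) // 4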
Example #29
def lcs(v, w):
    s = [[0] * (len(w) + 1) for _ in range(len(v) + 1)]

    backtrack = [[''] * (len(w) + 1) for _ in range(len(v) + 1)]
    for i in range(len(v) + 1):
        backtrack[i][0] = '|'
    for j in range(len(w) + 1):
        backtrack[0][j] = '-'
    backtrack[0][0] = '*'

    for i, vi in zip(count(), v):
        for j, wj in zip(count(), w):
            s[i + 1][j + 1] = max(s[i][j + 1], s[i + 1][j])
            if vi == wj:
                s[i + 1][j + 1] = max(s[i + 1][j + 1], s[i][j] + 1)
            if s[i + 1][j + 1] == s[i][j + 1]:
                backtrack[i + 1][j + 1] = '|'
            elif s[i + 1][j + 1] == s[i + 1][j]:
                backtrack[i + 1][j + 1] = '-'
            elif s[i + 1][j + 1] == s[i][j] + 1:
                backtrack[i + 1][j + 1] = '\\'

    #for l in s:
    #    print(l)
    return s[len(v)][len(w)], backtrack
Example #30
File: web.py Project: tkf/orgviz
def page_orgviz():
    eventfilters = [(i, name) for (i, (name, func)) in zip(itertools.count(), app.config["ORG_CAL_FILTERS"])]
    eventfilters_name_to_id = dict((name, i) for (i, name) in eventfilters)

    def filter_name_to_id(dct):
        if "filter" in dct:
            newdct = dct.copy()
            newdct["filter"] = [eventfilters_name_to_id[f] for f in dct["filter"]]
            return newdct
        else:
            return dct

    cal_perspectives = [(i, name) for (i, (name, dct)) in zip(itertools.count(), app.config["ORG_CAL_PERSPECTIVES"])]
    cal_perspectives_data = dict(
        (i, filter_name_to_id(dct))
        for ((i, name), (_, dct)) in zip(cal_perspectives, app.config["ORG_CAL_PERSPECTIVES"])
    )
    cal_eventclasses = zip(
        ["deadline", "scheduled", "closed", "clock"] + app.config["ORG_CAL_ADD_EVENTCLASSES"] + ["none"],
        itertools.chain("zxcvbnm", itertools.repeat("")),
    )
    return render_template(
        "orgviz.html",
        eventfilters=eventfilters,
        cal_perspectives=cal_perspectives,
        cal_perspectives_data=cal_perspectives_data,
        cal_eventclasses=cal_eventclasses,
        graphs=[
            ("clocked_par_day", "Clocked time per day"),
            ("done_par_day", "Tasks done par day"),
            ("tags_dist", "Top 10 tags in closed tasks"),
            ("clocked_and_closed", "Clocked and closed activities"),
        ],
    )
Example #31
 def __init__(self):
     self.twistedQueue = Queue()
     self.key = count()
     self.results = {}
Example #32
import requests, zipfile, io, os
from itertools import count

set_download_folder = '.sets'

def get_set_file(set_number :int) -> str:
    return f"datadragon-set{set_number}-en_us"

def get_set_link(set_number :int) -> str:
    return f"https://dd.b.pvp.net/{get_set_file(set_number)}.zip"

p = os.path.abspath(set_download_folder)
if not os.path.exists(p):
    os.mkdir(p)

for set_number in count(1):
    if (r := requests.get(get_set_link(set_number))).status_code != 200:
        break
    sp = os.path.join(p, get_set_file(set_number))
    if not os.path.exists(sp):
        os.mkdir(sp)
    z = zipfile.ZipFile(io.BytesIO(r.content))
    z.extractall(sp)
Example #33
def k_shortest_paths(G, source, target, k=1, weight='weight'):
    """Returns the k-shortest paths from source to target in a weighted graph G.

    Parameters
    ----------
    G : NetworkX graph

    source : node
       Starting node

    target : node
       Ending node
       
    k : integer, optional (default=1)
        The number of shortest paths to find

    weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight

    Returns
    -------
    lengths, paths : lists
       Returns a tuple with two lists.
       The first list stores the length of each k-shortest path.
       The second list stores each k-shortest path.  

    Raises
    ------
    NetworkXNoPath
       If no path exists between source and target.

    Examples
    --------
    >>> G=nx.complete_graph(5)    
    >>> print(k_shortest_paths(G, 0, 4, 4))
    ([1, 2, 2, 2], [[0, 4], [0, 1, 4], [0, 2, 4], [0, 3, 4]])

    Notes
    ------
    Edge weight attributes must be numerical and non-negative.
    Distances are calculated as sums of weighted edges traversed.

    """
    if source == target:
        return ([0], [[source]]) 
       
    length, path = nx.single_source_dijkstra(G, source, target, weight=weight)
    if target not in length:
        raise nx.NetworkXNoPath("node %s not reachable from %s" % (source, target))
        
    lengths = [length[target]]
    paths = [path[target]]
    c = count()        
    B = []                        
    G_original = G.copy()    
    
    for i in range(1, k):
        for j in range(len(paths[-1]) - 1):            
            spur_node = paths[-1][j]
            root_path = paths[-1][:j + 1]
            
            edges_removed = []
            for c_path in paths:
                if len(c_path) > j and root_path == c_path[:j + 1]:
                    u = c_path[j]
                    v = c_path[j + 1]
                    if G.has_edge(u, v):
                        edge_attr = G.edge[u][v]
                        G.remove_edge(u, v)
                        edges_removed.append((u, v, edge_attr))
            
            for n in range(len(root_path) - 1):
                node = root_path[n]
                # out-edges
                for u, v, edge_attr in G.edges_iter(node, data=True):
                    G.remove_edge(u, v)
                    edges_removed.append((u, v, edge_attr))
                
                if G.is_directed():
                    # in-edges
                    for u, v, edge_attr in G.in_edges_iter(node, data=True):
                        G.remove_edge(u, v)
                        edges_removed.append((u, v, edge_attr))
            
            spur_path_length, spur_path = nx.single_source_dijkstra(G, spur_node, target, weight=weight)            
            if target in spur_path and spur_path[target]:
                total_path = root_path[:-1] + spur_path[target]
                total_path_length = get_path_length(G_original, root_path, weight) + spur_path_length[target]                
                heappush(B, (total_path_length, next(c), total_path))
                
            for e in edges_removed:
                u, v, edge_attr = e
                G.add_edge(u, v, edge_attr)
                       
        if B:
            (l, _, p) = heappop(B)        
            lengths.append(l)
            paths.append(p)
        else:
            break
    
    return (lengths, paths)
Example #34
def astar(s, goal, choice):
    global explorednodes
    explored = []  # empty list to store the explored nodes
    heap = list()
    unique_heap = {}  # dictionary to keep track of nodes and their components
    counter = itertools.count()  # tie-breaker: if two nodes have the same cost, choose by insertion order
    if choice == '0':
        # initially, for the first node, the fscore is only the heuristic
        fscore = displacedtiles(s)
        print(fscore)
    else:
        fscore = manhattandistance(s)

    count = next(counter)
    components = (fscore, count, s)  # storing the fscore, count and state
    heappush(heap, components)  # heap used as a priority queue; push the first node
    unique_heap[s.state] = components  # store the first node and its components in 'unique_heap'

    while heap:  # until the heap is empty
        node = heappop(heap)  # pops the least-cost node, since the heap orders entries by cost
        explored.append(node[2].state)  # add to explored nodes
        check = check_if_goal(node[2].state, goal)  # check whether that element is the goal
        if check == 1:
            goalnode = node[2]
            return goalnode
        else:
            result = movement(node[2])  # explore its children
            for val in result:
                if choice == '0':
                    displacedt = displacedtiles(val)
                else:
                    displacedt = manhattandistance(val)
                fscore = displacedt + val.depth  # fscore = heuristic + distance from the start node
                count = next(counter)  # increase the counter
                components = (fscore, count, val)  # storing the fscore, count and state

                if val.state not in explored:
                    heappush(heap, components)  # if not explored, add to heap
                    explored.append(val.state)  # add to explored
                    unique_heap[val.state] = components  # add to 'unique_heap'

                # if the state is already in 'unique_heap' and the new fscore is lower,
                # replace the entry at that index in the heap and in 'unique_heap'
                elif val.state in unique_heap and fscore < unique_heap[val.state][2].fscore:
                    hindex = heap.index((unique_heap[val.state][2].fscore,
                                         count, unique_heap[val.state][2]))
                    heap[int(hindex)] = components  # replace at that index in the heap
                    unique_heap[val.state] = components  # replace at that index in 'unique_heap'
                    heapify(heap)  # restore the heap ordering

        explorednodes += 1  # increment the count of explored nodes
Example #35
 def pattern():
     for i in itertools.count():
         yield from range(1, 2**i, 2)
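
Note (not part of the snippet): the generator never terminates on its own, so a caller would take a finite prefix, for example with islice (pattern() is repeated here only to keep the sketch self-contained):

import itertools

def pattern():
    for i in itertools.count():
        yield from range(1, 2**i, 2)

print(list(itertools.islice(pattern(), 10)))  # [1, 1, 3, 1, 3, 5, 7, 1, 3, 5]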
Example #36
 def pattern():
     for i in itertools.count():
         yield from (n for n in range(1, 3**i + 1) if n < 3 ** (i - 1) or n % 2 != 0)
Example #37
 def rewind(self):
     self.position = count(0)
     return self
Example #38
 def __init__(self, base):
     super(IDSpace, self).__init__()
     if not base.endswith(_ID_SEPARATOR):
         base += _ID_SEPARATOR
     self._allocator = (base + str(i) for i in itertools.count(1))
Example #39
 def __init__(self):
     self._alarms = []
     self._watch_files = {}
     self._idle_handle = 0
     self._idle_callbacks = {}
     self._tie_break = count()
Example #40
    run_log = os.path.join(work_dir, 'run.log')

    # Add the path to the image generator module to sys.path
    sys.path.append(os.path.realpath(os.path.dirname(args[1])))
    # Remove a script extension from image generator module if any
    generator_name = os.path.splitext(os.path.basename(args[1]))[0]

    try:
        image_generator = __import__(generator_name)
    except ImportError as e:
        print >>sys.stderr, \
            "Error: The image generator '%s' cannot be imported.\n" \
            "Reason: %s" % (generator_name, e)
        sys.exit(1)

    # Enable core dumps
    resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
    # If a seed is specified, only one test will be executed.
    # Otherwise runner will terminate after a keyboard interruption
    start_time = int(time.time())
    test_id = count(1)
    while should_continue(duration, start_time):
        try:
            run_test(str(test_id.next()), seed, work_dir, run_log, cleanup,
                     log_all, command, config)
        except (KeyboardInterrupt, SystemExit):
            sys.exit(1)

        if seed is not None:
            break
Example #41
 def __init__(self, name):
     self._client = c_api.LocalComputationBuilder(name.encode('utf8'))
     self._parameter_numbering = itertools.count()
Example #42
 def iter_all_strings(self, sorted_by_value):
     for size in itertools.count(start=1):
         for s in itertools.product(self.salt_value, repeat=size):
             candidate = "".join(s)
             if not candidate[0].isdigit() and candidate not in sorted_by_value:
                 yield candidate
Example #43
 def __init__(self):
     self.counter = count(1)
Example #44
def learn(env,
          q_func,
          optimizer_spec,
          session,
          exploration=LinearSchedule(1000000, 0.1),
          stopping_criterion=None,
          replay_buffer_size=1000000,
          batch_size=32,
          gamma=0.99,
          learning_starts=50000,
          learning_freq=4,
          frame_history_len=4,
          target_update_freq=10000,
          grad_norm_clipping=10):
    """Run Deep Q-learning algorithm.

    You can specify your own convnet using q_func.

    All schedules are w.r.t. total number of steps taken in the environment.

    Parameters
    ----------
    env: gym.Env
        gym environment to train on.
    q_func: function
        Model to use for computing the q function. It should accept the
        following named arguments:
            img_in: tf.Tensor
                tensorflow tensor representing the input image
            num_actions: int
                number of actions
            scope: str
                scope in which all the model related variables
                should be created
            reuse: bool
                whether previously created variables should be reused.
    optimizer_spec: OptimizerSpec
        Specifying the constructor and kwargs, as well as learning rate schedule
        for the optimizer
    session: tf.Session
        tensorflow session to use.
    exploration: rl_algs.deepq.utils.schedules.Schedule
        schedule for probability of choosing a random action.
    stopping_criterion: (env, t) -> bool
        should return true when it's ok for the RL algorithm to stop.
        takes in env and the number of steps executed so far.
    replay_buffer_size: int
        How many memories to store in the replay buffer.
    batch_size: int
        How many transitions to sample each time experience is replayed.
    gamma: float
        Discount Factor
    learning_starts: int
        After how many environment steps to start replaying experiences
    learning_freq: int
        How many steps of environment to take between every experience replay
    frame_history_len: int
        How many past frames to include as input to the model.
    target_update_freq: int
        How many experience replay rounds (not steps!) to perform between
        each update to the target Q network
    grad_norm_clipping: float or None
        If not None gradients' norms are clipped to this value.
    """
    assert type(env.observation_space) == gym.spaces.Box
    assert type(env.action_space)      == gym.spaces.Discrete
    ###############
    # BUILD MODEL #
    ###############

    if len(env.observation_space.shape) == 1:
        # This means we are running on low-dimensional observations (e.g. RAM)
        input_shape = env.observation_space.shape
    else:
        img_h, img_w, img_c = env.observation_space.shape
        input_shape = (img_h, img_w, frame_history_len * img_c)
    num_actions = env.action_space.n

    # set up placeholders
    # placeholder for current observation (or state)
    obs_t_ph              = tf.placeholder(tf.uint8, [None] + list(input_shape))
    # placeholder for current action
    act_t_ph              = tf.placeholder(tf.int32,   [None])

    # placeholder for current reward
    rew_t_ph              = tf.placeholder(tf.float32, [None])
    # placeholder for next observation (or state)
    obs_tp1_ph            = tf.placeholder(tf.uint8, [None] + list(input_shape))
    # placeholder for end of episode mask
    # this value is 1 if the next state corresponds to the end of an episode,
    # in which case there is no Q-value at the next state; at the end of an
    # episode, only the current state reward contributes to the target, not the
    # next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)
    done_mask_ph          = tf.placeholder(tf.float32, [None])

    # casting to float on GPU ensures lower data transfer times.
    obs_t_float   = tf.cast(obs_t_ph,   tf.float32) / 255.0
    obs_tp1_float = tf.cast(obs_tp1_ph, tf.float32) / 255.0

    # Here, you should fill in your own code to compute the Bellman error. This requires
    # evaluating the current and next Q-values and constructing the corresponding error.
    # TensorFlow will differentiate this error for you, you just need to pass it to the
    # optimizer. See assignment text for details.
    # Your code should produce one scalar-valued tensor: total_error
    # This will be passed to the optimizer in the provided code below.
    # Your code should also produce two collections of variables:
    # q_func_vars
    # target_q_func_vars
    # These should hold all of the variables of the Q-function network and target network,
    # respectively. A convenient way to get these is to make use of TF's "scope" feature.
    # For example, you can create your Q-function network with the scope "q_func" like this:
    # <something> = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)
    # And then you can obtain the variables like this:
    # q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
    # Older versions of TensorFlow may require using "VARIABLES" instead of "GLOBAL_VARIABLES"
    ######
    
    # YOUR CODE HERE
    q_v = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)
    act_sy = tf.argmax(q_v, axis=1)
    one_hot_mask = tf.one_hot(act_t_ph, depth=num_actions, dtype=tf.float32)
    q_act_t = tf.reduce_sum(q_v * one_hot_mask, axis=1)

    use_double_q = True 
    if use_double_q is True:
        print("using double Q-learning")
        a_from_q = tf.argmax(q_func(obs_tp1_float, num_actions, scope="q_func", reuse=True), axis=1)
        q_v_tp1 = q_func(obs_tp1_float, num_actions, scope="target_q_func", reuse=False)

        mask = tf.one_hot(a_from_q, depth=num_actions, dtype=tf.float32)
        q_act_tp1 = tf.reduce_sum(q_v_tp1 * mask, axis=1)
    else:
        q_v_tp1 = q_func(obs_tp1_float, num_actions, scope="target_q_func", reuse=False)
        q_act_tp1 = tf.reduce_max(q_v_tp1, axis=1)

    target_q = rew_t_ph + gamma * (1 - done_mask_ph) * q_act_tp1

    target_q = tf.stop_gradient(target_q)
    total_error = 0.5 * tf.reduce_mean(tf.square(q_act_t - target_q), axis=0)

    q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
    target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')

    ######

    # construct optimization op (with gradient clipping)
    learning_rate = tf.placeholder(tf.float32, (), name="learning_rate")
    optimizer = optimizer_spec.constructor(learning_rate=learning_rate, **optimizer_spec.kwargs)
    train_fn = minimize_and_clip(optimizer, total_error,
                 var_list=q_func_vars, clip_val=grad_norm_clipping)

    # update_target_fn will be called periodically to copy Q network to target Q network
    update_target_fn = []
    for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
                               sorted(target_q_func_vars, key=lambda v: v.name)):
        update_target_fn.append(var_target.assign(var))
    update_target_fn = tf.group(*update_target_fn)

    # construct the replay buffer
    replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)

    ###############
    # RUN ENV     #
    ###############
    model_initialized = False
    num_param_updates = 0
    mean_episode_reward      = -float('nan')
    best_mean_episode_reward = -float('inf')
    last_obs = env.reset()
    LOG_EVERY_N_STEPS = 10000

    #session.run(tf.global_variables_initializer())
    random_sample_times = 0
    losses = []
    pre_ts = time.time()
    max_acts = [0] * num_actions
    rand_acts = [0] * num_actions

    writer = tf.summary.FileWriter('logs', session.graph)

    for t in itertools.count():
        ### 1. Check stopping criterion
        if stopping_criterion is not None and stopping_criterion(env, t):
            break

        ### 2. Step the env and store the transition
        # At this point, "last_obs" contains the latest observation that was
        # recorded from the simulator. Here, your code needs to store this
        # observation and its outcome (reward, next observation, etc.) into
        # the replay buffer while stepping the simulator forward one step.
        # At the end of this block of code, the simulator should have been
        # advanced one step, and the replay buffer should contain one more
        # transition.
        # Specifically, last_obs must point to the new latest observation.
        # Useful functions you'll need to call:
        # obs, reward, done, info = env.step(action)
        # this steps the environment forward one step
        # obs = env.reset()
        # this resets the environment if you reached an episode boundary.
        # Don't forget to call env.reset() to get a new observation if done
        # is true!!
        # Note that you cannot use "last_obs" directly as input
        # into your network, since it needs to be processed to include context
        # from previous frames. You should check out the replay buffer
        # implementation in dqn_utils.py to see what functionality the replay
        # buffer exposes. The replay buffer has a function called
        # encode_recent_observation that will take the latest observation
        # that you pushed into the buffer and compute the corresponding
        # input that should be given to a Q network by appending some
        # previous frames.
        # Don't forget to include epsilon greedy exploration!
        # And remember that the first time you enter this loop, the model
        # may not yet have been initialized (but of course, the first step
        # might as well be random, since you haven't trained your net...)

        #####
        
        # YOUR CODE HERE
        idx = replay_buffer.store_frame(last_obs.astype(np.uint8))

        epsilon = exploration.value(t)
        #if np.random.random_sample() <= optimizer_spec.kwargs['epsilon']:
        if not model_initialized or random.random() < epsilon:
            random_sample_times += 1
            action = env.action_space.sample()
            rand_acts[action] += 1
        else:
            obs_t = replay_buffer.encode_recent_observation()
            act = session.run([act_sy], feed_dict={obs_t_ph: obs_t[None, :]})
            action = act[0].flatten()[0]
            max_acts[action] += 1

        obs, reward, done, info = env.step(action)
        
        replay_buffer.store_effect(idx, action, reward, done)

        if done == True:
            obs = env.reset()
            done = False

        last_obs = obs

        #####

        # at this point, the environment should have been advanced one step (and
        # reset if done was true), and last_obs should point to the new latest
        # observation

        ### 3. Perform experience replay and train the network.
        # note that this is only done if the replay buffer contains enough samples
        # for us to learn something useful -- until then, the model will not be
        # initialized and random actions should be taken
        if (t > learning_starts and
                t % learning_freq == 0 and
                replay_buffer.can_sample(batch_size)):
            # Here, you should perform training. Training consists of four steps:
            # 3.a: use the replay buffer to sample a batch of transitions (see the
            # replay buffer code for function definition, each batch that you sample
            # should consist of current observations, current actions, rewards,
            # next observations, and done indicator).
            # 3.b: initialize the model if it has not been initialized yet; to do
            # that, call
            #    initialize_interdependent_variables(session, tf.global_variables(), {
            #        obs_t_ph: obs_t_batch,
            #        obs_tp1_ph: obs_tp1_batch,
            #    })
            # where obs_t_batch and obs_tp1_batch are the batches of observations at
            # the current and next time step. The boolean variable model_initialized
            # indicates whether or not the model has been initialized.
            # Remember that you have to update the target network too (see 3.d)!
            # 3.c: train the model. To do this, you'll need to use the train_fn and
            # total_error ops that were created earlier: total_error is what you
            # created to compute the total Bellman error in a batch, and train_fn
            # will actually perform a gradient step and update the network parameters
            # to reduce total_error. When calling session.run on these you'll need to
            # populate the following placeholders:
            # obs_t_ph
            # act_t_ph
            # rew_t_ph
            # obs_tp1_ph
            # done_mask_ph
            # (this is needed for computing total_error)
            # learning_rate -- you can get this from optimizer_spec.lr_schedule.value(t)
            # (this is needed by the optimizer to choose the learning rate)
            # 3.d: periodically update the target network by calling
            # session.run(update_target_fn)
            # you should update every target_update_freq steps, and you may find the
            # variable num_param_updates useful for this (it was initialized to 0)
            #####
            
            # YOUR CODE HERE
            # 3.a sample n data
            sampled = replay_buffer.sample(batch_size)
            obs_t_batch, act_t_batch, rew_t_batch, obs_tp1_batch, done_mask_batch = sampled

            # 3.b initialize model
            if not model_initialized:
                initialize_interdependent_variables(session, \
                                    tf.global_variables(), { \
                                        obs_t_ph: obs_t_batch, \
                                        obs_tp1_ph: obs_tp1_batch,\
                                    })
                session.run(update_target_fn)
                model_initialized = True

            # 3.c train the model
            lr = optimizer_spec.lr_schedule.value(t)
            feed_dict = {obs_t_ph: obs_t_batch, \
                         act_t_ph: act_t_batch, \
                         rew_t_ph: rew_t_batch, \
                         obs_tp1_ph: obs_tp1_batch, \
                         done_mask_ph: done_mask_batch, \
                         learning_rate: lr}

            loss, _ = session.run([total_error, train_fn], feed_dict=feed_dict)
            losses.append(loss)

            # 3.d update the target network
            if t % target_update_freq == 0:
                print('update target q_net at %d' % (num_param_updates))
                session.run(update_target_fn)
                num_param_updates += 1
            #####

        ### 4. Log progress
        episode_rewards = get_wrapper_by_name(env, "Monitor").get_episode_rewards()
        if len(episode_rewards) > 0:
            mean_episode_reward = np.mean(episode_rewards[-100:])
        if len(episode_rewards) > 100:
            best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward)
        if t % LOG_EVERY_N_STEPS == 0 and model_initialized:
            now = time.time()
            now_str = time.asctime(time.localtime(now))
            cost = int(now - pre_ts)
            pre_ts = now

            mean_loss = np.mean(np.array(losses))
            eps_num = len(episode_rewards)
            rd_act = random_sample_times
            max_act = t - random_sample_times
            print("== [%s][cost:%ds] timestep:%d, eps:%d, rand_act:%d, max_act:%d, mean_loss:%f, param_updates:%d, lr:%f" \
                    % (now_str, cost, t, eps_num, rd_act, max_act, mean_loss, num_param_updates, lr))
            print("mean_rew_100_eps:%f, best:%f, exploration:%f,real:%.5f" \
                    % (mean_episode_reward, best_mean_episode_reward,\
                      exploration.value(t), rd_act/t))
            print('action dist:', max_acts)
            print('rand action dist:', rand_acts)
            max_acts = [0] * num_actions
            rand_acts = [0] * num_actions

            sys.stdout.flush()

            losses = []
Example #45
class ObservationVectorizer(object):
    _ids = count(0)
    @property
    def knowledge(self):
        return self.__class__.knowledge
    @knowledge.setter
    def knowledge(self, player_knowledge):
        self.__class__.knowledge = player_knowledge

    def __init__(self, env):
        '''
        Encoding Order =
         HandEncoding
        +BoardEncoding
        +DiscardEncoding
        +LastActionEncoding
        +CardKnowledgeEncoding
        '''
        self.id = next(self._ids)
        self.env = env
        self.obs = None
        self.num_players = self.env.num_players
        self.num_colors = self.env.num_colors
        self.num_ranks = self.env.num_ranks
        self.hand_size = self.env.hand_size
        self.max_info_tokens = self.env.max_information_tokens
        self.max_life_tokens = self.env.max_life_tokens
        self.max_moves = self.env.max_moves
        self.bits_per_card = self.num_colors * self.num_ranks
        self.max_deck_size = 0
        self.variant = self.env.variant
        # start of the vectorized observation
        self.offset = None

        for color in range(self.num_colors):
            for rank in range(self.num_ranks):
                self.max_deck_size += self.env.num_cards(color, rank, self.variant)
        """ Bit lengths """
        # Compute total state length
        self.hands_bit_length = (self.num_players - 1) * self.hand_size * self.bits_per_card + self.num_players


        self.board_bit_length = self.max_deck_size - self.num_players * \
                                self.hand_size + self.num_colors * self.num_ranks \
                                + self.max_info_tokens + self.max_life_tokens


        self.discard_pile_bit_length = self.max_deck_size


        self.last_action_bit_length = self.num_players + 4 + self.num_players + \
                                      self.num_colors + self.num_ranks \
                                      + self.hand_size + self.hand_size + self.bits_per_card + 2

        self.card_knowledge_bit_length = self.num_players * self.hand_size * \
                                         (self.bits_per_card + self.num_colors + self.num_ranks)

        self.total_state_length = self.hands_bit_length + self.board_bit_length + self.discard_pile_bit_length \
                                  + self.last_action_bit_length + self.card_knowledge_bit_length
        self.obs_vec = np.zeros(self.total_state_length)
        if self.id == 0:

            self.player_knowledge = [HandKnowledge(
                self.hand_size, self.num_ranks, self.num_colors) for _ in range(self.num_players)
            ]

            self.knowledge = self.player_knowledge

        else:
            self.player_knowledge = self.knowledge

        self.last_player_action = None

    def get_vector_length(self):
        return self.total_state_length
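
    # For reference, a worked total (assuming a standard 2-player game with
    # 5 colors, 5 ranks, hand_size 5, a 50-card deck, 8 info and 3 life tokens):
    # hands 127 + board 76 + discards 50 + last action 55 + card knowledge 350
    # = 658 entries in the observation vector.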

    def vectorize_observation(self, obs):

        self.obs_vec = np.zeros(self.total_state_length)
        self.obs = obs

        if obs["last_moves"] != []:

            if obs["last_moves"][0].move().type() != DEAL:
                self.last_player_action = obs["last_moves"][0]

            elif len(obs["last_moves"]) >= 2:
                self.last_player_action = obs["last_moves"][1]

            else:
                self.last_player_action = None

        self.encode_hands(obs)
        #print("OFFSET END ENCODE HANDS", self.offset)
        offset_encode_hands = self.offset
        #print("SUM END ENCODE HANDS", sum(self.obs_vec[:self.offset]))

        self.encode_board(obs)
        #print("OFFSET END ENCODE BOARDS", self.offset)
        offset_encode_boards = self.offset
        #print("SUM END ENCODE BOARDS", sum(self.obs_vec[offset_encode_hands:self.offset]))
        self.encode_discards(obs)
        #print("OFFSET END ENCODE DISCARDS", self.offset)
        offset_encode_discards = self.offset
        #print("SUM END ENCODE DISCARDS", sum(self.obs_vec[offset_encode_boards:self.offset]))
        self.encode_last_action()
        #print("OFFSET END ENCODE LAST ACTION", self.offset)
        offset_encode_last_action = self.offset
        #print("SUM END ENCODE LAST_ACTION", sum(self.obs_vec[offset_encode_discards:self.offset]))
        self.encode_card_knowledge(obs)
        #print("OFFSET END ENCODE CARD KNOWLEDGE", self.offset)

        #print("SUM END ENCODE CARD_KNOWLEDGE", sum(self.obs_vec[offset_encode_last_action:self.offset]))

        self.knowledge = self.player_knowledge

        return self.obs_vec

    '''Encodes cards in all other players' hands (excluding our own unknown hand),
     and, for each player, whether the hand is missing a card (when the deck is empty).
     Each card in a hand is encoded with a one-hot representation using
     <num_colors> * <num_ranks> bits (25 bits in a standard game) per card.
     Returns the number of entries written to the encoding.'''
    def encode_hands(self, obs):
        self.offset = 0
        # don't use own hand
        hands = obs["observed_hands"]
        for player_hand in hands:
            if player_hand[0]["color"] is not None:
                num_cards = 0
                for card in player_hand:
                    rank = card["rank"]
                    color = color_char_to_idx(card["color"])
                    card_index = color * self.num_ranks + rank

                    self.obs_vec[self.offset + card_index] = 1
                    num_cards += 1
                    self.offset += self.bits_per_card

                '''
                A player's hand can have fewer cards than the initial hand size.
                Leave the bits for the absent cards empty (adjust the offset to skip
                bits for the missing cards).
                '''

                if num_cards < self.hand_size:
                    self.offset += (self.hand_size - num_cards) * self.bits_per_card

        # For each player, set a bit if their hand is missing a card
        i = 0
        for i, player_hand in enumerate(hands):
            if len(player_hand) < self.hand_size:
                self.obs_vec[self.offset + i] = 1
        self.offset += self.num_players


        assert self.offset - self.hands_bit_length == 0
        return True
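
    # Worked example (assuming the standard 5-color, 5-rank game): bits_per_card
    # is 25 and colors follow "RYGWB", so a Green (index 2) card of rank index 3
    # in another player's hand sets bit 2 * 5 + 3 = 13 of that card's 25-bit slot.
    # With 2 players and hand_size 5, hands_bit_length = 1 * 5 * 25 + 2 = 127.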
    '''
    Encode the board, including:
       - remaining deck size
         (max_deck_size - num_players * hand_size bits; thermometer)
       - state of the fireworks (<num_ranks> bits per color; one-hot)
       - information tokens remaining (max_information_tokens bits; thermometer)
       - life tokens remaining (max_life_tokens bits; thermometer)
     We note several features use a thermometer representation instead of one-hot.
     For example, life tokens could be: 000 (0), 100 (1), 110 (2), 111 (3).
     Returns the number of entries written to the encoding.
    '''
    def encode_board(self, obs):
        # encode the deck size:
        for i in range(obs["deck_size"]):
            self.obs_vec[self.offset + i] = 1
        self.offset += self.max_deck_size - self.hand_size * self.num_players

        # encode fireworks
        fireworks = obs["fireworks"]
        for c in range(len(fireworks)):
            color = color_idx_to_char(c)
            # print(fireworks[color])
            if fireworks[color] > 0:
                self.obs_vec[self.offset + fireworks[color] - 1] = 1
            self.offset += self.num_ranks

        # encode info tokens
        info_tokens = obs["information_tokens"]
        for t in range(info_tokens):
            self.obs_vec[self.offset + t] = 1
        self.offset += self.max_info_tokens

        # encode life tokens

        life_tokens = obs["life_tokens"]
        #print(f"BAD lifetokens = {life_tokens}")
        for l in range(life_tokens):
            self.obs_vec[self.offset + l] = 1
        self.offset += self.max_life_tokens

        assert self.offset - (self.hands_bit_length + self.board_bit_length) == 0
        return True
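
    # Worked example (assuming a standard 2-player game): the deck thermometer
    # spans max_deck_size - num_players * hand_size = 50 - 10 = 40 bits, so a
    # 30-card remaining deck sets the first 30 of them; the full board section is
    # 40 + 5 * 5 (fireworks) + 8 (info tokens) + 3 (life tokens) = 76 bits.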

    '''
    Encode the discard pile. (max_deck_size bits)
    Encoding is in color-major ordering, as in kColorStr ("RYGWB"), with each
    color and rank using a thermometer to represent the number of cards
    discarded. For example, in a standard game, there are 3 cards of lowest rank
    (1), 1 card of highest rank (5), 2 of all else. So each color would be
    ordered like so:

      LLL      H
      1100011101

    This means for this color:
      - 2 cards of the lowest rank have been discarded
      - none of the second lowest rank have been discarded
      - both of the third lowest rank have been discarded
      - one of the second highest rank has been discarded
      - the highest rank card has been discarded
    Returns the number of entries written to the encoding.
    '''

    def encode_discards(self, obs):
        discard_pile = obs['discard_pile']
        counts = np.zeros(self.num_colors * self.num_ranks)
        #print(f"GBAD discard_pile = {discard_pile}")
        #print(f"GBAD lifes = {obs['life_tokens']}")
        for card in discard_pile:
            color = color_char_to_idx(card["color"])
            rank = card["rank"]
            counts[color * self.num_ranks + rank] += 1

        for c in range(self.num_colors):
            for r in range(self.num_ranks):
                num_discarded = counts[c * self.num_ranks + r]
                for i in range(int(num_discarded)):
                    self.obs_vec[self.offset + i] = 1
                self.offset += self.env.num_cards(c, r, self.variant)

        assert self.offset - (self.hands_bit_length + self.board_bit_length + self.discard_pile_bit_length) == 0
        return True


    '''
    Encode the last player action (not chance's deal of cards). This encodes:
      - Acting player index, relative to ourself (<num_players> bits; one-hot)
      - The MoveType (4 bits; one-hot)
      - Target player index, relative to acting player, if a reveal move
        (<num_players> bits; one-hot)
      - Color revealed, if a reveal color move (<num_colors> bits; one-hot)
      - Rank revealed, if a reveal rank move (<num_ranks> bits; one-hot)
      - Reveal outcome (<hand_size> bits; each bit is 1 if the card was hinted at)
      - Position played/discarded (<hand_size> bits; one-hot)
      - Card played/discarded (<num_colors> * <num_ranks> bits; one-hot)
    Returns the number of entries written to the encoding.
    '''

    def encode_last_action(self):
        if self.last_player_action is None:
            self.offset += self.last_action_bit_length
        else:
            last_move_type = self.last_player_action.move().type()
            #print(f"player inside broken vec: {self.last_player_action.player()}")
            #print(f"action inside broken vec: "
            #      f"{self.last_player_action.player()},"
            #      f"{self.last_player_action.move().color()},"
            #      f"{self.last_player_action.move().rank()},")
            #print(f"COLORBAD: {self.last_player_action.move().color()}")
            #print(f"RANKBAD: {self.last_player_action.move().rank()}")
            '''
            player_id
            Note: no assertion here. At a terminal state, the last player could have
            been me (player id 0).
            '''

            self.obs_vec[self.offset + self.last_player_action.player()] = 1
            self.offset += self.num_players

            # encode move type
            if last_move_type == PLAY:
                self.obs_vec[self.offset] = 1
            elif last_move_type == DISCARD:
                self.obs_vec[self.offset + 1] = 1
            elif last_move_type == REVEAL_COLOR:
                self.obs_vec[self.offset + 2] = 1
            elif last_move_type == REVEAL_RANK:
                self.obs_vec[self.offset + 3] = 1
            else:
                print("ACTION UNKNOWN")
            self.offset += 4

            # encode target player (if hint action)
            if last_move_type == REVEAL_COLOR or last_move_type == REVEAL_RANK:
                observer_relative_target = (self.last_player_action.player() + self.last_player_action.move().target_offset()) % self.num_players

                self.obs_vec[self.offset + observer_relative_target] = 1

            self.offset += self.num_players

            # encode color (if hint action)
            if last_move_type == REVEAL_COLOR:
                last_move_color = self.last_player_action.move().color()

                self.obs_vec[self.offset + color_char_to_idx(last_move_color)] = 1

            self.offset += self.num_colors

            # encode rank (if hint action)
            if last_move_type == REVEAL_RANK:
                last_move_rank = self.last_player_action.move().rank()
                self.obs_vec[self.offset + last_move_rank] = 1

            self.offset += self.num_ranks

            # If multiple positions were selected (reveal outcome)
            if last_move_type == REVEAL_COLOR or last_move_type == REVEAL_RANK:
                positions = self.last_player_action.card_info_revealed()
                for pos in positions:
                    self.obs_vec[self.offset + pos] = 1

            self.offset += self.hand_size

            # encode position (if play or discard action)
            if last_move_type == PLAY or last_move_type == DISCARD:

                card_index = self.last_player_action.move().card_index()
                #print(f"BAD card_index={card_index}")
                self.obs_vec[self.offset + card_index] = 1

            self.offset += self.hand_size


            # encode card (if play or discard action)
            if last_move_type == PLAY or last_move_type == DISCARD:
                card_index_hgame = self.last_player_action.move().color() * self.num_ranks + \
                                   self.last_player_action.move().rank()
                # print(self.offset + card_index_hgame)
                self.obs_vec[self.offset + card_index_hgame] = 1

            self.offset += self.bits_per_card

            if last_move_type == PLAY:
                if self.last_player_action.scored():
                    self.obs_vec[self.offset] = 1

                # IF INFO TOKEN WAS ADDED
                if self.last_player_action.information_token():
                    self.obs_vec[self.offset + 1] = 1

            self.offset += 2

        assert self.offset - (
                self.hands_bit_length + self.board_bit_length + self.discard_pile_bit_length + self.last_action_bit_length) == 0
        return True
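
    # Worked example (assuming a standard 2-player game): the layout above uses
    # 2 (actor) + 4 (move type) + 2 (target) + 5 (color) + 5 (rank) + 5 (reveal
    # outcome) + 5 (position) + 25 (card) + 2 (scored / info token) = 55 bits,
    # matching last_action_bit_length.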


    '''
     Encode the common card knowledge.
     For each card/position in each player's hand, including the observing player,
     encode the possible cards that could be in that position and whether the
     color and rank were directly revealed by a Reveal action. Possible card
     values are in color-major order, using <num_colors> * <num_ranks> bits per
     card. For example, if you knew nothing about a card, and a player revealed
     that it was green, the knowledge would be encoded as follows.
     R    Y    G    W    B
     0000000000111110000000000   Only green cards are possible.
     0    0    1    0    0       Card was revealed to be green.
     00000                       Card rank was not revealed.

     Similarly, if the player revealed that one of your other cards was green, you
     would know that this card could not be green, resulting in:
     R    Y    G    W    B
     1111111111000001111111111   Any card that is not green is possible.
     0    0    0    0    0       Card color was not revealed.
     00000                       Card rank was not revealed.
     Uses <num_players> * <hand_size> *
     (<num_colors> * <num_ranks> + <num_colors> + <num_ranks>) bits.
     Returns the number of entries written to the encoding.
    '''

    def encode_card_knowledge(self, obs):

        card_knowledge_list = obs["card_knowledge"]
        current_pid = obs["current_player"]
        action = self.last_player_action

        if action:  # i.e. a last (non-deal) action exists; action is None otherwise
            type_action = self.last_player_action.move().type()

            if type_action in [REVEAL_COLOR, REVEAL_RANK]:
                player_hand_to_sync = (
                    action.player() +
                    action.move().target_offset() +
                    current_pid
                ) % self.num_players
                card_pos_to_sync = self.last_player_action.card_info_revealed()

                if type_action == REVEAL_COLOR:
                    color_to_sync = color_char_to_idx(self.last_player_action.move().color())
                    self.player_knowledge[player_hand_to_sync].sync_colors(card_pos_to_sync, color_to_sync)

                elif type_action == REVEAL_RANK:
                    rank_to_sync = self.last_player_action.move().rank()
                    self.player_knowledge[player_hand_to_sync].sync_ranks(card_pos_to_sync, rank_to_sync)

            elif type_action in [PLAY, DISCARD]:

                player_hand_to_sync = (action.player() + current_pid) % self.num_players
                card_id = action.move().card_index()

                self.player_knowledge[player_hand_to_sync].remove_card(card_id)

        for pid, player_card_knowledge in enumerate(card_knowledge_list):
            num_cards = 0
            rel_player_pos = (current_pid + pid) % self.num_players

            for card_id, card in enumerate(player_card_knowledge):
                for color in range(self.num_colors):

                    if self.player_knowledge[rel_player_pos].hand[card_id].color_plausible(color):
                        for rank in range(self.num_ranks):

                            if self.player_knowledge[rel_player_pos].hand[card_id].rank_plausible(rank):
                                card_index = color * self.num_ranks + rank
                                self.obs_vec[self.offset + card_index] = 1

                self.offset += self.bits_per_card

                # Encode explicitly revealed colors and ranks
                if card["color"] is not None:
                    color = color_char_to_idx(card["color"])
                    self.obs_vec[self.offset + color] = 1

                self.offset += self.num_colors

                if card["rank"] is not None:
                    rank = card["rank"]
                    self.obs_vec[self.offset + rank] = 1

                self.offset += self.num_ranks
                num_cards += 1

            if num_cards < self.hand_size:
                self.offset += (self.hand_size - num_cards) * (self.bits_per_card + self.num_colors + self.num_ranks)

        # print(self.offset)
        assert self.offset - (
                    self.hands_bit_length +
                    self.board_bit_length +
                    self.discard_pile_bit_length +
                    self.last_action_bit_length +
                    self.card_knowledge_bit_length) == 0

        return True
Пример #46
0
# b) an iterator that repeats the elements of a list defined in advance.

from itertools import count
from itertools import cycle

FINISH = 'q'

print('\nPress Enter to print the next value of the iterator; type "q" to leave the iterator\n')

# task a)

start_number = int(input('Enter a starting integer: '))

exit_str = ''

for number in count(start_number):
    if exit_str == FINISH:
        break
    print(number, end='')
    exit_str = input()


# task b)

start_list = input('\nEnter elements separated by spaces: ').split()

exit_str = ''

for elem in cycle(start_list):
    if exit_str == FINISH:
        break
    print(elem, end='')
    exit_str = input()
Пример #47
0
    def translate(self,
                  src_path=None,
                  src_data_iter=None,
                  tgt_path=None,
                  tgt_data_iter=None,
                  src_dir=None,
                  batch_size=None,
                  attn_debug=False):
        """
        Translate content of `src_data_iter` (if not None) or `src_path`
        and get gold scores if one of `tgt_data_iter` or `tgt_path` is set.

        Note: batch_size must not be None
        Note: one of ('src_path', 'src_data_iter') must not be None

        Args:
            src_path (str): filepath of source data
            src_data_iter (iterator): an iterator generating source data,
                e.g. a list or an opened file
            tgt_path (str): filepath of target data
            tgt_data_iter (iterator): an iterator generating target data
            src_dir (str): source directory path
                (used for Audio and Image datasets)
            batch_size (int): size of examples per mini-batch
            attn_debug (bool): enables the attention logging

        Returns:
            (`list`, `list`)

            * all_scores is a list of `batch_size` lists of `n_best` scores
            * all_predictions is a list of `batch_size` lists
                of `n_best` predictions
        """
        assert src_data_iter is not None or src_path is not None

        if batch_size is None:
            raise ValueError("batch_size must be set")
        data = inputters.build_dataset(self.fields,
                                       self.data_type,
                                       src_path=src_path,
                                       src_data_iter=src_data_iter,
                                       tgt_path=tgt_path,
                                       tgt_data_iter=tgt_data_iter,
                                       src_dir=src_dir,
                                       sample_rate=self.sample_rate,
                                       window_size=self.window_size,
                                       window_stride=self.window_stride,
                                       window=self.window,
                                       use_filter_pred=self.use_filter_pred)

        if self.cuda:
            cur_device = "cuda"
        else:
            cur_device = "cpu"

        data_iter = inputters.OrderedIterator(
            dataset=data, device=cur_device,
            batch_size=batch_size, train=False, sort=False,
            sort_within_batch=True, shuffle=False)

        builder = onmt.translate.TranslationBuilder(
            data, self.fields,
            self.n_best, self.replace_unk, tgt_path)

        # Statistics
        counter = count(1)
        pred_score_total, pred_words_total = 0, 0
        gold_score_total, gold_words_total = 0, 0

        all_scores = []
        all_predictions = []

        for batch in data_iter:
            batch_data = self.translate_batch(batch, data, fast=self.fast)
            translations = builder.from_batch(batch_data)

            for trans in translations:
                all_scores += [trans.pred_scores[:self.n_best]]
                pred_score_total += trans.pred_scores[0]
                pred_words_total += len(trans.pred_sents[0])
                if tgt_path is not None:
                    gold_score_total += trans.gold_score
                    gold_words_total += len(trans.gold_sent) + 1

                n_best_preds = [" ".join(pred)
                                for pred in trans.pred_sents[:self.n_best]]
                all_predictions += [n_best_preds]
                self.out_file.write('\n'.join(n_best_preds) + '\n')
                self.out_file.flush()

                if self.verbose:
                    sent_number = next(counter)
                    output = trans.log(sent_number)
                    if self.logger:
                        self.logger.info(output)
                    else:
                        os.write(1, output.encode('utf-8'))

                # Debug attention.
                if attn_debug:
                    srcs = trans.src_raw
                    preds = trans.pred_sents[0]
                    preds.append('</s>')
                    attns = trans.attns[0].tolist()
                    header_format = "{:>10.10} " + "{:>10.7} " * len(srcs)
                    row_format = "{:>10.10} " + "{:>10.7f} " * len(srcs)
                    output = header_format.format("", *trans.src_raw) + '\n'
                    for word, row in zip(preds, attns):
                        max_index = row.index(max(row))
                        row_format = row_format.replace(
                            "{:>10.7f} ", "{:*>10.7f} ", max_index + 1)
                        row_format = row_format.replace(
                            "{:*>10.7f} ", "{:>10.7f} ", max_index)
                        output += row_format.format(word, *row) + '\n'
                        row_format = "{:>10.10} " + "{:>10.7f} " * len(srcs)
                    os.write(1, output.encode('utf-8'))

        if self.report_score:
            msg = self._report_score('PRED', pred_score_total,
                                     pred_words_total)
            if self.logger:
                self.logger.info(msg)
            else:
                print(msg)
            if tgt_path is not None:
                msg = self._report_score('GOLD', gold_score_total,
                                         gold_words_total)
                if self.logger:
                    self.logger.info(msg)
                else:
                    print(msg)
                if self.report_bleu:
                    msg = self._report_bleu(tgt_path)
                    if self.logger:
                        self.logger.info(msg)
                    else:
                        print(msg)
                if self.report_rouge:
                    msg = self._report_rouge(tgt_path)
                    if self.logger:
                        self.logger.info(msg)
                    else:
                        print(msg)

        if self.dump_beam:
            import json

            json.dump(self.beam_accum,
                      codecs.open(self.dump_beam, 'w', 'utf-8'))
        return all_scores, all_predictions
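
    # Hedged usage sketch (the `translator` object and the file paths below are
    # assumptions, not part of the original snippet):
    #
    #   scores, preds = translator.translate(src_path='test.src',
    #                                        tgt_path='test.tgt', batch_size=30)
    #   # scores[i]: the n_best scores for sentence i
    #   # preds[i]:  the corresponding n_best translations as strings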
Пример #48
0
    def __init__(self,
                 Ylist,
                 input_dim,
                 X=None,
                 X_variance=None,
                 initx='PCA',
                 initz='permute',
                 num_inducing=10,
                 Z=None,
                 kernel=None,
                 inference_method=None,
                 likelihoods=None,
                 name='mrd',
                 Ynames=None,
                 normalizer=False,
                 stochastic=False,
                 batchsize=10):

        self.logger = logging.getLogger(self.__class__.__name__)
        self.input_dim = input_dim
        self.num_inducing = num_inducing

        if isinstance(Ylist, dict):
            Ynames, Ylist = zip(*Ylist.items())

        self.logger.debug("creating observable arrays")
        self.Ylist = [ObsAr(Y) for Y in Ylist]
        #The next line is a fix for Python 3. It replicates the python 2 behaviour from the above comprehension
        Y = Ylist[-1]

        if Ynames is None:
            self.logger.debug("creating Ynames")
            Ynames = ['Y{}'.format(i) for i in range(len(Ylist))]
        self.names = Ynames
        assert len(self.names) == len(
            self.Ylist), "one name per dataset, or None if Ylist is a dict"

        if inference_method is None:
            self.inference_method = InferenceMethodList(
                [VarDTC() for _ in range(len(self.Ylist))])
        else:
            assert isinstance(
                inference_method, InferenceMethodList
            ), "please provide one inference method per Y in the list and provide it as InferenceMethodList, inference_method given: {}".format(
                inference_method)
            self.inference_method = inference_method

        if X is None:
            X, fracs = self._init_X(initx, Ylist)
        else:
            fracs = [X.var(0)] * len(Ylist)

        Z = self._init_Z(initz, X)
        self.Z = Param('inducing inputs', Z)
        self.num_inducing = self.Z.shape[0]  # ensure M==N if M>N

        # sort out the kernels
        self.logger.info("building kernels")
        if kernel is None:
            from ..kern import RBF
            kernels = [
                RBF(input_dim, ARD=1, lengthscale=1. / fracs[i])
                for i in range(len(Ylist))
            ]
        elif isinstance(kernel, Kern):
            kernels = []
            for i in range(len(Ylist)):
                k = kernel.copy()
                kernels.append(k)
        else:
            assert len(kernel) == len(Ylist), "need one kernel per output"
            assert all([isinstance(k, Kern)
                        for k in kernel]), "invalid kernel object detected!"
            kernels = kernel

        self.variational_prior = NormalPrior()
        #self.X = NormalPosterior(X, X_variance)

        if likelihoods is None:
            likelihoods = [
                Gaussian(name='Gaussian_noise'.format(i))
                for i in range(len(Ylist))
            ]
        else:
            likelihoods = likelihoods

        self.logger.info("adding X and Z")
        super(MRD, self).__init__(Y,
                                  input_dim,
                                  X=X,
                                  X_variance=X_variance,
                                  num_inducing=num_inducing,
                                  Z=self.Z,
                                  kernel=None,
                                  inference_method=self.inference_method,
                                  likelihood=Gaussian(),
                                  name='manifold relevance determination',
                                  normalizer=None,
                                  missing_data=False,
                                  stochastic=False,
                                  batchsize=1)

        self._log_marginal_likelihood = 0

        self.unlink_parameter(self.likelihood)
        self.unlink_parameter(self.kern)

        self.num_data = Ylist[0].shape[0]
        if isinstance(batchsize, int):
            batchsize = itertools.repeat(batchsize)

        self.bgplvms = []

        for i, n, k, l, Y, im, bs in zip(itertools.count(), Ynames, kernels,
                                         likelihoods, Ylist,
                                         self.inference_method, batchsize):
            assert Y.shape[
                0] == self.num_data, "All datasets need to share the number of datapoints, and those have to correspond to one another"
            md = np.isnan(Y).any()
            spgp = BayesianGPLVMMiniBatch(Y,
                                          input_dim,
                                          X,
                                          X_variance,
                                          Z=Z,
                                          kernel=k,
                                          likelihood=l,
                                          inference_method=im,
                                          name=n,
                                          normalizer=normalizer,
                                          missing_data=md,
                                          stochastic=stochastic,
                                          batchsize=bs)
            spgp.kl_factr = 1. / len(Ynames)
            spgp.unlink_parameter(spgp.Z)
            spgp.unlink_parameter(spgp.X)
            del spgp.Z
            del spgp.X
            spgp.Z = self.Z
            spgp.X = self.X
            self.link_parameter(spgp, i + 2)
            self.bgplvms.append(spgp)

        b = self.bgplvms[0]
        self.posterior = b.posterior
        self.kern = b.kern
        self.likelihood = b.likelihood

        self.logger.info("init done")
Пример #49
0
import itertools
from string import ascii_uppercase

def strSeq():
    for n in itertools.count(1):
        for s in itertools.product(ascii_uppercase, repeat=n):
            yield "".join(s)
Пример #50
0
from itertools import count

for i in count(10):
    print(i)

    if i >= 20:
        break
Пример #51
0
class Timer(threading.Thread):
    Entry = Entry
    Schedule = Schedule

    running = False
    on_tick = None
    _timer_count = count(1)

    if TIMER_DEBUG:  # pragma: no cover
        def start(self, *args, **kwargs):
            import traceback
            print('- Timer starting')
            traceback.print_stack()
            super(Timer, self).start(*args, **kwargs)

    def __init__(self, schedule=None, on_error=None, on_tick=None,
                 on_start=None, max_interval=None, **kwargs):
        self.schedule = schedule or self.Schedule(on_error=on_error,
                                                  max_interval=max_interval)
        self.on_start = on_start
        self.on_tick = on_tick or self.on_tick
        threading.Thread.__init__(self)
        self._is_shutdown = threading.Event()
        self._is_stopped = threading.Event()
        self.mutex = threading.Lock()
        self.not_empty = threading.Condition(self.mutex)
        self.daemon = True
        self.name = 'Timer-{0}'.format(next(self._timer_count))

    def _next_entry(self):
        with self.not_empty:
            delay, entry = next(self.scheduler)
            if entry is None:
                if delay is None:
                    self.not_empty.wait(1.0)
                return delay
        return self.schedule.apply_entry(entry)
    __next__ = next = _next_entry  # for 2to3

    def run(self):
        try:
            self.running = True
            self.scheduler = iter(self.schedule)

            while not self._is_shutdown.isSet():
                delay = self._next_entry()
                if delay:
                    if self.on_tick:
                        self.on_tick(delay)
                    if sleep is None:  # pragma: no cover
                        break
                    sleep(delay)
            try:
                self._is_stopped.set()
            except TypeError:  # pragma: no cover
                # we lost the race at interpreter shutdown,
                # so gc collected built-in modules.
                pass
        except Exception as exc:
            logger.error('Thread Timer crashed: %r', exc, exc_info=True)
            os._exit(1)

    def stop(self):
        self._is_shutdown.set()
        if self.running:
            self._is_stopped.wait()
            self.join(THREAD_TIMEOUT_MAX)
            self.running = False

    def ensure_started(self):
        if not self.running and not self.isAlive():
            if self.on_start:
                self.on_start(self)
            self.start()

    def _do_enter(self, meth, *args, **kwargs):
        self.ensure_started()
        with self.mutex:
            entry = getattr(self.schedule, meth)(*args, **kwargs)
            self.not_empty.notify()
            return entry

    def enter(self, entry, eta, priority=None):
        return self._do_enter('enter_at', entry, eta, priority=priority)

    def call_at(self, *args, **kwargs):
        return self._do_enter('call_at', *args, **kwargs)

    def enter_after(self, *args, **kwargs):
        return self._do_enter('enter_after', *args, **kwargs)

    def call_after(self, *args, **kwargs):
        return self._do_enter('call_after', *args, **kwargs)

    def call_repeatedly(self, *args, **kwargs):
        return self._do_enter('call_repeatedly', *args, **kwargs)

    def exit_after(self, secs, priority=10):
        self.call_after(secs, sys.exit, priority)

    def cancel(self, tref):
        tref.cancel()

    def clear(self):
        self.schedule.clear()

    def empty(self):
        return not len(self)

    def __len__(self):
        return len(self.schedule)

    def __bool__(self):
        return True
    __nonzero__ = __bool__

    @property
    def queue(self):
        return self.schedule.queue
Пример #52
0
class Channel(base.StdChannel):
    open = True
    throw_decode_error = False
    _ids = count(1)

    def __init__(self, connection):
        self.connection = connection
        self.called = []
        self.deliveries = count(1)
        self.to_deliver = []
        self.events = {'basic_return': set()}
        self.channel_id = next(self._ids)

    def _called(self, name):
        self.called.append(name)

    def __contains__(self, key):
        return key in self.called

    def exchange_declare(self, *args, **kwargs):
        self._called('exchange_declare')

    def prepare_message(self, body, priority=0, content_type=None,
            content_encoding=None, headers=None, properties={}):
        self._called('prepare_message')
        return dict(body=body,
                    headers=headers,
                    properties=properties,
                    priority=priority,
                    content_type=content_type,
                    content_encoding=content_encoding)

    def basic_publish(self, message, exchange='', routing_key='',
            mandatory=False, immediate=False, **kwargs):
        self._called('basic_publish')
        return message, exchange, routing_key

    def exchange_delete(self, *args, **kwargs):
        self._called('exchange_delete')

    def queue_declare(self, *args, **kwargs):
        self._called('queue_declare')

    def queue_bind(self, *args, **kwargs):
        self._called('queue_bind')

    def queue_unbind(self, *args, **kwargs):
        self._called('queue_unbind')

    def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs):
        self._called('queue_delete')

    def basic_get(self, *args, **kwargs):
        self._called('basic_get')
        try:
            return self.to_deliver.pop()
        except IndexError:
            pass

    def queue_purge(self, *args, **kwargs):
        self._called('queue_purge')

    def basic_consume(self, *args, **kwargs):
        self._called('basic_consume')

    def basic_cancel(self, *args, **kwargs):
        self._called('basic_cancel')

    def basic_ack(self, *args, **kwargs):
        self._called('basic_ack')

    def basic_recover(self, requeue=False):
        self._called('basic_recover')

    def exchange_bind(self, *args, **kwargs):
        self._called('exchange_bind')

    def exchange_unbind(self, *args, **kwargs):
        self._called('exchange_unbind')

    def close(self):
        self._called('close')

    def message_to_python(self, message, *args, **kwargs):
        self._called('message_to_python')
        return Message(self, body=anyjson.dumps(message),
                delivery_tag=next(self.deliveries),
                throw_decode_error=self.throw_decode_error,
                content_type='application/json', content_encoding='utf-8')

    def flow(self, active):
        self._called('flow')

    def basic_reject(self, delivery_tag, requeue=False):
        if requeue:
            return self._called('basic_reject:requeue')
        return self._called('basic_reject')

    def basic_qos(self, prefetch_size=0, prefetch_count=0,
            apply_global=False):
        self._called('basic_qos')
Пример #53
0
    that Twisted lib obeys.
    If you have any concerns, please contact [email protected].
"""

from __future__ import division

__all__ = [
    'NamedConstant', 'ValueConstant', 'FlagConstant',
    'Names', 'Values', 'Flags']

import functools
import itertools
import operator

_UNSPECIFIED = None
_CONSTANT_ORDER = functools.partial(next, itertools.count())


class _Constant(object):  # pylint: disable=R0903
    """
    @ivar _index: A C{int} allocated from a shared itertools.counter in order
        to keep track of the order in which L{_Constant}s are instantiated.

    @ivar name: A C{str} giving the name of this constant; only set once the
        constant is initialized by L{_ConstantsContainer}.

    @ivar _container: The L{_ConstantsContainer} subclass this constant belongs
        to; C{None} until the constant is initialized by that subclass.
    """
    def __init__(self):
        self._container = None
Пример #54
0
    import _winapi
    from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE
except ImportError:
    if sys.platform == 'win32':
        raise
    _winapi = None

#
#
#

BUFSIZE = 8192
# A very generous timeout when it comes to local connections...
CONNECTION_TIMEOUT = 20.

_mmap_counter = itertools.count()

default_family = 'AF_INET'
families = ['AF_INET']

if hasattr(socket, 'AF_UNIX'):
    default_family = 'AF_UNIX'
    families += ['AF_UNIX']

if sys.platform == 'win32':
    default_family = 'AF_PIPE'
    families += ['AF_PIPE']


def _init_timeout(timeout=CONNECTION_TIMEOUT):
    return time.time() + timeout
Пример #55
0
    def setUpClass(cls):
        cls.registry = odoo.registry(get_db_name())
        cls.cr = cls.registry.cursor()
        cls.uid = odoo.SUPERUSER_ID
        cls.env = api.Environment(cls.cr, cls.uid, {})

    @classmethod
    def tearDownClass(cls):
        # rollback and close the cursor, and reset the environments
        cls.registry.clear_caches()
        cls.env.reset()
        cls.cr.rollback()
        cls.cr.close()


savepoint_seq = itertools.count()
class SavepointCase(SingleTransactionCase):
    """ Similar to :class:`SingleTransactionCase` in that all test methods
    are run in a single transaction *but* each test case is run inside a
    rollbacked savepoint (sub-transaction).

    Useful for test cases containing fast tests but with significant database
    setup common to all cases (complex in-db test data): :meth:`~.setUpClass`
    can be used to generate db test data once, then all test cases use the
    same data without influencing one another but without having to recreate
    the test data either.
    """
    def setUp(self):
        self._savepoint_id = next(savepoint_seq)
        self.cr.execute('SAVEPOINT test_%d' % self._savepoint_id)
    def tearDown(self):
        # roll back to the savepoint taken in setUp so each test leaves no trace
        self.cr.execute('ROLLBACK TO SAVEPOINT test_%d' % self._savepoint_id)
Пример #56
0
    def __init__(self, uvsets_count):
        self.uvsets_count = uvsets_count
        self.uv_indices = itertools.count()
Пример #57
0
    return action, info


if __name__ == '__main__':
    env = gym.vector.make('CartPole-v0', num_envs=1)
    env = envs.Logger(env, interval=1000)
    env = envs.Torch(env)
    env = envs.Runner(env)
    env.seed(SEED)

    policy = ActorCriticNet(env)
    optimizer = optim.Adam(policy.parameters(), lr=1e-2)
    running_reward = 10.0
    get_action = lambda state: get_action_value(state, policy)

    for episode in count(1):
        # We use the Runner collector, but could've written our own
        replay = env.run(get_action, episodes=1)

        # Update policy
        update(replay, optimizer)

        # Compute termination criterion
        running_reward = running_reward * 0.99 + len(replay) * 0.01
        if episode % 10 == 0:
            # Should start with 10.41, 12.21, 14.60, then 100:71.30, 200:135.74
            print(episode, running_reward)
        if running_reward > 190.0:
            print('Solved! Running reward now {} and '
                  'the last episode runs to {} time steps!'.format(
                      running_reward, len(replay)))
Пример #58
0
from itertools import count, islice
from math import sqrt

def isPrime(n):
    return n > 1 and all(n % i for i in islice(count(2), int(sqrt(n) - 1)))
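
# Quick check (illustrative): the primes below 20.
# [n for n in range(2, 20) if isPrime(n)]   # [2, 3, 5, 7, 11, 13, 17, 19]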
Пример #59
0
class Seq_Generator():
    # Python 2 idiom: bind the counter's .next method so each call returns a new id
    # (under Python 3 this would be itertools.count().__next__)
    newid = itertools.count().next
    def __init__(self):
        self.id = Seq_Generator.newid()
Пример #60
0
    def append_table(self, filename):
        if self.cross:
            print 'ERROR: append_table not supported for type cross!'
            sys.exit(1)

        # append columns from another table (read it from disk) to this
        # it is assumed that a row exists for each subject.tp in this table
        print 'Parsing the qdec table: ' + filename
        statstable = LongQdecTable()
        statstable.parse(filename,
                         False)  #don't warn about being cross sectional table
        #print statstable.variables

        self.variables = list(
            itertools.chain(*[self.variables, statstable.variables]))
        first = True
        crossnames = True
        #iterate through current table
        for subjectid, tplist in self.subjects_tp_map.items():

            if not statstable.cross:
                print 'statstable is not cross (= long)\n'
                # table to append is in long format
                #  check if subject is here
                if subjectid not in statstable.subjects_tp_map:
                    print 'ERROR: did not find ' + subjectid + ' in table ' + filename + '!'
                    sys.exit(1)

                # get that data
                addtplist = statstable.subjects_tp_map[subjectid]

                # check if all time points are in same order
                for i, tpdata, addtpdata in itertools.izip(
                        itertools.count(), tplist, addtplist):
                    if tpdata[0] != addtpdata[0]:
                        print 'ERROR: time point id ' + tpdata[0] + ' not found in other table!'
                        sys.exit(1)
                    # append all columns (except the id)
                    self.subjects_tp_map[subjectid][i] = list(
                        itertools.chain(*[
                            self.subjects_tp_map[subjectid][i], addtplist[i]
                            [1:]
                        ]))
            else:
                # if saved in cross format
                for i, tpdata in itertools.izip(itertools.count(), tplist):
                    # determine if fsid is cross or long (only once)
                    if first:
                        first = False
                        tpid = tpdata[0]
                        if tpid in statstable.subjects_tp_map:
                            crossnames = True
                        elif tpid + '.long.' + subjectid in statstable.subjects_tp_map:
                            crossnames = False
                        else:
                            print 'ERROR: time point id ' + tpid + ' not found in other table!'
                            sys.exit(1)
                    # get the name
                    tpid = tpdata[0]
                    if not crossnames:
                        tpid = tpdata[0] + '.long.' + subjectid
                    # get the data
                    addtplist = statstable.subjects_tp_map[tpid]
                    # append all columns (except the id)
                    self.subjects_tp_map[subjectid][i] = list(
                        itertools.chain(*[
                            self.subjects_tp_map[subjectid][i], addtplist[0]
                            [1:]
                        ]))