Example #1
    def __init__(
        self,
        message,
        validator=_unset,
        path=(),
        cause=None,
        context=(),
        validator_value=_unset,
        instance=_unset,
        schema=_unset,
        schema_path=(),
        parent=None,
    ):
        self.message = message
        self.path = self.relative_path = deque(path)
        self.schema_path = self.relative_schema_path = deque(schema_path)
        self.context = list(context)
        self.cause = self.__cause__ = cause
        self.validator = validator
        self.validator_value = validator_value
        self.instance = instance
        self.schema = schema
        self.parent = parent

        for error in context:
            error.parent = self
Example #2
    def __init__(self, host='127.0.0.1', port=9042, authenticator=None,
                 ssl_options=None, sockopts=None, compression=True,
                 cql_version=None, protocol_version=MAX_SUPPORTED_VERSION, is_control_connection=False,
                 user_type_map=None, connect_timeout=None):
        self.host = host
        self.port = port
        self.authenticator = authenticator
        self.ssl_options = ssl_options
        self.sockopts = sockopts
        self.compression = compression
        self.cql_version = cql_version
        self.protocol_version = protocol_version
        self.is_control_connection = is_control_connection
        self.user_type_map = user_type_map
        self.connect_timeout = connect_timeout
        self._push_watchers = defaultdict(set)
        self._requests = {}
        self._iobuf = io.BytesIO()

        if protocol_version >= 3:
            self.max_request_id = (2 ** 15) - 1
            # Don't fill the deque with 2**15 items right away. Start with 300 and add
            # more if needed.
            self.request_ids = deque(range(300))
            self.highest_request_id = 299
        else:
            self.max_request_id = (2 ** 7) - 1
            self.request_ids = deque(range(self.max_request_id + 1))
            self.highest_request_id = self.max_request_id

        self.lock = RLock()
        self.connected_event = Event()
Example #3
    def test_imul(self):
        for n in (-10, -1, 0, 1, 2, 10, 1000):
            d = deque()
            d *= n
            self.assertEqual(d, deque())
            self.assertIsNone(d.maxlen)

        for n in (-10, -1, 0, 1, 2, 10, 1000):
            d = deque('a')
            d *= n
            self.assertEqual(d, deque('a' * n))
            self.assertIsNone(d.maxlen)

        for n in (-10, -1, 0, 1, 2, 10, 499, 500, 501, 1000):
            d = deque('a', 500)
            d *= n
            self.assertEqual(d, deque('a' * min(n, 500)))
            self.assertEqual(d.maxlen, 500)

        for n in (-10, -1, 0, 1, 2, 10, 1000):
            d = deque('abcdef')
            d *= n
            self.assertEqual(d, deque('abcdef' * n))
            self.assertIsNone(d.maxlen)

        for n in (-10, -1, 0, 1, 2, 10, 499, 500, 501, 1000):
            d = deque('abcdef', 500)
            d *= n
            self.assertEqual(d, deque(('abcdef' * n)[-500:]))
            self.assertEqual(d.maxlen, 500)
Example #4
def test_hash_methods():
    # Check that hashing instance methods works
    a = io.StringIO(unicode('a'))
    assert hash(a.flush) == hash(a.flush)
    a1 = collections.deque(range(10))
    a2 = collections.deque(range(9))
    assert hash(a1.extend) != hash(a2.extend)
Example #5
 def levelOrderBottom(self, root):
     """
     :type root: TreeNode
     :rtype: List[List[int]]
     """
     if not root:
         return []
     
     result = []
     
     temp = deque([root])
     next_temp = deque()
     _result = []
     while 1:
         if temp:
             node = temp.popleft()
             _result.append(node.val)
             if node.left:
                 next_temp.append(node.left)
                 
             if node.right:
                 next_temp.append(node.right)
         else:
             result.append(_result)
             _result = []
             temp = next_temp
             next_temp = deque()
         
         if not temp and not next_temp:
             if _result:
                 result.append(_result)
             return result[::-1]
Example #6
 def assign_target(self, target_db):
     for ((tbl_schema, tbl_name), tbl) in self.tables.items():
         tbl._random_row_gen_fn = types.MethodType(_random_row_gen_fn, tbl)
         tbl.random_rows = tbl._random_row_gen_fn()
         tbl.next_row = types.MethodType(_next_row, tbl)
         target = target_db.tables[(tbl_schema, tbl_name)]
         target.requested = deque()
         target.required = deque()
         target.pending = dict()
         target.done = set()
         target.fetch_all = False
         if _table_matches_any_pattern(tbl.schema, tbl.name, self.args.full_tables):
             target.n_rows_desired = tbl.n_rows
             target.fetch_all = True
         else:
             if tbl.n_rows:
                 if self.args.logarithmic:
                     target.n_rows_desired = int(math.pow(10, math.log10(tbl.n_rows) * self.args.fraction)) or 1
                 else:
                     target.n_rows_desired = int(tbl.n_rows * self.args.fraction) or 1
             else:
                 target.n_rows_desired = 0
         target.source = tbl
         tbl.target = target
         target.completeness_score = types.MethodType(_completeness_score, target)
         logging.debug("assigned methods to %s" % target.name)
Example #7
    def test_count(self):
        for s in ('', 'abracadabra', 'simsalabim'*500+'abc'):
            s = list(s)
            d = deque(s)
            for letter in 'abcdefghijklmnopqrstuvwxyz':
                self.assertEqual(s.count(letter), d.count(letter), (s, d, letter))
        self.assertRaises(TypeError, d.count)       # too few args
        self.assertRaises(TypeError, d.count, 1, 2) # too many args
        class BadCompare:
            def __eq__(self, other):
                raise ArithmeticError
        d = deque([1, 2, BadCompare(), 3])
        self.assertRaises(ArithmeticError, d.count, 2)
        d = deque([1, 2, 3])
        self.assertRaises(ArithmeticError, d.count, BadCompare())
        class MutatingCompare:
            def __eq__(self, other):
                self.d.pop()
                return True
        m = MutatingCompare()
        d = deque([1, 2, 3, m, 4, 5])
        m.d = d
        self.assertRaises(RuntimeError, d.count, 3)

        # test issue11004
        # block advance failed after rotation aligned elements on right side of block
        d = deque([None]*16)
        for i in range(len(d)):
            d.rotate(-1)
        d.rotate(1)
        self.assertEqual(d.count(1), 0)
        self.assertEqual(d.count(None), 16)
Example #8
File: seqs.py  Project: Suor/funcy
def ilen(seq):
    """Consumes an iterable not reading it into memory
       and returns the number of items."""
    # NOTE: implementation borrowed from http://stackoverflow.com/a/15112059/753382
    counter = count()
    deque(zip(seq, counter), maxlen=0)  # (consume at C speed)
    return next(counter)
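
# A quick usage sketch (not part of the funcy source); it assumes the ilen()
# above is in scope together with its imports (collections.deque, itertools.count).
print(ilen(x for x in range(1000) if x % 3 == 0))  # counted lazily -> 334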
Example #9
 def __init__(self, capacity):
     super().__init__()
     self.html_formatter = None
     if capacity != -1:
         self._data = collections.deque(maxlen=capacity)
     else:
         self._data = collections.deque()
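
# A small standalone illustration of why the two branches above differ (an
# illustration only, not part of the original class): a deque built with
# maxlen silently evicts its oldest entries, while an unbounded deque keeps
# everything.
import collections

bounded = collections.deque(maxlen=3)
unbounded = collections.deque()
for i in range(5):
    bounded.append(i)    # once 3 items are stored, the oldest is dropped
    unbounded.append(i)  # grows without bound
print(list(bounded))    # [2, 3, 4]
print(list(unbounded))  # [0, 1, 2, 3, 4]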
Example #10
 def __init__( self, size = None ):
     
     self._lock = threading.Lock()
     
     # {} is just something that python
     # will always return True for when
     # asking if it's greater than
     # any integer
     self._max = (
         {} if size == None else size
         )
     
     # values placed in the channel
     self._pending = collections.deque()
     
     # blocking writers awaiting writing a value
     # [ ( OnWriteEvent, writtenValue ), ... ]
     self._waiting_writers = collections.deque()
     
     # blocking readers awaiting reading a value
     # [ Selector, ... ]
     # the empty list is a box used to pass the
     # value from the writer to the reader
     self._waiting_readers = collections.deque()
     
     # oft-read and rarely written queues can
     # build up stale selector entries;
     # if this count is over the max, flush out the
     # stale entries. the check is done each
     # time the queue is called with an
     # external selector (if select() is never
     # used, this is never used either)
     self._stale = 0
     self._max_stale = 256
Example #11
    def split_edge_loop(vert_loop):
        other_loop = deque()
        new_loop = deque()
        for vert in vert_loop:
            #print('OPERATING ON VERT', vert.index)
            edges = selected_edges(vert)
            v_new = bmesh.utils.vert_separate(vert, edges)
            #print('RIPPING vert %d into' %vert.index, [v.index for v in v_new][:], \
            #       'along edges', [e.index for e in edges])
            if not closed:
                if len(v_new) == 2:
                    other_loop.append([v for v in v_new if v != vert][0])
                else:
                    other_loop.append(vert)

            if closed:
                if not new_loop:
                    #print('start_new_loop')
                    new_loop.append(v_new[0])
                    other_loop.append(v_new[1])
                else:
                    neighbours = [e.other_vert(v_new[0]) for e in v_new[0].link_edges]
                    #print('neighbours', [n.index for n in neighbours])
                    for n in neighbours:
                        if n in new_loop and v_new[0] not in new_loop:
                            #print('v_detect')
                            new_loop.append(v_new[0])
                            other_loop.append(v_new[1])
                        if n in other_loop and v_new[0] not in other_loop:
                            #print('v_not_detect')
                            new_loop.append(v_new[1])
                            other_loop.append(v_new[0])

        return other_loop, new_loop
Example #12
	def startThread(self):
		"""The response time ot the StreamFrames command is used to establish the network delay."""
		# I dont care about warnings that my markers are not moving
		warnings.simplefilter('ignore', np.RankWarning)
		logging.info("starting client thread")
		self.p = deque() # positions
		self.t = deque() # time
		self.ta = deque() # arrival time
		
		self.sendCommand("SetByteOrder BigEndian")
		self.receive()
		
		# start frames and measure network delay and clock difference
		if self.verbose:
			self.show()
		t0 = self.time()
		self.sendCommand("StreamFrames FrequencyDivisor:1")
		self.receive()
		delay = self.time() - t0
		tDifference = []
		nPackage = 0
		for i in range(6):
			retval = self.receive()
			if retval != 3: # not data
				continue
			nPackage += 1
			tClient = self.time()
			(pp, tServer) = self.parse3D() # position of markers and fp server time
			tDifference.append(tClient-tServer)
			
		logging.info("Timing error estimate: delay: {:f}, variation: {:f} (from {} packages)".format(delay, np.std(tDifference), nPackage))
		self.tDifference = np.mean(tDifference)+delay # add this to server time to get client time

		# main data retrieval loop
		tSync = 0
		nSync = 0
		while not self.stoppingStream:
			retval = self.receive()
			if retval != 3: # not data
				logging.info("not data: {}".format(retval))
				continue
			(pp, tServer) = self.parse3D() # position of markers and fp server time
			self.t.append(tServer + self.tDifference) # push
			self.p.append(pp) # push
			self.ta.append(self.time()) # arrival time
			if len(self.p) > self.nBuffer:
				self.p.popleft()
				self.t.popleft()
				
			# print syncing frequency
			#tNew = self.time()
			#nSync += 1
			#if math.floor(tNew) != tSync:
				#tSync = math.floor(tNew)
				#logging.info("syncing at {} Hz".format(nSync))
				#nSync=0
		
		self.sendCommand("Bye")
		logging.info("stopped")
		self.stoppingStream = False;
Example #13
def co_occurrences(words, window_size_before, window_size_after):
    """Yield word co-occurrence.

    :param iter words: the sequence of words.
    :param int window_size_before: window size before the target token.
    :param int window_size_after: window size after the target token.

    """
    words = iter(words)

    target = next(words)
    before = deque([], maxlen=window_size_before)
    after = deque(islice(words, window_size_after))

    while True:
        for context in chain(before, after):
            yield target + context

        before.append(target)

        try:
            after.append(next(words))
        except StopIteration:
            pass

        try:
            target = after.popleft()
        except IndexError:
            break
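
# A minimal usage sketch (an illustration, assuming the co_occurrences()
# generator above and its imports -- collections.deque, itertools.islice,
# itertools.chain -- are in scope). Single-character "words" keep the
# concatenated pairs easy to read.
pairs = list(co_occurrences("abcde", window_size_before=1, window_size_after=1))
print(pairs)  # ['ab', 'ba', 'bc', 'cb', 'cd', 'dc', 'de', 'ed']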
Example #14
def try_pattern(pattern):
    # s = start send, r = start recv, R = wait for a recv, S = wait for a send
    print "in try_pattern"
    assert (pattern.count("s") == pattern.count("S") ==
            pattern.count("r") == pattern.count("R"))
    print "assert passed"
    cs = 0
    cr = 0
    scbs = deque()
    rcbs = deque()
    ch = Channel()

    for c in pattern:
        if c == "s":
            print "starting send"
            scbs.append(ch.send(cs))
            cs += 1
            print "started send"
        elif c == "S":
            print "waiting for send"
            yield scbs.popleft()
            print "done waiting for send"
        elif c == "r":
            print "starting recv"
            rcbs.append(ch.recv())
            print "started recv"
        elif c == "R":
            print "waiting for recv"
            x = yield rcbs.popleft()
            print "done waiting for recv"
            assert x == cr, "%s != %s" % (x, cr)
            print "post-recv assert passed"
            cr += 1
    print "leaving try_pattern"
Example #15
    def build(self, configSets, worklog):
        """Does the work described by each configSet, in order, returning nothing"""

        worklog.clear_except_metadata()

        configSets = collections.deque(configSets)
        log.info("Running configSets: %s", ', '.join(configSets))

        while configSets:
            configSetName = configSets.popleft()
            if not configSetName in self._configSets:
                raise NoSuchConfigSetError("Error: no ConfigSet named %s exists" % configSetName)

            worklog.put('configSets', configSets)

            configSet = collections.deque(self._configSets[configSetName])
            log.info("Running configSet %s", configSetName)
            while configSet:
                config = configSet.popleft()

                worklog.put('configs', configSet)

                self.run_config(config, worklog)

        log.info("ConfigSets completed")
        worklog.clear()
        platform_utils.clear_reboot_trigger()
Example #16
 def __init__(self, instream = None, infile = None, posix = False):
     if isinstance(instream, basestring):
         instream = StringIO(instream)
     if instream is not None:
         self.instream = instream
         self.infile = infile
     else:
         self.instream = sys.stdin
         self.infile = None
     self.posix = posix
     if posix:
         self.eof = None
     else:
         self.eof = ''
     self.commenters = '#'
     self.wordchars = 'abcdfeghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
     if self.posix:
         self.wordchars += '\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd8\xd9\xda\xdb\xdc\xdd\xde'
     self.whitespace = ' \t\r\n'
     self.whitespace_split = False
     self.quotes = '\'"'
     self.escape = '\\'
     self.escapedquotes = '"'
     self.state = ' '
     self.pushback = deque()
     self.lineno = 1
     self.debug = 0
     self.token = ''
     self.filestack = deque()
     self.source = None
     if self.debug:
         print 'shlex: reading from %s, line %d' % (self.instream, self.lineno)
     return
Example #17
def couples(generation):
    men = deque()
    women = deque()

    ps = people(generation)
    while True:
        i, fam = next(ps)
        if childless(generation, i):
            continue
        if sex(generation, i):
            if women and women[0][1] != fam:
                if women[0][2] > 1:
                    women[0][2] -= 1
                    mom = women[0][0]
                else:
                    mom = women.popleft()[0]
                    
                yield mom, i
            else:
                men.append([i,fam,spouses(generation,i)])
        else:
            if men and men[0][1] != fam:
                if men[0][2] > 1:
                    men[0][2] -= 1
                    dad = men[0][0]
                else:
                    dad = men.popleft()[0]
                yield i, dad
            else:
                women.append([i,fam,spouses(generation,i)])
Example #18
File: fm.py  Project: nfnty/ranger
    def __init__(self, ui=None, bookmarks=None, tags=None, paths=None):
        """Initialize FM."""
        Actions.__init__(self)
        SignalDispatcher.__init__(self)
        self.ui = ui if ui is not None else UI()
        self.start_paths = paths if paths is not None else ['.']
        self.directories = dict()
        self.bookmarks = bookmarks
        self.current_tab = 1
        self.tabs = {}
        self.tags = tags
        self.restorable_tabs = deque([], ranger.MAX_RESTORABLE_TABS)
        self.py3 = sys.version_info >= (3, )
        self.previews = {}
        self.default_linemodes = deque()
        self.loader = Loader()
        self.copy_buffer = set()
        self.do_cut = False
        self.metadata = MetadataManager()
        self.image_displayer = None
        self.run = None
        self.rifle = None
        self.thistab = None

        try:
            self.username = pwd.getpwuid(os.geteuid()).pw_name
        except KeyError:
            self.username = '******' + str(os.geteuid())
        self.hostname = socket.gethostname()
        self.home_path = os.path.expanduser('~')

        mimetypes.knownfiles.append(os.path.expanduser('~/.mime.types'))
        mimetypes.knownfiles.append(self.relpath('data/mime.types'))
        self.mimetypes = mimetypes.MimeTypes()
Example #19
def ipartition(pred,it):
    """Partition an iterable based on a predicate.

    Returns two iterables, for those with pred False and those True."""
    falses, trues=deque(), deque()
    it=iter(it)
    def get_false():
        while True:
            if falses:
                yield falses.popleft()
            else:
                while True:
                    val=next(it)
                    if pred(val): trues.append(val)
                    else: break
                yield val
    def get_true():
        while True:
            if trues:
                yield trues.popleft()
            else:
                while True:
                    val=next(it)
                    if not pred(val): falses.append(val)
                    else: break
                yield val
    return get_false(),get_true()
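
# A usage sketch (assumes the ipartition() above is in scope, with deque
# imported). Both generators pull lazily from the same underlying iterator,
# so this sketch takes a known number of items from each side; running either
# side past the end of the input would hit the uncaught next(it), which
# Python 3.7+ surfaces as a RuntimeError (PEP 479).
from itertools import islice

evens, odds = ipartition(lambda x: x % 2, range(10))
print(list(islice(evens, 5)))  # [0, 2, 4, 6, 8]  (pred False)
print(list(islice(odds, 5)))   # [1, 3, 5, 7, 9]  (pred True)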
Example #20
def getFitInstructions(functionID):
    fitInstructions = deque([])
    if functionID == 1:
        fitInstructions = deque([
                              { 'dataType': 'askPoint', 'xID': 'X1', 'yID': 'Y1',
                                'messageTitle': 'Step 1', 'messageText': 'Please click on the first point' },
                              { 'dataType': 'askPoint', 'xID': 'X2', 'yID': 'Y2',
                                'messageTitle': 'Step 2', 'messageText': 'Please click on the second point' }
                          ])
    if functionID == 2:
        fitInstructions = deque([
                              { 'dataType': 'askDrag', 'xIDstart': 'X1', 'yIDstart': 'Y1', 'xIDend': 'X2', 'yIDend': 'Y2',
                                'messageTitle': 'Step 1', 'messageText': 'Please drag from the first point to the second point' }
                          ])
    elif functionID == 11 or functionID == 21:
        fitInstructions = deque([
                              { 'dataType': 'askPoint', 'xID': 'backgroundX', 'yID': 'backgroundY',
                                'messageTitle': 'Step 1', 'messageText': 'Please click on the background of the data' },
                              { 'dataType': 'askPoint', 'xID': 'peakX', 'yID': 'peakY',
                                'messageTitle': 'Step 2', 'messageText': 'Please click on the peak of the data' },
                              { 'dataType': 'askPoint', 'xID': 'widthX', 'yID': 'widthY',
                                'messageTitle': 'Step 3', 'messageText': 'Please click on the width of the data' }
                          ])
    elif functionID == 12 or functionID == 22:
        fitInstructions = deque([
                              { 'dataType': 'askPoint', 'xID': 'backgroundX', 'yID': 'backgroundY',
                                'messageTitle': 'Step 1', 'messageText': 'Please click on the background of the data' },
                              { 'dataType': 'askPoint', 'xID': 'peakX', 'yID': 'peakY',
                                'messageTitle': 'Step 2', 'messageText': 'Please click on the peak of the data' },
                              { 'dataType': 'askDrag', 'xIDstart': 'widthYstart', 'yIDstart': 'widthYstart', 'xIDend': 'widthX', 'yIDend': 'widthY',
                                'messageTitle': 'Step 3', 'messageText': 'Please drag on the width of the data' }
                          ])
    return fitInstructions
Example #21
 def verticalOrder_ver1(self, root):
     """
     :type root: TreeNode
     :rtype: List[List[int]]
     """
     if not root:
         return []
     left = 0
     right = 0
     nodes = deque([(root, 0)])
     ans = deque([[]])
     while nodes:
         root, index = nodes.popleft()
         if root.left:
             nodes.append((root.left, index-1))
         if root.right:
             nodes.append((root.right, index+1))
         if index<-left:
             ans.appendleft([root.val])
             left += 1
         elif index>right:
             ans.append([root.val])
             right += 1
         else:
             ans[left+index].append(root.val)
     return list(ans)
Example #22
    def __init__(self, server):
        super(WorldServer, self).__init__()
        import savingsystem #This module doesn't like being imported at modulescope
        self.savingsystem = savingsystem
        if not os.path.lexists(os.path.join(G.game_dir, "world", "players")):
            os.makedirs(os.path.join(G.game_dir, "world", "players"))

        self.sectors = defaultdict(list)
        self.exposed_cache = dict()

        self.urgent_queue = deque()
        self.lazy_queue = deque()
        self.sector_queue = OrderedDict()
        self.generation_queue = deque()
        self.spreading_mutable_blocks = deque()

        self.server_lock = threading.Lock()
        self.server = server

        if os.path.exists(os.path.join(G.game_dir, G.SAVE_FILENAME, "seed")):
            with open(os.path.join(G.game_dir, G.SAVE_FILENAME, "seed"), "rb") as f:
                G.SEED = f.read()
        else:
            if not os.path.exists(os.path.join(G.game_dir, G.SAVE_FILENAME)): os.makedirs(os.path.join(G.game_dir, G.SAVE_FILENAME))
            with open(os.path.join(G.game_dir, G.SAVE_FILENAME, "seed"), "wb") as f:
                f.write(self.generate_seed())

        self.terraingen = terrain.TerrainGeneratorSimple(self, G.SEED)
Example #23
	def __init__(self, origin, endLocation, color = (0xAA, 0x55, 0x00), fire=None, impact=None):
		
		self.particals = deque()
		self.maxParticals = 10
		
		self.start = Vector2(origin)
		self.start.center()
		self.pos = Vector2(self.start)
		self.end = Vector2(endLocation)
		self.end.center()
		self.target = Vector2(self.end)
		self.target.center()

		self.color = color
		self.fire = fire
		self.impact = impact
		
		dx = abs(float(origin[0]) - endLocation[0])
		dy = abs(float(origin[1]) - endLocation[1])
		dist = int(hypot(dx, dy))
		if dist == 0:
			dist = 1
		
		self.vx = (float(self.end[0]) - self.start[0]) / (3 * dist)
		self.vy = (float(self.end[1]) - self.start[1]) / (3 * dist)
		
		self.trail = deque()
		
		for i in xrange(10):
			self.particals.append((self.vx * i * 2, self.vy * i * 2))

		if self.fire is not None:
			self.fire()

		self.done = False
Example #24
    def __init__(self, parent=None, adapter=None, depth_limit=0, padding=0,
                 **kwargs):
        super().__init__(parent)

        # Instance variables
        # The tree adapter parameter will be handled at the end of init
        self.tree_adapter = None
        # The root tree node instance which is calculated inside the class
        self._tree = None
        self._padding = padding

        self.setSizePolicy(QSizePolicy.Expanding,
                           QSizePolicy.Expanding)

        # Necessary settings that need to be set from the outside
        self._depth_limit = depth_limit
        # Provide a nice green default in case no color function is provided
        self.__calc_node_color_func = kwargs.get('node_color_func')
        self.__get_tooltip_func = kwargs.get('tooltip_func')
        self._interactive = kwargs.get('interactive', True)

        self._square_objects = {}
        self._drawn_nodes = deque()
        self._frontier = deque()

        # If a tree adapter was passed, set and draw the tree
        if adapter is not None:
            self.set_tree(adapter)
Example #25
    def __init__(self, connection_manager=None):
        super(GearmanAdminClientCommandHandler, self).__init__(connection_manager=connection_manager)
        self._sent_commands = collections.deque()
        self._recv_responses = collections.deque()

        self._status_response = []
        self._workers_response = []
Example #26
    def parse_options(self, args):
        """ Parse command line options """

        self.parser = parser = OptionParserExtended(option_class=SosOption)
        parser.add_option("-l", "--list-plugins", action="store_true",
                             dest="listPlugins", default=False,
                             help="list plugins and available plugin options")
        parser.add_option("-n", "--skip-plugins", action="extend",
                             dest="noplugins", type="string",
                             help="disable these plugins", default = deque())
        parser.add_option("-e", "--enable-plugins", action="extend",
                             dest="enableplugins", type="string",
                             help="enable these plugins", default = deque())
        parser.add_option("-o", "--only-plugins", action="extend",
                             dest="onlyplugins", type="string",
                             help="enable these plugins only", default = deque())
        parser.add_option("-k", "--plugin-option", action="append",
                             dest="plugopts", type="string",
                             help="plugin options in plugname.option=value format (see -l)")
        parser.add_option("-a", "--alloptions", action="store_true",
                             dest="usealloptions", default=False,
                             help="enable all options for loaded plugins")
        parser.add_option("-u", "--upload", action="store",
                             dest="upload", default=False,
                             help="upload the report to an ftp server")
        parser.add_option("--batch", action="store_true",
                             dest="batch", default=False,
                             help="batch mode - do not prompt interactively")
        parser.add_option("-v", "--verbose", action="count",
                             dest="verbosity",
                             help="increase verbosity")
        parser.add_option("", "--quiet", action="store_true",
                             dest="quiet", default=False,
                             help="only print fatal errors")
        parser.add_option("--debug", action="count",
                             dest="debug",
                             help="enable interactive debugging using the python debugger")
        parser.add_option("--ticket-number", action="store",
                             dest="ticketNumber",
                             help="specify ticket number")
        parser.add_option("--name", action="store",
                             dest="customerName",
                             help="specify report name")
        parser.add_option("--config-file", action="store",
                             dest="config_file",
                             help="specify alternate configuration file")
        parser.add_option("--tmp-dir", action="store",
                             dest="tmp_dir",
                             help="specify alternate temporary directory", default=tempfile.gettempdir())
        parser.add_option("--report", action="store_true",
                             dest="report",
                             help="Enable HTML/XML reporting", default=False)
        parser.add_option("--profile", action="store_true",
                             dest="profiler",
                             help="turn on profiling", default=False)
        parser.add_option("-z", "--compression-type", dest="compression_type",
                            help="compression technology to use [auto, zip, gzip, bzip2, xz] (default=auto)",
                            default="auto")

        return parser.parse_args(args)
Example #27
def merge_posting (line1, line2):
  # don't forget to return the resulting line at the end
  ans = [line1[0]]
  posting1 = deque(line1[1:])
  posting2 = deque(line2[1:])
  pp1 = popLeftOrNone(posting1)
  pp2 = popLeftOrNone(posting2)
  while pp1 is not None and pp2 is not None:
    if pp1 == pp2:
      ans.append(pp1)
      pp1 = popLeftOrNone(posting1)
      pp2 = popLeftOrNone(posting2)
    elif pp1 < pp2:
      ans.append(pp1)
      pp1 = popLeftOrNone(posting1)
    else:
      ans.append(pp2)
      pp2 = popLeftOrNone(posting2)
  while pp1 is not None:
    ans.append(pp1)
    pp1 = popLeftOrNone(posting1)
  while pp2 is not None:
    ans.append(pp2)
    pp2 = popLeftOrNone(posting2)
  return ans
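
# merge_posting() relies on a popLeftOrNone() helper that is not shown above.
# A plausible sketch of that helper, plus a small usage example (both are
# assumptions for illustration and expect merge_posting() to be in scope):
def popLeftOrNone(d):
  """Pop from the left of a deque, or return None when it is empty."""
  return d.popleft() if d else None

print(merge_posting(["cat", 1, 3, 5], ["cat", 2, 3, 8]))  # ['cat', 1, 2, 3, 5, 8]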
Example #28
File: utils.py  Project: snth/zipline
    def __init__(self, market_aware=True, days=None, delta=None):

        self.market_aware = market_aware
        self.days = days
        self.delta = delta

        self.ticks = deque()

        # Market-aware mode only works with full-day windows.
        if self.market_aware:
            assert self.days and self.delta is None, \
                "Market-aware mode only works with full-day windows."
            self.all_holidays = deque(non_trading_days)
            self.cur_holidays = deque()

        # Non-market-aware mode requires a timedelta.
        else:
            assert self.delta and not self.days, \
                "Non-market-aware mode requires a timedelta."

        # Set the behavior for dropping events from the back of the
        # event window.
        if self.market_aware:
            self.drop_condition = self.out_of_market_window
        else:
            self.drop_condition = self.out_of_delta
Example #29
def listProximityEvents(intervals1, intervals2):
   if len(intervals1) == 0 or len(intervals2) == 0:
      print("Found an empty interval list?")
      return []

   D1, D2 = deque(intervals1), deque(intervals2)
   events = deque()

   print('Processing new pairs of intervals')
   dateInterval1, towerId1 = D1.popleft()
   dateInterval2, towerId2 = D2.popleft()
   while len(D1) > 0 and len(D2) > 0:
      if dateInterval2[0] >= dateInterval1[1]:
         dateInterval1, towerId1 = D1.popleft()
      elif dateInterval1[0] >= dateInterval2[1]:
         dateInterval2, towerId2 = D2.popleft()
      else:
         if towerId1 == towerId2:
            theOverlap = dateIntervalOverlap(dateInterval1, dateInterval2)
            if (theOverlap[1] - theOverlap[0]).total_seconds() > 1:
               events.append((theOverlap, towerId1))
               #print('Found a match! %s, %s at tower %s' % (theOverlap[0], theOverlap[1], towerId1))

         if dateInterval1[0] < dateInterval2[0]:
            dateInterval1, towerId1 = D1.popleft()
         else:
            dateInterval2, towerId2 = D2.popleft()

   return events
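
# listProximityEvents() depends on a dateIntervalOverlap() helper that is not
# shown. A plausible sketch, assuming each interval is a (start, end) pair of
# datetimes and the helper is only called once the two intervals overlap:
def dateIntervalOverlap(interval1, interval2):
   """Return the (start, end) overlap of two overlapping intervals."""
   return (max(interval1[0], interval2[0]), min(interval1[1], interval2[1]))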
Example #30
 def __init__(self):
     self.queueLock = threading.RLock()  # Protects inputQueue, outputQueue, and rawMessageQueue
     self.inputQueue = collections.deque()
     self.outputQueue = collections.deque()
     self.rawMessageQueue = collections.deque()
     self.cmdQueue = collections.deque()
     self.messagesDropped = 0
Example #31
import collections

while True:
    n = int(input("Введите количество компаний: "))
    break

# calls a function to provide missing values
companies = collections.defaultdict() # Returns a new dictionary-like object
profit_col = collections.deque() # list-like container, a generalization of stacks and queues
unprofit_col = collections.deque()
all_profit = 0
quarter = 4

for i in range(n):
    name = input(f'\nEnter the name of company #{i + 1}: ')
    profit = 0
    q = 1
    while q <= quarter:
        profit += float(input(f'Enter the profit for quarter {q}: '))
        q += 1
    companies[name] = profit
    all_profit += profit

midlle_profit = all_profit / n
for i, item in companies.items():
    if item >= midlle_profit:
        profit_col.append(i)
    else:
        unprofit_col.append(i)

print(f'Overall average profit: {midlle_profit}')
Example #32
def triggerOnset(charfct, thres1, thres2, max_len=9e99, max_len_delete=False):
    """
    Calculate trigger on and off times.

    Given thres1 and thres2 calculate trigger on and off times from
    characteristic function.

    This method is written in pure Python and gets slow as soon as there
    are more than 1e6 triggerings ("on" AND "off") in charfct --- normally
    this does not happen.

    :type charfct: NumPy :class:`~numpy.ndarray`
    :param charfct: Characteristic function of e.g. STA/LTA trigger
    :type thres1: float
    :param thres1: Value above which trigger (of characteristic function)
                   is activated (higher threshold)
    :type thres2: float
    :param thres2: Value below which trigger (of characteristic function)
        is deactivated (lower threshold)
    :type max_len: int
    :param max_len: Maximum length of triggered event in samples. A new
                    event will be triggered as soon as the signal reaches
                    again above thres1.
    :type max_len_delete: bool
    :param max_len_delete: Do not write events longer than max_len into
                           report file.
    :rtype: List
    :return: Nested list of trigger on and off times in samples
    """
    # 1) find indices of samples greater than threshold
    # 2) calculate trigger "of" times by the gap in trigger indices
    #    above the threshold i.e. the difference of two following indices
    #    in ind is greater than 1
    # 3) in principle the same as for "of" just add one to the index to get
    #    start times, this operation is not supported on the compact
    #    syntax
    # 4) as long as there is an on time greater than the actual of time find
    #    trigger on states which are greater than last of state and the
    #    corresponding of state which is greater than current on state
    # 5) if the signal stays above thres2 longer than max_len an event
    #    is triggered and following a new event can be triggered as soon as
    #    the signal is above thres1
    ind1 = np.where(charfct > thres1)[0]
    if len(ind1) == 0:
        return []
    ind2 = np.where(charfct > thres2)[0]
    #
    on = deque([ind1[0]])
    of = deque([-1])
    of.extend(ind2[np.diff(ind2) > 1].tolist())
    on.extend(ind1[np.where(np.diff(ind1) > 1)[0] + 1].tolist())
    # include last pick if trigger is on or drop it
    if max_len_delete:
        # drop it
        of.extend([1e99])
        on.extend([on[-1]])
    else:
        # include it
        of.extend([ind2[-1]])
    #
    pick = []
    while on[-1] > of[0]:
        while on[0] <= of[0]:
            on.popleft()
        while of[0] < on[0]:
            of.popleft()
        if of[0] - on[0] > max_len:
            if max_len_delete:
                on.popleft()
                continue
            of.appendleft(on[0] + max_len)
        pick.append([on[0], of[0]])
    return np.array(pick, dtype=np.int64)
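
# A minimal usage sketch with a synthetic characteristic function (an
# illustration only; it assumes numpy is imported as np and the triggerOnset()
# above is in scope):
charfct = np.concatenate([np.zeros(20), 3.0 * np.ones(10), np.zeros(20)])
print(triggerOnset(charfct, thres1=2.0, thres2=1.0))  # -> [[20 29]]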
Example #33
def split_large_components(list_comp_graph, k, large_threshold=0):


    list_comp = list_graph_to_list_comp(list_comp_graph)
    set_all_genes_before = set([n for comp in list_comp for n in comp])

    threshold_small_comp = k


    gene_set = set()
    for i in range(len(list_comp)):
        gene_set = gene_set.union(set(list_comp[i]))
    #print "gene_set", gene_set

    max_out_degree_all = 0
    #finding large graph threshold, take the max of max-out-degree of each component
    for comp in list_comp_graph:
         max_out_degree_pre, largest_node_id_pre, largest_gene_id_pre = find_max_outdegree(comp)
         print ("maxoutdegree", max_out_degree_pre)
         if max_out_degree_all < max_out_degree_pre:
               max_out_degree_all = max_out_degree_pre


    if large_threshold == 0:
        threshold_large_comp = max_out_degree_all
    else:
        threshold_large_comp = large_threshold


    list_graph_leftover = []
    list_large_components = []
    for comp in list_comp_graph:
        if len(comp)>= threshold_large_comp:
            list_large_components.append(comp)
            print ("\nanother large component added\n")
        else:
            list_graph_leftover.append(comp)

    #going through all large components
    all_modified_component_list = []
    print ("\nlenght of large comps\n",len(list_large_components))
    for lc in list_large_components:
        main_comp_list = []
        small_comp_list = []
        #large_graph_queue
        large_comp_queue = deque()
        large_comp_queue.append(lc)
        while len(large_comp_queue) >0:
            print ("\nnew element in large queue\n")
            largest_comp = large_comp_queue.popleft()
            print ("len", len(largest_comp))
            max_out_degree, largest_node_id, largest_gene_id = find_max_outdegree(largest_comp)

            reduced_comps = largest_comp.copy()

            removable_nodes_list = star_construction(largest_comp,largest_node_id)
            print ("\nremovable nodes list", removable_nodes_list)

            print ("\nnodes before removal", len(reduced_comps))
            print ("edges before removal", len(reduced_comps.edges))

            #reduced comps -> largest graph - large gene and neighbors
            reduced_comps.remove_nodes_from(removable_nodes_list)

            print ("\nnodes after removal", len(reduced_comps))
            print ("edges after removal", len(reduced_comps.edges))

            #adding to LOM or SCL
            #checking if star construction is less than k
            largest_gene_graph = largest_comp.subgraph(removable_nodes_list).copy()
            if len(largest_gene_graph) < threshold_small_comp:
                small_comp_list.append(largest_gene_graph)
            else:
                main_comp_list.append(largest_gene_graph)

            scc = nx.strongly_connected_components(reduced_comps)

            for comp in scc:
                print ("comp", comp)
                if len(comp)> threshold_large_comp:
                    g = reduced_comps.subgraph(comp).copy()
                    large_comp_queue.append(g)
                    print ("\nlarge_comp_queue")
                elif len(comp)< threshold_small_comp:
                    g = reduced_comps.subgraph(comp).copy()
                    small_comp_list.append(g)
                    print ("\nsmall")
                else:
                    g = reduced_comps.subgraph(comp).copy()
                    main_comp_list.append(g)
                    print ("\nLOM")

        print ("len list main b4", len(main_comp_list ))

        temp_list = []
        for scm in range(len(main_comp_list)):

            if len(main_comp_list[scm]) < threshold_small_comp:
                small_comp_list.append(main_comp_list[scm])

            else:
                temp_list.append(main_comp_list[scm])

        main_comp_list = temp_list[:]
        print ("len list main aftr", len(main_comp_list))

        for scomp in range(len(small_comp_list)):
            max_comp_score = 0
            max_comp_index = 0
            for comp_index in  range(len(main_comp_list)):
                comp_score = 0
                for gene_m in main_comp_list[comp_index]:

                    for gene_s in small_comp_list[scomp]:

                        if (gene_s, gene_m) in lc.edges:
                            comp_score +=1
                        if (gene_m, gene_s) in lc.edges:
                            comp_score += 1

                if comp_score > max_comp_score:
                    max_comp_score = comp_score
                    max_comp_index = comp_index

            print ("\ncomponent", small_comp_list[scomp].nodes, "maxcomp score+index", max_comp_score, max_comp_index)
            print ("list of comps nodes before", len (main_comp_list[max_comp_index]),main_comp_list[max_comp_index].nodes)
            main_comp_list[max_comp_index].add_nodes_from(small_comp_list[scomp])

            temp_subgraph_nodes = main_comp_list[max_comp_index].nodes
            print ("tempsubg", temp_subgraph_nodes)
            # also add the edges
            main_comp_list[max_comp_index] = lc.subgraph(temp_subgraph_nodes).copy()
            print( "list of comps nodes after", len(main_comp_list[max_comp_index]), main_comp_list[max_comp_index].nodes)

        all_modified_component_list.extend(main_comp_list[:])

    for x in all_modified_component_list:
         print (x.nodes)

    set_all_genes_after = set([n for comp in list_graph_leftover[:] + all_modified_component_list[:] for n in comp.nodes])

    print('Set before: ', set_all_genes_before )
    print('Set after: ', set_all_genes_after )

    assert set_all_genes_after == set_all_genes_before
    return list_graph_leftover[:] + all_modified_component_list[:]
Example #34
    clipped_error = tf.clip_by_value(error, 0.0, 1.0)
    linear_error = 2 * (error - clipped_error)
    loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)

    global_step = tf.Variable(0, trainable=False, name='global_step')
    optimizer = tf.train.MomentumOptimizer(learning_rate,
                                           momentum,
                                           use_nesterov=True)
    training_op = optimizer.minimize(loss, global_step=global_step)

init = tf.global_variables_initializer()
saver = tf.train.Saver()

# Let's implement a simple replay memory
replay_memory_size = 20000
replay_memory = deque([], maxlen=replay_memory_size)


def sample_memories(batch_size):
    indices = np.random.permutation(len(replay_memory))[:batch_size]
    cols = [[], [], [], [], []]  # state, action, reward, next_state, continue
    for idx in indices:
        memory = replay_memory[idx]
        for col, value in zip(cols, memory):
            col.append(value)
    cols = [np.array(col) for col in cols]
    return (cols[0], cols[1], cols[2].reshape(-1, 1), cols[3],
            cols[4].reshape(-1, 1))
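
# A small sketch of filling the replay memory above and drawing a batch
# (illustrative assumptions: a 4-dimensional state and 3 discrete actions;
# numpy is already imported as np in this snippet).
for step in range(100):
    state = np.random.randn(4)
    action = np.random.randint(3)
    reward = np.random.rand()
    next_state = np.random.randn(4)
    cont = 1.0  # would be 0.0 once the episode terminates
    replay_memory.append((state, action, reward, next_state, cont))

states, actions, rewards, next_states, continues = sample_memories(32)
print(states.shape, rewards.shape)  # (32, 4) (32, 1)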


# And on to the epsilon-greedy policy with decaying epsilon
Example #35
            next_states = np.vstack(
                [e.next_state for e in experiences if e is not None])
            dones = np.vstack([e.done for e in experiences if e is not None])

            ret = (states, actions, rewards, next_states, dones)
        return ret

    def async_save_step(self, state, action, reward, next_state, done):
        e = self.experience(state, action, reward, next_state, done)
        self.memory.append(e)


if __name__ == '__main__':
    print("test  deque dropout when maxlen is reached")
    deque_size = 5
    test_memory = deque(maxlen=deque_size)
    for i in range(deque_size + 5):
        test_memory.append(i)
        print(test_memory)
    experiences = random.sample(test_memory, k=int(deque_size / 2))
    print(experiences)
    print(test_memory.maxlen)
    print((1, 2))

    class TestReplayMemory(ReplayMemory):
        """Fixed-size buffer to store experience tuples."""
        def __init__(self, id, batch_size, buffer_size, seed):
            """Initialize a ReplayBuffer object.
            Params
            ======
                buffer_size (int): maximum size of buffer
Example #36
def test_get_valid_user_input(monkeypatch, initiator):
    monkeypatch.setattr('builtins.input', generate_multiple_inputs(deque(['InvalidInput', '100', '1'])))
    user_choice = initiator.get_valid_user_input(INTEGRATION_CATEGORIES, 'Choose category')
    assert user_choice == INTEGRATION_CATEGORIES[0]
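
# The test above assumes a generate_multiple_inputs() helper that is not shown;
# a plausible sketch is a factory returning a fake input() that pops the queued
# responses one at a time (an assumption for illustration only):
def generate_multiple_inputs(responses):
    def fake_input(prompt=''):
        return responses.popleft()
    return fake_input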
Example #37
 def __init__(self,
              trading_pairs: Optional[List[str]] = None,
              domain: str = "com"):
     super().__init__(
         data_source=BinanceAPIOrderBookDataSource(trading_pairs=trading_pairs, domain=domain),
         trading_pairs=trading_pairs,
         domain=domain
     )
     self._order_book_diff_stream: asyncio.Queue = asyncio.Queue()
     self._order_book_snapshot_stream: asyncio.Queue = asyncio.Queue()
     self._ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
     self._domain = domain
     self._saved_message_queues: Dict[str, Deque[OrderBookMessage]] = defaultdict(lambda: deque(maxlen=1000))
Example #38
File: ddpg.py  Project: zhongjieGDUT/hcp
    def train(self):
        self.net_mode(train=True)
        tfirststart = time.time()
        epoch_episode_rewards = deque(maxlen=1)
        epoch_episode_steps = deque(maxlen=1)
        total_rollout_steps = 0
        for epoch in range(self.global_step, self.num_iters):
            episode_reward = 0
            episode_step = 0
            self.action_noise.reset()
            obs = self.env.reset()
            obs = obs[0]
            epoch_actor_losses = []
            epoch_critic_losses = []
            if self.use_her:
                ep_experi = {
                    'obs': [],
                    'act': [],
                    'reward': [],
                    'new_obs': [],
                    'ach_goals': [],
                    'done': []
                }
            for t_rollout in range(self.rollout_steps):
                total_rollout_steps += 1
                ran = np.random.random(1)[0]
                if self.pretrain_dir is None and epoch < self.warmup_iter or \
                        ran < self.random_prob:
                    act = self.random_action().flatten()
                else:
                    act = self.policy(obs).flatten()
                new_obs, r, done, info = self.env.step(act)
                ach_goals = new_obs[1].copy()
                new_obs = new_obs[0].copy()
                episode_reward += r
                episode_step += 1
                self.memory.append(obs, act, r * self.reward_scale, new_obs,
                                   ach_goals, done)
                if self.use_her:
                    ep_experi['obs'].append(obs)
                    ep_experi['act'].append(act)
                    ep_experi['reward'].append(r * self.reward_scale)
                    ep_experi['new_obs'].append(new_obs)
                    ep_experi['ach_goals'].append(ach_goals)
                    ep_experi['done'].append(done)
                if self.ob_norm:
                    self.obs_oms.update(new_obs)
                obs = new_obs
            epoch_episode_rewards.append(episode_reward)
            epoch_episode_steps.append(episode_step)
            if self.use_her:
                for t in range(episode_step - self.k_future):
                    ob = ep_experi['obs'][t]
                    act = ep_experi['act'][t]
                    new_ob = ep_experi['new_obs'][t]
                    ach_goal = ep_experi['ach_goals'][t]
                    k_futures = np.random.choice(np.arange(
                        t + 1, episode_step),
                                                 self.k_future - 1,
                                                 replace=False)
                    k_futures = np.concatenate((np.array([t]), k_futures))
                    for future in k_futures:
                        new_goal = ep_experi['ach_goals'][future]
                        her_ob = np.concatenate(
                            (ob[:-self.goal_dim], new_goal), axis=0)
                        her_new_ob = np.concatenate(
                            (new_ob[:-self.goal_dim], new_goal), axis=0)
                        res = self.env.cal_reward(ach_goal.copy(), new_goal,
                                                  act)
                        her_reward, _, done = res
                        self.memory.append(her_ob, act,
                                           her_reward * self.reward_scale,
                                           her_new_ob, ach_goal.copy(), done)
            self.global_step += 1
            if epoch >= self.warmup_iter:
                for t_train in range(self.train_steps):
                    act_loss, cri_loss = self.train_net()
                    epoch_critic_losses.append(cri_loss)
                    epoch_actor_losses.append(act_loss)

            if epoch % self.log_interval == 0:
                tnow = time.time()
                stats = {}
                if self.ob_norm:
                    stats['ob_oms_mean'] = safemean(self.obs_oms.mean.numpy())
                    stats['ob_oms_std'] = safemean(self.obs_oms.std.numpy())
                stats['total_rollout_steps'] = total_rollout_steps
                stats['rollout/return'] = safemean(
                    [rew for rew in epoch_episode_rewards])
                stats['rollout/ep_steps'] = safemean(
                    [l for l in epoch_episode_steps])
                if epoch >= self.warmup_iter:
                    stats['actor_loss'] = np.mean(epoch_actor_losses)
                    stats['critic_loss'] = np.mean(epoch_critic_losses)
                stats['epoch'] = epoch
                stats['actor_lr'] = self.actor_optim.param_groups[0]['lr']
                stats['critic_lr'] = self.critic_optim.param_groups[0]['lr']
                stats['time_elapsed'] = tnow - tfirststart
                for name, value in stats.items():
                    logger.logkv(name, value)
                logger.dumpkvs()
            if (epoch == 0 or epoch >= self.warmup_iter) and \
                    self.save_interval and\
                    epoch % self.save_interval == 0 and \
                    logger.get_dir():
                mean_final_dist, succ_rate = self.rollout()
                logger.logkv('epoch', epoch)
                logger.logkv('test/total_rollout_steps', total_rollout_steps)
                logger.logkv('test/mean_final_dist', mean_final_dist)
                logger.logkv('test/succ_rate', succ_rate)

                tra_mean_dist, tra_succ_rate = self.rollout(train_test=True)
                logger.logkv('train/mean_final_dist', tra_mean_dist)
                logger.logkv('train/succ_rate', tra_succ_rate)

                # self.log_model_weights()
                logger.dumpkvs()
                if mean_final_dist < self.closest_dist:
                    self.closest_dist = mean_final_dist
                    is_best = True
                else:
                    is_best = False
                self.save_model(is_best=is_best, step=self.global_step)
Example #39
def test_simple_serialization(ray_start_regular):
    primitive_objects = [
        # Various primitive types.
        0,
        0.0,
        0.9,
        1 << 62,
        1 << 999,
        b"",
        b"a",
        "a",
        string.printable,
        "\u262F",
        u"hello world",
        u"\xff\xfe\x9c\x001\x000\x00",
        None,
        True,
        False,
        [],
        (),
        {},
        type,
        int,
        set(),
        # Collections types.
        collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
        collections.OrderedDict([("hello", 1), ("world", 2)]),
        collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
        collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
        collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
        # Numpy dtypes.
        np.int8(3),
        np.int32(4),
        np.int64(5),
        np.uint8(3),
        np.uint32(4),
        np.uint64(5),
        np.float32(1.9),
        np.float64(1.9),
    ]

    composite_objects = (
        [[obj]
         for obj in primitive_objects] + [(obj, )
                                          for obj in primitive_objects] + [{
                                              (): obj
                                          } for obj in primitive_objects])

    @ray.remote
    def f(x):
        return x

    # Check that we can pass arguments by value to remote functions and
    # that they are uncorrupted.
    for obj in primitive_objects + composite_objects:
        new_obj_1 = ray.get(f.remote(obj))
        new_obj_2 = ray.get(ray.put(obj))
        assert obj == new_obj_1
        assert obj == new_obj_2
        # TODO(rkn): The numpy dtypes currently come back as regular integers
        # or floats.
        if type(obj).__module__ != "numpy":
            assert type(obj) == type(new_obj_1)
            assert type(obj) == type(new_obj_2)
Example #40
def learn(
        make_env,
        make_policy,
        *,
        n_episodes,
        horizon,
        delta,
        gamma,
        max_iters,
        sampler=None,
        use_natural_gradient=False,  #can be 'exact', 'approximate'
        fisher_reg=1e-2,
        iw_method='is',
        iw_norm='none',
        bound='J',
        line_search_type='parabola',
        save_weights=0,
        improvement_tol=0.,
        center_return=False,
        render_after=None,
        max_offline_iters=100,
        callback=None,
        clipping=False,
        entropy='none',
        positive_return=False,
        reward_clustering='none',
        capacity=10):

    np.set_printoptions(precision=3)
    max_samples = horizon * n_episodes

    if line_search_type == 'binary':
        line_search = line_search_binary
    elif line_search_type == 'parabola':
        line_search = line_search_parabola
    else:
        raise ValueError()

    # Building the environment
    env = make_env()
    ob_space = env.observation_space
    ac_space = env.action_space

    # Creating the memory buffer
    memory = Memory(capacity=capacity,
                    batch_size=n_episodes,
                    horizon=horizon,
                    ob_space=ob_space,
                    ac_space=ac_space)

    # Building the target policy and saving its parameters
    pi = make_policy('pi', ob_space, ac_space)
    all_var_list = pi.get_trainable_variables()
    var_list = [
        v for v in all_var_list if v.name.split('/')[1].startswith('pol')
    ]
    shapes = [U.intprod(var.get_shape().as_list()) for var in var_list]
    n_parameters = sum(shapes)

    # Building a set of behavioral policies
    behavioral_policies = memory.build_policies(make_policy, pi)

    # Placeholders
    ob_ = ob = U.get_placeholder_cached(name='ob')
    ac_ = pi.pdtype.sample_placeholder([None], name='ac')
    mask_ = tf.placeholder(dtype=tf.float32, shape=(None), name='mask')
    rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='rew')
    disc_rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='disc_rew')
    clustered_rew_ = tf.placeholder(dtype=tf.float32, shape=(None))
    gradient_ = tf.placeholder(dtype=tf.float32,
                               shape=(n_parameters, 1),
                               name='gradient')
    iter_number_ = tf.placeholder(dtype=tf.int32, name='iter_number')
    active_policies = tf.placeholder(dtype=tf.float32,
                                     shape=(capacity),
                                     name='active_policies')
    losses_with_name = []

    # Total number of trajectories
    N_total = tf.reduce_sum(active_policies) * n_episodes

    # Split operations
    disc_rew_split = tf.reshape(disc_rew_ * mask_, [-1, horizon])
    rew_split = tf.reshape(rew_ * mask_, [-1, horizon])
    mask_split = tf.reshape(mask_, [-1, horizon])

    # Policy densities
    target_log_pdf = pi.pd.logp(ac_) * mask_
    target_log_pdf_split = tf.reshape(target_log_pdf, [-1, horizon])
    behavioral_log_pdfs = tf.stack([
        bpi.pd.logp(ac_) * mask_ for bpi in memory.policies
    ])  # Shape is (capacity, ntraj*horizon)
    behavioral_log_pdfs_split = tf.reshape(behavioral_log_pdfs,
                                           [memory.capacity, -1, horizon])

    # Compute Renyi divergences, sum over time, then exponentiate
    emp_d2_split = tf.reshape(
        tf.stack([pi.pd.renyi(bpi.pd, 2) * mask_ for bpi in memory.policies]),
        [memory.capacity, -1, horizon])
    emp_d2_split_cum = tf.exp(tf.reduce_sum(emp_d2_split, axis=2))
    # Compute arithmetic and harmonic mean of emp_d2
    emp_d2_mean = tf.reduce_mean(emp_d2_split_cum, axis=1)
    emp_d2_arithmetic = tf.reduce_sum(
        emp_d2_mean * active_policies) / tf.reduce_sum(active_policies)
    emp_d2_harmonic = tf.reduce_sum(active_policies) / tf.reduce_sum(
        1 / emp_d2_mean)

    # Return processing: clipping, centering, discounting
    ep_return = clustered_rew_  #tf.reduce_sum(mask_split * disc_rew_split, axis=1)
    if clipping:
        rew_split = tf.clip_by_value(rew_split, -1, 1)
    if center_return:
        ep_return = ep_return - tf.reduce_mean(ep_return)
        rew_split = rew_split - (tf.reduce_sum(rew_split) /
                                 (tf.reduce_sum(mask_split) + 1e-24))
    discounter = [pow(gamma, i) for i in range(0, horizon)]  # Discount factors gamma^i
    discounter_tf = tf.constant(discounter)
    disc_rew_split = rew_split * discounter_tf

    # Reward statistics
    return_mean = tf.reduce_mean(ep_return)
    return_std = U.reduce_std(ep_return)
    return_max = tf.reduce_max(ep_return)
    return_min = tf.reduce_min(ep_return)
    return_abs_max = tf.reduce_max(tf.abs(ep_return))
    return_step_max = tf.reduce_max(tf.abs(rew_split))  # Max step reward
    return_step_mean = tf.abs(tf.reduce_mean(rew_split))
    positive_step_return_max = tf.maximum(0.0, tf.reduce_max(rew_split))
    negative_step_return_max = tf.maximum(0.0, tf.reduce_max(-rew_split))
    return_step_maxmin = tf.abs(positive_step_return_max -
                                negative_step_return_max)
    losses_with_name.extend([(return_mean, 'InitialReturnMean'),
                             (return_max, 'InitialReturnMax'),
                             (return_min, 'InitialReturnMin'),
                             (return_std, 'InitialReturnStd'),
                             (emp_d2_arithmetic, 'EmpiricalD2Arithmetic'),
                             (emp_d2_harmonic, 'EmpiricalD2Harmonic'),
                             (return_step_max, 'ReturnStepMax'),
                             (return_step_maxmin, 'ReturnStepMaxmin')])

    if iw_method == 'is':
        # Sum the log prob over time. Shapes: target(Nep, H), behav (Cap, Nep, H)
        target_log_pdf_episode = tf.reduce_sum(target_log_pdf_split, axis=1)
        behavioral_log_pdf_episode = tf.reduce_sum(behavioral_log_pdfs_split,
                                                   axis=2)
        # To avoid numerical instability, compute the inverse ratio
        log_inverse_ratio = behavioral_log_pdf_episode - target_log_pdf_episode
        abc = tf.exp(log_inverse_ratio) * tf.expand_dims(active_policies, -1)
        iw = 1 / tf.reduce_sum(
            tf.exp(log_inverse_ratio) * tf.expand_dims(active_policies, -1),
            axis=0)
        iwn = iw / n_episodes

        # Compute the J
        w_return_mean = tf.reduce_sum(ep_return * iwn)
        # Empirical D2 of the mixture and relative ESS
        ess_renyi_arithmetic = N_total / emp_d2_arithmetic
        ess_renyi_harmonic = N_total / emp_d2_harmonic
        # Log quantities
        losses_with_name.extend([
            (tf.reduce_max(iw), 'MaxIW'), (tf.reduce_min(iw), 'MinIW'),
            (tf.reduce_mean(iw), 'MeanIW'), (U.reduce_std(iw), 'StdIW'),
            (tf.reduce_min(target_log_pdf_episode), 'MinTargetPdf'),
            (tf.reduce_min(behavioral_log_pdf_episode), 'MinBehavPdf'),
            (ess_renyi_arithmetic, 'ESSRenyiArithmetic'),
            (ess_renyi_harmonic, 'ESSRenyiHarmonic')
        ])
    else:
        raise NotImplementedError()

    if bound == 'J':
        bound_ = w_return_mean
    elif bound == 'max-d2-harmonic':
        bound_ = w_return_mean - tf.sqrt(
            (1 - delta) / (delta * ess_renyi_harmonic)) * return_abs_max
    elif bound == 'max-d2-arithmetic':
        bound_ = w_return_mean - tf.sqrt(
            (1 - delta) / (delta * ess_renyi_arithmetic)) * return_abs_max
    else:
        raise NotImplementedError()

    # Policy entropy for exploration
    ent = pi.pd.entropy()
    meanent = tf.reduce_mean(ent)
    losses_with_name.append((meanent, 'MeanEntropy'))
    # Add policy entropy bonus
    if entropy != 'none':
        scheme, v1, v2 = entropy.split(':')
        if scheme == 'step':
            entcoeff = tf.cond(iter_number_ < int(v2), lambda: float(v1),
                               lambda: float(0.0))
            losses_with_name.append((entcoeff, 'EntropyCoefficient'))
            entbonus = entcoeff * meanent
            bound_ = bound_ + entbonus
        elif scheme == 'lin':
            ip = tf.cast(iter_number_ / max_iters, tf.float32)
            entcoeff_decay = tf.maximum(
                0.0,
                float(v2) + (float(v1) - float(v2)) * (1.0 - ip))
            losses_with_name.append((entcoeff_decay, 'EntropyCoefficient'))
            entbonus = entcoeff_decay * meanent
            bound_ = bound_ + entbonus
        elif scheme == 'exp':
            ent_f = tf.exp(
                -tf.abs(tf.reduce_mean(iw) - 1) * float(v2)) * float(v1)
            losses_with_name.append((ent_f, 'EntropyCoefficient'))
            bound_ = bound_ + ent_f * meanent
        else:
            raise Exception('Unrecognized entropy scheme.')

    losses_with_name.append((w_return_mean, 'ReturnMeanIW'))
    losses_with_name.append((bound_, 'Bound'))
    losses, loss_names = map(list, zip(*losses_with_name))
    '''
    if use_natural_gradient:
        p = tf.placeholder(dtype=tf.float32, shape=[None])
        target_logpdf_episode = tf.reduce_sum(target_log_pdf_split * mask_split, axis=1)
        grad_logprob = U.flatgrad(tf.stop_gradient(iwn) * target_logpdf_episode, var_list)
        dot_product = tf.reduce_sum(grad_logprob * p)
        hess_logprob = U.flatgrad(dot_product, var_list)
        compute_linear_operator = U.function([p, ob_, ac_, disc_rew_, mask_], [-hess_logprob])
    '''

    assert_ops = tf.group(*tf.get_collection('asserts'))
    print_ops = tf.group(*tf.get_collection('prints'))

    compute_lossandgrad = U.function([
        ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_,
        active_policies
    ], losses + [U.flatgrad(bound_, var_list), assert_ops, print_ops])
    compute_grad = U.function([
        ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_,
        active_policies
    ], [U.flatgrad(bound_, var_list), assert_ops, print_ops])
    compute_bound = U.function([
        ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_,
        active_policies
    ], [bound_, assert_ops, print_ops])
    compute_losses = U.function([
        ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_,
        active_policies
    ], losses)
    #compute_temp = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [log_inverse_ratio, abc, iw])

    set_parameter = U.SetFromFlat(var_list)
    get_parameter = U.GetFlat(var_list)

    if sampler is None:
        seg_gen = traj_segment_generator(pi,
                                         env,
                                         n_episodes,
                                         horizon,
                                         stochastic=True)
        sampler = type("SequentialSampler", (object, ), {
            "collect": lambda self, _: seg_gen.__next__()
        })()

    U.initialize()

    # Start optimizing
    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=n_episodes)
    rewbuffer = deque(maxlen=n_episodes)

    while True:

        iters_so_far += 1

        if render_after is not None and iters_so_far % render_after == 0:
            if hasattr(env, 'render'):
                render(env, pi, horizon)

        if callback:
            callback(locals(), globals())

        if iters_so_far >= max_iters:
            print('Finished...')
            break

        logger.log('********** Iteration %i ************' % iters_so_far)

        theta = get_parameter()

        with timed('sampling'):
            seg = sampler.collect(theta)

        add_disc_rew(seg, gamma)

        lens, rets = seg['ep_lens'], seg['ep_rets']
        lenbuffer.extend(lens)
        rewbuffer.extend(rets)
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)

        # Adding batch of trajectories to memory
        memory.add_trajectory_batch(seg)

        # Get multiple batches from memory
        seg_with_memory = memory.get_trajectories()

        # Get clustered reward
        reward_matrix = np.reshape(
            seg_with_memory['disc_rew'] * seg_with_memory['mask'],
            (-1, horizon))
        ep_reward = np.sum(reward_matrix, axis=1)
        ep_reward = cluster_rewards(ep_reward, reward_clustering)

        args = ob, ac, rew, disc_rew, clustered_rew, mask, iter_number, active_policies = (
            seg_with_memory['ob'], seg_with_memory['ac'],
            seg_with_memory['rew'], seg_with_memory['disc_rew'], ep_reward,
            seg_with_memory['mask'], iters_so_far,
            memory.get_active_policies_mask())

        def evaluate_loss():
            loss = compute_bound(*args)
            return loss[0]

        def evaluate_gradient():
            gradient = compute_grad(*args)
            return gradient[0]

        if use_natural_gradient:

            def evaluate_fisher_vector_prod(x):
                return compute_linear_operator(x, *args)[0] + fisher_reg * x

            def evaluate_natural_gradient(g):
                return cg(evaluate_fisher_vector_prod,
                          g,
                          cg_iters=10,
                          verbose=0)
        else:
            evaluate_natural_gradient = None

        with timed('summaries before'):
            logger.record_tabular("Iteration", iters_so_far)
            logger.record_tabular("InitialBound", evaluate_loss())
            logger.record_tabular("EpLenMean", np.mean(lenbuffer))
            logger.record_tabular("EpRewMean", np.mean(rewbuffer))
            logger.record_tabular("EpThisIter", len(lens))
            logger.record_tabular("EpisodesSoFar", episodes_so_far)
            logger.record_tabular("TimestepsSoFar", timesteps_so_far)
            logger.record_tabular("TimeElapsed", time.time() - tstart)

        if save_weights > 0 and iters_so_far % save_weights == 0:
            logger.record_tabular('Weights', str(get_parameter()))
            import pickle
            with open('checkpoint' + str(iters_so_far) + '.pkl', 'wb') as file:
                pickle.dump(theta, file)

        with timed("offline optimization"):
            theta, improvement = optimize_offline(
                theta,
                set_parameter,
                line_search,
                evaluate_loss,
                evaluate_gradient,
                evaluate_natural_gradient,
                max_offline_ite=max_offline_iters)

        set_parameter(theta)

        with timed('summaries after'):
            meanlosses = np.array(compute_losses(*args))
            for (lossname, lossval) in zip(loss_names, meanlosses):
                logger.record_tabular(lossname, lossval)

        logger.dump_tabular()

    env.close()
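
A minimal, self-contained sketch (not from the original code) of the lenbuffer/rewbuffer pattern used above: bounded deques keep only the most recent episodes, so the logged means are rolling statistics rather than all-time averages.

from collections import deque
import numpy as np

n_episodes = 100
lenbuffer = deque(maxlen=n_episodes)   # recent episode lengths
rewbuffer = deque(maxlen=n_episodes)   # recent episode returns

def record_episode(length, ret):
    # appending beyond maxlen silently drops the oldest entry
    lenbuffer.append(length)
    rewbuffer.append(ret)

for ep in range(250):
    record_episode(length=ep % 17 + 1, ret=float(ep))

print(len(rewbuffer))       # 100, never more
print(np.mean(rewbuffer))   # mean over the last 100 episodes only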
示例#41
0
File: DQN.py  Project: shilx001/DQNR
 def __init__(self, max_len=100000):
     self.storage = collections.deque(maxlen=max_len)
示例#42
0
from pynput.keyboard import Key, Controller

keyboard = Controller()
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=50, help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()

# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(5.0)

# keep looping
while True:
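
The body of the tracking loop is truncated above. A minimal sketch, under the assumption that each iteration produces a ball center and an OpenCV frame, of how the bounded pts deque is commonly used to draw a fading trail (draw_trail is a hypothetical helper, not part of the original script):

import cv2
import numpy as np
from collections import deque

def draw_trail(frame, pts):
    # pts is a deque(maxlen=N) of (x, y) centers, newest first
    for i in range(1, len(pts)):
        if pts[i - 1] is None or pts[i] is None:
            continue
        # older segments are drawn thinner so the trail fades out
        thickness = int(np.sqrt(pts.maxlen / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
    return frame

pts = deque(maxlen=50)
pts.appendleft((120, 80))   # the newest center goes to the left end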
示例#43
0
def train(env, args, writer):
    # RL Model for Player 1
    p1_current_model = DQN(env, args).to(args.device)
    p1_target_model = DQN(env, args).to(args.device)
    update_target(p1_current_model, p1_target_model)

    # RL Model for Player 2
    p2_current_model = DQN(env, args).to(args.device)
    p2_target_model = DQN(env, args).to(args.device)
    update_target(p2_current_model, p2_target_model)

    # SL Model for Player 1, 2
    p1_policy = Policy(env).to(args.device)
    p2_policy = Policy(env).to(args.device)

    if args.load_model and os.path.isfile(args.load_model):
        load_model(models={
            "p1": p1_current_model,
            "p2": p2_current_model
        },
                   policies={
                       "p1": p1_policy,
                       "p2": p2_policy
                   },
                   args=args)

    epsilon_by_frame = epsilon_scheduler(args.eps_start, args.eps_final,
                                         args.eps_decay)

    # Replay Buffer for Reinforcement Learning - Best Response
    p1_replay_buffer = ReplayBuffer(args.buffer_size)
    p2_replay_buffer = ReplayBuffer(args.buffer_size)

    # Reservoir Buffer for Supervised Learning - Average Strategy
    # TODO(Aiden): How to set buffer size of SL?
    p1_reservoir_buffer = ReservoirBuffer(args.buffer_size)
    p2_reservoir_buffer = ReservoirBuffer(args.buffer_size)

    # Deque data structure for multi-step learning
    p1_state_deque = deque(maxlen=args.multi_step)
    p1_reward_deque = deque(maxlen=args.multi_step)
    p1_action_deque = deque(maxlen=args.multi_step)

    p2_state_deque = deque(maxlen=args.multi_step)
    p2_reward_deque = deque(maxlen=args.multi_step)
    p2_action_deque = deque(maxlen=args.multi_step)

    # RL Optimizer for Player 1, 2
    p1_rl_optimizer = optim.Adam(p1_current_model.parameters(), lr=args.lr)
    p2_rl_optimizer = optim.Adam(p2_current_model.parameters(), lr=args.lr)

    # SL Optimizer for Player 1, 2
    # TODO(Aiden): Is it necessary to separate learning rates for RL/SL?
    p1_sl_optimizer = optim.Adam(p1_policy.parameters(), lr=args.lr)
    p2_sl_optimizer = optim.Adam(p2_policy.parameters(), lr=args.lr)

    # Logging
    length_list = []
    p1_reward_list, p1_rl_loss_list, p1_sl_loss_list = [], [], []
    p2_reward_list, p2_rl_loss_list, p2_sl_loss_list = [], [], []
    p1_episode_reward, p2_episode_reward = 0, 0
    tag_interval_length = 0
    prev_time = time.time()
    prev_frame = 1

    # Main Loop
    (p1_state, p2_state) = env.reset()
    for frame_idx in range(1, args.max_frames + 1):
        is_best_response = False
        # TODO(Aiden):
        # Action should be decided by a combination of Best Response and Average Strategy
        if random.random() > args.eta:
            p1_action = p1_policy.act(
                torch.FloatTensor(p1_state).to(args.device))
            p2_action = p2_policy.act(
                torch.FloatTensor(p2_state).to(args.device))
        else:
            is_best_response = True
            epsilon = epsilon_by_frame(frame_idx)
            p1_action = p1_current_model.act(
                torch.FloatTensor(p1_state).to(args.device), epsilon)
            p2_action = p2_current_model.act(
                torch.FloatTensor(p2_state).to(args.device), epsilon)

        actions = {"1": p1_action, "2": p2_action}
        (p1_next_state, p2_next_state), reward, done, info = env.step(actions)
        # print(actions)  # {'1': 3, '2': 2}
        # print(p1_next_state) # [[[127 127 .....
        #print(reward, done, info) # [0 0] False None

        # Save current state, reward, action to deque for multi-step learning
        p1_state_deque.append(p1_state)
        p2_state_deque.append(p2_state)

        p1_reward = reward[0] - 1 if args.negative else reward[0]
        p2_reward = reward[1] - 1 if args.negative else reward[1]
        p1_reward_deque.append(p1_reward)
        p2_reward_deque.append(p2_reward)

        p1_action_deque.append(p1_action)
        p2_action_deque.append(p2_action)

        # Store (state, action, reward, next_state) to Replay Buffer for Reinforcement Learning
        if len(p1_state_deque) == args.multi_step or done:
            n_reward = multi_step_reward(p1_reward_deque, args.gamma)
            n_state = p1_state_deque[0]
            n_action = p1_action_deque[0]
            p1_replay_buffer.push(n_state, n_action, n_reward, p1_next_state,
                                  np.float32(done))

            n_reward = multi_step_reward(p2_reward_deque, args.gamma)
            n_state = p2_state_deque[0]
            n_action = p2_action_deque[0]
            p2_replay_buffer.push(n_state, n_action, n_reward, p2_next_state,
                                  np.float32(done))

        # Store (state, action) to Reservoir Buffer for Supervised Learning
        if is_best_response:
            p1_reservoir_buffer.push(p1_state, p1_action)
            p2_reservoir_buffer.push(p2_state, p2_action)

        (p1_state, p2_state) = (p1_next_state, p2_next_state)

        # Logging
        p1_episode_reward += p1_reward
        p2_episode_reward += p2_reward
        tag_interval_length += 1

        if info is not None:
            length_list.append(tag_interval_length)
            tag_interval_length = 0

        # Episode done. Reset environment and clear logging records
        if done or tag_interval_length >= args.max_tag_interval:
            (p1_state, p2_state) = env.reset()
            p1_reward_list.append(p1_episode_reward)
            p2_reward_list.append(p2_episode_reward)
            writer.add_scalar("p1/episode_reward", p1_episode_reward,
                              frame_idx)
            writer.add_scalar("p2/episode_reward", p2_episode_reward,
                              frame_idx)
            writer.add_scalar("data/tag_interval_length", tag_interval_length,
                              frame_idx)
            p1_episode_reward, p2_episode_reward, tag_interval_length = 0, 0, 0
            p1_state_deque.clear(), p2_state_deque.clear()
            p1_reward_deque.clear(), p2_reward_deque.clear()
            p1_action_deque.clear(), p2_action_deque.clear()

        if (len(p1_replay_buffer) > args.rl_start
                and len(p1_reservoir_buffer) > args.sl_start
                and frame_idx % args.train_freq == 0):

            # Update Best Response with Reinforcement Learning
            loss = compute_rl_loss(p1_current_model, p1_target_model,
                                   p1_replay_buffer, p1_rl_optimizer, args)
            p1_rl_loss_list.append(loss.item())
            writer.add_scalar("p1/rl_loss", loss.item(), frame_idx)

            loss = compute_rl_loss(p2_current_model, p2_target_model,
                                   p2_replay_buffer, p2_rl_optimizer, args)
            p2_rl_loss_list.append(loss.item())
            writer.add_scalar("p2/rl_loss", loss.item(), frame_idx)

            # Update Average Strategy with Supervised Learning
            loss = compute_sl_loss(p1_policy, p1_reservoir_buffer,
                                   p1_sl_optimizer, args)
            p1_sl_loss_list.append(loss.item())
            writer.add_scalar("p1/sl_loss", loss.item(), frame_idx)

            loss = compute_sl_loss(p2_policy, p2_reservoir_buffer,
                                   p2_sl_optimizer, args)
            p2_sl_loss_list.append(loss.item())
            writer.add_scalar("p2/sl_loss", loss.item(), frame_idx)

        if frame_idx % args.update_target == 0:
            update_target(p1_current_model, p1_target_model)
            update_target(p2_current_model, p2_target_model)

        # Logging and Saving models
        if frame_idx % args.evaluation_interval == 0:
            print_log(frame_idx, prev_frame, prev_time,
                      (p1_reward_list, p2_reward_list), length_list,
                      (p1_rl_loss_list, p2_rl_loss_list),
                      (p1_sl_loss_list, p2_sl_loss_list))
            p1_reward_list.clear(), p2_reward_list.clear(), length_list.clear()
            p1_rl_loss_list.clear(), p2_rl_loss_list.clear()
            p1_sl_loss_list.clear(), p2_sl_loss_list.clear()
            prev_frame = frame_idx
            prev_time = time.time()
            save_model(models={
                "p1": p1_current_model,
                "p2": p2_current_model
            },
                       policies={
                           "p1": p1_policy,
                           "p2": p2_policy
                       },
                       args=args)

        # Render if rendering argument is on
        if args.render:
            env.render()

        save_model(models={
            "p1": p1_current_model,
            "p2": p2_current_model
        },
                   policies={
                       "p1": p1_policy,
                       "p2": p2_policy
                   },
                   args=args)
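
A minimal sketch of what multi_step_reward typically computes over the reward deque (an assumption, since the helper itself is not shown above): the discounted sum of the last multi_step rewards, oldest first.

def multi_step_reward(rewards, gamma):
    # rewards: deque of the last n step rewards, oldest first
    ret = 0.0
    for idx, reward in enumerate(rewards):
        ret += reward * (gamma ** idx)
    return ret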
示例#44
0
 def __init__(self, **kwargs):
     self.prev_msgids = collections.deque([],
                                          maxlen=self.DUP_MSG_CHECK_SIZE)
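
A minimal sketch of the duplicate-message check this bounded deque supports (the class name, DUP_MSG_CHECK_SIZE value, and is_duplicate method are assumptions; only __init__ is shown above): because the deque remembers only the last N ids, deduplication uses constant memory.

import collections

class MessageDeduplicator(object):
    DUP_MSG_CHECK_SIZE = 16

    def __init__(self, **kwargs):
        self.prev_msgids = collections.deque([],
                                             maxlen=self.DUP_MSG_CHECK_SIZE)

    def is_duplicate(self, msg_id):
        # True if this id was seen among the last N messages
        if msg_id in self.prev_msgids:
            return True
        self.prev_msgids.append(msg_id)  # the oldest id is evicted automatically
        return False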
示例#45
0
    def plot_svg(self, frame):
        # global current_idx, axes_max
        global current_frame
        current_frame = frame
        fname = "snapshot%08d.svg" % frame
        full_fname = os.path.join(self.output_dir, fname)
        # with debug_view:
            # print("plot_svg:", full_fname) 
        # print("-- plot_svg:", full_fname) 
        if not os.path.isfile(full_fname):
            print("Once output files are generated, click the slider.")   
            return

        xlist = deque()
        ylist = deque()
        rlist = deque()
        rgb_list = deque()

        #  print('\n---- ' + fname + ':')
#        tree = ET.parse(fname)
        tree = ET.parse(full_fname)
        root = tree.getroot()
        #  print('--- root.tag ---')
        #  print(root.tag)
        #  print('--- root.attrib ---')
        #  print(root.attrib)
        #  print('--- child.tag, child.attrib ---')
        numChildren = 0
        for child in root:
            #    print(child.tag, child.attrib)
            #    print("keys=",child.attrib.keys())
            if self.use_defaults and ('width' in child.attrib.keys()):
                self.axes_max = float(child.attrib['width'])
                # print("debug> found width --> axes_max =", axes_max)
            if child.text and "Current time" in child.text:
                svals = child.text.split()
                # remove the ".00" on minutes
                self.title_str += "   cells: " + svals[2] + "d, " + svals[4] + "h, " + svals[7][:-3] + "m"

                # self.cell_time_mins = int(svals[2])*1440 + int(svals[4])*60 + int(svals[7][:-3])
                # self.title_str += "   cells: " + str(self.cell_time_mins) + "m"   # rwh

            # print("width ",child.attrib['width'])
            # print('attrib=',child.attrib)
            # if (child.attrib['id'] == 'tissue'):
            if ('id' in child.attrib.keys()):
                # print('-------- found tissue!!')
                tissue_parent = child
                break

        # print('------ search tissue')
        cells_parent = None

        for child in tissue_parent:
            # print('attrib=',child.attrib)
            if (child.attrib['id'] == 'cells'):
                # print('-------- found cells, setting cells_parent')
                cells_parent = child
                break
            numChildren += 1

        num_cells = 0
        #  print('------ search cells')
        for child in cells_parent:
            #    print(child.tag, child.attrib)
            #    print('attrib=',child.attrib)
            for circle in child:  # two circles in each child: outer + nucleus
                #  circle.attrib={'cx': '1085.59','cy': '1225.24','fill': 'rgb(159,159,96)','r': '6.67717','stroke': 'rgb(159,159,96)','stroke-width': '0.5'}
                #      print('  --- cx,cy=',circle.attrib['cx'],circle.attrib['cy'])
                xval = float(circle.attrib['cx'])

                # map SVG coords into comp domain
                # xval = (xval-self.svg_xmin)/self.svg_xrange * self.x_range + self.xmin
                xval = xval/self.x_range * self.x_range + self.xmin

                s = circle.attrib['fill']
                # print("s=",s)
                # print("type(s)=",type(s))
                if (s[0:3] == "rgb"):  # if an rgb string, e.g. "rgb(175,175,80)" 
                    rgb = list(map(int, s[4:-1].split(",")))  
                    rgb[:] = [x / 255. for x in rgb]
                else:     # otherwise, must be a color name
                    rgb_tuple = mplc.to_rgb(mplc.cnames[s])  # a tuple
                    rgb = [x for x in rgb_tuple]

                # test for bogus x,y locations (rwh TODO: use max of domain?)
                too_large_val = 10000.
                if (np.fabs(xval) > too_large_val):
                    print("bogus xval=", xval)
                    break
                yval = float(circle.attrib['cy'])
                # yval = (yval - self.svg_xmin)/self.svg_xrange * self.y_range + self.ymin
                yval = yval/self.y_range * self.y_range + self.ymin
                if (np.fabs(yval) > too_large_val):
                    print("bogus xval=", xval)
                    break

                rval = float(circle.attrib['r'])
                # if (rgb[0] > rgb[1]):
                #     print(num_cells,rgb, rval)
                xlist.append(xval)
                ylist.append(yval)
                rlist.append(rval)
                rgb_list.append(rgb)

                # For .svg files with cells that *have* a nucleus, there will be
                # a 2nd circle (the nucleus); keep only the outer circle unless
                # show_nucleus is set.
                if (not self.show_nucleus):
                    break

            num_cells += 1

            # if num_cells > 3:   # for debugging
            #   print(fname,':  num_cells= ',num_cells," --- debug exit.")
            #   sys.exit(1)
            #   break

            # print(fname,':  num_cells= ',num_cells)

        xvals = np.array(xlist)
        yvals = np.array(ylist)
        rvals = np.array(rlist)
        rgbs = np.array(rgb_list)
        # print("xvals[0:5]=",xvals[0:5])
        # print("rvals[0:5]=",rvals[0:5])
        # print("rvals.min, max=",rvals.min(),rvals.max())

        # rwh - is this where I change size of render window?? (YES - yipeee!)
        #   plt.figure(figsize=(6, 6))
        #   plt.cla()
        # if (self.substrates_toggle.value):
        self.title_str += " (" + str(num_cells) + " agents)"
            # title_str = " (" + str(num_cells) + " agents)"
        # else:
            # mins= round(int(float(root.find(".//current_time").text)))  # TODO: check units = mins
            # hrs = int(mins/60)
            # days = int(hrs/24)
            # title_str = '%dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))
        plt.title(self.title_str)

        plt.xlim(self.xmin, self.xmax)
        plt.ylim(self.ymin, self.ymax)

        #   plt.xlim(axes_min,axes_max)
        #   plt.ylim(axes_min,axes_max)
        #   plt.scatter(xvals,yvals, s=rvals*scale_radius, c=rgbs)

        # TODO: make figsize a function of plot_size? What about non-square plots?
        # self.fig = plt.figure(figsize=(9, 9))

#        axx = plt.axes([0, 0.05, 0.9, 0.9])  # left, bottom, width, height
#        axx = fig.gca()
#        print('fig.dpi=',fig.dpi) # = 72

        #   im = ax.imshow(f.reshape(100,100), interpolation='nearest', cmap=cmap, extent=[0,20, 0,20])
        #   ax.xlim(axes_min,axes_max)
        #   ax.ylim(axes_min,axes_max)

        # convert radii to radii in pixels
        # ax2 = self.fig.gca()
        # N = len(xvals)
        # rr_pix = (ax2.transData.transform(np.vstack([rvals, rvals]).T) -
        #             ax2.transData.transform(np.vstack([np.zeros(N), np.zeros(N)]).T))
        # rpix, _ = rr_pix.T

        # markers_size = (144. * rpix / self.fig.dpi)**2   # = (2*rpix / fig.dpi * 72)**2
        # markers_size = markers_size/4000000.
        # print('max=',markers_size.max())

        #rwh - temp fix - Ah, error only occurs when "edges" is toggled on
        if (self.show_edge):
            try:
                # plt.scatter(xvals,yvals, s=markers_size, c=rgbs, edgecolor='black', linewidth=0.5)
                self.circles(xvals,yvals, s=rvals, color=rgbs, edgecolor='black', linewidth=0.5)
                # cell_circles = self.circles(xvals,yvals, s=rvals, color=rgbs, edgecolor='black', linewidth=0.5)
                # plt.sci(cell_circles)
            except (ValueError):
                pass
        else:
            # plt.scatter(xvals,yvals, s=markers_size, c=rgbs)
            self.circles(xvals,yvals, s=rvals, color=rgbs)
示例#46
0
    4 3 2
    1 1 1 1
    1 1 1 1
    1 1 1 1
    1 1 1 1
    -1 -1 -1 -1
    1 1 1 -1
out
    0
'''

from collections import deque

m, n, h = map(int, input().split())
tomato = []
q = deque()
res = -2
dx, dy, dz = [1, 0, -1, 0, 0, 0], [0, 1, 0, -1, 0, 0], [0, 0, 0, 0, -1, 1]  # dz checks up/down

for i in range(h):
    temp = []

    for j in range(n):
        temp.append(list(map(int, input().split())))

        for k in range(m):
            if temp[j][k] == 1:
                q.append([i, j, k])
    tomato.append(temp)
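
The traversal itself is cut off above. A minimal sketch of the multi-source BFS this setup feeds (an assumption; the original's counting convention with res = -2 may differ): unripe neighbours are ripened level by level, and the answer is the largest day index minus one, or -1 if any tomato stays unripe.

while q:
    z, y, x = q.popleft()
    for d in range(6):
        nz, ny, nx = z + dz[d], y + dy[d], x + dx[d]
        if 0 <= nz < h and 0 <= ny < n and 0 <= nx < m and tomato[nz][ny][nx] == 0:
            tomato[nz][ny][nx] = tomato[z][y][x] + 1   # store the day index, not just "ripe"
            q.append([nz, ny, nx])

days = 0
unreachable = False
for layer in tomato:
    for row in layer:
        if 0 in row:          # an unripe tomato that can never ripen
            unreachable = True
        days = max(days, max(row))
print(-1 if unreachable else days - 1)   # day 1 is the starting state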
示例#47
0
    def __init__(self,
                 observation_size,
                 num_actions,
                 observation_to_actions,
                 optimizer,
                 session,
                 random_action_probability=0.05,
                 exploration_period=1000,
                 store_every_nth=5,
                 train_every_nth=5,
                 minibatch_size=32,
                 discount_rate=0.95,
                 max_experience=30000,
                 target_network_update_rate=0.01,
                 summary_writer=None):
        # The large comment below is a translation of the docstring above
        """Initialized the Deepq object.

        Based on:
            https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf

        Parameters
        -------
        observation_size : int
            length of the vector passed as observation
        num_actions : int
            number of actions that the model can execute
        observation_to_actions: dali model
            model that implements activate function
            that can take in observation vector or a batch
            and returns scores (of unbounded values) for each
            action for each observation.
            input shape:  [batch_size, observation_size]
            output shape: [batch_size, num_actions]
        optimizer: tf.solver.*
            optimizer for prediction error
        session: tf.Session
            session on which to execute the computation
        random_action_probability: float (0 to 1)
            probability of choosing a random
            action (epsilon from the paper), annealed linearly
            from 1 to random_action_probability over
            exploration_period
        exploration_period: int
            number of steps over which the random-action
            probability is annealed
        store_every_nth: int
            to further decorrelate samples do not store all
            transitions, but rather every nth transition.
            For example if store_every_nth is 5, then
            only 20% of all the transitions is stored.
        train_every_nth: int
            normally training_step is invoked every
            time action is executed. Depending on the
            setup that might be too often. When this
            variable is set to n, then only every
            n-th time training_step is called will
            the training procedure actually be executed.
        minibatch_size: int
            number of state,action,reward,newstate
            tuples considered during experience replay
        discount_rate: float (0 to 1)
            how much we care about future rewards.
        max_experience: int
            maximum size of the replay buffer
        target_network_update_rate: float
            how much to update target network after each
            iteration. Let's call target_network_update_rate
            alpha, target network T, and network N. Every
            time N gets updated we execute:
                T = (1-alpha)*T + alpha*N
        summary_writer: tf.train.SummaryWriter
            writer to log metrics
        """
        """Инициализация Deepq

        Основано на:
            https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf

        Параметры
        -------
        observation_size : int
            длина вектора входных данных (этот вектор
            будем называть наблюдением или состоянием)
            
        num_actions : int
            количество возможных действий или же
            длина вектора выходных данных нейросети
            
        observation_to_actions: dali model
            модель (в нашем случае нейросеть),
            которая принимает наблюдение или набор наблюдений
            и возвращает оценку очками каждого действия или
            набор оценок для каждого действия каждого из наблюдений
            входной размер: матрица [batch_size, observation_size]
            выходной размер: матрица [batch_size, num_actions]
            
        optimizer: tf.solver.*
            алгоритм рассчета обратого распространения ошибки
            в нашем случае будет использоваться RMSProp
            
        session: tf.Session
            сессия TensorFlow в которой будут производится вычисления
            
        random_action_probability: float (0 to 1)
            вероятность случайного действия,
            для обогощения опыта нейросети и улучшения качесва управления
            с определенной вероятностью выполняется случайное действие, а не
            действие выданное нейросетью
            
        exploration_period: int
            период поискового поведения в итерациях,
            в течении которого вероятность выполнения случайного
            действия падает от 1 до random_action_probability
            
        store_every_nth: int
            параметр нужен чтобы сохранять не все обучающие примеры
            а только определенную часть из них.
            Сохранение происходит один раз в указаное в параметре
            количество обучающих примеров
            
        train_every_nth: int
            обычно training_step (шаг обучения)
            запускается после каждого действия.
            Иногда получается так, что это слишком часто.
            Эта переменная указывает сколько шагов
            пропустить перед тем как запускать шаг обучения
            
        minibatch_size: int
            размер набора обучающих примеров который
            используется на одном шаге обучения
            алгоритмом RMSProp.
            Обучающий пример включает в себя
            состояние, предпринятое действие, награду и
            новое состояние
            
        dicount_rate: float (0 to 1)
            параметр Q-learning
            насколько сильно влияет будущая награда при
            расчете пользы действия
            
        max_experience: int
            максимальное количество сохраненных
            обучающих примеров
            
        target_network_update_rate: float
            параметр скорости обучения нейросети,
            здесь используется 2 нейросети
            T - target_q_network
            она используется для расчета вклада будущей пользы и
            N - q_network
            она испольщуется для выбора действия,
            также эта сеть подвергается обучению
            методом обратного распространения ошибки.
            Сеть T с определенной скоростью стремится к сети N.
            Каждый раз при обучении N,
            Т модифицируется следующим образом:
                alpha = target_network_update_rate
                T = (1-alpha)*T + alpha*N
                
        summary_writer: tf.train.SummaryWriter
            запись логов
        """

        # memorize arguments
        self.observation_size = observation_size
        self.num_actions = num_actions

        self.q_network = observation_to_actions
        self.optimizer = optimizer
        self.s = session

        self.random_action_probability = random_action_probability
        self.exploration_period = exploration_period
        self.store_every_nth = store_every_nth
        self.train_every_nth = train_every_nth
        self.minibatch_size = minibatch_size
        self.discount_rate = tf.constant(discount_rate)
        self.max_experience = max_experience
        self.target_network_update_rate = \
                tf.constant(target_network_update_rate)

        # deepq state
        self.actions_executed_so_far = 0
        self.experience = deque()

        self.iteration = 0
        self.summary_writer = summary_writer

        self.number_of_times_store_called = 0
        self.number_of_times_train_called = 0

        self.create_variables()
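
The target_network_update_rate section of the docstring above describes a soft (Polyak) update, T = (1-alpha)*T + alpha*N. A minimal framework-agnostic sketch of that rule on plain numpy parameter lists (an illustration, not the class's TensorFlow implementation):

import numpy as np

def soft_update(target_params, online_params, alpha):
    # both arguments are lists of numpy arrays with matching shapes
    return [(1.0 - alpha) * t + alpha * n
            for t, n in zip(target_params, online_params)]

T = [np.zeros((2, 2)), np.zeros(3)]
N = [np.ones((2, 2)), np.ones(3)]
T = soft_update(T, N, alpha=0.01)   # T drifts slowly toward N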
示例#48
0
 def __init__(self, allowed_requests, seconds):
     self.allowed_requests = allowed_requests
     self.seconds = seconds
     self.made_requests = deque()
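
A minimal sketch of how such a made_requests deque is typically used for rate limiting (the class name and allow method are assumptions; only __init__ is shown above): timestamps older than the window are dropped from the left, and a new request is allowed only while fewer than allowed_requests remain.

import time
from collections import deque

class RateLimiter(object):
    def __init__(self, allowed_requests, seconds):
        self.allowed_requests = allowed_requests
        self.seconds = seconds
        self.made_requests = deque()

    def allow(self):
        now = time.time()
        # discard timestamps that fell out of the sliding window
        while self.made_requests and now - self.made_requests[0] > self.seconds:
            self.made_requests.popleft()
        if len(self.made_requests) >= self.allowed_requests:
            return False
        self.made_requests.append(now)
        return True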
示例#49
0
from collections import deque

T = int(raw_input())
for case in xrange(1, T + 1):
    coaster = deque()

    R, k, N = map(int, raw_input().split())

    queue = deque(map(int, raw_input().split()))
    assert len(queue) == N

    money = 0
    for ride in xrange(R):
        size = 0
        while queue:
            next = queue[0]
            new_size = size + next
            if new_size > k:
                break
            coaster.append(queue.popleft())
            size = new_size
        money += size

    #    print coaster

        queue.extend(coaster)
        coaster.clear()

    print 'Case #%d: %d' % (case, money)

示例#50
0
      # Check the tty so that we don't hang waiting for input in a
      # non-interactive scenario.
      if FLAGS.pdb_post_mortem and sys.stdout.isatty():
        traceback.print_exc()
        print()
        print(' *** Entering post-mortem debugging ***')
        print()
        pdb.post_mortem()
      raise
  except Exception as e:
    _call_exception_handlers(e)
    raise

# Callbacks which have been deferred until after _run_init has been called.
_init_callbacks = collections.deque()


def call_after_init(callback):
  """Calls the given callback only once ABSL has finished initialization.

  If ABSL has already finished initialization when `call_after_init` is
  called then the callback is executed immediately, otherwise `callback` is
  stored to be executed after `app.run` has finished initializing (aka. just
  before the main function is called).

  If called after `app.run`, this is equivalent to calling `callback()` in the
  caller thread. If called before `app.run`, callbacks are run sequentially (in
  an undefined order) in the same thread as `app.run`.

  Args:
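
The docstring above is cut off at its Args section. A minimal generic sketch of the deferred-callback pattern that _init_callbacks supports (not absl's actual implementation): before initialization, callbacks are queued in a deque; once initialization finishes, they are drained in FIFO order.

import collections

_init_callbacks = collections.deque()
_initialized = False

def call_after_init(callback):
    if _initialized:
        callback()                         # already initialized: run immediately
    else:
        _init_callbacks.append(callback)   # defer until initialization finishes

def _run_init_callbacks():
    global _initialized
    _initialized = True
    while _init_callbacks:
        _init_callbacks.popleft()()        # FIFO drain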
示例#51
0
def main(output, dataset, datadir, batch_size, lr, step, iterations, momentum,
         snapshot, downscale, augmentation, fyu, crop_size, weights, model,
         gpu, num_cls, nthreads, model_weights, data_flag, serial_batches,
         resize_to, start_step, preprocessing, small, rundir_flag, force_split,
         adam):
    if weights is not None:
        raise RuntimeError("weights don't work because eric is bad at coding")
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu
    config_logging()
    logdir_flag = data_flag
    if rundir_flag != "":
        logdir_flag += "_{}".format(rundir_flag)

    logdir = 'runs/{:s}/{:s}/{:s}'.format(model, '-'.join(dataset),
                                          logdir_flag)
    writer = SummaryWriter(log_dir=logdir)
    if model == 'fcn8s':
        net = get_model(model, num_cls=num_cls, weights_init=model_weights)
    else:
        net = get_model(model,
                        num_cls=num_cls,
                        finetune=True,
                        weights_init=model_weights)
    net.cuda()

    str_ids = gpu.split(',')
    gpu_ids = []
    for str_id in str_ids:
        id = int(str_id)
        if id >= 0:
            gpu_ids.append(id)

    # set gpu ids
    if len(gpu_ids) > 0:
        torch.cuda.set_device(gpu_ids[0])
        assert (torch.cuda.is_available())
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)

    transform = []
    target_transform = []

    if preprocessing:
        transform.extend([
            torchvision.transforms.Resize(
                [int(resize_to), int(int(resize_to) * 1.8)])
        ])
        target_transform.extend([
            torchvision.transforms.Resize(
                [int(resize_to), int(int(resize_to) * 1.8)],
                interpolation=Image.NEAREST)
        ])

    transform.extend([net.module.transform])
    target_transform.extend([to_tensor_raw])
    transform = torchvision.transforms.Compose(transform)
    target_transform = torchvision.transforms.Compose(target_transform)

    if force_split:
        datasets = []
        datasets.append(
            get_dataset(dataset[0],
                        os.path.join(datadir, dataset[0]),
                        num_cls=num_cls,
                        transform=transform,
                        target_transform=target_transform,
                        data_flag=data_flag))
        datasets.append(
            get_dataset(dataset[1],
                        os.path.join(datadir, dataset[1]),
                        num_cls=num_cls,
                        transform=transform,
                        target_transform=target_transform))
    else:
        datasets = [
            get_dataset(name,
                        os.path.join(datadir, name),
                        num_cls=num_cls,
                        transform=transform,
                        target_transform=target_transform,
                        data_flag=data_flag) for name in dataset
        ]

    if weights is not None:
        weights = np.loadtxt(weights)

    if adam:
        print("Using Adam")
        opt = torch.optim.Adam(net.module.parameters(), lr=1e-4)
    else:
        print("Using SGD")
        opt = torch.optim.SGD(net.module.parameters(),
                              lr=lr,
                              momentum=momentum,
                              weight_decay=0.0005)

    if augmentation:
        collate_fn = lambda batch: augment_collate(
            batch, crop=crop_size, flip=True)
    else:
        collate_fn = torch.utils.data.dataloader.default_collate

    loaders = [
        torch.utils.data.DataLoader(dataset,
                                    batch_size=batch_size,
                                    shuffle=not serial_batches,
                                    num_workers=nthreads,
                                    collate_fn=collate_fn,
                                    pin_memory=True) for dataset in datasets
    ]
    iteration = start_step
    losses = deque(maxlen=10)

    for loader in loaders:
        loader.dataset.__getitem__(0, debug=True)

    for im, label in roundrobin_infinite(*loaders):
        # Clear out gradients
        opt.zero_grad()

        # load data/label
        im = make_variable(im, requires_grad=False)
        label = make_variable(label, requires_grad=False)

        if iteration == 0:
            print("im size: {}".format(im.size()))
            print("label size: {}".format(label.size()))

        # forward pass and compute loss
        preds = net(im)
        loss = supervised_loss(preds, label)

        # backward pass
        loss.backward()
        losses.append(loss.item())

        # step gradients
        opt.step()

        # log results
        if iteration % 10 == 0:
            logging.info('Iteration {}:\t{}'.format(iteration,
                                                    np.mean(losses)))
            writer.add_scalar('loss', np.mean(losses), iteration)
        iteration += 1
        if step is not None and iteration % step == 0:
            logging.info('Decreasing learning rate by 0.1.')
            step_lr(opt, 0.1)

        if iteration % snapshot == 0:
            torch.save(net.module.state_dict(),
                       '{}/iter_{}.pth'.format(output, iteration))

        if iteration >= iterations:
            logging.info('Optimization complete.')
示例#52
0
 def __init__(self, base=None):
     self.__stack = deque([{} if base is None else base])
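
A minimal sketch of how such a deque-backed stack of scopes might be used (the class name and push/pop/lookup methods are assumptions; only __init__ is shown above): the newest mapping is searched first, falling back to older ones.

from collections import deque

class ScopeStack(object):
    def __init__(self, base=None):
        self.__stack = deque([{} if base is None else base])

    def push(self, mapping=None):
        self.__stack.append(mapping if mapping is not None else {})

    def pop(self):
        if len(self.__stack) > 1:          # never drop the base scope
            return self.__stack.pop()

    def lookup(self, key):
        for scope in reversed(self.__stack):   # innermost scope wins
            if key in scope:
                return scope[key]
        raise KeyError(key)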
示例#53
0
File: connection.py  Project: smira/txZMQ
    def __init__(self, factory, endpoint=None, identity=None):
        """
        Constructor.

        One endpoint is passed to the constructor; more can be added
        later via a call to :meth:`addEndpoints`.

        :param factory: ZeroMQ Twisted factory
        :type factory: :class:`ZmqFactory`
        :param endpoint: ZeroMQ address for connect/bind
        :type endpoint:  :class:`ZmqEndpoint`
        :param identity: socket identity (ZeroMQ), don't set unless you know
            how it works
        :type identity: str
        """
        self.factory = factory
        self.endpoints = []
        self.identity = identity
        self.socket = Socket(factory.context, self.socketType)
        self.queue = deque()
        self.recv_parts = []
        self.read_scheduled = None

        self.fd = self.socket.get(constants.FD)
        self.socket.set(constants.LINGER, factory.lingerPeriod)

        if not ZMQ3:
            self.socket.set(
                constants.MCAST_LOOP, int(self.allowLoopbackMulticast))

        self.socket.set(constants.RATE, self.multicastRate)

        if not ZMQ3:
            self.socket.set(constants.HWM, self.highWaterMark)
        else:
            self.socket.set(constants.SNDHWM, self.highWaterMark)
            self.socket.set(constants.RCVHWM, self.highWaterMark)

        if ZMQ3 and self.tcpKeepalive:
            self.socket.set(
                constants.TCP_KEEPALIVE, self.tcpKeepalive)
            self.socket.set(
                constants.TCP_KEEPALIVE_CNT, self.tcpKeepaliveCount)
            self.socket.set(
                constants.TCP_KEEPALIVE_IDLE, self.tcpKeepaliveIdle)
            self.socket.set(
                constants.TCP_KEEPALIVE_INTVL, self.tcpKeepaliveInterval)

        if ZMQ3 and self.reconnectInterval:
            self.socket.set(
                constants.RECONNECT_IVL, self.reconnectInterval)

        if ZMQ3 and self.reconnectIntervalMax:
            self.socket.set(
                constants.RECONNECT_IVL_MAX, self.reconnectIntervalMax)

        if self.identity is not None:
            self.socket.set(constants.IDENTITY, self.identity)

        if endpoint:
            self.addEndpoints([endpoint])

        self.factory.connections.add(self)

        self.factory.reactor.addReader(self)
        self.doRead()
示例#54
0
 def __init__(self, maxsize=0, max_priority=20):
     queue.Queue.__init__(self, maxsize=maxsize)
     self.priorities = [deque([]) for i in range(max_priority + 1)]
     self.default_priority = max_priority
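
A minimal sketch (an assumption; only __init__ is shown above) of the _put/_get overrides such a Queue subclass typically pairs with its per-priority deques: items land in the deque for their priority, and gets drain the lowest-numbered non-empty deque first.

import queue
from collections import deque

class PriorityDequeQueue(queue.Queue):
    def __init__(self, maxsize=0, max_priority=20):
        queue.Queue.__init__(self, maxsize=maxsize)
        self.priorities = [deque([]) for i in range(max_priority + 1)]
        self.default_priority = max_priority

    def _put(self, item):
        # item may be (priority, payload); bare payloads get the default priority
        if isinstance(item, tuple) and len(item) == 2:
            priority, payload = item
        else:
            priority, payload = self.default_priority, item
        self.priorities[priority].append(payload)

    def _get(self):
        for bucket in self.priorities:     # lowest priority number first
            if bucket:
                return bucket.popleft()

    def _qsize(self):
        return sum(len(bucket) for bucket in self.priorities)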
示例#55
0
 def empty_cache(self):
     self.states = {}
     self.stateq = deque()
     gc.collect()
示例#56
0
from collections import deque
for _ in range(10):
    n = int(input())
    d = deque(list(map(int, input().split())))
    print(f'#{n}', end=" ")

    i = 1
    while True:
        if i == 6:
            i = 1
        num = d.popleft()-i
        if num <= 0:
            d.append(0)
            break
        d.append(num)
        i += 1

    print(*d)
示例#57
0
 def __init__(self, capacity):
     self.observations = deque(maxlen=capacity)
     self.actions = deque(maxlen=capacity)
     self.rewards = deque(maxlen=capacity)
     self.dones = deque(maxlen=capacity)
示例#58
0
b = a.copy()
print(b)
c = a[:]
print("this is c", c)
c[0] = 100
print("this is a", a)
print("this is c", c)

# Use a list as a stack
stack = [3, 4, 5]
stack.append(6)
stack.pop()
print(stack)

from collections import deque
queue = deque(["eric", 'john', 'michael'])
queue.append("terry")
queue.append("graham")
print(queue.popleft())
print(queue.pop())

# List comprehensions
squares = []
for x in range(10):
    squares.append(x**2)
print(squares)

squares2 = list(map(lambda x: x**2, range(10)))
print(squares2)

print([x**2 for x in range(10)])
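
A small timing sketch of why the snippet above switches from a plain list to collections.deque for queue behaviour: popping from the left of a list is O(n), while deque.popleft() is O(1).

import timeit

list_pop0 = timeit.timeit(
    'while q: q.pop(0)', setup='q = list(range(50000))', number=1)
deque_popleft = timeit.timeit(
    'while q: q.popleft()',
    setup='from collections import deque; q = deque(range(50000))', number=1)

print('list.pop(0):     %.4f s' % list_pop0)
print('deque.popleft(): %.4f s' % deque_popleft)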
示例#59
0
from heapq import heappush, heappop, heapreplace, heapify
from collections import deque

# 'heap' and 'data' are assumed to be defined earlier in the original file
# (e.g. heap = [] and data = a list of numbers); this excerpt starts mid-script.
for i in data:
    heappush(heap, i)
print(heap)

print(heappop(heap))
print(heappop(heap))
heappush(heap, 0.5)
print(heap)

heapreplace(heap, 100)
print(heap)

heapify(data)
print(data)

dqu = deque(range(6))
print(dqu)
dqu.append(10)
print(dqu)
dqu.appendleft(100)
print(dqu)
print(dqu.pop())
print(dqu)
print(dqu.popleft())
print(dqu)
dqu.rotate(3)
print(dqu)
#[8, 7, 1, 4, 6, 3, 5, 0, 2, 9]
#[0, 1, 3, 2, 6, 7, 5, 8, 4, 9]
#0
#1
示例#60
0
        x = x / (self.std + 1e-8)

        x = np.clip(x, -5, +5)


        return x



ppo = Ppo(N_S,N_A)
nomalize = Nomalize(N_S)
episodes = 0
eva_episodes = 0
for iter in range(Iter):
    memory = deque()
    scores = []
    steps = 0
    while steps < 2048:  # Horizon
        episodes += 1
        s = nomalize(env.reset())
        score = 0
        for _ in range(MAX_STEP):
            steps += 1
            # choose an action
            a = ppo.actor_net.choose_action(
                torch.from_numpy(np.array(s).astype(np.float32)).unsqueeze(0))[0]
            s_, r, done, info = env.step(a)
            s_ = nomalize(s_)

            mask = (1-done)*1
            memory.append([s,a,r,mask])