Exemple #1
0
def main():
    """Fetch and check free proxies, optionally restoring state from hub.pkl.

    Parses CLI flags, builds the Hub (fresh, or restored from a pickle
    snapshot when --load is given), schedules the fetcher tasks plus the
    HTTP server, and runs the event loop until interrupted.  On Ctrl-C the
    hub state is pickled back to hub.pkl so a later run can resume.
    """
    # Handle arguments.
    parser = argparse.ArgumentParser(description='fetch and check free proxies')
    parser.add_argument('-v', '--verbose', action='store_true')
    # Fixed typo in help text: the snapshot file is hub.pkl, not hub.plk.
    parser.add_argument('-l', '--load', action='store_true', help='load hub.pkl file')
    args = parser.parse_args(sys.argv[1:])

    if args.verbose:
        set_console_log_level(logging.DEBUG)

    if args.load:
        # NOTE: pickle.load can execute arbitrary code on a hostile file;
        # hub.pkl is assumed to be locally produced by a previous run.
        with open("hub.pkl", "rb") as f:
            _hub = pickle.load(f)
            hub = Hub(_hub)
    else:
        hub = Hub()

    log.info('Start')
    loop = asyncio.get_event_loop()
    queue = asyncio.Queue()
    factory = Factory(loop, queue, fetchers, hub)
    tasks = factory.get_tasks()
    tasks.append(asyncio.ensure_future(Server(hub).run()))
    tasks = asyncio.gather(*tasks, loop=loop)
    try:
        loop.run_until_complete(tasks)
    except KeyboardInterrupt:
        # Snapshot hub state so the next run can resume with --load.
        with open('hub.pkl', 'wb') as f:
            pickle.dump(hub.copy(), f, pickle.HIGHEST_PROTOCOL)
    finally:
        loop.close()
Exemple #2
0
    def __init__(self, nLines=64, associativity=8, pageSize=0x1000, tlb=None, cache=None, hub=None):
        """Simple associative cache structure (eTLB front-end).

        Parameters
        ----------

        nLines (int):
            Total number of entries (lines). (Default 64)
        associativity (int):
            Number of ways for an associative organisation, -1 for fully
            associative (one set containing every line).
        pageSize (int):
            Page size in bytes; fixes the number of page-offset bits.
            (Default 0x1000, i.e. 4 kB)
        tlb (TLB):
            Backing TLB; default is a 512-entry TLB keyed on tag + set bits.
        cache (Cache):
            Backing data cache; default is a 32 kB, 16-way Cache with
            preset access/tag timing and energy figures.
        hub (Hub):
            Hub coordinating this eTLB; default None creates a Hub matching
            this structure's associativity and page size.
        """
        self.nLines = nLines
        self.associativity = associativity
        self.hub = hub
        self.cache = cache
        self.tlb = tlb
        self.pageSize = pageSize


        # Fully associative: a single set containing every line.
        if self.associativity == -1:
            self.associativity = self.nLines

        if self.hub is None:
            self.hub = Hub(associativity=self.associativity, pageSize=self.pageSize)
        if self.cache is None:
            # Default backing cache with preset access/tag costs.
            self.cache = Cache(size=0x8000, associativity=16)
            self.cache.accessEnergy = 0.0111033
            self.cache.accessTime = 4
            self.cache.tagTime = 1
            self.cache.tagEnergy = 0.000539962

        # Back-reference so the hub can reach this eTLB.
        self.hub.eTLB = self

        self.cacheLine = self.cache.cacheLine

        self.nSets = self.nLines // self.associativity

        # Address field widths.  tagBits assumes a 48-bit virtual address
        # (NOTE(review): confirm the 48-bit assumption).
        self.offsetBits = int(math.ceil(math.log2(self.cacheLine)))
        self.wayBits = int(math.ceil(math.log2(self.associativity)))
        self.pageBits = int(math.ceil(math.log2(self.pageSize))) - self.offsetBits
        self.setBits = int(math.ceil(math.log2(self.nSets)))
        self.tagBits = 48 - self.setBits - self.pageBits - self.offsetBits

        if self.tlb is None:
            self.tlb = TLB(512, self.tagBits + self.setBits)

        # Per-set list of free way indices, used for allocation.
        self.freeList = [list(range(self.associativity)) for i in range(self.nSets)]

        self.counter = 0
        self.entries = [[ETLBEntry(self.pageSize, self.cacheLine) for i in range(self.associativity)] for j in range(self.nSets)]

        # Statistics counters.
        self.hit = [0,0,0,0] #DRAM,L1I,L1D,L2  Note: L1 is actually a unified cache at present, for forward compatability if separate caches are ever implemented
        self.miss = 0
        self.cycles = 0
        self.energy = 0.
Exemple #3
0
def main():
    """Print the generated fill SQL for every schema object, in load order."""
    for obj_type in ["hub", "lnk", "sat", "raw", "hashed"]:
        for etl_obj in schema_etl[obj_type]:
            sql = Hub().generate_general_fill(etl_obj)
            print(sql)
Exemple #4
0
def restaurant_endpoint(table_id=''):
    """
    Handles creation and queries for restaurant data.
    :param table_id: hub device ID.
    :return: JSON or CSV response body and status code.
    """

    if request.method == 'GET':
        # Query-string table_id overrides the path parameter.
        hub_id = request.args.get('table_id', table_id)

        hub = Hub('')
        hub.get_restaurant_id(hub_id=hub_id)

        # if Mime Type is CSV, respond with simple restaurant ID string.
        if request.content_type == 'text/csv':
            # Bug fix: `print hub.restaurant_id` is Python 2 statement
            # syntax (a SyntaxError under Python 3); use the function form.
            print(hub.restaurant_id)
            if hub.restaurant_id:
                return hub.restaurant_id, OK
            else:
                return 'error', BAD_REQUEST

        # Otherwise, respond in JSON format.
        else:
            if hub.restaurant_id:
                return jsonify({
                    'table_id': hub_id,
                    'restaurant_id': hub.restaurant_id
                }), OK
            else:
                return jsonify({
                    'table_id':
                    hub_id,
                    'error':
                    'Specified table ID is not affiliated with a restaurant.'
                }), BAD_REQUEST

    elif request.method == 'POST':
        # Consistency: use the same `request` proxy as the GET branch
        # instead of `flask.request`.
        request_body = request.get_json()

        # Restaurant name must be supplied in request body.
        if 'name' in request_body:
            restaurant_name = request_body['name']
        else:
            request_body.update({'error': 'Restaurant name not specified.'})
            return jsonify(request_body), BAD_REQUEST

        # Create new restaurant, and return result to user.
        restaurant = Restaurant(restaurant_id='')
        restaurant_info = restaurant.create(name=restaurant_name)

        if 'error' in restaurant_info:
            return jsonify(restaurant_info), SERVER_ERROR
        else:
            return jsonify(restaurant_info), OK
    def test_post_success(self):
        """Posting a server/hub mapping succeeds after the old one is removed."""
        r1 = self.delete_server_hub_map()

        self.assertEqual(r1.status_code, server.OK)

        r2 = self.post_server_hub_map()

        hub = Hub(restaurant_id=self.restaurant_id)

        self.assertEqual(r2.status_code, server.CREATED)
        # Bug fix: assertTrue(a, b) treats b as a failure *message* and never
        # compares the values; assertEqual performs the intended comparison.
        self.assertEqual(hub.get_attendant_id(self.hub_id), self.attendant_id)
Exemple #6
0
 def __init__(self, _engine):
     """Game scene: shows a loading splash, then builds the map, HUD,
     cursor and one AI per player."""
     super(Scene, self).__init__()
     self._ais = []
     self._engine = _engine
     self._resx, self._resy = _engine.getResolution()
     self.surface = pygame.Surface((self._resx, self._resy))
     # Centered loading splash ("Wczytywanie mapy..." = "Loading map...").
     drawText(self.surface, "Wczytywanie mapy...", 48, (255, 255, 255),
              (self._resx / 2, self._resy / 2))
     self._map = Map(_engine)
     self._hub = Hub(_engine, self._map)
     self._cursor = Cursor(_engine, self._map)
     # One AI per player slot.
     self._ais.append(AI(_engine, self._map, _engine.players[0], 0))
     self._ais.append(AI(_engine, self._map, _engine.players[1], 1))
Exemple #7
0
 def send_folder(self, dirpath = None):
     """Send the contents of a directory to this buddy.

     Prompts for a directory when none was supplied.  Directories with no
     files, or zero total bytes, are reported through Hub.on_error instead
     of being sent.
     """
     from hub import Hub
     if dirpath is None:
         dirpath = Hub.getInstance().get_dir('Choose a directory to send '
                                             'to %s.' % self.name)
     if not dirpath:
         return
     finfo = fileinfo(dirpath)
     if not finfo.files:
         Hub.getInstance().on_error(ValueError('No files in that directory.'))
     elif not finfo.size:
         Hub.getInstance().on_error(ValueError('There are zero bytes in that directory.'))
     else:
         self.protocol.send_file(self, finfo)
Exemple #8
0
    def send_file(self, filepath = None):
        """Send a single file to this buddy, prompting when no path is given.

        Empty files are rejected via Hub.on_error with a
        FileTransferException; a transfer only starts while the buddy is
        online, and is recorded at the front of profile.xfers.
        """
        if filepath is None:
            from hub import Hub
            filepath = Hub.getInstance().get_file('Sending file to %s' % self.name)

        if not filepath:
            return
        finfo = fileinfo(filepath)
        if not finfo.size:
            from hub import Hub
            Hub.getInstance().on_error(FileTransferException('%s is an empty file' % finfo.name))
        elif self.online:
            xfer = self.protocol.send_file(self, finfo)
            profile.xfers.insert(0, xfer)
Exemple #9
0
 def send_folder(self, dirpath=None):
     """Send the contents of a directory to this buddy.

     Prompts for a directory when none was supplied.  Directories with no
     files, or zero total bytes, are reported via Hub.on_error instead of
     being sent.
     """
     from hub import Hub
     if dirpath is None:
         dirpath = Hub.getInstance().get_dir('Choose a directory to send '
                                             'to %s.' % self.name)
     if dirpath:
         finfo = fileinfo(dirpath)
         if len(finfo.files) == 0:
             Hub.getInstance().on_error(
                 ValueError('No files in that directory.'))
         elif finfo.size == 0:
             Hub.getInstance().on_error(
                 ValueError('There are zero bytes in that directory.'))
         else:
             self.protocol.send_file(self, finfo)
Exemple #10
0
    def run(self):
        """Mirror every selected repository from src to dst.

        Builds the Hub connection, picks the repo list (the static list when
        configured, otherwise discovered via hub.dynamic_list()), then
        downloads, creates and pushes a Mirror per repo.  Prints a summary
        and exits with status 1 if any repo failed.
        """
        hub = Hub(
            self.args.src,
            self.args.dst,
            self.args.dst_token,
            account_type=self.args.account_type,
            clone_style=self.args.clone_style,
            src_account_type=self.args.src_account_type,
            dst_account_type=self.args.dst_account_type,
        )
        src_type, src_account = self.args.src.split('/')

        # Using static list when static_list is set
        repos = self.static_list
        src_repos = repos if repos else hub.dynamic_list()

        total, success, skip = len(src_repos), 0, 0
        failed_list = []
        for src_repo in src_repos:
            # Set dst_repo to src_repo mapping or src_repo directly
            dst_repo = self.mappings.get(src_repo, src_repo)
            print("Map %s to %s" % (src_repo, dst_repo))
            if self.test_black_white_list(src_repo):
                print("Backup %s" % src_repo)
                try:
                    mirror = Mirror(
                        hub,
                        src_repo,
                        dst_repo,
                        cache=self.args.cache_path,
                        timeout=self.args.timeout,
                        force_update=self.args.force_update,
                    )
                    mirror.download()
                    mirror.create()
                    mirror.push()
                    success += 1
                except Exception as e:
                    # Best-effort: one failed repo must not stop the batch.
                    print(e)
                    failed_list.append(src_repo)
            else:
                skip += 1
        failed = total - success - skip
        res = (total, skip, success, failed)
        print("Total: %s, skip: %s, successed: %s, failed: %s." % res)
        print("Failed: %s" % failed_list)
        # Non-zero exit so CI notices partial failure.
        if failed_list:
            sys.exit(1)
Exemple #11
0
def default_hub(hub_name, genome, short_label, long_label, email):
    """
    Returns a fully-connected set of hub components using default filenames.
    """
    hub = Hub(hub=hub_name, short_label=short_label,
              long_label=long_label, email=email)
    genomes_file = GenomesFile()
    hub.add_genomes_file(genomes_file)
    genome = Genome(genome)
    genomes_file.add_genome(genome)
    trackdb = TrackDb()
    genome.add_trackdb(trackdb)
    return hub, genomes_file, genome, trackdb
Exemple #12
0
 def __init__(self, name, short_label, email):
     """Create a track hub named *name* with an attached genomes file;
     the long label is derived from *short_label*."""
     self.hub = Hub(name,
           short_label=short_label,
           long_label='Tracks for %s' % short_label,
           email=email)
     self.genomes_file = GenomesFile()
     self.hub.add_genomes_file(self.genomes_file)
Exemple #13
0
    def __init__(self, num_sensors, num_actions, show=True, 
                 agent_name='test_agent'):
        """
        Configure the Agent

        num_sensors and num_actions are the only absolutely necessary
        arguments. They define the number of elements in the 
        sensors and actions arrays that the agent and the world use to
        communicate with each other. 
        """
        # How often (in timesteps) the agent state is backed up to disk.
        self.BACKUP_PERIOD = 10 ** 4
        self.show = show
        self.name = agent_name
        self.pickle_filename ="log/" + agent_name + ".pickle"
        # TODO: Automatically adapt to the number of sensors pass in
        self.num_sensors = num_sensors
        self.num_actions = num_actions

        # Initialize agent infrastructure
        # Start with a single block sized to carry actions + sensors.
        self.num_blocks =  1
        first_block_name = ''.join(('block_', str(self.num_blocks - 1)))
        self.blocks = [Block(self.num_actions + self.num_sensors, 
                             name=first_block_name)]
        # Hub sized to the first block's cable capacity.
        self.hub = Hub(self.blocks[0].max_cables)
        self.action = np.zeros((self.num_actions,1))
        # Reward / logging bookkeeping.
        self.cumulative_reward = 0
        self.time_since_reward_log = 0 
        self.reward_history = []
        self.reward_steps = []
        self.surprise_history = []
        # Rolling window of the most recent 100 surprise values.
        self.recent_surprise_history = [0.] * 100
        self.timestep = 0
        self.graphing = True
Exemple #14
0
    def send_file(self, filepath=None):
        """Send a single file to this buddy.

        Prompts for a file when none was supplied.  Empty files are
        reported via Hub.on_error; the transfer only starts (and is
        recorded at the front of profile.xfers) while the buddy is online.
        """
        if filepath is None:
            from hub import Hub
            filepath = Hub.getInstance().get_file('Sending file to %s' %
                                                  self.name)

        if filepath:
            finfo = fileinfo(filepath)
            if finfo.size:
                if self.online:
                    xfer = self.protocol.send_file(self, finfo)
                    # Newest transfers shown first.
                    profile.xfers.insert(0, xfer)
            else:
                from hub import Hub
                Hub.getInstance().on_error(
                    FileTransferException('%s is an empty file' % finfo.name))
Exemple #15
0
    def send_file(self, filepath = None):
        """Send *filepath* to this buddy, prompting for a file when omitted."""
        if filepath is None:
            from hub import Hub
            prompt = 'Sending file to %s' % self.name
            filepath = Hub.getInstance().get_file(prompt)

        if not filepath:
            return
        info = fileinfo(filepath)
        self.protocol.send_file(self, info)
Exemple #16
0
class UserHub(object):
    """
    Create hub for a set of NGS data from one user/group
    """
    def __init__(self, name, short_label, email):
        long_label = 'Tracks for %s' % short_label
        self.hub = Hub(name, short_label=short_label,
                       long_label=long_label, email=email)
        self.genomes_file = GenomesFile()
        self.hub.add_genomes_file(self.genomes_file)

    def add_genomehub(self, genome_hub):
        """Attach one genome-level hub to this user's genomes file."""
        self.genomes_file.add_genome(genome_hub.genome)

    def render(self):
        """Render the underlying Hub to its text representation."""
        return self.hub.render()
Exemple #17
0
def setup_value(line_name, alist, conn=False):
    """
    Set value in instance type for all stations and all transfer points.
    """
    from station import Station
    from hub import Hub

    instances = []
    for item in alist:
        fields = item.split(':')
        if conn:
            # Transfer points only come from 4-field records; skip others.
            if len(fields) != 4:
                continue
            instance = Hub()
            instance.conn = fields[-1]
        else:
            instance = Station()
        instance.name = fields[1]
        instance.line = line_name
        instance.id = fields[0]
        instances.append(instance)

    return instances
    def test_delete_success(self):
        """Deleting a server/hub mapping is idempotent and can be restored."""
        r1 = self.delete_server_hub_map()

        self.assertEqual(r1.status_code, server.OK)
        self.assertTrue('message' in r1.json())

        # A second delete of the same mapping still succeeds (idempotent).
        r2 = self.delete_server_hub_map()

        self.assertEqual(r2.status_code, server.OK)
        self.assertTrue('message' in r2.json())

        # Place mapping back in db.
        r3 = self.post_server_hub_map()

        hub = Hub(restaurant_id=self.restaurant_id)

        self.assertEqual(r3.status_code, server.CREATED)
        # Bug fix: assertTrue(a, b) treats b as a failure *message* and never
        # compares the values; assertEqual performs the intended comparison.
        self.assertEqual(hub.get_attendant_id(self.hub_id), self.attendant_id)
Exemple #19
0
    def __init__(self, _engine):
        """Level scene: caches the shared background image on the class,
        shows a "Poziom N..." ("Level N...") splash, and creates the HUD
        and empty sprite groups."""
        super(Scene, self).__init__()

        # Load the shared background once; cached as a class attribute.
        if not Scene.image:
            Scene.image = loadImage('data/gfx/background.png')

        self._engine = _engine
        self._resx, self._resy = _engine.getResolution()
        self._background = pygame.transform.smoothscale(
            self.image, (self._resx, self._resy))
        self.surface = self._background.copy()
        drawText(self.surface, "Poziom %d..." % self._engine.game['level'], 48,
                 (255, 255, 255), (self._resx / 2, self._resy / 2))
        self._engine.show(self.surface)
        self._hub = Hub(_engine)
        self.planets = pygame.sprite.Group()
        self.rocket = pygame.sprite.GroupSingle()
        self.stars = pygame.sprite.Group()
        self.canisters = pygame.sprite.Group()
        # Marks the first update tick after construction.
        self._first = True
Exemple #20
0
	def __init__(self, _engine):
		"""Game scene: shows a loading splash, then builds the map, HUD,
		cursor and one AI per player."""
		super(Scene, self).__init__()
		self._ais = []
		self._engine = _engine
		self._resx, self._resy = _engine.getResolution()
		self.surface = pygame.Surface((self._resx, self._resy))
		# Centered loading splash ("Wczytywanie mapy..." = "Loading map...").
		drawText(self.surface, "Wczytywanie mapy...", 48, (255, 255, 255), (self._resx / 2, self._resy / 2))
		self._map = Map(_engine)
		self._hub = Hub(_engine, self._map)
		self._cursor = Cursor(_engine, self._map)
		# One AI per player slot.
		self._ais.append(AI(_engine, self._map, _engine.players[0], 0))
		self._ais.append(AI(_engine, self._map, _engine.players[1], 1))
def run_game():
    """ Main game structure that runs the whole program """
    # Initialize pygame
    pygame.init()

    # Set up the hub
    hub = Hub()
    pygame.display.set_caption(hub.WINDOW_TITLE)
    pygame.display.set_icon(hub.WINDOW_ICON)

    while True:
        """ Game Loop, as long as this is true the game will run. """
        # Clear Screen
        hub.main_screen.fill(hub.BG_COLOR)

        # Decide what screen to display
        hub.display_screen()

        # Display the screen onto the window
        pygame.display.flip()
        # tick() returns the elapsed milliseconds since the previous call.
        dt = hub.CLOCK.tick(hub.FRAMERATE)
        # NOTE(review): dt == 0 would raise ZeroDivisionError here; confirm
        # that FRAMERATE capping guarantees a non-zero tick.
        hub.speed = 1 / float(dt)
Exemple #22
0
def call_server_endpoint():
    """
    Gateway for mobile notifications as initiated by a table hub.
    :return: JSON response body and status code.
    """
    request_body = flask.request.get_json()

    # Restaurant ID and Table ID must be specified in request body.
    if 'restaurant_id' in request_body:
        restaurant_id = request_body['restaurant_id']
    else:
        request_body.update({'error': 'Restaurant ID is not specified.'})
        return jsonify(request_body), BAD_REQUEST
    if 'table_id' in request_body:
        table_id = request_body['table_id']
    else:
        request_body.update({'error': 'Table ID is not specified.'})
        return jsonify(request_body), BAD_REQUEST

    hub = Hub(restaurant_id=restaurant_id)
    # Placeholder user; the real lookup happens via get_app_id below.
    user = User('')

    # Get current attendant user from hub information.
    attendant_id = hub.get_attendant_id(hub_id=table_id)
    attendant_app_id = user.get_app_id(attendant_id)

    # Get table name for message body.
    table_name = hub.get_table_name(hub_id=table_id)

    # Trigger notification to attendant.
    success = hub.trigger_notification(attendant_app_id=attendant_app_id,
                                       table_name=table_name)
    if success:
        request_body.update({'message': 'Notification Successful.'})
        return jsonify(request_body), OK
    else:
        request_body.update({'error': 'Could not send Notification.'})
        return jsonify(request_body), SERVER_ERROR
Exemple #23
0
    def __init__(self,
                 num_sensors,
                 num_actions,
                 show=True,
                 agent_name='test_agent'):
        """
        Configure the Agent

        num_sensors and num_actions are the only absolutely necessary
        arguments. They define the number of elements in the 
        sensors and actions arrays that the agent and the world use to
        communicate with each other. 
        """
        # How often (in timesteps) the agent state is backed up to disk.
        self.BACKUP_PERIOD = 10**4
        self.show = show
        self.name = agent_name
        self.pickle_filename = "log/" + agent_name + ".pickle"
        # TODO: Automatically adapt to the number of sensors pass in
        self.num_sensors = num_sensors
        self.num_actions = num_actions

        # Initialize agent infrastructure
        # Start with a single block sized to carry actions + sensors.
        self.num_blocks = 1
        first_block_name = ''.join(('block_', str(self.num_blocks - 1)))
        self.blocks = [
            Block(self.num_actions + self.num_sensors, name=first_block_name)
        ]
        # Hub sized to the first block's cable capacity.
        self.hub = Hub(self.blocks[0].max_cables)
        self.action = np.zeros((self.num_actions, 1))
        # Reward / logging bookkeeping.
        self.cumulative_reward = 0
        self.time_since_reward_log = 0
        self.reward_history = []
        self.reward_steps = []
        self.surprise_history = []
        # Rolling window of the most recent 100 surprise values.
        self.recent_surprise_history = [0.] * 100
        self.timestep = 0
        self.graphing = True
Exemple #24
0
 def init(self, hubs_data):
     """Populate self.hubs from serialized hub records.

     Each record supplies a "location" and a JSON "sequence" of layers.
     NOTE(review): the first argument to Hub() is the *builtin* ``id``
     function, not a numeric identifier — the sibling init(hub_files)
     passes an incrementing counter instead.  Looks like a bug; confirm
     the intended hub id before changing.
     """
     for data in hubs_data:
         # Round-trip through repr/json to coerce the record into a dict.
         data = json.loads(data.__repr__())
         self.hubs.append(Hub(id, data["location"]))
         for l in json.loads(data["sequence"]):
             object = l
             self.hubs[-1].addLayer(
                 SequenceLayer(object["user_id"], object["sound_id"],
                               object["rhythm"]))
Exemple #25
0
	def __init__(self):
		"""Open the first PL2303 USB-serial adapter (VID 0x067B, PID 0x2303)
		enumerated by the Hub and configure its line settings.

		NOTE(review): Python 2 code — under Python 3, struct.pack returns
		bytes and appending str literals ('\\0') would raise TypeError.
		"""
		self.device = None
		for device in Hub.enumerate(vid=0x067B, pid=0x2303):
			device.connect()
			self.device = device
			break
		assert self.device != None
		
		# Vendor-specific init write; meaning of value/index is undocumented.
		self.device.controlWrite(requestType=0x40, request=0x01, value=0x0002, index=0x0044) #w(0x0002, 0x0044)
		
		baud = 9600
		# Line-coding buffer: little-endian baud rate followed by framing bytes.
		setupBuf = struct.pack('<L', baud)
		setupBuf += '\0' # 1 stop bit
		setupBuf += '\0' # No parity
		setupBuf += '\x08' # Unk -- presumably 8 data bits (CDC line coding); confirm
		self.device.controlWrite(request=0x20, requestType=0x21, buf=setupBuf)
Exemple #26
0
    def init(self, hub_files):
        """Load one Hub per *.hub file and attach a SequenceLayer per line.

        Hub ids are a 1-based counter in file order; the location is the
        filename with its ".hub" suffix stripped.  Finally prints each
        hub's location and hub object.
        """
        hub_id = 0
        for path in hub_files:
            with open(path, 'r') as data:
                hub_id += 1
                self.hubs.append(Hub(hub_id, path.replace(".hub", "")))

                for line in data:
                    record = json.loads(line)
                    self.hubs[-1].addLayer(
                        SequenceLayer(record["user_id"], record["sound_id"],
                                      record["rhythm"]))

        for hub in self.hubs:
            print(hub.getLocation())
            print(hub.getHubObject())
Exemple #27
0
	def __init__(self, _engine):
		"""Level scene: caches the shared background image on the class,
		shows a "Poziom N..." ("Level N...") splash, and creates the HUD
		and empty sprite groups."""
		super(Scene, self).__init__()

		# Load the shared background once; cached as a class attribute.
		if not Scene.image:
			Scene.image = loadImage('data/gfx/background.png')

		self._engine = _engine
		self._resx, self._resy = _engine.getResolution()
		self._background = pygame.transform.smoothscale(self.image, (self._resx, self._resy))
		self.surface = self._background.copy()
		drawText(self.surface, "Poziom %d..." % self._engine.game['level'], 48, (255, 255, 255), (self._resx / 2, self._resy / 2))
		self._engine.show(self.surface)
		self._hub = Hub(_engine)
		self.planets = pygame.sprite.Group()
		self.rocket = pygame.sprite.GroupSingle()
		self.stars = pygame.sprite.Group()
		self.canisters = pygame.sprite.Group()
		# Marks the first update tick after construction.
		self._first = True
Exemple #28
0
    ref = f'{org}/{project}/{peer}'
else:
    ref = None

# single hub virtual network with gateway, firewall, DMZ and shared services
# HubProps values come from the stack configuration: require() aborts when
# the key is missing, get() returns None for optional settings.
hub = Hub(
    'hub',  # stem of child resource names (<4 chars)
    HubProps(
        resource_group_name=resource_group_name,
        tags=default_tags,
        stack=stack,
        dmz_ar=config.require('firewall_dmz_subnet'),
        fwm_ar=config.get('firewall_management_subnet'),
        fws_ar=config.require('firewall_subnet'),
        fwz_as=config.require('firewall_address_space'),
        gws_ar=config.require('hub_gateway_subnet'),
        hbs_ar=config.get('hub_bastion_subnet'),
        hub_ar=config.require('hub_first_subnet'),
        hub_as=config.require('hub_address_space'),
        peer=peer,
        ref=ref,
        subnets=[  # extra columns for future NSGs
            ('domain', 'any', 'any'),
            ('files', 'any', 'none'),
        ],
    ),
)

# multiple spoke virtual networks for application environments
spoke1 = Spoke(
    's01',  # stem of child resource names (<6 chars)
    SpokeProps(
Exemple #29
0
# set required vdc variables before calling function
vdc.tags = config.default_tags
# all resources will be created in configuration location
resource_group_name = vdc.resource_group(config.stack)

# single hub with gateways, firewall, DMZ, shared services, bastion (optional)
# Settings are taken straight from the stack configuration object.
hub = Hub(
    'hub',  # stem of child resource names (<4 chars)
    HubProps(
        azure_bastion=config.azure_bastion,
        forced_tunnel=config.forced_tunnel,
        firewall_address_space=config.firewall_address_space,
        hub_address_space=config.hub_address_space,
        peer=config.peer,
        reference=config.reference,
        resource_group_name=resource_group_name,
        stack=config.stack,
        subnets=[  # extra columns for future ASGs
            ('domain', 'any', 'any'),
            ('files', 'any', 'none'),
        ],
        tags=config.default_tags,
    ),
)

# multiple spokes for application environments with bastion access (optional)
spoke1 = Spoke(
    's01',  # stem of child resource names (<6 chars)
    SpokeProps(
        azure_bastion=config.azure_bastion,
        fw_rt_name=hub.fw_rt_name,
Exemple #30
0
    auto_unregister = True
    for opt, val in opts:
        if opt in ('-h', '--help'):
            usage()

        if opt == "--disable-unregister":
            auto_unregister = False

    if len(args) != 1:
        usage("incorrect number of arguments")
    instance_id = args[0]

    apikey = os.getenv('HUB_APIKEY', None)
    if not apikey:
        fatal("HUB_APIKEY not specified in environment")

    hub = Hub(apikey)

    try:
        server = hub.servers.get(instance_id)[0]
        server.destroy(auto_unregister=auto_unregister)
    except hub.Error, e:
        fatal(e.description)

    print fmt_server_header()
    print fmt_server(server)

if __name__ == "__main__":
    main()

 def generateHubs(self):
     hub = Hub((50, 50))
     for row in range(self.gridsize):
Exemple #32
0
# Tornado websocket push server bridged to a ZMQ publisher.
loop = ZMQIOLoop()
loop.install()

settings = {
    'xsrf_cookies': False,
    'debug': True,
    'autoreload': True,
    'websocket_ping_interval': 60  # send a ping periodically to keep the connection's heartbeat alive
}

app = tornado.web.Application([
    (r'/ws', PushWebSocket),
], **settings)

if __name__ == '__main__':
    port = '8000'
    remote = '127.0.0.1:5560'  # publish address of the MPC message publishing center

    # -r overrides the remote publisher address, -p the listen port.
    opts, argvs = getopt.getopt(sys.argv[1:], 'r:p:')
    for op, value in opts:
        if op == '-r':
            remote = value
        if op == '-p':
            port = int(value)

    # Constructed for its side effects only; the instance is not kept.
    Hub(*remote.split(':'))

    app.listen(port)

    loop.start()
import config
from hub import Hub

# NOTE(review): count=-1 presumably means "run indefinitely" — confirm
# against Hub's implementation.
hub = Hub(
    count=-1,
    battery=100,
)

# Component imports are interleaved with registration so optional
# components are only imported when enabled.
from internet import Internet
internet = hub.add(Internet)

from counter import Counter
hub.add(Counter)

if config.BATT:
    from components.battery import Battery
    hub.add(Battery)

hub.run()
Exemple #34
0
 def __init__(self, spec, config_path):
     """Build one Hub per entry in spec["hubs"]; "support" config is optional."""
     self.spec = spec
     self.config_path = config_path
     self.hubs = [Hub(self, hub_spec) for hub_spec in self.spec["hubs"]]
     self.support = self.spec.get("support", {})
Exemple #35
0
class Agent(object):
    """ 
    A general reinforcement learning agent
    
    Takes in a time series of sensory input vectors and 
    a scalar reward and puts out a time series of action commands."""
    # NOTE(review): Python 2 code throughout (print statements,
    # `except X, e` syntax); depends on Block, Hub, np, plt, pickle
    # defined elsewhere in the module.
    def __init__(self, num_sensors, num_actions, show=True, 
                 agent_name='test_agent'):
        """
        Configure the Agent

        num_sensors and num_actions are the only absolutely necessary
        arguments. They define the number of elements in the 
        sensors and actions arrays that the agent and the world use to
        communicate with each other. 
        """
        # Pickle a snapshot of the agent every BACKUP_PERIOD timesteps.
        self.BACKUP_PERIOD = 10 ** 4
        self.show = show
        self.name = agent_name
        self.pickle_filename ="log/" + agent_name + ".pickle"
        # TODO: Automatically adapt to the number of sensors pass in
        self.num_sensors = num_sensors
        self.num_actions = num_actions

        # Initialize agent infrastructure
        self.num_blocks =  1
        first_block_name = ''.join(('block_', str(self.num_blocks - 1)))
        self.blocks = [Block(self.num_actions + self.num_sensors, 
                             name=first_block_name)]
        self.hub = Hub(self.blocks[0].max_cables)
        self.action = np.zeros((self.num_actions,1))
        self.cumulative_reward = 0
        self.time_since_reward_log = 0 
        self.reward_history = []
        self.reward_steps = []
        self.surprise_history = []
        # Rolling window used to estimate the typical surprise level.
        self.recent_surprise_history = [0.] * 100
        self.timestep = 0
        self.graphing = True

    def step(self, sensors, reward):
        """ Step through one time interval of the agent's operation """
        self.timestep += 1
        # Promote 1-D sensor vectors to column vectors.
        if sensors.ndim == 1:
            sensors = sensors[:,np.newaxis]
        self.reward = reward
        # Propogate the new sensor inputs up through the blocks
        cable_activities = np.vstack((self.action, sensors))
        for block in self.blocks:
            cable_activities = block.step_up(cable_activities) 
        # Create a new block if the top block has had enough bundles assigned
        # (`block` here is the top block, left over from the loop above).
        block_bundles_full = (float(block.bundles_created()) / 
                              float(block.max_bundles))
        block_initialization_threshold = .5
        if block_bundles_full > block_initialization_threshold:
            self.num_blocks +=  1
            next_block_name = ''.join(('block_', str(self.num_blocks - 1)))
            self.blocks.append(Block(self.num_actions + self.num_sensors,
                                     name=next_block_name, 
                                     level=self.num_blocks))
            cable_activities = self.blocks[-1].step_up(cable_activities) 
            self.hub.add_cables(self.blocks[-1].max_cables)
            print "Added block", self.num_blocks - 1

        self.hub.step(self.blocks, self.reward) 
        
        # Propogate the deliberation_goal_votes down through the blocks
        # debug
        agent_surprise = 0.0
        cable_goals = np.zeros((cable_activities.size,1))
       
        for block in reversed(self.blocks):
            cable_goals = block.step_down(cable_goals)
            if np.nonzero(block.surprise)[0].size > 0:
                agent_surprise = np.sum(block.surprise)
        # Track surprise relative to its recent median, not its raw value.
        self.recent_surprise_history.pop(0)
        self.recent_surprise_history.append(agent_surprise)
        self.typical_surprise = np.median(np.array(
                self.recent_surprise_history))
        mod_surprise = agent_surprise - self.typical_surprise
        self.surprise_history.append(mod_surprise)

        # Strip the actions off the deliberation_goal_votes to make 
        # the current set of actions.
        # For actions, each goal is a probability threshold. If a roll of
        # dice comes up lower than the goal value, the action is taken
        # with a magnitude of 1.
        self.action = cable_goals[:self.num_actions,:] 
        if (self.timestep % self.BACKUP_PERIOD) == 0:
                self._save()    
        # Log reward
        self.cumulative_reward += reward
        self.time_since_reward_log += 1
        # debug
        # Occasionally (p ~ 0.001 per step) render diagnostics.
        if np.random.random_sample() < 0.001:
            self.visualize()
        return self.action

    def get_index_projections(self, to_screen=False):
        """
        Get representations of all the bundles in each block 
        
        Every feature is projected down through its own block and
        the blocks below it until its cable_contributions on sensor inputs 
        and actions is obtained. This is a way to represent the
        receptive field of each feature.

        Returns a list containing the cable_contributions for each feature 
        in each block.
        """
        all_projections = []
        all_bundle_activities = []
        for block_index in range(len(self.blocks)):
            block_projections = []
            block_bundle_activities = []
            num_bundles = self.blocks[block_index].max_bundles
            for bundle_index in range(num_bundles):    
                # Project a one-hot bundle vector down to sensors/actions.
                bundles = np.zeros((num_bundles, 1))
                bundles[bundle_index, 0] = 1.
                cable_contributions = self._get_index_projection(
                        block_index,bundles)
                if np.nonzero(cable_contributions)[0].size > 0:
                    block_projections.append(cable_contributions)
                    block_bundle_activities.append(self.blocks[block_index].
                            bundle_activities[bundle_index])
                    # Display the cable_contributions in text form if desired
                    if to_screen:
                        print 'cable_contributions', \
                            self.blocks[block_index].name, \
                            'feature', bundle_index
                        for i in range(cable_contributions.shape[1]):
                            print np.nonzero(cable_contributions)[0][
                                    np.where(np.nonzero(
                                    cable_contributions)[1] == i)]
            if len(block_projections) > 0:
                all_projections.append(block_projections)
                all_bundle_activities.append(block_bundle_activities)
        return (all_projections, all_bundle_activities)
  
    def _get_index_projection(self, block_index, bundles):
        """
        Get the cable_contributions for bundles
        
        Recursively project bundles down through blocks
        until the bottom block is reached. Feature values is a 
        two-dimensional array and can contain
        several columns. Each column represents a state, and their
        order represents a temporal progression. During cable_contributions
        to the next lowest block, the number of states
        increases by one. 
        
        Return the cable_contributions in terms of basic sensor 
        inputs and actions. 
        """
        # Base case: below the bottom block, return the values unchanged.
        if block_index == -1:
            return bundles
        cable_contributions = np.zeros((self.blocks[block_index].max_cables, 
                               bundles.shape[1] + 1))
        for bundle_index in range(bundles.shape[0]):
            for time_index in range(bundles.shape[1]):
                if bundles[bundle_index, time_index] > 0:
                    new_contribution = self.blocks[
                            block_index].get_index_projection(bundle_index)
                    # Merge overlapping contributions by elementwise max.
                    cable_contributions[:,time_index:time_index + 2] = (
                            np.maximum(
                            cable_contributions[:,time_index:time_index + 2], 
                            new_contribution))
        cable_contributions = self._get_index_projection(block_index - 1, 
                                                   cable_contributions)
        return cable_contributions

    def visualize(self):
        """ Show the current state and some history of the agent """
        print ' '.join([self.name, 'is', str(self.timestep), 'time steps old'])
        # Log average reward since the last visualization, then reset.
        self.reward_history.append(float(self.cumulative_reward) / 
                                   (self.time_since_reward_log + 1))
        self.cumulative_reward = 0    
        self.time_since_reward_log = 0
        self.reward_steps.append(self.timestep)
        self._show_reward_history()
        for block in self.blocks:
            block.visualize()
            pass
        return
 
    def report_performance(self):
        """ Report on the reward amassed by the agent """
        performance = np.mean(self.reward_history)
        print("Final performance is %f" % performance)
        self._show_reward_history(hold_plot=self.show)
        return performance
    
    def _show_reward_history(self, hold_plot=False, 
                            filename='log/reward_history.png'):
        """ Show the agent's reward history and save it to a file """
        if self.graphing:
            fig = plt.figure(1)
            plt.plot(self.reward_steps, self.reward_history)
            plt.xlabel("time step")
            plt.ylabel("average reward")
            plt.title(''.join(('Reward history for ', self.name)))
            fig.show()
            fig.canvas.draw()
            plt.savefig(filename, format='png')
            # hold_plot blocks until the user closes the window.
            if hold_plot:
                plt.show()
        return
    
    def _save(self):
        """ Archive a copy of the agent object for future use """
        success = False
        make_backup = True
        print "Attempting to save agent..."
        try:
            with open(self.pickle_filename, 'wb') as agent_data:
                pickle.dump(self, agent_data)
            # Write a second .bak copy so a crash mid-dump cannot lose both.
            if make_backup:
                with open(''.join((self.pickle_filename, '.bak')), 
                          'wb') as agent_data_bak:
                    pickle.dump(self, agent_data_bak)
            print("Agent data saved at " + str(self.timestep) + " time steps")
        except IOError as err:
            print("File error: " + str(err) + 
                  " encountered while saving agent data")
        except pickle.PickleError as perr: 
            print("Pickling error: " + str(perr) + 
                  " encountered while saving agent data")        
        else:
            success = True
        return success
        
    def restore(self):
        """ Reconstitute the agent from a previously saved agent """
        # Falls back to `self` (the freshly built agent) on any failure.
        restored_agent = self
        try:
            with open(self.pickle_filename, 'rb') as agent_data:
                loaded_agent = pickle.load(agent_data)

            # Compare the number of channels in the restored agent with 
            # those in the already initialized agent. If it matches, 
            # accept the agent. If it doesn't,
            # print a message, and keep the just-initialized agent.
            if((loaded_agent.num_sensors == self.num_sensors) and 
               (loaded_agent.num_actions == self.num_actions)):
                print(''.join(('Agent restored at timestep ', 
                               str(loaded_agent.timestep),
                               ' from ', self.pickle_filename)))
                restored_agent = loaded_agent
            else:
                print("The agent " + self.pickle_filename + " does not have " +
                      "the same number of input and output elements as " + 
                      "the world.")
                print("Creating a new agent from scratch.")
        except IOError:
            print("Couldn't open %s for loading" % self.pickle_filename)
        except pickle.PickleError, e:
            print("Error unpickling world: %s" % e)
        return restored_agent
Exemple #36
0
# Module prologue for a wxPython buddy-list GUI (Python 2): imports plus a
# few module-level constants used to filter keyboard search triggers.
from util.primitives.funcs import do
from util.primitives.mapping import Storage

import gui

from gui.uberwidgets.panelframe import PanelFrame
from gui.buddylist.accountlist import AccountList
from gui.uberwidgets.connectionlist import ConnectionsPanel
from gui.buddylist.accounttray import AccountTrayIcon
from gui.native import memory_event
from common import profile, bind
from gui.toolbox import AddInOrder, calllimit


from hub import Hub
# Application-wide singleton hub, fetched once at import time.
hub = Hub.getInstance()
from gui.toolbox import saveWindowPos
from gui.toolbox import Monitor
from gui.statuscombo import StatusCombo
from common import pref
from cgui import SimplePanel

# keys which are ignored for starting searches.
_function_keys = [getattr(wx, 'WXK_F' + str(i)) for i in xrange(1, 13)]  # F1..F12

# Extra keys excluded only on some platforms (Windows keys on win32).
platform_disallowed_keys = []
if config.platform == 'win':
    platform_disallowed_keys.extend([wx.WXK_WINDOWS_LEFT, wx.WXK_WINDOWS_RIGHT])

disallowed_search_keys = frozenset([wx.WXK_ESCAPE, wx.WXK_MENU, wx.WXK_TAB,
    wx.WXK_BACK] + platform_disallowed_keys + _function_keys)
Exemple #37
0
# Near-duplicate of the buddy-list module prologue above, with error
# handling helpers added (Python 2).
from util.primitives.error_handling import traceguard, try_this
from util.primitives.funcs import do
from util.primitives.mapping import Storage

import gui

from gui.uberwidgets.panelframe import PanelFrame
from gui.buddylist.accountlist import AccountList
from gui.uberwidgets.connectionlist import ConnectionsPanel
from gui.buddylist.accounttray import AccountTrayIcon
from gui.native import memory_event
from common import profile, bind
from gui.toolbox import AddInOrder, calllimit

from hub import Hub
# Application-wide singleton hub, fetched once at import time.
hub = Hub.getInstance()
from gui.toolbox import saveWindowPos
from gui.toolbox import Monitor
from gui.statuscombo import StatusCombo
from common import pref
from cgui import SimplePanel

# keys which are ignored for starting searches.
_function_keys = [getattr(wx, 'WXK_F' + str(i)) for i in xrange(1, 13)]  # F1..F12

# Extra keys excluded only on some platforms (Windows keys on win32).
platform_disallowed_keys = []
if config.platform == 'win':
    platform_disallowed_keys.extend(
        [wx.WXK_WINDOWS_LEFT, wx.WXK_WINDOWS_RIGHT])

# NOTE(review): this final statement is truncated in this source chunk.
disallowed_search_keys = frozenset(
Exemple #38
0
class Agent(object):
    """ 
    A general reinforcement learning agent
    
    Takes in a time series of sensory input vectors and 
    a scalar reward and puts out a time series of action commands."""
    # NOTE(review): Python 2 code throughout (print statements,
    # `except X, e` syntax); depends on Block, Hub, np, plt, pickle
    # defined elsewhere in the module. Reformatted duplicate of the
    # Agent class earlier in this file.
    def __init__(self,
                 num_sensors,
                 num_actions,
                 show=True,
                 agent_name='test_agent'):
        """
        Configure the Agent

        num_sensors and num_actions are the only absolutely necessary
        arguments. They define the number of elements in the 
        sensors and actions arrays that the agent and the world use to
        communicate with each other. 
        """
        # Pickle a snapshot of the agent every BACKUP_PERIOD timesteps.
        self.BACKUP_PERIOD = 10**4
        self.show = show
        self.name = agent_name
        self.pickle_filename = "log/" + agent_name + ".pickle"
        # TODO: Automatically adapt to the number of sensors pass in
        self.num_sensors = num_sensors
        self.num_actions = num_actions

        # Initialize agent infrastructure
        self.num_blocks = 1
        first_block_name = ''.join(('block_', str(self.num_blocks - 1)))
        self.blocks = [
            Block(self.num_actions + self.num_sensors, name=first_block_name)
        ]
        self.hub = Hub(self.blocks[0].max_cables)
        self.action = np.zeros((self.num_actions, 1))
        self.cumulative_reward = 0
        self.time_since_reward_log = 0
        self.reward_history = []
        self.reward_steps = []
        self.surprise_history = []
        # Rolling window used to estimate the typical surprise level.
        self.recent_surprise_history = [0.] * 100
        self.timestep = 0
        self.graphing = True

    def step(self, sensors, reward):
        """ Step through one time interval of the agent's operation """
        self.timestep += 1
        # Promote 1-D sensor vectors to column vectors.
        if sensors.ndim == 1:
            sensors = sensors[:, np.newaxis]
        self.reward = reward
        # Propogate the new sensor inputs up through the blocks
        cable_activities = np.vstack((self.action, sensors))
        for block in self.blocks:
            cable_activities = block.step_up(cable_activities)
        # Create a new block if the top block has had enough bundles assigned
        # (`block` here is the top block, left over from the loop above).
        block_bundles_full = (float(block.bundles_created()) /
                              float(block.max_bundles))
        block_initialization_threshold = .5
        if block_bundles_full > block_initialization_threshold:
            self.num_blocks += 1
            next_block_name = ''.join(('block_', str(self.num_blocks - 1)))
            self.blocks.append(
                Block(self.num_actions + self.num_sensors,
                      name=next_block_name,
                      level=self.num_blocks))
            cable_activities = self.blocks[-1].step_up(cable_activities)
            self.hub.add_cables(self.blocks[-1].max_cables)
            print "Added block", self.num_blocks - 1

        self.hub.step(self.blocks, self.reward)

        # Propogate the deliberation_goal_votes down through the blocks
        # debug
        agent_surprise = 0.0
        cable_goals = np.zeros((cable_activities.size, 1))

        for block in reversed(self.blocks):
            cable_goals = block.step_down(cable_goals)
            if np.nonzero(block.surprise)[0].size > 0:
                agent_surprise = np.sum(block.surprise)
        # Track surprise relative to its recent median, not its raw value.
        self.recent_surprise_history.pop(0)
        self.recent_surprise_history.append(agent_surprise)
        self.typical_surprise = np.median(
            np.array(self.recent_surprise_history))
        mod_surprise = agent_surprise - self.typical_surprise
        self.surprise_history.append(mod_surprise)

        # Strip the actions off the deliberation_goal_votes to make
        # the current set of actions.
        # For actions, each goal is a probability threshold. If a roll of
        # dice comes up lower than the goal value, the action is taken
        # with a magnitude of 1.
        self.action = cable_goals[:self.num_actions, :]
        if (self.timestep % self.BACKUP_PERIOD) == 0:
            self._save()
        # Log reward
        self.cumulative_reward += reward
        self.time_since_reward_log += 1
        # debug
        # Occasionally (p ~ 0.001 per step) render diagnostics.
        if np.random.random_sample() < 0.001:
            self.visualize()
        return self.action

    def get_index_projections(self, to_screen=False):
        """
        Get representations of all the bundles in each block 
        
        Every feature is projected down through its own block and
        the blocks below it until its cable_contributions on sensor inputs 
        and actions is obtained. This is a way to represent the
        receptive field of each feature.

        Returns a list containing the cable_contributions for each feature 
        in each block.
        """
        all_projections = []
        all_bundle_activities = []
        for block_index in range(len(self.blocks)):
            block_projections = []
            block_bundle_activities = []
            num_bundles = self.blocks[block_index].max_bundles
            for bundle_index in range(num_bundles):
                # Project a one-hot bundle vector down to sensors/actions.
                bundles = np.zeros((num_bundles, 1))
                bundles[bundle_index, 0] = 1.
                cable_contributions = self._get_index_projection(
                    block_index, bundles)
                if np.nonzero(cable_contributions)[0].size > 0:
                    block_projections.append(cable_contributions)
                    block_bundle_activities.append(
                        self.blocks[block_index].
                        bundle_activities[bundle_index])
                    # Display the cable_contributions in text form if desired
                    if to_screen:
                        print 'cable_contributions', \
                            self.blocks[block_index].name, \
                            'feature', bundle_index
                        for i in range(cable_contributions.shape[1]):
                            print np.nonzero(cable_contributions)[0][np.where(
                                np.nonzero(cable_contributions)[1] == i)]
            if len(block_projections) > 0:
                all_projections.append(block_projections)
                all_bundle_activities.append(block_bundle_activities)
        return (all_projections, all_bundle_activities)

    def _get_index_projection(self, block_index, bundles):
        """
        Get the cable_contributions for bundles
        
        Recursively project bundles down through blocks
        until the bottom block is reached. Feature values is a 
        two-dimensional array and can contain
        several columns. Each column represents a state, and their
        order represents a temporal progression. During cable_contributions
        to the next lowest block, the number of states
        increases by one. 
        
        Return the cable_contributions in terms of basic sensor 
        inputs and actions. 
        """
        # Base case: below the bottom block, return the values unchanged.
        if block_index == -1:
            return bundles
        cable_contributions = np.zeros(
            (self.blocks[block_index].max_cables, bundles.shape[1] + 1))
        for bundle_index in range(bundles.shape[0]):
            for time_index in range(bundles.shape[1]):
                if bundles[bundle_index, time_index] > 0:
                    new_contribution = self.blocks[
                        block_index].get_index_projection(bundle_index)
                    # Merge overlapping contributions by elementwise max.
                    cable_contributions[:, time_index:time_index + 2] = (
                        np.maximum(
                            cable_contributions[:, time_index:time_index + 2],
                            new_contribution))
        cable_contributions = self._get_index_projection(
            block_index - 1, cable_contributions)
        return cable_contributions

    def visualize(self):
        """ Show the current state and some history of the agent """
        print ' '.join([self.name, 'is', str(self.timestep), 'time steps old'])
        # Log average reward since the last visualization, then reset.
        self.reward_history.append(
            float(self.cumulative_reward) / (self.time_since_reward_log + 1))
        self.cumulative_reward = 0
        self.time_since_reward_log = 0
        self.reward_steps.append(self.timestep)
        self._show_reward_history()
        for block in self.blocks:
            block.visualize()
            pass
        return

    def report_performance(self):
        """ Report on the reward amassed by the agent """
        performance = np.mean(self.reward_history)
        print("Final performance is %f" % performance)
        self._show_reward_history(hold_plot=self.show)
        return performance

    def _show_reward_history(self,
                             hold_plot=False,
                             filename='log/reward_history.png'):
        """ Show the agent's reward history and save it to a file """
        if self.graphing:
            fig = plt.figure(1)
            plt.plot(self.reward_steps, self.reward_history)
            plt.xlabel("time step")
            plt.ylabel("average reward")
            plt.title(''.join(('Reward history for ', self.name)))
            fig.show()
            fig.canvas.draw()
            plt.savefig(filename, format='png')
            # hold_plot blocks until the user closes the window.
            if hold_plot:
                plt.show()
        return

    def _save(self):
        """ Archive a copy of the agent object for future use """
        success = False
        make_backup = True
        print "Attempting to save agent..."
        try:
            with open(self.pickle_filename, 'wb') as agent_data:
                pickle.dump(self, agent_data)
            # Write a second .bak copy so a crash mid-dump cannot lose both.
            if make_backup:
                with open(''.join((self.pickle_filename, '.bak')),
                          'wb') as agent_data_bak:
                    pickle.dump(self, agent_data_bak)
            print("Agent data saved at " + str(self.timestep) + " time steps")
        except IOError as err:
            print("File error: " + str(err) +
                  " encountered while saving agent data")
        except pickle.PickleError as perr:
            print("Pickling error: " + str(perr) +
                  " encountered while saving agent data")
        else:
            success = True
        return success

    def restore(self):
        """ Reconstitute the agent from a previously saved agent """
        # Falls back to `self` (the freshly built agent) on any failure.
        restored_agent = self
        try:
            with open(self.pickle_filename, 'rb') as agent_data:
                loaded_agent = pickle.load(agent_data)

            # Compare the number of channels in the restored agent with
            # those in the already initialized agent. If it matches,
            # accept the agent. If it doesn't,
            # print a message, and keep the just-initialized agent.
            if ((loaded_agent.num_sensors == self.num_sensors)
                    and (loaded_agent.num_actions == self.num_actions)):
                print(''.join(
                    ('Agent restored at timestep ', str(loaded_agent.timestep),
                     ' from ', self.pickle_filename)))
                restored_agent = loaded_agent
            else:
                print("The agent " + self.pickle_filename + " does not have " +
                      "the same number of input and output elements as " +
                      "the world.")
                print("Creating a new agent from scratch.")
        except IOError:
            print("Couldn't open %s for loading" % self.pickle_filename)
        except pickle.PickleError, e:
            print("Error unpickling world: %s" % e)
        return restored_agent
# Bootstrap script for a solar water-heater controller: build a Hub with
# initial state, attach components, then hand control to the hub's loop.
import config
from hub import Hub

hub = Hub(
    solar_temp = None,  # last reading from the solar collector sensor
    tank_temp = None,  # last reading from the tank sensor
    tank_target_temp = config.TANK_TARGET_TEMP,
    pump = False,  # whether the circulation pump is on
    mode = 'auto',  # 'auto' presumably vs. a manual override mode -- TODO confirm
)

# Imports are interleaved with hub.add() calls, presumably to keep peak
# memory low on a constrained (MicroPython-style) device -- TODO confirm.
from internet import Internet
internet = hub.add(Internet)

from temperature import Temperature
hub.add(Temperature)

from display import Display
hub.add(Display, priority=True)

from controller import Controller
hub.add(Controller, priority=True)

from pump import Pump
hub.add(Pump, priority=True)

from components.retain import Retain
hub.add(Retain)

hub.run()  # blocks: main component loop
Exemple #40
0
class Scene(object):
	# Gameplay scene for a rocket/planets game (Python 2 + pygame).
	# Shared class-level background image, loaded once on first use.
	image = None
	def __init__(self, _engine):
		super(Scene, self).__init__()

		if not Scene.image:
			Scene.image = loadImage('data/gfx/background.png')

		self._engine = _engine
		self._resx, self._resy = _engine.getResolution()
		self._background = pygame.transform.smoothscale(self.image, (self._resx, self._resy))
		self.surface = self._background.copy()
		drawText(self.surface, "Poziom %d..." % self._engine.game['level'], 48, (255, 255, 255), (self._resx / 2, self._resy / 2))
		self._engine.show(self.surface)
		self._hub = Hub(_engine)
		self.planets = pygame.sprite.Group()
		self.rocket = pygame.sprite.GroupSingle()
		self.stars = pygame.sprite.Group()
		self.canisters = pygame.sprite.Group()
		# _first defers level generation until the first show() call.
		self._first = True

	def screenUpdated(self):
		# Rescale cached surfaces after a resolution change.
		self._resx, self._resy = self._engine.getResolution()
		self._background = pygame.transform.smoothscale(self.image, (self._resx, self._resy))
		self.surface = pygame.transform.scale(self.surface, (self._resx, self._resy))
		self._hub.screenUpdated()

	def show(self):
		# On first entry, populate the level with planets, stars and
		# canisters at non-overlapping random positions, showing a
		# progress screen between placements.
		if self._first:
			self.surface = self._background.copy()
			drawText(self.surface, "Poziom %d(0%%)..." % self._engine.game['level'], 48, (255, 255, 255), (self._resx / 2, self._resy / 2))
			self._engine.show(self.surface)
			count = 0
			# Object counts scale with the current level.
			planets = random.randint(1, min(self._engine.game['level'] * 2, 8))
			stars = random.randint(self._engine.game['level'], self._engine.game['level'] * 5)
			canisters = random.randint(0, self._engine.game['level'])
			whole = (planets + stars + canisters + 1) * 0.01
			# NoMore is raised by randomPlace when no free spot remains;
			# each placement loop just stops early in that case.
			try:
				for _ in xrange(planets):
					self.planets.add(Planet(random.randint(planet.MIN_MASS, planet.MAX_MASS), randomPlace([p for p in self.planets], 100, (self._resx, self._resy))))
					count += 1
					self.surface = self._background.copy()
					drawText(self.surface, "Poziom %d(%d%%)..." % (self._engine.game['level'], count / whole), 48, (255, 255, 255), (self._resx / 2, self._resy / 2))
					self._engine.show(self.surface)

			except NoMore:
				pass

			try:
				for _ in xrange(stars):
					self.stars.add(Star(randomPlace([p for p in self.planets] + [s for s in self.stars], 16, (self._resx, self._resy))))
					count += 1
					self.surface = self._background.copy()
					drawText(self.surface, "Poziom %d(%d%%)..." % (self._engine.game['level'], count / whole), 48, (255, 255, 255), (self._resx / 2, self._resy / 2))
					self._engine.show(self.surface)

			except NoMore:
				pass

			try:
				for _ in xrange(canisters):
					self.canisters.add(Canister(randomPlace([p for p in self.planets] + [s for s in self.stars] + [c for c in self.canisters], 16, (self._resx, self._resy))))
					count += 1
					self.surface = self._background.copy()
					drawText(self.surface, "Poziom %d(%d%%)..." % (self._engine.game['level'], count / whole), 48, (255, 255, 255), (self._resx / 2, self._resy / 2))
					self._engine.show(self.surface)

			except NoMore:
				pass

			self.rocket.add(Rocket(self._engine, self, randomPlace([p for p in self.planets], 48, (self._resx, self._resy))))
			count += 1
			self.surface = self._background.copy()
			drawText(self.surface, "Poziom %d(%d%%)..." % (self._engine.game['level'], count / whole), 48, (255, 255, 255), (self._resx / 2, self._resy / 2))
			self._engine.show(self.surface)
			self._first = False

		# Main game loop: poll events, check win/lose conditions, then
		# update and redraw every sprite group. Exits via exceptions.
		try:
			while self._engine.tick():
				for event in pygame.event.get():
					if event.type == QUIT:
						self._engine.quit()

					if event.type == KEYUP and event.key == K_ESCAPE:
						raise Pause()

					# Q turns god mode off and records the level it ended on.
					if event.type == KEYUP and event.key == K_q:
						if self._engine.game['godmode']:
							self._engine.game['godmode'] = False
							self._engine.game['godlevel'] = self._engine.game['level']

				if self._engine.state & engine.STATE_END:
					self.surface.fill((0, 0, 0))
					drawText(self.surface, "Gratulacje! Twoj wynik: %d!" % self._engine.game['score'], 50, (0, 140, 0), (self._resx / 2, self._resy / 2))
					self._engine.show(self.surface)
					continue

				# Collecting every star finishes the level.
				if len(self.stars) == 0:
					raise NextLevel()

				if self._engine.game['life'] <= 0 or not self._engine.game['fuel']:
					raise GameEnd()

				self.surface = self._background.copy()
				self.planets.update()
				self.stars.update()
				self.canisters.update()
				self.rocket.update()
				self._hub.update()
				self.planets.draw(self.surface)
				self.stars.draw(self.surface)
				self.canisters.draw(self.surface)
				self.rocket.draw(self.surface)
				self._hub.draw(self.surface)
				self._engine.show(self.surface)

		except Pause:
			# Toggle between game and menu states.
			self._engine.state ^= engine.STATE_GAME | engine.STATE_MENU

		except GameEnd:
			self._engine.state |= engine.STATE_END

		except NextLevel:
			self._engine.nextLevel()
# Bootstrap script for a smart-light controller: build a Hub with initial
# state, select which keys to retain across restarts, attach components.
import config
from hub import Hub

hub = Hub(
    light=False,  # current light on/off state
    light_cmd=False,  # last commanded state
    brightness=config.INIT_BRI,
    motion=False,  # last motion-sensor reading
    enable=False,
    auto=False,
    battery=100,  # battery level, presumably percent -- TODO confirm
)

# Keys persisted across restarts; extra keys are added below depending
# on which optional features are compiled in via config.
config.RETAIN = set(('light', ))

if config.BRIGHTNESS:
    config.RETAIN.add('brightness')

if config.BATT:
    config.RETAIN.add('battery')

if config.MOTN:
    config.RETAIN.add('auto')
    config.RETAIN.add('enable')

if len(config.RETAIN) > 0:
    from components.retain import Retain
    hub.add(Retain)

from light import Light
hub.add(Light, priority=True)  # put light above retain
Exemple #42
0
class Scene(object):
	image = None

	def __init__(self, _engine):
		"""Build the battle scene: map, HUD hub, cursor and one AI per player."""
		super(Scene, self).__init__()
		self._engine = _engine
		self._resx, self._resy = _engine.getResolution()
		self.surface = pygame.Surface((self._resx, self._resy))
		# Loading splash shown while the map is being constructed.
		drawText(self.surface, "Wczytywanie mapy...", 48, (255, 255, 255), (self._resx / 2, self._resy / 2))
		self._map = Map(_engine)
		self._hub = Hub(_engine, self._map)
		self._cursor = Cursor(_engine, self._map)
		self._ais = [
			AI(_engine, self._map, _engine.players[0], 0),
			AI(_engine, self._map, _engine.players[1], 1),
		]

	def screenUpdated(self):
		"""Rescale the backing surface after a resolution change and notify children."""
		self._resx, self._resy = self._engine.getResolution()
		self.surface = pygame.transform.smoothscale(self.surface, (self._resx, self._resy))
		for child in (self._map, self._hub, self._cursor):
			child.screenUpdated()

	def _banner(self, text, color):
		# End-of-game splash: black screen with a centred caption.
		self.surface.fill((0, 0, 0))
		drawText(self.surface, text, 50, color, (self._resx / 2, self._resy / 2))
		self._engine.show(self.surface)

	def _pump_events(self):
		# Drain the pygame queue: window close, pause key, TAB focus cycling.
		for ev in pygame.event.get():
			if ev.type == QUIT:
				self._engine.quit()
			elif ev.type == KEYUP:
				if ev.key == K_ESCAPE:
					raise Pause()
				if ev.key == K_TAB:
					tanks = self._engine.players[0]['tanks']
					for n, tank in enumerate(tanks):
						if tank.focus:
							tank.setFocus(False)
							tanks[(n + 1) % len(tanks)].setFocus(True)
							break

	def _check_victory(self):
		# Time ran out: higher score wins; a tie counts as a loss.
		if self._engine.timeLeft() == (0, 0):
			if self._engine.players[0]['score'] > self._engine.players[1]['score']:
				raise GameWin()
			raise GameOver()

		# Annihilation of either side ends the match immediately.
		if not self._engine.players[0]['tanks']:
			raise GameOver()
		if not self._engine.players[1]['tanks']:
			raise GameWin()

	def _ensure_focus(self):
		# Guarantee the human player always has a focused tank; default to the first.
		tanks = self._engine.players[0]['tanks']
		if not any(tank.focus for tank in tanks):
			tanks[0].setFocus(True)

	def _scroll_map(self):
		# Held arrow keys pan the map; several directions may combine per frame.
		pressed = pygame.key.get_pressed()
		for key, direction in (
				(K_LEFT, map.DIRECTION_LEFT),
				(K_RIGHT, map.DIRECTION_RIGHT),
				(K_UP, map.DIRECTION_UP),
				(K_DOWN, map.DIRECTION_DOWN)):
			if pressed[key]:
				self._map.move(direction)

	def show(self):
		"""Run the main game loop until the engine stops ticking.

		Pause and win/lose conditions are signalled with exceptions and
		folded into the engine state flags on the way out.
		"""
		try:
			while self._engine.tick():
				self._pump_events()

				# When the game is already decided, only repaint the banner.
				if self._engine.state & engine.STATE_FAIL:
					self._banner("Game Over", (140, 0, 0))
					continue
				if self._engine.state & engine.STATE_WIN:
					self._banner("Gratulacje!", (0, 140, 0))
					continue

				self._check_victory()
				self._ensure_focus()
				self._scroll_map()

				# Simulation step for every actor, then a full repaint.
				for ai in self._ais:
					ai.update()
				self._map.update()
				self._hub.update()
				self._cursor.update()
				self._map.draw(self.surface)
				self._hub.draw(self.surface)
				self._cursor.draw(self.surface)
				self._engine.show(self.surface)

		except Pause:
			self._engine.state ^= engine.STATE_GAME | engine.STATE_MENU

		except GameOver:
			self._engine.state |= engine.STATE_FAIL

		except GameWin:
			self._engine.state |= engine.STATE_WIN