Example #1
 def __init__(self, withGrapics=True):
     self.bestbest = 0
     self.done = False
     self.pool = Pool(Config.WIDTH, Config.HEIGHT, self.callback)
     self.graphics = Graphics()
     if withGrapics:
         self.graphics.init("PoolGame", Config.SIZE)
Example #2
    def __init__(self,
                 pool_size,
                 start_training,
                 country,
                 region=None,
                 covid_data=None,
                 statistic='Cases'):
        # The object containing all case, fatality, and movement data
        self.covid_data = covid_data
        if covid_data is None:
            self.covid_data = DataGenerator()

        # The country and possibly specific region to generate a model for
        self.country = country
        self.region = region

        # The number of models for each generation
        self.pool_size = pool_size
        # What metric to create a model for, either 'Cases' or 'Fatalities'
        self.statistic = statistic

        # The start date for training; data is used from this date through two weeks later
        self.start_training = start_training

        # The object which handles all the models in a generation
        self.pool = Pool(pool_size)

        # The actual data which the predictions will be compared to
        self.test_case = []

        self.generate_test_case()
Example #3
    def Predict(
            self,
            model,  # can be the model itself or model path.
            spectrograms,  # spectrograms that are generated from the given track.
            output_directory=None,
            track_name=None):
        '''Takes a series of spectrograms, predicts them in parallel, and returns the concatenated output spectrogram.'''

        if isinstance(model, str):  # model was given as a path rather than a loaded model object.
            if not os.path.exists(model):
                print('Model does not exist')
                return None
            model = tf.keras.models.load_model(model)

        # Prediction part.----------------------------------------------
        start_time = time.time()

        manager = Pool()
        outputs = manager.go(model.predict, spectrograms)
        outputs = [outputs[i][0] for i in range(len(spectrograms))]
        full_output_stem = reduce(lambda a, b: np.concatenate([a, b], axis=0),
                                  outputs)

        prediction_time = time.time() - start_time
        print(
            f'[LOG] prediction done. Prediction time: {prediction_time:.3f} seconds'
        )

        return full_output_stem
Example #4
File: Role.py Project: poorboy/openulteo
	def __init__(self, main_instance):
		
		AbstractRole.__init__(self, main_instance)
		self.virt_co = libvirt.open(Config.libvirt_uri)
		self.has_run = False
		self.queue = Queue.Queue()
		self.pool = Pool(Config.ulteo_pool_name,self.virt_co)
		self.virtual_machine = {}
		self.network = Network(Config.network_name, self.virt_co)
		self.webserver = None
Example #5
def make_player(use_block, result_data, board_size):
    mock_pool = Pool(len(result_data), Blocks())
    mock_pool.get_blocks = Mock()
    mock_pool.get_blocks.return_value = use_block

    mock_brain = Brain(Blocks())
    mock_brain.get_setting_info = Mock()
    mock_brain.get_setting_info.side_effect = \
        lambda *a, **k: (copy.deepcopy(deque(result_data)), 0)
    return Player(mock_pool, Board(board_size), mock_brain, max=10)
Example #6
class Trainer():
    def __init__(self, withGrapics=True):
        self.bestbest = 0
        self.done = False
        self.pool = Pool(Config.WIDTH, Config.HEIGHT, self.callback)
        self.graphics = Graphics()
        if withGrapics:
            self.graphics.init("PoolGame", Config.SIZE)

    def callback(self, gen, avg_score, best_score, best_json):
        line = "Generation\t%d\tAverage Score\t%f\tBest Score\t%f" % (
            gen, avg_score, best_score)
        self.write_log(Config.JSON_FOLDER + "/" + "logfile.txt", line)
        filename = "brain-g%03d-%04d.json" % (gen, best_score * 1000)

        if best_score >= self.bestbest:
            if best_json is not None:
                self.write_file(Config.JSON_FOLDER + "/" + filename, best_json)
            self.bestbest = best_score

        if gen == Config.GENERATIONS:
            self.done = True

    def write_log(self, filename, line):
        print(line)
        with open(filename, "a") as outfile:
            outfile.write(line + "\n")

    def write_file(self, filename, data):
        with open(filename, "w") as outfile:
            outfile.write(data + "\n")

    def run(self):
        # Loop until the user clicks the close button.
        while not self.done:

            self.done = self.graphics.queryQuit()

            # Set the screen background
            self.graphics.fill(Config.GREEN)
            self.graphics.print("Clock: {}".format(self.graphics.fps()))

            # Do physics
            self.pool.tick()

            # Draw everything
            self.pool.draw(self.graphics)

            # Update screen
            self.graphics.flip()

        # Exit
        self.graphics.quit()
Example #7
 def __init__(self):
     self.conv1 = Conv(3, 8, 1)
     # self.relu1 = Relu()
     self.pool1 = Pool()
     # self.conv2 = Conv(3, 16, 8)
     # self.relu2 = Relu()
     # self.pool2 = Pool()
     # self.conv3 = Conv(3, 24, 16)
     # self.pool3 = Pool()
     # self.conv4 = Conv(3, 30, 24)
     # self.pool4 = Pool()
     self.fcl1  = FCL(24 * 24 * 8, 10)
     self.soft  = Soft()
Example #8
	def group(self):
		pool = Pool()
		self.week_number = pool.getTimeStamp()
		self.maxTriad = pool.length()/3
		for i in range(pool.length()):
			user = pool.pull()
			t = self.checkTriads(user)
			if t == -1:
				self.createNewTriad(user)
				#print "Triad_list: %s" % self.triad_list[0].p1
			else:
				self.insertTriad(t, user)
				#print "Triad_list: %s" % self.triad_list[0].p1
		cursor.close()
Example #9
 def __init__(self):
     self.conv1 = Conv(3, 8, 1)
     self.relu1 = Relu()
     self.pool1 = Pool()
     self.conv2 = Conv(3, 16, 8)
     self.relu2 = Relu()
     self.pool2 = Pool()
     #self.fcl1  = FCL(12 * 6 * 16, 120)
     #self.fcl2  = FCL(120, 80)
     #self.relu3 = Relu()
     #self.relu4 = Relu()
     #self.relu = Relu()
     #self.fcl1  = FCL(256, 256)
     #self.relu1 = Relu()
     self.fcl = Reg(6 * 6 * 16, 2)
     self.soft = Softmax()
Example #10
def main():
    if len(sys.argv) <= 1:
        print("Please provide more arguments")
        return

    pool = Pool()

    while True:
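        # Evolve until execute_trees() reports a score of 0: each iteration evaluates
        # the trees, prints the score, writes the current best tree to disk, and then
        # mutates the best tree for the next round.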
        score = pool.execute_trees()
        print(score)
        #for tree in pool.trees:
        #print(tree.lines())

        write_file(pool.best_tree.lines())
        if score == 0:
            break
        pool.alter_best_tree()

    code = pool.best_tree.lines()
    with open(sys.argv[1], "w+") as f:
        for line in code:
            f.write(line + "\n")
Example #11
def initializePool(env):
    pool = Pool(env)
    for i in range(0, Population):
        basic = Genome(pool)
        basic.basicGenome()
        pool.addToSpecies(basic)

    pool.initializeRun()
    return pool
Example #12
 def initScene(self):
     self.pool = Pool(self)
     # plight = PointLight('plight')
     # plight.setColor(VBase4(0.0, 0.8, 0.8, 1))
     # plnp = render.attachNewNode(plight)
     # plnp.setPos(0, 3, 1)
     # render.setLight(plnp)
     # self.model = self.loader.loadModel("panda")
     # self.model.reparentTo(self.mainNode)
     # # self.model.setScale(0.25,0.25,0.25)
     # self.model.setPos(-8,42,0)
     # self.camera.setPosHpr(0,0,2,0,0,0)
     # base.enableMouse()
     print "Simulator Initialized"
     RosManager()
Example #13
 def test_max_memory(self):
     worker = Pool()
     count_worker = worker.map(self.task, self.generate_data(30, 100000))[0]
     mem_for_worker = worker.map(self.task, self.generate_data(30, 100000))[1]
     self.assertLessEqual(count_worker * mem_for_worker, 500)
Example #14
 def test_memory(self):
     worker = Pool()
     self.assertLessEqual(30, int(worker.map(self.task, self.generate_data(30, 100000))[1]))
Example #15
 def test_count_worker_min(self):
     worker = Pool(15, 20, 512)
     self.assertRaisesRegexp(
         MemoryError,
         "The number of required workers is less than the minimum specified.",
         worker.map, self.task, self.generate_data(30, 100000))
Example #16
 def test_count_worker_max(self):
     worker = Pool(1, 2, 512)
     self.assertEqual(2, worker.map(self.task, self.generate_data(30, 100000))[0])
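Read together, the four tests above (and Example #30 below) suggest that this Pool takes positional arguments of the form (min_workers, max_workers, memory_limit) and that map() returns a sequence whose first element is the worker count and whose second is the memory used per worker. That reading is inferred from the assertions only; the real signature may differ. A hypothetical call under that assumption:

 def example_usage(self):  # hypothetical helper in the same test class, not from the source
     worker = Pool(2, 10, 512)  # assumed meaning: min workers, max workers, memory limit
     result = worker.map(self.task, self.generate_data(30, 100000))
     count_worker, mem_for_worker = result[0], result[1]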
Example #17
    def __process_message(self, peer: Peer, command: bytes,
                          message: bytes) -> bool:
        """
        
        Processes the received message
        
        :param peer:
        :param command:
        :param message:
        :return: True, if connection should be closed
        """

        if command == Protocol.Flags.HELLO:
            logging.warning("HELLO message received again")
            peer.send(Protocol.hello_message())

        elif command == Protocol.Flags.LOGIN:
            logging.warning("User \"" + peer.name + "\" is already logged in")
            peer.send(
                Protocol.server_message(Protocol.ServerFlags.ACK,
                                        "You are already logged in"))

        elif command == Protocol.Flags.PING:
            logging.info("PING message received")
            peer.send(Protocol.pong_message())

        elif command == Protocol.Flags.PONG:
            logging.info("PONG message received")

        elif command == Protocol.Flags.EXIT:
            logging.info("EXIT message received, connection closed")
            # peer.send(Protocol.server_message(Protocol.ServerFlags.NORMAL, "See you later"))
            return True

        elif command == Protocol.Flags.LOGOUT:
            if not peer.logged_in:
                return False
            logging.info("LOGOUT message received from \"" + peer.name + "\"")
            peer.logged_in = False
            peer.name = None

        elif command == Protocol.Flags.JOIN:
            if not peer.logged_in:
                return False

            pool_name = message.split(bytes([Protocol.Flags.SEPARATOR
                                             ]))[0].decode()
            passwd = message.split(bytes([Protocol.Flags.SEPARATOR
                                          ]))[1].decode()
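            # Hash the supplied password with a fixed bcrypt salt, keep only the digest
            # portion (the text after the salt's trailing ".."), and drop the final quote
            # character left over from str(); this value is compared with the hash stored
            # via SQLModule.PoolsSQLModule.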
            hashed = str(
                hashpw(passwd.encode("utf-8"), b"$2a$12$" +
                       b"SZ4R4Z3G3SZ4DJ4LS0RT..")).split("..")[1][:-1]

            logging.info("JOIN from \"" + peer.name + "\" for pool \"" +
                         pool_name + "\"")

            pool_id = SQLModule.PoolsSQLModule.get_id(pool_name)
            if pool_id == -1 or hashed == SQLModule.PoolsSQLModule.get_hashed_pwd(
                    pool_name):
                if pool_name in self.__pools:
                    logging.debug("Pool already exists")
                    self.__pools[pool_name].add_peer(peer)
                else:
                    logging.debug("Pool not exists, creating")
                    self.__pools[pool_name] = Pool(pool_name)
                    self.__pools[pool_name].add_peer(peer)

                if pool_id == -1:
                    SQLModule.PoolsSQLModule.add_pool(pool_name, hashed)
                    logging.info("Pool created with name \"" + pool_name +
                                 "\"")
                    peer.send(
                        Protocol.server_message(
                            Protocol.ServerFlags.ACK,
                            "Pool created with name \"" + pool_name + "\""))

                pool_id = SQLModule.PoolsSQLModule.get_id(pool_name)
                peer_id = SQLModule.PeersSQLModule.get_id(peer.name)
                SQLModule.SwitchTable.add_peer_pool(peer_id, pool_id)
                logging.info("\"" + peer.name + "\" has joined \"" +
                             pool_name + "\" succesfully")
                peer.send(
                    Protocol.server_message(Protocol.ServerFlags.ACK,
                                            "Successful join"))

                peer.pool = self.__pools[pool_name]
                peer.pool.send_message(
                    Protocol.server_message(Protocol.ServerFlags.NORMAL,
                                            peer.name +
                                            " has joined the room"), peer)

            else:
                logging.warning("\"" + pool_name + "\" failed to log in")
                peer.send(
                    Protocol.server_message(Protocol.ServerFlags.NAK,
                                            "Wrong password for this pool"))

        elif command == Protocol.Flags.LEAVE:
            if not peer.logged_in:
                return False
            # pool_name = message.split(bytes([Protocol.Flags.SEPARATOR]))[0].decode()
            pool_name = peer.pool.name
            logging.info("LEAVE from \"" + peer.name + "\" for pool \"" +
                         pool_name + "\"")

            peer_id = SQLModule.PeersSQLModule.get_id(peer.name)
            pool_id = SQLModule.PoolsSQLModule.get_id(pool_name)

            if SQLModule.SwitchTable.remove_peer_pool(peer_id, pool_id):
                peer.send(
                    Protocol.server_message(Protocol.ServerFlags.NORMAL,
                                            "You've left the group"))

            peer.leave_pool()

        elif command == Protocol.Flags.USER:
            if not peer.logged_in:
                peer.send(
                    Protocol.server_message(Protocol.ServerFlags.NORMAL,
                                            "You gotta log in first"))
                return False
            if peer.pool is None:
                peer.send(
                    Protocol.server_message(Protocol.ServerFlags.NORMAL,
                                            "You gotta join a room first"))
                return False

            logging.info("USER message received")

            # peer.pool.send_message(message.split(bytes([Protocol.Flags.SEPARATOR]))[-1], peer)
            # peer.send(Protocol.Protocol.user_message(self.__name, peer.name, message.decode()))

            peer.pool.send_message(
                Protocol.user_message(
                    peer.pool.name, peer.name,
                    message.split(bytes([Protocol.Flags.SEPARATOR
                                         ]))[-1].decode()), peer)

        elif command == Protocol.Flags.SERVER:
            logging.warning(
                "Server received SERVER message, connection closed")
            return True

        else:
            peer.send(
                Protocol.server_message(Protocol.ServerFlags.NORMAL,
                                        "Invalid message received"))
            logging.warning("Invalid message received")

        return False
Example #18
    def __init__(self, para):
        Net.__init__(self, para)
        convPara1 = {
            'instanceName': 'RN18' + '_Conv1',
            'padding': True,
            'padShape': (1, 1),
            'stride': 1,
            'outChannel': para['c1OutChannel'],
            'kernelShape': (3, 3),
            'bias': False
        }
        self.conv1 = Conv2D(convPara1)
        self.norm1 = Normalize({'instanceName': 'RN18' + '_Norm1'})
        self.scale1 = Scale({'instanceName': 'RN18' + '_Scale1'})
        self.activation1 = Activation({
            'instanceName': 'RN18' + '_Activation1',
            'activationType': 'ReLU'
        })
        self.layerList.append(self.conv1)
        self.layerList.append(self.norm1)
        self.layerList.append(self.scale1)
        self.layerList.append(self.activation1)
        convPara2 = {
            'instanceName': 'RN18' + '_Conv2',
            'padding': True,
            'padShape': (1, 1),
            'stride': 2,
            'outChannel': para['c2OutChannel'],
            'kernelShape': (3, 3),
            'bias': False
        }
        self.conv2 = Conv2D(convPara2)
        self.norm2 = Normalize({'instanceName': 'RN18' + '_Norm2'})
        self.scale2 = Scale({'instanceName': 'RN18' + '_Scale2'})
        self.activation2 = Activation({
            'instanceName': 'RN18' + '_Activation2',
            'activationType': 'ReLU'
        })
        self.layerList.append(self.conv2)
        self.layerList.append(self.norm2)
        self.layerList.append(self.scale2)
        self.layerList.append(self.activation2)
        self.rnb1 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB1',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb1)
        self.rnb2 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB2',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb2)
        self.rnb3 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB3',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb3)
        self.rnb4 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB4',
            'skipMode': 'conv',
            'skipStride': 2,
            'stride1': 2,
            'outChannel1': int(para['rnb4OutChannel'] / 4),
            'outChannel2': int(para['rnb4OutChannel'] / 4),
            'outChannel3': para['rnb4OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb4)
        self.rnb5 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB5',
            'skipMode': 'identity',
            'skipStride': 1,
            'stride1': 1,
            'outChannel1': int(para['rnb4OutChannel'] / 4),
            'outChannel2': int(para['rnb4OutChannel'] / 4),
            'outChannel3': para['rnb4OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb5)
        self.pool1 = Pool({
            'instanceName': 'RN18' + '_pool1',
            'poolType': 'ave',
            'stride': para['pSize'],
            'kernelShape': (para['pSize'], para['pSize'])
        })
        self.layerList.append(self.pool1)
        self.fc1 = FullyConnected({
            'instanceName': 'RN18' + '_fc1',
            'outChannel': para['classNum'],
            'bias': True
        })
        self.layerList.append(self.fc1)
        self.softmax = Softmax({'instanceName': 'RN18' + '_softmax'})
        self.layerList.append(self.softmax)

        self.bottomInterface = self.conv1
        self.topInterface = self.softmax
        self.softmax.setNet(self)
Example #19
import numpy as np
import copy
import random
import operator
from NEAT_MapleLegend import constants
from Pool import Pool



pool = Pool()
def newInnovation():
    pool.innovation = pool.innovation+1
    return pool.innovation

class Genome(object):
    class Neuron(object):
        def __init__(self):
            self.incoming = {}
            self.value = 0.0
    class Gene(object):
        def __init__(self, into = 0, out = 0, weight = 0.0, enable = True, innovation = 0):
            self.into = into
            self.out = out
            self.weight = weight
            self.enable = enable
            self.innovation = innovation

        def copy(self):
            return copy.deepcopy(self)
    def __init__(self, Inputs = 0, Outputs = 0):
        self.Inputs = Inputs
Example #20
class Trainer:
    def __init__(self,
                 pool_size,
                 start_training,
                 country,
                 region=None,
                 covid_data=None,
                 statistic='Cases'):
        # The object containing all case, fatality, and movement data
        self.covid_data = covid_data
        if covid_data is None:
            self.covid_data = DataGenerator()

        # The country and possibly specific region to generate a model for
        self.country = country
        self.region = region

        # The number of models for each generation
        self.pool_size = pool_size
        # What metric to create a model for, either 'Cases' or 'Fatalities'
        self.statistic = statistic

        # The start date for training; data is used from this date through two weeks later
        self.start_training = start_training

        # The object which handles all the models in a generation
        self.pool = Pool(pool_size)

        # The actual data which the predictions will be compared to
        self.test_case = []

        self.generate_test_case()

    def generate_test_case(self):
        # Gathers the case data for a week after the training data ends
        day_one = parse_day(self.start_training)
        if self.region is None:
            self.test_case = [
                self.country.daily[day_one + i][self.statistic]
                for i in range(7)
            ]
        else:
            self.test_case = [
                self.region.daily[day_one + i][self.statistic]
                for i in range(7)
            ]

    def train(self, generations):
        self.pool.seed_pool()

        # evaluates and creates a new generation
        for i in range(generations):
            self.threaded_evaluate()

            if i < generations - 1:
                self.pool.next_generation()

        # Returns the top 10 models
        self.pool.sort()
        return self.pool.pool[:10]

    def threaded_evaluate(self):
        # a thread_count of 4 seems to be the most effective
        # higher values slow the program
        thread_count = 4

        with multiprocessing.Pool(thread_count) as worker_pool:
            result = worker_pool.map(self.thread, self.pool.pool)

            # waits for all the processes to finish and closes them
            worker_pool.close()
            worker_pool.join()

            # replaces the pool with the new models
            self.pool.pool = result

    def thread(self, model):
        prediction = self.predict(model, self.start_training)
        if self.region is None:
            model.score = self.rmsle(
                prediction, self.country.cumulative[parse_day(
                    self.start_training)][self.statistic])
        else:
            model.score = self.rmsle(
                prediction, self.region.cumulative[parse_day(
                    self.start_training)][self.statistic])
        return model

    def rmsle(self, predicted, baseline):
        # Root mean square log error test
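        # In effect this is RMSLE over running cumulative totals:
        # sqrt(mean((log(P_i + 1) - log(A_i + 1))**2)), where P_i and A_i are the
        # cumulative predicted and actual values, both seeded with `baseline`.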
        les = []

        predicted_cumulative = baseline
        actual_cumulative = baseline

        for predict, actual in zip(predicted, self.test_case):
            if predict < 0: predict = 0
            if actual < 0: actual = 0
            predicted_cumulative += predict
            actual_cumulative += actual

            les.append((math.log(predicted_cumulative + 1.0) -
                        math.log(actual_cumulative + 1.0))**2)

        return math.sqrt(sum(les) / len(les))

    def evaluate(self, start_date):
        # generates a week of predicted values for each model
        for model in self.pool.pool:
            model_predictions = self.predict(model, start_date)
            cumulative_stat = self.region.cumulative[parse_day(
                self.start_training)][self.statistic]
            model.score = self.rmsle(model_predictions, cumulative_stat)

    def predict(self, model, start_date):
        int_date = parse_day(start_date)

        if self.region is None:
            zone = self.country
        else:
            zone = self.region

        previous_cases = zone.daily[int_date][self.statistic]

        lag = model.mobility_lag

        week_prediction = []
        if self.statistic == 'Cases':
            for day in range(7):
                mobility_stats = []
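                # For each mobility category, average the five days of data ending
                # `lag` days before the day being predicted; dates with missing
                # data are skipped.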
                for category in zone.categories.values():
                    movement_stat = []
                    for date in range(int_date + day - lag - 4,
                                      int_date + day - lag + 1):
                        try:
                            movement_stat.append(category[date]['value'])
                        except:
                            continue
                    mobility_stats.append(
                        sum(movement_stat) / len(movement_stat))
                prediction = model.predict(previous_cases, mobility_stats,
                                           zone.infection_rate(),
                                           zone.population)
                week_prediction.append(prediction)
                previous_cases = prediction
        else:
            for day in range(7):
                week_ago_cases = 0
                for i in range(int_date - 8 + day, int_date - 5 + day):
                    week_ago_cases += zone.daily[i]['Cases']
                prediction = int(model.fatality_ratio * week_ago_cases / 3)
                week_prediction.append(prediction)

        return week_prediction
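A minimal usage sketch for this Trainer. The variable names, pool size, generation count, and date value below are illustrative assumptions, as is the way the country object is obtained from DataGenerator (not shown in the source):

covid_data = DataGenerator()
country_data = ...  # a country object produced by DataGenerator; the lookup is not shown here
trainer = Trainer(pool_size=50,
                  start_training='2020-04-01',  # assumed: whatever format parse_day() expects
                  country=country_data,
                  covid_data=covid_data,
                  statistic='Cases')
best_models = trainer.train(generations=20)  # returns the 10 best-scoring models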
Example #21
File: Role.py Project: poorboy/openulteo
class Role(AbstractRole):
  
	def __init__(self, main_instance):
		
		AbstractRole.__init__(self, main_instance)
		self.virt_co = libvirt.open(Config.libvirt_uri)
		self.has_run = False
		self.queue = Queue.Queue()
		self.pool = Pool(Config.ulteo_pool_name,self.virt_co)
		self.virtual_machine = {}
		self.network = Network(Config.network_name, self.virt_co)
		self.webserver = None
	
	
	"""
	Check if the storage pool exist, if not create it, if yes, reload it 
	Check if the virtual machine network exist, if not create it
	Reload virtual machine in the list of VM
	Init the HTTP server use for communication between hypervisor and virtuals machines
	"""
	def init(self):
	  
		if not self.pool.exist():
			self.pool.create(Config.ulteo_pool_path)
		else:
			self.pool.reload()
			
		if not self.network.exist():
			self.network.create()
			
		self.reload_vm()
		
		self.webserver = HttpServer2((Config.lan, Config.port), self)
			
		return True
	
	
	@staticmethod
	def getName():
		return "Hypervisor"
	
	
	"""
	When role is stopped, all running virtuals machines are stopped
	and the http server is closed
	"""
	def force_stop(self):
		AbstractRole.force_stop(self)
		
		for vm in self.virtual_machine :
			vm = self.virtual_machine[vm]
			
			if vm.getStatus() == "RUNNING" :
				vm.shutdown()
				
		self.webserver.server_close()
	
	
	def finalize(self):
		Logger.info("Hypervisor:: stopping")
	
	
	def run(self):
		
		self.has_run = True
		
		self.status = Role.STATUS_RUNNING
		
		self.webserver.serve_forever()
		
		while self.loop:
			if self.status == Role.STATUS_STOPPING:
				break
			
			try:
				(request, obj) = self.queue.get(True, 4)
			except Queue.Empty, err:
				continue
			# This error is due to the SIGTERM sent by the init script
			except TypeError:
				return
			except (EOFError, socket.error):
				return
Example #22
class CNNMR:
    def __init__(self):
        self.conv1 = Conv(3, 8, 1)
        # self.relu1 = Relu()
        self.pool1 = Pool()
        # self.conv2 = Conv(3, 16, 8)
        # self.relu2 = Relu()
        # self.pool2 = Pool()
        # self.conv3 = Conv(3, 24, 16)
        # self.pool3 = Pool()
        # self.conv4 = Conv(3, 30, 24)
        # self.pool4 = Pool()
        self.fcl1  = FCL(24 * 24 * 8, 10)
        self.soft  = Soft()

    def normalize_batch(self, batch):
        batch[1] = (batch[1]/255.) - 0.5
        if batch[1].ndim != 3:
            batch[1] = batch[1][:, :, np.newaxis]
        # print(batch[1].shape)
        return batch

    def forward(self, batch):
        # batch = self.normalize_batch(batch)
        #print(batch[1].shape)
        out  = self.apply(batch[1])

        loss = np.sum(self.soft.cross_entropy_loss(batch[0]))
        acc  = np.abs(self.soft.acc(batch[0]))

        return out, loss, acc

    def apply(self, batch):
        out = self.conv1.forward(batch)
        # print(np.max(out))
        # out = self.relu1.forward(out)
        out = self.pool1.forward(out)
        
        # out = self.conv2.forward(out)
        # out = self.relu2.forward(out)
        # out = self.pool2.forward(out)
        # out = self.conv3.forward(out)
        # out = self.pool3.forward(out)
        # out = self.conv4.forward(out)
        # out = self.pool4.forward(out)
        out = self.fcl1.forward(out)
        # print(np.max(out))
        out = self.soft.forward(out)
        return out

    def train(self, batch, lr=0.0005):
        out, loss, acc = self.forward(batch)
        gradient = self.soft.backprop(batch[0])
        gradient = self.fcl1.backprop(gradient, lr)
        # gradient = self.pool4.backprop(gradient)
        # gradient = self.conv4.backprop(gradient, lr) 
        # gradient = self.pool3.backprop(gradient)
        # gradient = self.conv3.backprop(gradient, lr) 
        # gradient = self.pool2.backprop(gradient)
        # gradient = self.relu2.backprop(gradient)
        # gradient = self.conv2.backprop(gradient, lr) 
        gradient = self.pool1.backprop(gradient)
        # gradient = self.relu1.backprop(gradient)
        gradient = self.conv1.backprop(gradient, lr)        

        return out, loss, acc
Example #23
def my_pool():
    pool_size = 3
    blocks = Blocks()
    yield Pool(pool_size, blocks)
Example #24
def main():
    print("Starting Network...")
    print("-------------------------------------------------------")
    print("Reading Data sets...")

    # MNIST Data sets
    #train_img, test_img, train_lbl, test_lbl = load(file_name="mnist")

    # CIFAR-10 Data sets
    train_img, test_img, train_lbl, test_lbl = load(file_name="cifar")

    Y = train_lbl[:].astype(int)
    X = train_img[:] / 255.
    Y_test = test_lbl[:].astype(int)
    X_test = test_img[:] / 255.

    #preprocess data
    #X = preprocess_data(X)
    #X_test = preprocess_data(X_test)

    #model
    model = Model()

    model.add(
        ConvNet(filter_size=(5, 5),
                filter_no=6,
                zero_padding=0,
                stride=(1, 1),
                activation="relu"))
    model.add(Pool(pool_size=(2, 2), stride=(2, 2), pool_type="max"))
    model.add(
        ConvNet(filter_size=(5, 5),
                filter_no=6,
                zero_padding=0,
                stride=(1, 1),
                activation="relu"))
    model.add(Pool(pool_size=(2, 2), stride=(2, 2), pool_type="max"))
    model.add(Flatten())
    model.add(
        FCLayer(activation="relu",
                n_neurons=32,
                l_rate=0.001,
                is_drop_out=True,
                drop_out=0.7))
    model.add(FCLayer(activation="softmax", n_neurons=10, l_rate=0.001))

    print("-------------------------------------------------------")
    print("CNN Layers:")
    print("-------------------------------------------------------")
    model.print_layers()

    print("-------------------------------------------------------")
    print("Begin Training...")

    model.train(X, Y, n_epochs=150, print_loss=True, batch_size=32)

    print("End Training.")
    print("-------------------------------------------------------")
    print("Begin Testing...")

    train_accuracy = model.test(X, Y)
    test_accuracy = model.test(X_test, Y_test)

    print("End Testing.")
    print("-------------------------------------------------------")

    print('Training Accuracy: {0:0.2f} %'.format(train_accuracy))
    print('Test Accuracy: {0:0.2f} %'.format(test_accuracy))
    model.show_graph()
Example #25
File: Train.py Project: DeShrike/PoolBot
def callback(gen, avg_score, best_score, best_json):
	global bestbest
	global done

	line = "Generation\t%d\tAverage Score\t%f\tBest Score\t%f" % (gen, avg_score, best_score)
	write_log(Config.JSON_FOLDER + "/" + "logfile.txt", line)
	filename = "brain-g%03d-%04d.json" % (gen, best_score * 1000)

	if best_score >= bestbest:
		if best_json is not None:
			write_file(Config.JSON_FOLDER + "/" + filename, best_json)
		bestbest = best_score

	if gen == Config.GENERATIONS:
		done = True

def write_log(filename, line):
	print(line)
	with open(filename, "a") as outfile:
		outfile.write(line + "\n")


def write_file(filename, data):
	with open(filename, "w") as outfile:
		outfile.write(data + "\n")

pool = Pool(Config.WIDTH, Config.HEIGHT, callback)

while not done:
	pool.tick()
Example #26
class CNNMR:
    def __init__(self):
        self.conv1 = Conv(3, 8, 1)
        self.relu1 = Relu()
        self.pool1 = Pool()
        self.conv2 = Conv(3, 16, 8)
        self.relu2 = Relu()
        self.pool2 = Pool()
        #self.fcl1  = FCL(12 * 6 * 16, 120)
        #self.fcl2  = FCL(120, 80)
        #self.relu3 = Relu()
        #self.relu4 = Relu()
        #self.relu = Relu()
        #self.fcl1  = FCL(256, 256)
        #self.relu1 = Relu()
        self.fcl = Reg(6 * 6 * 16, 2)
        self.soft = Softmax()

    def normalize_batch(self, batch):
        batch[1] = (batch[1] / 255) - 0.5
        if batch[1].ndim != 4:
            batch[1] = batch[1][:, :, np.newaxis, :]
        #print(batch[1].shape)
        return batch

    def forward(self, batch):
        batch = self.normalize_batch(batch)
        out = self.apply(batch[1])

        loss = np.sum(self.soft.cross_entropy_loss(batch[0]))
        acc = np.abs(self.soft.acc(batch[0]))

        return out, loss, acc

    def apply(self, batch):
        out = self.conv1.apply(batch)
        out = self.relu1.apply(out)
        out = self.pool1.apply(out)
        out = self.conv2.apply(out)
        out = self.relu2.apply(out)
        out = self.pool2.apply(out)
        # out = self.fcl1.apply(out)
        # out = self.relu3.apply(out)
        # out = self.fcl2.apply(out)
        # out = self.relu4.apply(out)
        # out = self.reg.apply(out)
        out = self.fcl.apply(out)
        out = self.soft.apply(out)
        return out

    def train(self, batch, lr=0.05):
        out, loss, acc = self.forward(batch)

        # gradient = self.reg.backprop(batch[0], lr)
        # gradient = self.relu4.backprop(gradient)
        # gradient = self.fcl2.backprop(gradient, lr)
        # gradient = self.relu3.backprop(gradient)
        # gradient = self.fcl1.backprop(gradient, lr)
        gradient = self.soft.backprop(batch[0])
        gradient = self.fcl.backprop(gradient, lr)
        gradient = self.pool2.backprop(gradient)
        gradient = self.relu2.backprop(gradient)
        gradient = self.conv2.backprop(gradient, lr)
        gradient = self.pool1.backprop(gradient)
        gradient = self.relu1.backprop(gradient)
        gradient = self.conv1.backprop(gradient, lr)
        #gradient = self.conv.backprop(gradient, lr)

        return out, loss, acc
Example #27
from Logger import Logger
from Blocks import Blocks
from os import getcwd
from datetime import datetime
from sys import argv
from itertools import product
from functools import reduce
from math import gcd

#main
if __name__ == "__main__":
    board_size = int(argv[1] if len(argv) > 1 else 5)
    learn_switch = argv[2] if len(argv) > 2 else ""
    logname = datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")

    player = Player(Pool(3, Blocks()),
                    Board(board_size),
                    Brain(Blocks(), weights=(100, 1, 10, 1)),
                    max=1000,
                    logger=Logger(getcwd() +
                                  f"\\log\\woody_log_{logname}.log"))

    if (learn_switch == "-l"):
        w1 = (0, 1, 10)
        w2 = (0, 1, 10)
        w3 = (0, 1, 10)
        w4 = (0, 1, 10)
        already = []
        for a1, a2, a3, a4 in product(w1, w2, w3, w4):
            its_gcd = reduce(gcd, [a1, a2, a3, a4])
            its_gcd = 1 if its_gcd == 0 else its_gcd
Example #28
from Pool import Pool

pool = Pool()

Object1 = pool.GetObject()
Object1.Print()
pool.ReleaseObject(Object1)
print('___________')

Object1 = pool.GetObject()
Object2 = pool.GetObject()
Object1.Print()
Object2.Print()
pool.ReleaseObject(Object1)
pool.ReleaseObject(Object2)
print('____________')

Object1 = pool.GetObject()
Object2 = pool.GetObject()
Object3 = pool.GetObject()
Object4 = pool.GetObject()
Example #29
class RN18(Net):
    '''
    ResNet18 has a total of 18 layers.
    Note that some parameters are predetermined; the parameters that need to be specified are quoted in ''.
    For all ResNetBlock modules, the output sizes of the stage 1 and stage 2 conv2D blocks equal
    1/4 of that of the final stage.
    Conv1 - kernel:(3x3), pad:(1,1), stride:1, output: 'c1OutChannel'
    Conv2 - kernel:(3x3), pad:(1,1), stride:2, output: 'c2OutChannel'  # H and W reduced by half
    RNB1 - skipMode:identity, output: 'rnb1OutChannel'
    RNB2 - skipMode:identity, output: same as RNB1
    RNB3 - skipMode:identity, output: same as RNB1
    RNB4 - skipMode:conv, skipStride:2, output: 'rnb4OutChannel'  # H and W reduced by half
    RNB5 - skipMode:identity, output: same as RNB4
    pool - per-channel average pooling of RNB5, reducing the output to 'rnb4OutChannel'; requires
            'pSize', which sets both the stride and the kernel size
    fc - outChannel: 'classNum'
    softmax - final classification layer
    '''
    def __init__(self, para):
        Net.__init__(self, para)
        convPara1 = {
            'instanceName': 'RN18' + '_Conv1',
            'padding': True,
            'padShape': (1, 1),
            'stride': 1,
            'outChannel': para['c1OutChannel'],
            'kernelShape': (3, 3),
            'bias': False
        }
        self.conv1 = Conv2D(convPara1)
        self.norm1 = Normalize({'instanceName': 'RN18' + '_Norm1'})
        self.scale1 = Scale({'instanceName': 'RN18' + '_Scale1'})
        self.activation1 = Activation({
            'instanceName': 'RN18' + '_Activation1',
            'activationType': 'ReLU'
        })
        self.layerList.append(self.conv1)
        self.layerList.append(self.norm1)
        self.layerList.append(self.scale1)
        self.layerList.append(self.activation1)
        convPara2 = {
            'instanceName': 'RN18' + '_Conv2',
            'padding': True,
            'padShape': (1, 1),
            'stride': 2,
            'outChannel': para['c2OutChannel'],
            'kernelShape': (3, 3),
            'bias': False
        }
        self.conv2 = Conv2D(convPara2)
        self.norm2 = Normalize({'instanceName': 'RN18' + '_Norm2'})
        self.scale2 = Scale({'instanceName': 'RN18' + '_Scale2'})
        self.activation2 = Activation({
            'instanceName': 'RN18' + '_Activation2',
            'activationType': 'ReLU'
        })
        self.layerList.append(self.conv2)
        self.layerList.append(self.norm2)
        self.layerList.append(self.scale2)
        self.layerList.append(self.activation2)
        self.rnb1 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB1',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb1)
        self.rnb2 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB2',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb2)
        self.rnb3 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB3',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb3)
        self.rnb4 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB4',
            'skipMode': 'conv',
            'skipStride': 2,
            'stride1': 2,
            'outChannel1': int(para['rnb4OutChannel'] / 4),
            'outChannel2': int(para['rnb4OutChannel'] / 4),
            'outChannel3': para['rnb4OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb4)
        self.rnb5 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB5',
            'skipMode': 'identity',
            'skipStride': 1,
            'stride1': 1,
            'outChannel1': int(para['rnb4OutChannel'] / 4),
            'outChannel2': int(para['rnb4OutChannel'] / 4),
            'outChannel3': para['rnb4OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb5)
        self.pool1 = Pool({
            'instanceName': 'RN18' + '_pool1',
            'poolType': 'ave',
            'stride': para['pSize'],
            'kernelShape': (para['pSize'], para['pSize'])
        })
        self.layerList.append(self.pool1)
        self.fc1 = FullyConnected({
            'instanceName': 'RN18' + '_fc1',
            'outChannel': para['classNum'],
            'bias': True
        })
        self.layerList.append(self.fc1)
        self.softmax = Softmax({'instanceName': 'RN18' + '_softmax'})
        self.layerList.append(self.softmax)

        self.bottomInterface = self.conv1
        self.topInterface = self.softmax
        self.softmax.setNet(self)

    def stack(self, top, bottom):
        self.top = top
        self.bottom = bottom

        self.conv1.stack(self.norm1, bottom)
        self.norm1.stack(self.scale1, self.conv1)
        self.scale1.stack(self.activation1, self.norm1)
        self.activation1.stack(self.conv2, self.scale1)

        self.conv2.stack(self.norm2, self.activation1)
        self.norm2.stack(self.scale2, self.conv2)
        self.scale2.stack(self.activation2, self.norm2)
        self.activation2.stack(self.rnb1, self.scale2)

        self.rnb1.stack(self.rnb2, self.activation2)
        self.rnb2.stack(self.rnb3, self.rnb1)
        self.rnb3.stack(self.rnb4, self.rnb2)
        self.rnb4.stack(self.rnb5, self.rnb3)
        self.rnb5.stack(self.pool1, self.rnb4)
        self.pool1.stack(self.fc1, self.rnb5)
        self.fc1.stack(self.softmax, self.pool1)
        self.softmax.stack(top, self.fc1)
        self.softmax.setSource(bottom)
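The constructor above reads six keys from para, plus whatever Net.__init__ itself requires (not shown here). A minimal construction sketch with illustrative channel counts; the numbers are assumptions, not values from the source:

para = {
    'c1OutChannel': 16,     # output channels of Conv1
    'c2OutChannel': 64,     # output channels of Conv2 (H and W halved by stride 2)
    'rnb1OutChannel': 64,   # output channels of RNB1-RNB3
    'rnb4OutChannel': 128,  # output channels of RNB4-RNB5 (H and W halved again)
    'pSize': 8,             # stride and kernel size of the final average pool
    'classNum': 10,         # output size of the fully connected layer
}
net = RN18(para)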
Example #30
 def test_count_worker(self):
     worker = Pool()
     self.assertEqual(10, worker.map(self.task, self.generate_data(30, 100000))[0])
Example #31
# This program reads a list of bets from stdin (one per line) in the format Bet:<product>:<selections>:<stake>
# The last input is expected to be a Result:<first>:<second>:<last>
# Outputs the dividends for WIN, PLACE and EXACTA

import sys
from Pool import Pool, PlacePool
import utils

win_pool = Pool(0.15, "W")
place_pool = PlacePool(0.12, "P")
exacta_pool = Pool(0.18, "E")

# read input from stdin
# Question: what do we do if we encounter an invalid line of input? Skip or stop?
# In this script we will stop if an invalid input string is found
for line in sys.stdin:
    break_down = line.strip().split(":")
    if not line.strip():
        # empty line? OK, we'll just skip it
        continue
    # any valid input should have exactly 4 parts
    elif len(break_down) != 4:
        print("Invalid input format: ", line)
        break
    elif break_down[0] == "Bet":
        # we could have a class Bet but it is not needed in our case
        # Selections will be validated as non-negative integers, but we'll store them in a dict as strings
        product = break_down[1]
        # Assume a selection that starts with '0', e.g. '03', is invalid
        selections = break_down[2]
        stake = break_down[3]
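For illustration only, a hypothetical stdin stream matching the format described in the comments above (the product codes come from the pools defined in this script; the selections, stakes, exacta selection separator, and result are made up):

Bet:W:1:3.00
Bet:P:2:10.00
Bet:E:1,2:5.00
Result:1:2:3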
Example #32
    
    Log("Average Fitness: {}".format(np.mean(scores)))

    best_game = games[best_indices[0]]
    worst_game = games[best_indices[-1]]

    return best_game, worst_game

BOARD_SIZE = 10
MAX_MOVES = 100
ALPHA = 0.5     # mutation rate
POOL_NAME = "green"
FPS = 25

network = Network()
pool = Pool(BOARD_SIZE, MAX_MOVES, network)
pool.load(POOL_NAME)

if pool.get_size() == 0:
    pool.seed(100)
    pool.race(top=10, games_per_snake=5)

for epoch in range(5):

    Log("--- Epoch #{} ---".format(epoch))

    pool.populate(pool_size=100, alpha = ALPHA)
    snakes, scores = pool.race(top=10, games_per_snake=5)

    for i, score in enumerate(scores):
        snake = snakes[i]