Code Example #1
def createInitialPop(popSize):
    ls = []
    for _ in range(popSize):
        c = Coordinator()
        c.initalize()
        ls.append(c)
    return ls
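
A minimal usage sketch (hypothetical; it assumes the Coordinator class above is importable from this scheduling project):

population = createInitialPop(50)
# Code Example #4 sorts such a population by coord.fitnessValue once evaluated.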
Code Example #2
def placeSegments(self, segmentList, deepStorage, percentReplicate,
                  replicationFactor):
    self.log("Placing Segments")
    Coordinator.placeSegmentsAndReplicas(
        segmentList, deepStorage, percentReplicate, replicationFactor,
        self.historicalNodeList, self.queryList, self.placementStrategy)
    Utils.printSegmentPlacement(self.historicalNodeList)
Code Example #3
File: __init__.py Project: hhldiniz/pysaldo
def __init__(self):
    super().__init__()
    self.coordinator = Coordinator()
    self.subscriber = Subscriber()
    mqtt_client = self.subscriber.get_mqtt_client()
    mqtt_client.on_message = self.subscriber.on_message
    mqtt_client.on_connect = self.subscriber.on_connect
    mqtt_client.on_disconnect = self.subscriber.on_disconnect
Code Example #4
def startAlgo():
    print("Initializing and reading from db")
    Coordinator.initalizeStatic()
    print("Algorithm starts")
    start = time.time()
    lss = geneticScheduler(500,
                           100,
                           5,
                           mutateProb=0.8,
                           elitismOffset=5,
                           mutateRate=0.015)
    print([f.fitnessValue for f in lss])
    elapsed = time.time() - start
    print("time taken = {}s".format(elapsed))
    answer = sorted(lss, key=lambda coord: coord.fitnessValue)[0]
    # if answer.fitnessValue > 1: raise Exception("Hard Constraints not Satisfied")
    # persist the result into the database,
    # deleting all previous data first
    models.CourseClass.query.delete()

    for cc in answer.courseClasses:
        stgString = cc.studentGroups[0].name
        profString = cc.professors[0].name

        for i in range(1, len(cc.studentGroups)):
            stgString += ",{}".format(cc.studentGroups[i].name)

        for j in range(1, len(cc.professors)):  # was iterating studentGroups by mistake
            profString += ",{}".format(cc.professors[j].name)

        day = cc.day
        startTime = cc.startTime
        endTime = cc.endTime
        roomName = cc.slots[0].room.name
        courseName = cc.course.name
        courseClassDb = models.CourseClass(course=courseName,
                                           studentGroups=stgString,
                                           professors=profString,
                                           day=day,
                                           startTime=startTime,
                                           endTime=endTime,
                                           room=roomName)
        db.session.add(courseClassDb)

    #adding soft constraints
    for prof in answer.professors:
        name = prof.name

        currProf = models.Professor.query.filter(
            models.Professor.name == name).first()
        currProf.satisfied = prof.satisfied

    db.session.commit()
Code Example #5
File: __init__.py Project: hhldiniz/pysaldo
class SubscriberStarter(Thread):
    def __init__(self):
        super().__init__()
        self.coordinator = Coordinator()
        self.subscriber = Subscriber()
        mqtt_client = self.subscriber.get_mqtt_client()
        mqtt_client.on_message = self.subscriber.on_message
        mqtt_client.on_connect = self.subscriber.on_connect
        mqtt_client.on_disconnect = self.subscriber.on_disconnect

    def execute_withdraw(self, value):
        if self.coordinator.get_hash() != "":  # "is not" tested identity, not equality
            self.subscriber.set_hash(self.coordinator.get_hash())
            self.coordinator.set_hash("")
            self.coordinator.proccess_request(self.subscriber.withdraw, value)
            self.coordinator.set_hash(self.subscriber.get_hash())
            self.subscriber.set_hash("")

    def run(self):
        self.subscriber.get_mqtt_client().loop_start()
        while True:
            self.execute_withdraw(10)
            sleep(1)
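
A hedged usage sketch for the class above (hypothetical; Coordinator and Subscriber come from the pysaldo project and must be configured):

starter = SubscriberStarter()
starter.daemon = True  # run() loops forever, so don't block interpreter exit
starter.start()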
Code Example #6
File: Main.py Project: flint-stone/DruidSimulator
def runExperiment(historicalNodeCount, segmentList, percentreplicate, replicationFactor, queryList, placementStrategy, routingStrategy):
	segmentcount = len(segmentList)

	#Creating Historical Nodes
	print "Creating Historical Nodes"
	historicalnodelist = createHistoricalNodes(historicalNodeCount)
	
	#Placing Segments
	print "Placing Segments"
	avgreplication = Coordinator.placeSegmentsAndReplicas(segmentList, segmentList, percentreplicate, replicationFactor, historicalnodelist, queryList, placementStrategy)
	Coordinator.printCurrentPlacement(historicalnodelist)
	print("%s,%s,%f Average Replication: %f" % (placementStrategy, routingStrategy, percentreplicate, avgreplication))
	
	#Calculating Scores
	print "Routing Queries"
	timetaken = Broker.routeQueries(queryList, historicalnodelist, routingStrategy, segmentcount, 0)
	print("%s,%s,%f Overall Completion Time: %d" % (placementStrategy, routingStrategy, percentreplicate, timetaken))
Code Example #7
def placeSegments(self, segmentList, deepStorage, percentReplicate, replicationFactor):
	self.log("Placing Segments")
	Coordinator.placeSegmentsAndReplicas(segmentList, deepStorage, percentReplicate, replicationFactor, self.historicalNodeList, self.queryList, self.placementStrategy)
	Utils.printSegmentPlacement(self.historicalNodeList)
Code Example #8
File: Strategy.py Project: mghosh4/DruidSimulator
	def placeSegments(self, segmentList, time, config):
		self.log(time, "Placing Segments")
		(numloads, computetime) = Coordinator.placeSegmentsAndReplicas(segmentList, self.historicalNodeList, self.queryList, self.placementStrategy, self.replicationStrategy, self.segmentReplicaCount, self.pastHistory, time, config)
		self.numsegmentloads += numloads
		self.totalcomputetime += computetime
		Utils.printSegmentPlacement(self.historicalNodeList)
Code Example #9
File: memtest.py Project: jm-begon/masterthesis
def __init__(self, nbFeatures, nbObjects, logger, verbosity):
    Coordinator.__init__(self, logger, verbosity)
    self._nbFeatures = nbFeatures
    self._nbObj = nbObjects
    self._factory = NumpyFactory()
Code Example #10
File: Train.py Project: zachoines/ACIOTResearch
    def start(self):
        workers = []
        network_params = (self.NUM_STATE,
                          self._config['Max steps taken per batch'],
                          self.NUM_ACTIONS, self.ACTION_SPACE)

        # Init Global and Local networks. Generate Weights for them as well.
        if self._config['CNN type'] == '':
            self._global_model = AC_Model_Large(self.NUM_STATE,
                                                self.NUM_ACTIONS,
                                                self._config,
                                                is_training=True)
            self._global_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))
            self._step_model = AC_Model_Large(self.NUM_STATE,
                                              self.NUM_ACTIONS,
                                              self._config,
                                              is_training=True)
            self._step_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))
        else:
            self._global_model = CNN_class_import(
                self._config['CNN type'],
                (self.NUM_STATE, self.NUM_ACTIONS, self._config, True))
            self._global_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))
            self._step_model = CNN_class_import(
                self._config['CNN type'],
                (self.NUM_STATE, self.NUM_ACTIONS, self._config, True))
            self._step_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))

        # Load model if exists
        if not os.path.exists(self._model_save_path):
            os.makedirs(self._model_save_path)
        else:
            try:
                if os.path.exists(os.path.join(self._model_save_path, "checkpoint")):  # os.path.join avoids the "\c" escape pitfall

                    self._global_model.load_model_weights()
                    self._step_model.load_model_weights()
                    for env in self._envs:
                        workers.append(
                            Worker(self._step_model,
                                   env,
                                   batch_size=self._config['Max steps taken per batch'],
                                   render=False))

                    print("Model restored.")

                else:

                    for env in self._envs:
                        workers.append(
                            Worker(self._step_model,
                                   env,
                                   batch_size=self._config['Max steps taken per batch'],
                                   render=False))

                    print("Creating new model.")
            except:
                print("ERROR: There was an issue loading the model!")
                raise

        coordinator = Coordinator(self._global_model, self._step_model,
                                  workers, self._plot, self._model_save_path,
                                  self._config)

        # Train and save
        try:
            if coordinator.run():
                try:
                    self._global_model.save_model_weights()
                    print("Model saved.")
                    return True
                except:
                    print("ERROR: There was an issue saving the model!")
                    raise

        except:
            print("ERROR: There was an issues during training!")
            raise
Code Example #11
import pandas as pd

from Coordinator import Coordinator  # assumed import path, as in Code Example #15


def writeToFile(data, path, action):
    data = str(data)
    with open(path, action, encoding="utf-8") as f:  # open the file to write
        f.write(data)


# function to render a dictionary as text
def dictToStr(d):  # renamed parameter: `dict` shadowed the built-in
    strDict = ''
    for k, v in d.items():
        strDict += f'{str(k)} -> {str(v)}\n'
    return strDict


# Join dataSets and normalize
coord = Coordinator()
# joinedDS = coord.join(pathA, pathB)
# normalize joined Dataset
initPf = pd.read_csv(initPath, delimiter=';', encoding="ISO-8859-1")
initPf[initPf < 0] = 0
# initPf[(np.abs(stats.zscore(initPf)) < 3).all(axis=1)]
normPf = coord.normalize(initPf)
print('normPf ', str(normPf))
writeToFile(str(normPf), initNormed, 'w')

# # Run Spectral clustering for k 2 to k 10
labelsList = coord.runConfig(normPf.head(100))

# Run best k spectral clustering
resultDict = coord.run(normPf.head(100), 3, labelsList)
strResDict = dictToStr(resultDict)
Code Example #12
        if os.path.exists(os.path.join(model_save_path, "checkpoint")):
            
            Global_Model.load_model_weights()
            step_model.load_model_weights()

            for env in envs:
                workers.append(Worker(step_model, env, batch_size=batch_size, render=False))
            
            print("Model restored.")
        
        else:
            
            for env in envs:
                workers.append(Worker(step_model, env, batch_size=batch_size, render=False))
            
            print("Creating new model.")
    except:
        print("ERROR: There was an issue loading the model!")
        raise

coordinator = Coordinator(Global_Model, step_model, workers, plot, num_envs, num_epocs, batches_per_epoch, batch_size, gamma, model_save_path, anneling_steps)

# Train and save
if coordinator.run():
    try:
        Global_Model.save_model_weights()
        print("Model saved.")
        print("Now testing results....")
    except:
        print("ERROR: There was an issue saving the model!")
Code Example #13
initMatrix = np.array([  # assignment implied by the Coordinator(initMatrix) call below; earlier rows are truncated in this excerpt
     [0, 0, 0, 0, 0, 0, 0, 0, b, 0, 0, 0, b, 0, 0, 0, 0, 0, 0, 0],
     [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, b, 0, 0, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, a, 0, 0, 0, b, 0, b, 0, 0, 0, 0, c, 0, 0, 0, 0],
     [0, 0, 0, 0, a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, c, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, b, 0, 0, 0, 0, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, 0, 0, 0, 1, 0, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 0, c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 0, c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, b, 0, 0, 0, 1, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, 0, 0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, c, 0, 0, 1, 0],
     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, c, 0, 0, 0, 0]])

testMatrix = np.array([
    [a, a, 0, 0],
    [a, 0, b, 0],
    [0, a, b, 0],
    [0, 0, b, 0],
])
testMatrix2 = np.array([[a, a, 0], [a, 0, 0], [0, a, 0]])
# Assign the matrix to a Coordinator for both algorithms
coord = Coordinator(initMatrix)

# Run the power iteration algorithm and print vector
coord.runPowerIteration()

# test multiplication
testMatrix3 = np.array([[a, a, 0], [a, 0, 0], [0, a, 1]])
testMatrix4 = np.array([[b, b, b], [b, b, b], [b, b, b]])
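
The runPowerIteration() call above is project-specific, but the underlying algorithm is standard; a self-contained NumPy sketch (not the project's API) looks like this:

import numpy as np

def power_iteration(matrix, iterations=100):
    # Repeatedly multiply and renormalize to approximate the dominant eigenvector.
    vec = np.ones(matrix.shape[1])
    for _ in range(iterations):
        vec = matrix @ vec
        vec = vec / np.linalg.norm(vec)
    return vec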
Code Example #14
def mate(c1, c2):
    # note: take care to prevent aliasing issues
    c = Coordinator()
    c.generateSlots()
    c.generateCourses()
    c.generateProfs()
    c.assignRequests()
    c.generateStudentGroups()
    c.generateCourseClasses()
    c.appendSTGtoCC()
    c.setHardBlocks()

    for i in range(len(c1.courseClasses)):
        #we need to manage the professor and stg manually
        randomInt = random.randint(0, 1)
        if (randomInt == 0):
            currCourseClass = c1.courseClasses[i]
        else:
            currCourseClass = c2.courseClasses[i]

        #from courseclass get stg, get prof, get slot and update values
        courseName = currCourseClass.course.name
        req = currCourseClass.req
        duration = currCourseClass.duration

        day = currCourseClass.day
        startTime = currCourseClass.startTime
        endTime = currCourseClass.endTime

        profNameList = [p.name for p in currCourseClass.professors]
        # copying courseClass attributes, no aliasing
        c.courseClasses[i].course = c.getCourse(courseName)
        c.courseClasses[i].req = req
        c.courseClasses[i].duration = duration
        c.courseClasses[i].day = day
        c.courseClasses[i].startTime = startTime
        c.courseClasses[i].endTime = endTime

        for name in profNameList:
            c.courseClasses[i].professors.append(c.getProf(name))

        for prof in c.courseClasses[i].professors:
            prof.courseClasses.append(c.courseClasses[i])

        for slot in currCourseClass.slots:
            index = slot.index
            c.slots[index].counter += 1
            c.courseClasses[i].slots.append(c.slots[index])
            for stg in c.courseClasses[i].studentGroups:
                stg.slots.append(c.slots[index])

            for prof in c.courseClasses[i].professors:
                prof.slots.append(c.slots[index])

    return c
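
Combined with createInitialPop from Code Example #1, one crossover step could be driven like this (hypothetical sketch; random.sample stands in for the project's fitness-based parent selection):

import random

population = createInitialPop(100)
parent1, parent2 = random.sample(population, 2)  # naive parent pick
child = mate(parent1, parent2)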
Code Example #15
from Coordinator import Coordinator

main = Coordinator()

Code Example #16
def main():
	# Data centre sizes
	MY_RESOURCE_TYPES = {
		"L":{ 
				"CPU":		{'CAPACITY':100.0, 'EXECOST': LinearCostFunc(1.0,0.0),'OVERLOADCOST': NoCostFunc(),'MU':1},
				"NET_UP":	{'CAPACITY':100.0, 'EXECOST': NoCostFunc(),'OVERLOADCOST': NoCostFunc(),'MU':1},
				"NET_DOWN":	{'CAPACITY':100.0, 'EXECOST': NoCostFunc(),'OVERLOADCOST': NoCostFunc(),'MU':1}
			},
		"M":{ 
				"CPU":		{'CAPACITY':50.0, 'EXECOST': LinearCostFunc(1.0,0.0),'OVERLOADCOST': NoCostFunc(),'MU':1},
				"NET_UP":	{'CAPACITY':50.0, 'EXECOST': NoCostFunc(),'OVERLOADCOST':NoCostFunc(),'MU':1},
				"NET_DOWN":	{'CAPACITY':50.0, 'EXECOST': NoCostFunc(),'OVERLOADCOST': NoCostFunc(),'MU':1}
			},
		"S":{ 
				"CPU":		{'CAPACITY':10.0, 'EXECOST': LinearCostFunc(1.0,0.0),'OVERLOADCOST': NoCostFunc(),'MU':1},
				"NET_UP":	{'CAPACITY':10.0, 'EXECOST': NoCostFunc(),'OVERLOADCOST': NoCostFunc(),'MU':1},
				"NET_DOWN":	{'CAPACITY':10.0, 'EXECOST': NoCostFunc(),'OVERLOADCOST': NoCostFunc(),'MU':1}
			}
		}
	
	#workloadName = "workload_v1_6_a5mini_1may"
	workloadName = "workfile_tripple_production"
	nbrApps = 5
	depth = 4
	mode = "_continuous"
	testCase  = "_cost_ver"

	logging.basicConfig(filename='activities.log', level=logging.DEBUG, filemode='w')
	logging.info("---- %s ----" % time.strftime("%d/%m/%Y - %H:%M:%S"))
	
	applications = {}
	applicationTypes = Application.TYPES.keys()
	for i in range(0, nbrApps):
		#applications.update({'A%i'%i : Application('A%i'%i, Application.TYPES[random.choice(applicationTypes)])})
		applications.update({'A%i'%i : Application('A%i'%i, Application.TYPES['SYMMETRIC'])})

	env = simpy.Environment()

	topologyMaker = TopologyMaker(env, None, applications)

	datacentres, links, leafnodes = topologyMaker.GenerateTreeFromParameters(	childStruct 	= [2, 2, 1], 
																				sizeStruct 		= [	MY_RESOURCE_TYPES['L'],
																									MY_RESOURCE_TYPES['M'],
																									MY_RESOURCE_TYPES['S'] ], 
																				uplinkStruct 	= [10000,1000,1000], 
																				downlinkStruct 	= [10000,1000,1000], 
																				latencyStruct 	= [1,1,1] )
																				
	logging.info('Topology generated, with %i datacentres' % len(datacentres))
	
	topology = Topology(env, datacentres, links, leafnodes)
	
	scheduler = optScheduler(env, topology, applications)
	logging.info('%s scheduler created' % type(scheduler).__name__)
	
	coordinator = Coordinator(env, topology, scheduler, depth)
	
	workload = Workload(env,'workloads/'+workloadName+'.json', coordinator)
	monitor = SystemMonitor(env, 1, 0.2, workloadName+mode+testCase, topology, coordinator, applications, scheduler, 	
															[	("TOTAL_OVERLOAD", SystemMonitor.measureSystemOverloaFactor),
																("COMPONENT_OVERLOAD", SystemMonitor.measureComponentOverloadFactor),
																("RESOURCE_UTILISATION", SystemMonitor.measureComponentResourceUtilisation),
																("APP_RESOURCE_UTILISATION", SystemMonitor.measureUtilisationPerApp),
																("APP_RTT", SystemMonitor.measureAppLatency),
															], 
															[	("TOTAL_OVERLOAD", SystemMonitor.fileCSVOutput, None),
																("COMPONENT_OVERLOAD", SystemMonitor.fileCSVOutput, SystemMonitor.composeDCLinkHeader),
																("RESOURCE_UTILISATION", SystemMonitor.fileCSVOutput, SystemMonitor.composeDCLinkHeader),
																("APP_RESOURCE_UTILISATION", SystemMonitor.fileCSVOutput, None),
																("APP_RTT", SystemMonitor.fileCSVOutput, SystemMonitor.composeLatencyHeader)
															],
															[])
	
	#workload.produceWorkload()
	env.process(workload.produceWorkload())
	env.process(monitor.measure())
	
	logging.info("Controller started")
	controller = PeriodicController(env, coordinator, 1, 0.1)
	
	logging.info("Simulation started")
	env.run(until=workload.getWorkloadTimeSpan())
	logging.info("Simulation Done")
	
	monitor.compose()
	logging.info("Composing results")
	
	monitor.produceOutput()
	scheduler.output(workloadName+mode+testCase)
	
	print "DONE"
Code Example #17
import tornado.web

import Config
from Coordinator import Coordinator  # assumed import; the excerpt omits it

WEB_SERVER_PORT = 8889

MODE = {
    'init': 'INITIALIZATION',
    'scale_up': 'SCALE UP',
    'scale_down': 'SCALE DOWN'
}

#curl -d hostname=alessandro-VirtualBox -d mode=mobile_presence -d mac= http://10.101.101.119:8888 #192.168.56.101

#NODES_WEIGTH_MAP = {'alessandro-VirtualBox2':1, 'alessandro-VirtualBox3':1}

nodes = {}
coordinator = Coordinator(nodes={}, threshold=Config.THRESHOLD_DEFAULT)


def calcolo_pesi():  # Italian: "weight calculation"
    print("calcolo pesi")


class MainHandler(tornado.web.RequestHandler):
    def post(self):
        arguments = self.request.arguments
        mode = arguments["mode"][0].decode("utf-8")
        hostname_request = arguments["hostname"][0].decode("utf-8")
        print(MODE[mode] + " from node " + hostname_request)
        if mode == "init":
            try:
                file = self.request.files['file'][0]['body'].decode("utf-8")