示例#1
0
	def antTour(self):
		"""Build one ant's job sequence and append it to self.antScheds.

		The first job is taken from the best schedule so far (30% of the
		time, when one exists) or drawn uniformly at random; each later
		job is drawn from a roulette wheel of selection probabilities
		filled in by self.buildRoulette().
		"""
		antSched = Schedule(self.jsspInst)
		# Roulette maps jobId -> [pStart, pEnd), a probability interval.
		jobRoulette = {x: [0.0, 0.0] for x in range(self.jsspInst.jobs)}

		# Defining the first job: randomized(70%) or best schedule(30%).
		if len(self.bestSchedule.jobSched) > 0 and random.random() > 0.7:
			currJob = self.bestSchedule.jobSched[0]   # reuse best schedule's first job
		else:
			# randrange is uniform; the old int((random()*1000) % jobs) was biased.
			currJob = random.randrange(self.jsspInst.jobs)
		del jobRoulette[currJob]
		antSched.addJob(currJob)

		# Select next jobs
		while len(jobRoulette) > 0:
			self.buildRoulette(jobRoulette, currJob)

			bingo = random.random()
			for jId, prob in jobRoulette.items():  # .iteritems() is Python-2 only
				# Half-open interval so a draw landing exactly on a boundary
				# still matches (a miss left currJob stale -> KeyError below).
				if prob[0] <= bingo < prob[1]:
					currJob = jId
					break
			del jobRoulette[currJob]
			antSched.addJob(currJob)

		self.antScheds.append(antSched)
		return
示例#2
0
def main():
    """Poll for queued messages, transcribe each, and push results out.

    Runs forever: every cycle transcribes the audio behind each message,
    writes the transcript back to S3, records a URL in a spreadsheet,
    prints a keyword analysis, then sleeps five minutes.
    """
    while True:
        messages = get_messages()
        for message in messages:
            s3_url, uuid = message['s3_url'], message['uuid']
            response = aws_transcribe(uuid, s3_url)
            text = get_text(response)
            s3_url = save_text_to_s3(uuid, text)
            # NOTE(review): `row`, `sch`, `sheet_id`, `youtube_url` and
            # `process_name` are not defined in this fragment -- confirm
            # they exist at module scope.
            # Bug fix: the `try:` line had one extra leading space, which
            # is an IndentationError.
            try:
                cell_range = 'M{0}:M{0}'.format(row)
                sch.write_single_range(sheet_id, cell_range, [[youtube_url]])
            except Exception as e:
                logging.error('Failed to update sheets for {}'.format(process_name))
                print('{} failed to update sheets'.format(process_name))

            keywords = key_word_analysis(text)
            print(keywords)

        sleep(60 * 5)
示例#3
0
    def _combine_hosts(self):
        """Assemble a ConfiguredTest from the model, schedule and mapping
        singletons and store it on self._configured_test."""
        test = ConfiguredTest()
        test.resources = Resources.resources.resources()
        test.hosts = {}
        test.end_policy = Schedule.get_schedule().test_end_policy()
        test.setup_phase_delay = Schedule.get_schedule().setup_phase_delay()
        test.triggers = Schedule.get_schedule().triggers()

        for model_host in Model.get_model().hosts():
            configured = ConfiguredHost()
            configured.model = model_host
            configured.device = model_host.bound()
            configured.schedule = Schedule.get_schedule().host_schedule(model_host['name'])

            # Resources the host itself needs plus those of every
            # scheduled command.
            needed = set(model_host.needed_resources())
            for event in configured.schedule:
                needed.update(event.command().needed_resources())

            # Resolve resource *names* to resource objects; anything that
            # is already an object passes through untouched.
            configured.resources = {
                Utils.resolve_resource_name(r) if isinstance(r, str) else r
                for r in needed
            }

            test.hosts[model_host['name']] = configured

        test.sanity_check()

        test.model = Model.get_model()
        test.laboratory = Laboratory.get_laboratory()
        test.schedule = Schedule.get_schedule()
        test.mapping = Mapping.get_mapping()

        self._configured_test = test
示例#4
0
def overlapCheckerTwo(potentialSchedule):
	"""Return True when no two classes in the schedule overlap on any day.

	For each weekday the (start, end) times of every class are collected
	and compared pairwise; any detected overlap makes the schedule
	invalid and returns False immediately.
	"""
	tempSched = Schedule(potentialSchedule)

	for day in range(5):
		# Collect the time pairs for every class on this day.
		timez = [lect.getTimes() for lect in tempSched.getClassesOnDay(day)]

		# Compare each distinct pair exactly once.  (The original re-ran
		# this whole check after every single append -- accidental O(n^3)
		# -- and its inner loop shadowed the day index `i`.)
		for i in range(len(timez)):
			for j in range(i, len(timez)):
				time = timez[i]
				timeTwo = timez[j]
				if time != timeTwo:
					if time[0][0] == timeTwo[1][0]:
						if time[0][1] <= timeTwo[1][1]:
							return False
					elif time[1][0] == timeTwo[0][0]:
						if time[1][1] <= timeTwo[0][1]:
							return False
					elif time[0][0] > timeTwo[0][0] and time[0][0] < timeTwo[1][0]:
						return False
	return True
示例#5
0
文件: app.py 项目: jplamb/FantasyCFB
def con_update_schedule():
	"""Refresh the schedule for every Power Five team listed on ESPN."""
	roster_urls, teams = get_power_five_roster_links('http://espn.go.com/college-football/teams')

	for idx, team in enumerate(teams):
		# A team's roster URL doubles as its schedule URL after a swap.
		schedule_url = roster_urls[idx].replace("roster", "schedule")
		team_schedule = Schedule(team, schedule_url)
		team_schedule.get_schedule(schedule_url)
示例#6
0
    def make_streamer_schedule(self):
        """Build a streamer schedule of shared and app-specific neural nets.

        Apps are processed in order of how many layers they freeze; a
        shared "base" net covers each common frozen prefix, and every app
        branches an app-specific net off the deepest base it can share.
        Returns the resulting schedule's net list (s.schedule).
        """
        # Work on copies of the app dicts, tagged with the chosen knobs.
        apps = []
        for app, num_frozen, target_fps \
                in zip(self.apps, self.num_frozen_list, self.target_fps_list):
            a = app.copy()
            a["num_frozen"] = num_frozen
            a["target_fps"] = target_fps
            apps.append(a)

        s = Schedule.StreamerSchedule()

        num_apps_done = 0
        last_shared_layer = 1
        # Sentinel root net: children branched from it start at layer 1.
        parent_net = Schedule.NeuralNet(-1, -1, self.model, end=1)

        while (num_apps_done < len(apps)):
            # Apps with the shallowest frozen depth not yet covered.
            min_frozen = min([app["num_frozen"] \
                for app in apps if app["num_frozen"] > last_shared_layer])
            min_apps    = [app for app in apps \
                            if app["num_frozen"] == min_frozen]
            future_apps = [app for app in apps \
                            if app["num_frozen"] > min_frozen]

            # Check if we need to share part of the NN, and make a base NN
            # If so, we make it and set it as the parent
            if len(future_apps) > 0 or len(apps) == len(min_apps):

                # Set target_fps depending on children target_fps
                if len(future_apps) > 0:
                    parent_target_fps = max(
                        [app["target_fps"] for app in future_apps])
                else:
                    parent_target_fps = max(
                        [app["target_fps"] for app in min_apps])

                net = Schedule.NeuralNet(s.get_id(), -1, self.model,
                                         parent_net.net_id, last_shared_layer,
                                         min_frozen, True, parent_target_fps,
                                         min_apps[0]["model_path"][min_frozen])
                s.add_neural_net(net)
                parent_net = net

            # Make app-specific NN that is branched off the parent
            # Parent is either nothing or the last shared branch
            for app in min_apps:
                net = Schedule.NeuralNet(s.get_id(), app["app_id"], self.model,
                                         parent_net.net_id, parent_net.end,
                                         self.model.final_layer, False,
                                         app["target_fps"],
                                         app["model_path"][min_frozen])
                s.add_neural_net(net)
                num_apps_done += 1

            last_shared_layer = parent_net.end

        return s.schedule
示例#7
0
def text_reply(msg):
    """Dispatch a WeChat text command from *msg* and return a reply string.

    Commands: 'a' add a daily push time, 'd' delete one, 'l' list times,
    'da' delete all, 'h' help.  Per-user state lives in the module-level
    `info` dict and is persisted to info.txt after every change.
    """
    sender = msg['User']['RemarkName']
    content = msg['Content'].split()
    command = content[0]
    if command == 'a':
        try:
            if content[1] not in info[sender]['times']:
                Schedule.SetDailySchedule(sender, [content[1]],
                                          info[sender]['loc'])
                info[sender]['times'].append(content[1])
                # `with` closes the file; the old explicit close() was redundant.
                with open('info.txt', 'w') as f:
                    f.write(str(info))
            print("Successfully added " + sender + "'s schedule @" +
                  content[1])
            return u'添加成功!'
        except Exception:  # was a bare except: don't swallow SystemExit etc.
            return u'请求错误!\n' + instruction['a']
    elif command == 'd':
        try:
            if content[1] in info[sender]['times']:
                Schedule.ClearSomeone(sender + content[1])
                info[sender]['times'].remove(content[1])
                with open('info.txt', 'w') as f:
                    f.write(str(info))
                print("Successfully deleted " + sender + "'s schedule @" +
                      content[1])
                return u'删除成功!'
            else:
                return u'无此定时!'
        except Exception:  # was a bare except
            return u'请求错误!\n' + instruction['d']
    elif command == 'l':
        s = ""
        for each in info[sender]['times']:
            s = s + each + '\n'
        if s == "":
            return u'当前无定时!'
        else:
            return u'' + s
    elif command == 'da':
        for each in info[sender]['times']:
            Schedule.ClearSomeone(sender + each)
        info[sender]['times'] = []
        with open('info.txt', 'w') as f:
            f.write(str(info))
        print("Successfully deleted all schedules of " + sender)
        return u'已清除全部定时推送!'
    elif command == 'h':
        return u'' + instruction['h']
    else:
        return u'请求错误!\n' + instruction['h']
示例#8
0
    def make_streamer_schedule_no_sharing(self):
        """Build a schedule with one independent net per app (no sharing)."""
        streamer_schedule = Schedule.StreamerSchedule()

        for app in self.apps:
            # Least-frozen variant available for this app.
            frozen = min(app["accuracies"].keys())
            streamer_schedule.add_neural_net(
                Schedule.NeuralNet(streamer_schedule.get_id(), app["app_id"],
                                   self.model, -1, 1, self.model.final_layer,
                                   False, self.video_desc["stream_fps"],
                                   app["model_path"][frozen]))
        return streamer_schedule.schedule
示例#9
0
class Doctor(Personnel):
    """A doctor with a personal schedule of appointments."""

    def __init__(self, personnel_ID):
        # Bug fix: `super.__init__(...)` addressed the `super` type object
        # itself; it must be `super().__init__(...)` to run Personnel's
        # initializer.
        super().__init__(personnel_ID)
        self.schedule = Schedule(personnel_ID)

    def add_sch(self, appointment_ID, date, patient_ID, personnel_ID, prescription_ID, service_charge):
        """Add an appointment entry to this doctor's schedule."""
        self.schedule.add_sch(appointment_ID, date, patient_ID, personnel_ID, prescription_ID, service_charge)

    def del_sch(self, appointment_ID):
        """Remove the appointment with the given ID from the schedule."""
        self.schedule.del_from_sch(appointment_ID)

    def get_lab_result(self, appointment_ID):
        """Return the lab result stored on the given appointment.

        Bug fix: the filter compared the parameter to itself
        (`appointment_ID == appointment_ID`, always true) and therefore
        matched every row; compare against the model column instead.
        TODO(review): confirm the Appointment column attribute name.
        """
        row = Models.Appointment.select().where(
            Models.Appointment.appointment_ID == appointment_ID).get()
        return row.lab_result
示例#10
0
class Doctor(Personnel):
    """A doctor with a personal schedule of appointments."""

    def __init__(self, personnel_ID):
        # Bug fix: `super.__init__(...)` addressed the `super` type object
        # itself; it must be `super().__init__(...)` to run Personnel's
        # initializer.
        super().__init__(personnel_ID)
        self.schedule = Schedule(personnel_ID)

    def add_sch(self, appointment_ID, date, patient_ID, personnel_ID, prescription_ID, service_charge):
        """Add an appointment entry to this doctor's schedule."""
        self.schedule.add_sch(appointment_ID, date, patient_ID, personnel_ID, prescription_ID, service_charge)

    def del_sch(self, appointment_ID):
        """Remove the appointment with the given ID from the schedule."""
        self.schedule.del_from_sch(appointment_ID)

    def get_lab_result(self, appointment_ID):
        """Return the lab result stored on the given appointment.

        Bug fix: the filter compared the parameter to itself
        (`appointment_ID == appointment_ID`, always true) and therefore
        matched every row; compare against the model column instead.
        TODO(review): confirm the Appointment column attribute name.
        """
        row = Models.Appointment.select().where(
            Models.Appointment.appointment_ID == appointment_ID).get()
        return row.lab_result
    def crossover(self):
        """Breed the two selected schedules into two children, keep both.

        Selection picks two parents; each child takes the first half of
        one parent's section list and the second half of the other's.
        The child with fewer conflicts is mutated, then both children are
        appended to the population.

        NOTE(review): `half1` is derived from parent 1's sections while
        the odd/even test and `half2` use parent 2's length -- if parents
        can have different section counts this mixes the two lists;
        confirm the intended split.
        NOTE(review): `data` is read from module/global scope.
        """
        self.selection()
        sectionschedule1=[]
        #print("selected schedule conflict parent 1")
        #print(self.selected_schedule[0].conflicts)
        sectionschedule1=self.selected_schedule[0].sections
        #print("selected schedule conflict parent 2")
        #print(self.selected_schedule[1].conflicts)
        sectionschedule2=self.selected_schedule[1].sections
        half1=0
        half2=0
        if len(sectionschedule2)%2!=0:
            half1=len(sectionschedule1)//2
            half2=half1+1
        if len(sectionschedule2) % 2 == 0:
                half1=half2=len(sectionschedule2)//2
        crossover_section=[]
        crossover_section2 = []
        # Child 1: first half of parent 1 + second half of parent 2.
        for i in range(half1):
            crossover_section.append(sectionschedule1[i])
        for j in range (half2,len(sectionschedule2)):
            crossover_section.append(sectionschedule2[j])
        crossover_scheduale=Schedule.Schedule(data)
        crossover_scheduale.setsections(crossover_section)
        #print("cross_over1 :"+str(crossover_scheduale.conflicts))


        #
        # Child 2: first half of parent 2 + second half of parent 1.
        for k in range(half1):
            crossover_section2.append(sectionschedule2[k])
        for l in range (half2,len(sectionschedule2)):
            crossover_section2.append(sectionschedule1[l])


        crossover_scheduale2=Schedule.Schedule(data)
        crossover_scheduale2.setsections(crossover_section2)
        #print("cross_over2 :"+str(crossover_scheduale2.conflicts))

        # Mutate whichever child currently has the fewer conflicts.
        if(crossover_scheduale2.conflicts>crossover_scheduale.conflicts):

            self.mutate_schedule(crossover_scheduale)

        else:

            self.mutate_schedule(crossover_scheduale2)

        self.population.append(crossover_scheduale2)
        self.population.append(crossover_scheduale)
    def __init__(self, quarter, year, team_json, iter_count):
        """Set up role parameters, member data and an empty schedule for
        the given quarter/year."""
        # Index role parameters by role name; the json order doubles as
        # the role priority sequence.
        role_param_json = u.get_roles()
        self.role_params = {entry['role']: entry for entry in role_param_json}
        priority_sequence = [entry['role'] for entry in role_param_json]

        # prep member data
        self.member_data = md.MemberData(self.role_params, team_json, quarter,
                                         year)

        # Every Sunday in the quarter, rendered as date strings.
        self.sundays_datestrs = [
            u.datetime_to_datestring(sunday)
            for sunday in u.get_all_sundays_in_quarter(quarter, year)
        ]

        self.iteration_count = iter_count
        # Best three schedules found so far (list of dicts of band schedules).
        self.top_three = [{'score': 0} for _ in range(3)]

        self.schedule = s.Schedule(self.role_params, priority_sequence,
                                   self.member_data.get_availability_matrix(),
                                   self.member_data)
示例#13
0
    def adapt_simulator(self, time):
        """Occasionally restart from a fresh schedule when progress stalls.

        After a warm-up of `adaptive_lookback` iterations, pass a random
        gate whose probability decays with *time*; then, if the objective
        has been flat over the lookback window, bank the current schedule
        (when it beats the best seen) and regenerate a new one.
        """
        if time < self.adaptive_lookback:
            return

        # Random gate: proceed only when the decayed threshold is below
        # the fresh random draw.
        threshold = math.exp(1 / float(math.log(time))) - 1
        if not threshold < random.uniform(0, 1):
            return

        lookback = self.adaptive_lookback
        history = self.iteration_value
        # Same index window as before (kept as an explicit range on purpose).
        window = range(len(history) - lookback, len(history))
        lookback_table = [history[i] for i in window]
        latest = history[len(history) - 1]
        # A flat window means the search has stalled.
        adapt_sa = all(value == latest for value in lookback_table)

        print(f'lookback_table = {str(lookback_table)}, taken? {adapt_sa}')

        if adapt_sa:
            self.adapted_timestamps.append(time - 1)
            clashes = self.get_clashes_in_schedule(self.schedule)
            if len(clashes) < self.best_result:
                self.best_schedule = self.schedule
                self.best_result = len(clashes)
            self.schedule = scdule.generate_schedule(self.events)
示例#14
0
    def sanity_check(self):
        """Validate the basic consistency of this configuration.

        Only fundamental classes of mistakes are checked (e.g. a missing
        device mapping).  The goal is to eliminate part of the errors
        before a test is launched; passing does not guarantee that the
        configuration can execute the test correctly.
        """
        required_device_attrs = ['connection', 'frontend']

        for name, host in self.hosts.items():
            if name != host.model['name']:
                raise Exceptions.SanityError("Key name is different than element's name")

            if not host.model.bound():
                raise Exceptions.SanityError("Model host '%s' is not bound" % host.model['name'])

            device = host.device
            for attr in required_device_attrs:
                if attr not in device.attributes():
                    raise Exceptions.SanityError("Device '%s' doesn't specify '%s' attribute" % (device['name'], attr))

            for iname, interface in host.model.interfaces().items():
                if iname != interface['name']:
                    raise Exceptions.SanityError("Key's name is different than element's name")

                if not interface.bound():
                    raise Exceptions.SanityError("Interface '%s' of model host '%s' is not bound" % (iname, name))

        if not Schedule.get_schedule().test_end_policy():
            raise Exceptions.SanityError("Test end policy not specified. Use test_end_policy(<policy>) in your configuration.")
示例#15
0
def make_schedule(counter_array, courses, num_courses):
    """Attempt to build one conflict-free Schedule from the counters.

    `counter_array[i]` selects which section of `courses[i]` to try.
    When a section conflicts, the counters are advanced to the next
    combination.

    Returns the built Schedule on success, the string "finished" when
    every combination has been exhausted, or False to signal the caller
    to retry with the freshly incremented counters.

    NOTE(review): `last_section` is never reset between courses, so once
    set it persists for all later courses in this call -- confirm this
    is intended.
    """
    # Create a new Schedule
    schedule = Schedule.Schedule()

    last_section = False

    # Traverses all courses to make a Schedule
    for course_idx in range(num_courses):
        # Continues looping until a section doesn't conflict
        while True:
            # Pulls the next section for the course and attempts to add it
            counter = counter_array[course_idx]
            section = courses[course_idx].sections[counter]
            success = schedule.add_section(section)

            if not success:
                # Reached last section for this course
                if (counter + 1) == len(courses[course_idx].sections):
                    last_section = True

                # Increment counter for this Course to avoid future conflicts
                finished = increment_counter(counter_array, courses, course_idx)

                if finished:
                    # Last combination possible failed, we're done making Schedules
                    return "finished"

                # Main loop will continue and try again with the new iterated values
                if last_section:
                    return False
            else:
                break

    # Successfully made the schedule
    return schedule
示例#16
0
    def set_max_parameters(self):
        """Configure the maximally-shared, max-FPS schedule.

        Builds one ScheduleUnit per app using its largest available
        frozen-layer count and the stream FPS, stores the schedule and
        the per-app knob lists on self, and returns the average metric
        reported by set_schedule_values().
        """
        schedule = []
        num_frozen_list = []
        target_fps_list = []
        for app in self.apps:
            # Max sharing: freeze as many layers as this app supports.
            num_frozen = max(app["accuracies"].keys())
            target_fps = self.stream_fps
            unit = Schedule.ScheduleUnit(app, target_fps, num_frozen)
            num_frozen_list.append(num_frozen)
            target_fps_list.append(target_fps)
            schedule.append(unit)
        self.schedule = schedule
        self.num_frozen_list = num_frozen_list
        self.target_fps_list = target_fps_list

        # Cleanup: the old code also computed scheduler_util.get_cost_schedule()
        # into an unused local (and an unused `app_id`); the query result was
        # never read, so the call was dropped.
        average_metric = self.set_schedule_values(schedule)

        return average_metric
 def mutate_schedule(self, schedule):
     """Randomly replace sections of *schedule* from a fresh random sample.

     Each section is swapped with probability `self.mutate_rate`, after
     which the schedule's conflict count is recomputed in place.
     NOTE(review): `data` and `random` come from the enclosing module
     scope (`random` here is a function, i.e. `from random import random`).
     """
     # Renamed from `mutate_schedule`, which shadowed this method's own name.
     donor = Schedule.Schedule(data)
     donor.generate_sample()
     for i in range(len(schedule.sections)):
         if (self.mutate_rate > random()):
             schedule.sections[i] = donor.sections[i]
     schedule.calculate_conflicts()
示例#18
0
def schedule_process():
    """Flask view: queue a new thermostat schedule entry, then re-render
    the index page with the current temperature."""
    day_of_week = request.form.get('dow')
    at_time = request.form.get('time')
    target_temp = request.form.get('temp')
    schedule_queue.append(Schedule.ThermoSchedule(day_of_week, at_time, target_temp))
    current = Temp.get_temp()
    return render_template('index_b.html', temp=current)
 def _crossover_schedule(self, schedule1, schedule2):
     """Return a child schedule whose classes are drawn 50/50 from the
     two parent schedules, slot by slot."""
     child = Schedule.Schedule().initialize()
     for i in range(0, len(child.get_classes())):
         # Coin flip per slot: copy the class from parent 1 or parent 2.
         parent = schedule1 if random.random() > 0.5 else schedule2
         child.get_classes()[i] = parent.get_classes()[i]
     return child
示例#20
0
    def testAddJob(self):
        """addJob accepts each valid job once, rejects duplicates, invalid
        ids and overflow, and preserves insertion order in jobSched."""
        # Testing the presentation instance (3 jobs).
        jsspInst = JSSPInstance("../jssp_instances/transparencia.txt")
        sched = Schedule(jsspInst)

        # `assert_` is deprecated (removed in modern unittest); use the
        # assertTrue/assertFalse/assertEqual family instead.
        self.assertTrue(sched.addJob(0), "Error job 0")
        self.assertTrue(sched.addJob(1), "Error job 1")
        self.assertFalse(sched.addJob(1), "Duplicated jobId")
        self.assertFalse(sched.addJob(6), "Invalid jobId(0-2): 6")
        self.assertTrue(sched.addJob(2), "Error job 2")
        self.assertFalse(sched.addJob(3), "Maximum jobs exceeded")

        self.assertEqual(sched.jobSched[0], 0, "jobSched 0")
        self.assertEqual(sched.jobSched[1], 1, "jobSched 1")
        self.assertEqual(sched.jobSched[2], 2, "jobSched 2")
        return
示例#21
0
 def __init__(self, events, iterations, adaptive,
              adaptive_lookback) -> None:
     """Initialize the solver with a freshly generated starting schedule."""
     super().__init__(events)
     self.iterations = iterations
     self.adaptive = adaptive
     self.adaptive_lookback = adaptive_lookback
     # The starting schedule doubles as the best schedule seen so far.
     self.schedule = scdule.generate_schedule(self.events)
     self.best_result = len(self.get_clashes_in_schedule(self.schedule))
     self.best_schedule = self.schedule
示例#22
0
    def get_parameter_options(self):
        """Enumerate every per-app (num_frozen, target_fps) assignment.

        Builds one candidate schedule per permutation, scores each with
        the configured metric, and returns (schedules, metrics, costs) --
        all three lists implicitly ordered by ascending metric.
        """
        ## Calculate all possible schedules
        possible_params = []
        for num_frozen in sorted(self.apps[0]["accuracies"].keys()):
            for target_fps in range(1, self.stream_fps + 1):
                possible_params.append((num_frozen, target_fps))

        permutations = list(itertools.product(possible_params,
                                              repeat=len(self.apps)))

        # Associate each parameter tuple with its app explicitly.
        # Fix: index the apps by id once -- the old per-unit linear scan
        # was O(n) per unit and left `app` unbound if no id matched;
        # it also built an unused `full_tup`.
        apps_by_id = {a["app_id"]: a for a in self.apps}
        app_ids = [app["app_id"] for app in self.apps]
        schedules = []
        for perm in permutations:
            schedule = []
            for app_id, tup in zip(app_ids, perm):
                app = apps_by_id[app_id]
                unit = Schedule.ScheduleUnit(app, tup[1], tup[0])
                schedule.append(unit)
            schedules.append(schedule)

        ## Calculate the metric you're minimizing for, for each schedule
        metric_by_schedule = {}
        for schedule in schedules:
            total_metric = 0.0
            for unit in schedule:
                metric = self.get_metric(unit.app,
                                         unit.num_frozen,
                                         unit.target_fps)
                total_metric += metric

            avg_metric = total_metric / len(self.apps)
            metric_by_schedule[tuple(schedule)] = round(avg_metric, 4)

        ## Sort schedules by metric
        sorted_d = sorted(metric_by_schedule.items(), key=operator.itemgetter(1))
        schedules = [tup[0] for tup in sorted_d]  # implicit ordering by metric
        metrics = [tup[1] for tup in sorted_d]    # implicit ordering by metric
        costs = []                                # implicit ordering by metric

        for schedule, metric in zip(schedules, metrics):
            cost = scheduler_util.get_cost_schedule(schedule,
                                                    self.model.layer_latencies,
                                                    self.model.final_layer)
            costs.append(cost)
        return schedules, metrics, costs
示例#23
0
class Doctor(Personnel):
    """A doctor with a schedule, prescriptions and an appointment list."""

    def __init__(self, username):
        # Bug fix: `super.__init__(...)` addressed the `super` type object
        # itself; it must be `super().__init__(...)` to run Personnel's
        # initializer.
        super().__init__(username)
        self.schedule = Schedule(username)

    def add_sch(self, appointment_ID, date, patient_ID, personnel_ID, prescription_ID, service_charge):
        """Add an appointment entry to this doctor's schedule."""
        self.schedule.add_sch(appointment_ID, date, patient_ID, personnel_ID, prescription_ID, service_charge)

    def del_sch(self, appointment_ID):
        """Remove the appointment with the given ID from the schedule."""
        self.schedule.del_from_sch(appointment_ID)

    def get_lab_result(self, appointment_ID):
        """Return the lab result stored on the given appointment.

        Bug fix: the filter compared the parameter to itself (always
        true); compare against the model column instead.
        TODO(review): confirm the Appointment column attribute name.
        """
        row = Models.Appointment.select().where(
            Models.Appointment.appointment_ID == appointment_ID).get()
        return row.lab_result

    def prescribe(self, appointment_ID, medicine_ID, date=None):
        """Record a prescription, defaulting the date to the current time.

        Bug fix: the old default `datetime.datetime.now` (note: no call)
        passed the *function object* bound at definition time, not a
        timestamp.  A None default evaluated per call means "now".
        """
        if date is None:
            date = datetime.datetime.now()
        Models.Prescribe.add(appointment_ID, medicine_ID, date)

    def get_appt_lst(self):
        """Return all appointments assigned to this doctor.

        NOTE(review): `get_personnel_ID()` is called unqualified --
        confirm it exists at module scope (or should be `self.`).
        """
        return [appt for appt in Models.Appointment.select().where(
            Models.Appointment.personnel_ID_fk == get_personnel_ID())]
示例#24
0
 def __init__(self, metric, apps, video_desc, model_desc, verbose=0, scheduler='greedy', agg='avg'):
     """Store the scheduler configuration and wrap the model description."""
     self.metric = metric
     self.apps = apps
     self.video_desc = video_desc
     self.stream_fps = video_desc["stream_fps"]
     self.model = Schedule.Model(model_desc)
     # Per-app knobs; filled in later by the schedule builders.
     self.num_frozen_list = []
     self.target_fps_list = []
     self.verbose = verbose
     self.scheduler = scheduler
     self.agg = agg
示例#25
0
def main():
    """Dispatch the parsed docopt command-line verbs to the Schedule helpers."""
    arg = docopt(__doc__, version='0.1')

    # docopt stores commands as booleans; test them directly instead of
    # the `== True` anti-pattern.
    if arg["add"]:
        Schedule.addappointment(arg["<name>"], arg["<weekday>"], arg["<hour>"],
                                arg["--color"])

    if arg["today"]:
        Schedule.printtoday()

    if arg["delete"]:
        Schedule.delappointment(arg["<name>"])

    if arg["get"]:
        Schedule.printday(arg["<weekday>"])

    if arg["next"]:
        Schedule.nextapp()

    if arg["reset"]:
        files.gendata(datafilepath)
示例#26
0
    def _sanity_check(self):
        """Raise SanityError unless the model, laboratory, mapping and
        schedule singletons all exist."""
        # Getters are stored uncalled so earlier failures still short-circuit
        # later lookups, exactly as the original chain of ifs did.
        checks = (
            (Model.get_model,
             "No model defined. You need to create a model. Did you forget to use 'create_model(name)' in your configuration?"),
            (Laboratory.get_laboratory,
             "No laboratory defined. You need to create a laboratory. Did you forget to use 'create_laboratory(name)' in your configuration?"),
            (Mapping.get_mapping,
             "No mapping defined. You need to create a mapping. Did you forget to use 'create_mapping(name)' in your configuration?"),
            (Schedule.get_schedule,
             "No schedule defined. You need to create a schedule. Did you forget to use 'create_schedule(name)' in your configuration?"),
        )
        for getter, message in checks:
            if not getter():
                raise Exceptions.SanityError(message)
示例#27
0
def crossOver(scheduleA, scheduleB):
    '''Breeds two schedules together by splitting on a random division point'''
    # Child takes courses [0:divisionPoint) from A and the rest from B.
    divisionPoint = r.randint(1, 11)
    courses = scheduleA.courseArray[:divisionPoint]
    courses.extend(scheduleB.courseArray[divisionPoint:])

    #call mutation function for each course
    # NOTE(review): the mapped result is discarded.  That is only correct
    # if `mutate` modifies each course in place; if it returns a new
    # course, these mutations are lost -- confirm `mutate`'s contract.
    mutations = list(map(mutate, courses))

    # The Schedule constructor takes the 12 courses positionally.
    return Schedule.Schedule(courses[0], courses[1], courses[2], courses[3],
                             courses[4], courses[5], courses[6], courses[7],
                             courses[8], courses[9], courses[10], courses[11])
示例#28
0
class Student:
	"""A student with a name and a course Schedule."""

	def __init__(self, name):
		self._name = name
		self._schedule = Schedule(self._name)

	def isEmpty(self):
		# NOTE(review): compares the Schedule object to a dict literal;
		# this relies on Schedule's __eq__ accepting {} -- confirm.
		return self._schedule == {}

	def getName(self):
		"""Return the student's name."""
		return self._name

	def getSchedule(self):
		"""Return the underlying Schedule object."""
		return self._schedule

	def hasCourse(self, courseNumber):
		"""True when exactly one enrolled course matches the number."""
		return len(self._schedule.searchbyCourseNumber(courseNumber)) == 1

	def addCourse(self, courseName, courseObject):
		"""Enroll the student in a course."""
		self._schedule.addCourse(courseName, courseObject)

	def getClass(self, courseName):
		"""Look up an enrolled course by name."""
		return self._schedule[courseName]

	def __str__(self):
		if self._schedule.isEmpty():
			return 'student is not currently enrolled'
		else:
			# Bug fix: the original dropped this result, implicitly
			# returning None -- which makes str(student) raise TypeError.
			return self._schedule.listCourses()
示例#29
0
 def step(self, iteration):
     """Perform one local-search move on the current schedule.

     Returns the schedule unchanged when it is already clash-free;
     otherwise returns a copy in which one randomly chosen clashing
     instance is replaced by a random alternative instance of the same
     event.
     """
     clashes = self.get_clashes_in_schedule(self.schedule, "operative")
     if iteration == 1:
         # Remember the starting clash count for progress reporting.
         self.max_clash = len(clashes)
     if not clashes:
         return self.schedule
     victim = random.choice(clashes)
     replacement = random.choice(self.events[victim.event_index].instances)
     candidate = scdule.Schedule(self.schedule.instances.copy())
     candidate.instances.remove(victim)
     candidate.instances.append(replacement)
     return candidate
示例#30
0
def overlapChecker(potentialSchedule, lecture):
	"""Check whether *lecture* fits into *potentialSchedule* without
	overlapping any already-scheduled class.

	Returns False on the first detected time overlap, True otherwise.

	NOTE(review): `times` is never cleared between days, so classes from
	earlier days are re-compared on later days (the sibling
	overlapCheckerTwo clears its list per day) -- confirm whether this
	accumulation is intended.
	"""
	temp = lecture.getTimes()
	times = []


	tempSched = Schedule(potentialSchedule)
	for i in range(5):
		dayClasses = tempSched.getClassesOnDay(i)
		for lect in dayClasses:
			times.append(lect.getTimes())

		# Compare the candidate's (start, end) pair against each collected
		# class time; any boundary/containment hit rejects the schedule.
		for time in times:
			if temp[0][0] == time[1][0]:
				if temp[0][1] <= time[1][1]:
					return False
			elif temp[1][0] == time[0][0]:
				if temp[1][1] <= time[0][1]:
					return False
			elif temp[0][0] > time[0][0] and temp[0][0] < time[1][0]:
				return False

	return True
示例#31
0
def mm_reply(msg):
    """Handle a shared-location message: update the sender's stored city.

    Clears the sender's existing daily pushes, stores the new location
    and timezone, re-registers the pushes, and persists everything to
    info.txt.  Returns a user-facing status string.
    """
    sender = msg['User']['RemarkName']
    res = Weather.GetPosition(msg['Url'].split('=')[1])
    if res is False:  # GetPosition signals failure with the singleton False
        return u'定位获取失败...请重试!'
    else:
        try:
            for each in info[sender]['times']:
                Schedule.ClearSomeone(sender + each)
            info[sender]['loc'] = res['cid']
            # SECURITY(review): eval() on a value originating from an
            # external service -- replace with int()/float() parsing if
            # the timezone field is numeric.
            info[sender]['tz'] = eval(res['tz'])
            Schedule.SetDailySchedule(sender, info[sender]['times'],
                                      info[sender]['loc'])
            # `with` closes the file; the old explicit close() was redundant.
            with open('info.txt', 'w') as f:
                f.write(str(info))
            print("Successfully changed " + sender + "'s position to " +
                  res['cid'])
            return u'定位已修改为:' + res['admin_area'] + res['location']
        except Exception:  # was a bare except: don't swallow SystemExit etc.
            print("Unsuccessfully changed location!")
            return u'定位修改失败!'
示例#32
0
    def get_observed_performance(self, streamer_schedule, fpses):
        """Compute observed average FNR/FPR/F1 and cost for a deployment.

        Maps the measured fps values back onto apps, derives each app's
        false-negative and false-positive rates from its accuracy model,
        and returns (avg_fnr, avg_fpr, avg_f1, cost), each rounded to
        four decimal places.
        """
        fps_by_app_id = self.get_fps_by_app_id(streamer_schedule, fpses)
        fnrs, fprs, f1s = [], [], []
        observed_schedule = []
        for app, num_frozen in zip(self.apps, self.num_frozen_list):
            kwargs = {}
            if self.metric.endswith('-x'):
                kwargs['x_vote'] = app['x_vote']
            observed_fps = fps_by_app_id[app["app_id"]]

            accuracy = app["accuracies"][num_frozen]
            prob_tnr = app["prob_tnrs"][num_frozen]
            false_neg_rate = scheduler_util.get_false_neg_rate(
                accuracy,
                app["event_length_ms"],
                app["correlation_coefficient"],
                self.stream_fps,
                observed_fps,
                **kwargs)
            false_pos_rate = scheduler_util.get_false_pos_rate(
                accuracy,
                prob_tnr,
                app["event_length_ms"],
                app["event_frequency"],
                app["correlation_coefficient"],
                self.stream_fps,
                observed_fps,
                **kwargs)

            # F1 is the harmonic mean of recall and precision.
            recall = 1. - false_neg_rate
            precision = 1. - false_pos_rate
            f1 = 2. / (1. / recall + 1. / precision)

            fnrs.append(false_neg_rate)
            fprs.append(false_pos_rate)
            f1s.append(f1)

            observed_schedule.append(
                Schedule.ScheduleUnit(app, observed_fps, num_frozen))

        observed_cost = scheduler_util.get_cost_schedule(
            observed_schedule,
            self.model.layer_latencies,
            self.model.final_layer)
        average_fnr = sum(fnrs) / float(len(fnrs))
        average_fpr = sum(fprs) / float(len(fprs))
        average_f1 = sum(f1s) / float(len(f1s))
        return round(average_fnr, 4), round(average_fpr, 4), round(average_f1, 4), round(observed_cost, 4)
示例#33
0
def main():
    """Build the season schedule, assign referees, and print it grouped by date."""
    gyms = make_gyms()
    divisions, teams = make_divisions(gyms)

    # Build one round-robin pairing list per division, echoing each division.
    round_robins = []
    for division in divisions:
        print(division)
        round_robins.append(roundrobin(division))

    game_dates = [date(2018, 2, 11), date(2018, 2, 18), date(2018, 3, 4)]
    start_times = [time(1, 30), time(3, 15)]
    slots = make_slots(game_dates, list(gyms.values()), start_times)

    fill_slots(slots, round_robins)
    slots.sort(key=lambda s: (s.date, s.gym, s.start, s.court))

    schedule = Schedule(slots, teams)
    schedule.assign_refs()

    date_teams_with_byes = get_teams_with_byes(slots, teams)

    # Sentinel date guarantees the very first slot prints a date header.
    previous_date = date(1867, 7, 1)
    for slot in slots:
        if previous_date != slot.date:
            print(slot.date)
            print('byes:', date_teams_with_byes[slot.date])
            previous_date = slot.date
        print(slot)

    print('byes:')
    for bye_date, byes in date_teams_with_byes.items():
        print(bye_date, byes)
示例#34
0
async def generate_schedule(args):
    """
    Middle man between the socket server and Schedule.py, run asynchronously
    so schedule generation does not block the webserver.

    Parameters:
        args: the user object of whoever requested the schedule; its
              ``input_data`` attribute holds the raw JSON string sent by
              the client, and the result is emitted back to ``args.sid``.
    """
    user = args

    input_dict = json.loads(user.input_data)
    log('input_dict\n' + str(input_dict))

    user_courses = input_dict['course']
    currentTerm = input_dict['currentTerm']
    personalEvents = input_dict['personalEvent']
    # Client sends the waitlist flag as a string ("TRUE"/"FALSE").
    waitlist_okay = input_dict['waitlistStat'].upper() == 'TRUE'

    start_time = time()
    must_haves, could_haves = ScheduleofClasses.get_section_pairings(
        user_courses=user_courses, termCode=currentTerm,
        personalEvents=personalEvents, waitlist_okay=waitlist_okay)
    log(message='get_section_pairings took ' +
        str(round(time() - start_time, 5)))

    log('must_haves:\n' + str(must_haves) + '\nwant_to_haves: \n' +
        str(could_haves) + '\nprefs: \n:' + str(input_dict['preference']))

    start_time = time()
    user.schedule = Schedule.generateSchedule(
        must_haves=must_haves, want_to_haves=could_haves,
        preferences=input_dict['preference'])
    log(message='generateSchedule took' + str(round(time() - start_time, 5)))

    log('Schedule: \n' + str(user.schedule))  # save raw output of schedule

    new_display, new_schedule = convert_schedule(user)
    log("Converted Schedule:\n" + str(new_schedule) + '\n' + str(new_display))

    # Serialize display + schedule and push them back to the requesting client.
    display_schedule = json.dumps({
        'display': new_display,
        'schedule': new_schedule
    })
    await sio.emit('schedule_ready', display_schedule, room=user.sid)
    log('Sent schedule to: ' + str(user.address))
示例#35
0
 def get_cost_threshold(self, streamer_schedule, fpses):
     """Compare the scheduled (target) cost against the observed cost.

     Builds an "observed" schedule from the FPS values actually measured for
     each app, then prices both schedules with scheduler_util.

     Returns:
         -1 when the observed cost is within 20% of the target cost (no
         rescheduling needed); otherwise the observed cost, which the caller
         can adopt as the new cost threshold.
     """
     # BUGFIX: converted Python 2 print statements to print() calls so the
     # method parses under Python 3 (the rest of the file is Python 3);
     # the printed output is unchanged.
     print("[get_cost_threshold] Recalculating...")
     fps_by_app_id = self.get_fps_by_app_id(streamer_schedule, fpses)
     observed_schedule = []
     for unit in self.schedule:
         target_fps = unit.target_fps
         observed_fps = fps_by_app_id[unit.app_id]
         observed_unit = Schedule.ScheduleUnit(unit.app, observed_fps,
                                               unit.num_frozen)
         observed_schedule.append(observed_unit)
         print("[get_cost_threshold] Target FPS: ", target_fps, "Observed FPS: ", observed_fps)
     target_cost = scheduler_util.get_cost_schedule(
         self.schedule, self.model.layer_latencies, self.model.final_layer)
     observed_cost = scheduler_util.get_cost_schedule(
         observed_schedule, self.model.layer_latencies,
         self.model.final_layer)
     print("[get_cost_threshold] Target cost: ", target_cost, " Observed cost: ", observed_cost)
     if abs(target_cost - observed_cost) / target_cost < 0.20:
         return -1
     return observed_cost
示例#36
0
    def mate(self, selection_type_population=None, *args, **kwargs):
        pass
示例#37
0
	def __init__(self, name):
		"""Store the given name and create the Schedule associated with it."""
		self._name = name
		self._schedule = Schedule(name)
示例#38
0
class MasterDaemon:
    """
    Main class of the master daemon
    It provides communication between master and slave boxes and a part of the database management
    """
    def __init__(self, log_flag):
        """
        Initialize the master daemon: logging, AES keys, database access,
        protocol managers, schedulers and the two dispatch tables.

        Parameters:
            log_flag: verbosity flag forwarded to the Logger.
        """
        self.logger = Logger(log_flag, LOG_FILE);
        self.logger.info('Started Domoleaf Master Daemon');
        self.d3config = {};
        # AES keys: one secret key per slave box, plus this master's own key.
        self.aes_slave_keys = {};
        self.aes_master_key = None
        self.connected_clients = {};
        self.sql = MasterSql();
        self._parser = DaemonConfigParser(MASTER_CONF_FILE);
        # MySQL credentials read from the master configuration file.
        self.db_username = self._parser.getValueFromSection(MASTER_CONF_MYSQL_SECTION, MASTER_CONF_MYSQL_USER_ENTRY);
        self.db_passwd = self._parser.getValueFromSection(MASTER_CONF_MYSQL_SECTION, MASTER_CONF_MYSQL_PASSWORD_ENTRY);
        self.db_dbname = self._parser.getValueFromSection(MASTER_CONF_MYSQL_SECTION, MASTER_CONF_MYSQL_DB_NAME_ENTRY);
        self.get_aes_slave_keys(0);
        self.reload_camera(None, None, 0);
        self._scanner = Scanner();
        # Known hosts on the network; the local host is always present.
        self.hostlist = [];
        self.hostlist.append(Host('', '127.0.0.1', socket.gethostname().upper()));
        self.knx_manager = KNXManager(self.aes_slave_keys);
        self.enocean_manager = EnOceanManager(self.aes_slave_keys);
        self.reload_d3config(None, None, 0);
        self.trigger = Trigger(self);
        self.scenario = Scenario(self);
        self.schedule = Schedule(self);
        self.calcLogs = CalcLogs(self);

        # Device "function_writing" id -> callable performing the write
        # (used by send_to_device).
        self.functions = {
              1 : self.knx_manager.send_knx_write_short_to_slave,
              2 : self.knx_manager.send_knx_write_long_to_slave,
              3 : self.knx_manager.send_knx_write_speed_fan,
              4 : self.knx_manager.send_knx_write_temp,
              5 : IP_IRManager().send_to_gc,
              6 : self.knx_manager.send_on,
              7 : self.knx_manager.send_to_thermostat,
              8 : self.knx_manager.send_clim_mode,
              9 : HttpReq().http_action,
             10 : self.upnp_audio,
             11 : self.knx_manager.send_knx_write_percent,
             12 : self.knx_manager.send_off,
             13 : self.knx_manager.send_knx_write_short_to_slave_r,
        };
        # packet_type -> handler; parse_data dispatches on this table.
        self.data_function = {
            DATA_MONITOR_KNX                  : self.monitor_knx,
            DATA_MONITOR_IP                   : self.monitor_ip,
            DATA_MONITOR_ENOCEAN              : self.monitor_enocean,
            DATA_MONITOR_BLUETOOTH            : self.monitor_bluetooth,
            DATA_KNX_READ                     : self.knx_read,
            DATA_KNX_WRITE_S                  : self.knx_write_short,
            DATA_KNX_WRITE_L                  : self.knx_write_long,
            DATA_SEND_TO_DEVICE               : self.send_to_device,
            DATA_CRON_UPNP                    : self.cron_upnp,
            DATA_SEND_MAIL                    : self.send_mail,
            DATA_MODIF_DATETIME               : self.modif_datetime,
            DATA_CHECK_SLAVE                  : self.check_slave,
            DATA_RELOAD_CAMERA                : self.reload_camera,
            DATA_RELOAD_D3CONFIG              : self.reload_d3config,
            DATA_BACKUP_DB_CREATE_LOCAL       : self.backup_db_create_local,
            DATA_BACKUP_DB_REMOVE_LOCAL       : self.backup_db_remove_local,
            DATA_BACKUP_DB_LIST_LOCAL         : self.backup_db_list_local,
            DATA_BACKUP_DB_RESTORE_LOCAL      : self.backup_db_restore_local,
            DATA_CHECK_USB                    : self.check_usb,
            DATA_BACKUP_DB_CREATE_USB         : self.backup_db_create_usb,
            DATA_BACKUP_DB_REMOVE_USB         : self.backup_db_remove_usb,
            DATA_BACKUP_DB_LIST_USB           : self.backup_db_list_usb,
            DATA_BACKUP_DB_RESTORE_USB        : self.backup_db_restore_usb,
            DATA_SMARTCMD_LAUNCH              : self.smartcmd_launch,
            DATA_TRIGGERS_LIST_UPDATE         : self.triggers_list_update,
            DATA_SCHEDULES_LIST_UPDATE        : self.schedules_list_update,
            DATA_SCENARIOS_LIST_UPDATE        : self.scenarios_list_update,
            DATA_CHECK_ALL_SCHEDULES          : self.check_schedules,
            DATA_CALC_LOGS                    : self.launch_calc_logs,
            DATA_CHECK_UPDATES                : self.check_updates,
            DATA_UPDATE                       : self.update,
            DATA_SEND_ALIVE                   : self.send_request,
            DATA_SEND_TECH                    : self.send_tech,
            DATA_SEND_INTERFACES              : self.send_interfaces,
            DATA_SHUTDOWN_D3                  : self.shutdown_d3,
            DATA_REBOOT_D3                    : self.reboot_d3,
            DATA_WIFI_UPDATE                  : self.wifi_update,
            DATA_REMOTE_SQL                   : self.remote_sql
        };

    def get_aes_slave_keys(self, db):
        """
        Load the AES secret key of every daemon from the database.

        Slave keys are cached in ``self.aes_slave_keys``; the row matching
        this host's name also becomes ``self.aes_master_key``.
        """
        query = "SELECT serial, secretkey FROM daemon";
        rows = self.sql.mysql_handler_personnal_query(query, db);
        self_hostname = socket.gethostname();
        for serial, secretkey in rows:
            if SLAVE_NAME_PREFIX in serial or 'MD3' in serial:
                self.aes_slave_keys[serial] = secretkey;
            elif self_hostname == serial:
                # This row is the master itself.
                self.aes_slave_keys[serial] = secretkey;
                self.aes_master_key = secretkey;

    def stop(self):
        """
        Stops the daemon and closes sockets
        """
        flag = False;
        while not flag:
            flag = True;
            for client in self.connected_clients.values():
                flag = False;
                client.close();
                break;
        self.slave_connection.close();
        sys.exit(0);

    def run(self):
        """
        Initialization of the connections and accepting incoming communications.

        Opens two listening TCP sockets -- one for slave boxes, one for
        commands -- on the ports read from the master configuration file,
        then enters the main select loop. Exits the process if either port
        is missing from the configuration.
        """
        self.slave_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
        self.cmd_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
        # TCP_NODELAY: send small control packets immediately (no Nagle).
        self.slave_connection.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1);
        self.cmd_connection.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1);
        # SO_REUSEADDR: allow quick rebinding after a restart.
        self.slave_connection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);
        self.cmd_connection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);
        s_port = self._parser.getValueFromSection(MASTER_CONF_LISTEN_SECTION, MASTER_CONF_LISTEN_PORT_SLAVE_ENTRY);
        c_port = self._parser.getValueFromSection(MASTER_CONF_LISTEN_SECTION, MASTER_CONF_LISTEN_PORT_CMD_ENTRY);
        if not s_port:
            frameinfo = getframeinfo(currentframe());
            self.logger.error('in run: No slave listening port defined in '+MASTER_CONF_FILE);
            sys.exit(1);
        if not c_port:
            frameinfo = getframeinfo(currentframe());
            self.logger.error('in run: No command listening port defined in '+MASTER_CONF_FILE);
            sys.exit(1);
        self.slave_connection.bind(('', int(s_port)));
        self.slave_connection.listen(MAX_SLAVES);
        self.cmd_connection.bind(('', int(c_port)));
        self.cmd_connection.listen(MAX_CMDS);
        self.loop();

    def loop(self):
        """
        Main loop. Waits for new connections.

        Polls the slave and command listening sockets with select() and
        spawns a receiver thread for each accepted connection. Runs until
        the flag is cleared or a KeyboardInterrupt stops the daemon.
        """
        # NOTE(review): this assignment shadows the run() method with a bool
        # (self.run is no longer callable afterwards) -- looks like the flag
        # should have its own name; confirm nothing calls run() twice.
        self.run = True;
        while self.run:
            try:
                # SELECT_TIMEOUT keeps the loop responsive to the flag/signals.
                rlist, wlist, elist = select.select([self.slave_connection], [], [], SELECT_TIMEOUT);
                for connection in rlist:
                    self.accept_new_slave_connection(connection);
                rlist, wlist, elist = select.select([self.cmd_connection], [], [], SELECT_TIMEOUT);
                for connection in rlist:
                    self.accept_new_cmd_connection(connection);
            except KeyboardInterrupt as e:
                frameinfo = getframeinfo(currentframe());
                self.logger.info('in loop: Keyboard interrupt: leaving program');
                print("[ MASTER DAEMON ",frameinfo.filename,":",str(frameinfo.lineno)," ]: Keyboard Interrupt");
                self.stop();
                sys.exit(0);
            except ValueError as e:
                # Log and keep serving: a bad value must not kill the daemon.
                frameinfo = getframeinfo(currentframe());
                self.logger.error('in loop: Value error: '+str(e));
                print("[ MASTER DAEMON ",frameinfo.filename,":",str(frameinfo.lineno),"]: Value Error");
                print(e);
                pass;

    def accept_new_cmd_connection(self, connection):
        """
        Accept a pending domoleaf command connection and handle it in a
        dedicated receiver thread.
        """
        new_connection, addr = connection.accept();
        receiver = CommandReceiver(new_connection, self);
        receiver.start();

    def accept_new_slave_connection(self, connection):
        """
        Accept a pending slave connection, resolve the peer's host name and
        hand the socket to a receiver thread.
        """
        new_connection, addr = connection.accept();
        local_hostname = socket.gethostname();
        try:
            peer_name = socket.gethostbyaddr(addr[0])[0]
        except socket.error as serr:
            peer_name = 'localhost'
        if peer_name == 'localhost':
            # A local connection is identified by this host's own name.
            peer_name = local_hostname
        receiver = SlaveReceiver(new_connection, peer_name.split('.')[0], self);
        receiver.start();

    def parse_data(self, data, connection, daemon_id, db):
        """
        Once data are received whether from domoleaf or slave, the function of the packet_type in data is called.
        """
        json_obj = json.JSONDecoder().decode(data);
        json_obj['daemon_id'] = daemon_id;
        if json_obj['packet_type'] in self.data_function.keys():
            self.data_function[json_obj['packet_type']](json_obj, connection, db);
        else:
            frameinfo = getframeinfo(currentframe());

    def check_updates(self, json_obj, connection, db):
        """
        Check whether a newer 'domomaster' package is available and store the
        candidate version in configuration_id=13 (empty string while checking).
        Aborts with an error log if the current master version
        (configuration_id=4) is missing from the database.
        """
        query = 'SELECT configuration_value FROM configuration WHERE configuration_id=4';
        actual_version = self.sql.mysql_handler_personnal_query(query, db);
        if not actual_version:
            self.logger.error("CHECK_UPDATE : No Master Version");
            return;
        # Clear the "available version" field while the check runs.
        query = 'UPDATE configuration SET configuration_value="" WHERE configuration_id=13';
        self.sql.mysql_handler_personnal_query(query, db);
        # Repair any interrupted dpkg state before querying apt.
        p = call(['dpkg', '--configure', '-a'])
        p = Popen(['apt-get', 'update'], stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=-1);
        output, error = p.communicate();
        p = Popen(['apt-show-versions',  '-u', 'domomaster'], stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=-1);
        output, error = p.communicate();
        if not p.returncode:
            # apt-show-versions succeeded: last token of its output is the
            # upgradable version.
            tab = output.decode("utf-8").split(" ");
            version = tab[-1].rsplit("\n")[0];
        else:
            # No upgrade available: keep the current version.
            version = actual_version[0][0];
        query = ''.join(['UPDATE configuration SET configuration_value="', version, '" WHERE configuration_id=13']);
        self.sql.mysql_handler_personnal_query(query, db);

    def update(self, json_obj, connection, db):
        """
        Upgrade the domomaster/domoslave packages on this box, record the new
        version in the database, then propagate the update order to every
        known MD3/SD3 host not already listed in json_obj['data'].
        """
        call(['apt-get', 'update']);
        # NOTE(review): stdout=False / stderr=False are dubious Popen
        # arguments (bools are treated as file descriptors); output is
        # unused, so left as-is -- confirm intended.
        p = Popen("DEBIAN_FRONTEND=noninteractive apt-get install domomaster domoslave -y ",
              shell=True, stdin=None, stdout=False, stderr=False,executable="/bin/bash");
        output, error = p.communicate();
        hostname = socket.gethostname();
        if '.' in hostname:
            hostname = hostname.split('.')[0];
        version = os.popen("dpkg-query -W -f='${Version}\n' domomaster").read().split('\n')[0];
        # BUGFIX: the statement was corrupted ('UPDATmon SET version=...');
        # use the same "UPDATE daemon SET version=" form as for the slaves below.
        query = ''.join(['UPDATE daemon SET version="', version, '" WHERE name="', hostname, '"']);
        self.sql.mysql_handler_personnal_query(query, db);
        query = ''.join(['UPDATE configuration SET configuration_value="', version, '" WHERE configuration_id=4']);
        self.sql.mysql_handler_personnal_query(query, db);
        # Mark this host as done so peers do not send the order back to us.
        json_obj['data'].append(hostname);
        port = self._parser.getValueFromSection('connect', 'port');
        for host in self.hostlist:
            if (host._Hostname.startswith('MD3') or host._Hostname.startswith('SD3')) and host._Hostname not in json_obj['data']:
                sock = socket.create_connection((host._IpAddr, port));
                json_str = json.JSONEncoder().encode(json_obj);
                sock.send(bytes(json_str, 'utf-8'));
                data = sock.recv(4096);
                # Reply layout: 16-byte IV followed by the AES-CBC ciphertext.
                decrypt_IV = data[:16].decode();
                decode_obj = AES.new(self.aes_master_key, AES.MODE_CBC, decrypt_IV);
                # BUGFIX: the decrypted payload is a JSON string and was being
                # indexed directly (TypeError on str). Strip padding and parse
                # it first. TODO(review): confirm the slave's padding scheme.
                plain = decode_obj.decrypt(data[16:]).decode();
                data2 = json.JSONDecoder().decode(plain.rstrip('\x00 \r\n'));
                version = data2['new_version'];
                query = ''.join(['UPDATE daemon SET version="', version, '" WHERE name="', host._Hostname, '"']);
                self.sql.mysql_handler_personnal_query(query, db);
                sock.close();

    def backup_db_create_local(self, json_obj, connection, db):
        """
        Dump the 'domoleaf' MySQL database to
        /etc/domoleaf/sql/backup/domoleaf_backup_<unix-ts>.sql.tar.gz and
        remove the uncompressed .sql file.
        """
        path = '/etc/domoleaf/sql/backup/';
        filename = 'domoleaf_backup_';
        # Timestamp without the fractional part names the backup file.
        t = str(time.time());
        if '.' in t:
            t = t.split('.')[0];
        filename += t+'.sql';
        os.system("mysqldump --defaults-file=/etc/mysql/debian.cnf domoleaf > "+path+filename);
        os.system('cd '+path+' && tar -czf '+filename+'.tar.gz'+' '+filename);
        os.system('rm '+path+filename);

    def backup_db_remove_local(self, json_obj, connection, db):
        """
        Remove the local backup named by json_obj['data'] (timestamp part of
        the filename). Falls back to the uncompressed .sql file if the
        .tar.gz does not exist. Rejects names starting with '.' or '/' to
        avoid path escapes.
        """
        filename = ''.join(['/etc/domoleaf/sql/backup/domoleaf_backup_', str(json_obj['data']), '.sql.tar.gz']);
        if str(json_obj['data'][0]) == '.' or str(json_obj['data'][0]) == '/':
            self.logger.error('The filename is corrupted. Aborting database file removing.')
            return;
        try:
            os.stat(filename);
        except Exception as e:
            # .tar.gz missing: try the plain .sql file instead.
            try:
                filename = filename.split('.tar.gz')[0];
                os.stat(filename);
            except Exception as e:
                self.logger.error("The database file to remove does not exists.")
                self.logger.error(e)
                return;
        os.remove(filename);

    def backup_db_list_local(self, json_obj, connection, db):
        """
        Send over ``connection`` a JSON list of local backups, one
        {"name", "size"} entry per .sql file, sorted by name descending
        (newest timestamp first).
        """
        json_obj = [];
        append = json_obj.append;
        backup_list = os.listdir('/etc/domoleaf/sql/backup/')
        for f in backup_list:
            s = os.stat('/etc/domoleaf/sql/backup/'+f);
            if '.sql' in f:
                # Strip everything from ".sql" on to get the display name.
                g = f.split('.sql')[0];
                append({"name": g, "size": s.st_size});
        json_sorted = sorted(json_obj, key=lambda json_obj: json_obj['name'], reverse=True);
        json_str = json.JSONEncoder().encode(json_sorted);
        connection.send(bytes(json_str, 'utf-8'));

    def backup_db_restore_local(self, json_obj, connection, db):
        """
        Restore the 'domoleaf' database from the local backup named by
        json_obj['data']. Prefers the .tar.gz archive (extracted first);
        falls back to the plain .sql file. Rejects names starting with '.'
        or '/' to avoid path escapes.
        """
        path = '/etc/domoleaf/sql/backup/';
        filename = ''.join(['domoleaf_backup_', str(json_obj['data']), '.sql.tar.gz']);
        if json_obj['data'][0] == '.' or json_obj['data'][0] == '/':
            self.logger.error('The filename is corrupted. Aborting database restoring.')
            return;
        try:
            os.stat(path+filename);
            os.system('cd '+path+' && tar -xzf '+filename);
            os.system('mysql --defaults-file=/etc/mysql/debian.cnf domoleaf < '+path+filename.split('.tar.gz')[0]);
            os.system('rm '+path+filename.split('.tar.gz')[0]);
            return;
        except Exception as e:
            # Archive missing: try the uncompressed .sql file.
            try:
                filename = filename.split('.tar.gz')[0];
                os.stat(path+filename);
            except Exception as e:
                self.logger.error("The database file to restore does not exists.");
                self.logger.error(e);
                return;
        os.system('mysql --defaults-file=/etc/mysql/debian.cnf domoleaf < '+path+filename);

    def check_usb(self, json_obj, connection, db):
        """
        Report over ``connection`` whether a USB partition (/dev/sd?1) is
        present: sends JSON 1 when found, 0 otherwise. Sends nothing when no
        candidate device exists at all.
        """
        try:
            sdx1 = glob.glob('/dev/sd?1')[0];
        except Exception as e:
            # No /dev/sd?1 candidate at all: stay silent.
            return;
        present = 1 if os.path.exists(sdx1) else 0;
        connection.send(bytes(json.JSONEncoder().encode(present), 'utf-8'));

    def backup_db_list_usb(self, json_obj, connection, db):
        """
        Mount the first USB partition on /etc/domoleaf/mnt, send over
        ``connection`` a JSON list of the backups found in its backup/
        directory ({"name","size"} per .sql file, sorted by name descending),
        then unmount it.
        """
        json_obj = [];
        append = json_obj.append
        sdx1 = glob.glob('/dev/sd?1')[0];
        if not (os.path.exists(sdx1)):
            return;
        os.system('mount '+sdx1+' /etc/domoleaf/mnt');
        os.system('mkdir -p /etc/domoleaf/mnt/backup');
        backup_list = os.listdir('/etc/domoleaf/mnt/backup/')
        for f in backup_list:
            s = os.stat('/etc/domoleaf/mnt/backup/'+f);
            if '.sql' in f:
                g = f.split('.sql')[0];
                append({"name": g, "size": s.st_size});
        # Unmount before sending: the listing is already in memory.
        os.system('umount /etc/domoleaf/mnt');
        json_sorted = sorted(json_obj, key=lambda json_obj: json_obj['name'], reverse=True);
        json_str = json.JSONEncoder().encode(json_sorted);
        connection.send(bytes(json_str, 'utf-8'));

    def backup_db_remove_usb(self, json_obj, connection, db):
        """
        Remove the backup named by json_obj['data'] from the mounted USB
        partition. Falls back to the uncompressed .sql file if the .tar.gz
        is missing; always unmounts before returning. Rejects names starting
        with '.' or '/' to avoid path escapes.
        """
        filename = ''.join(['/etc/domoleaf/mnt/backup/domoleaf_backup_', str(json_obj['data']), '.sql.tar.gz']);
        if str(json_obj['data'][0]) == '.' or str(json_obj['data'][0]) == '/':
            self.logger.error('The filename is corrupted. Aborting database file removing.')
            return;
        sdx1 = glob.glob('/dev/sd?1')[0];
        if not (os.path.exists(sdx1)):
            return;
        os.system('mount '+sdx1+' /etc/domoleaf/mnt');
        path = '/etc/domoleaf/mnt/backup/';
        try:
            os.stat(filename);
        except Exception as e:
            # .tar.gz missing: try the plain .sql file instead.
            try:
                filename = filename.split('.tar.gz')[0];
                os.stat(filename);
            except Exception as e:
                self.logger.error("The database file to remove does not exists.")
                self.logger.error(e)
                os.system('umount /etc/domoleaf/mnt');
                return;
        os.remove(filename);
        os.system('umount /etc/domoleaf/mnt');

    def backup_db_restore_usb(self, json_obj, connection, db):
        """
        Restore the 'domoleaf' database from a backup on the USB partition.
        Copies the file to /tmp first (extracting the .tar.gz if needed) so
        the USB stick can be unmounted before mysql runs. Rejects names
        starting with '.' or '/' to avoid path escapes.
        """
        path = '/etc/domoleaf/mnt/backup/';
        filename = ''.join(['domoleaf_backup_', str(json_obj['data']), '.sql']);
        if json_obj['data'][0] == '.' or json_obj['data'][0] == '/':
            self.logger.error('The filename is corrupted. Aborting database restoring.')
            return;
        sdx1 = glob.glob('/dev/sd?1')[0];
        if not (os.path.exists(sdx1)):
            return;
        os.system('mount '+sdx1+' /etc/domoleaf/mnt');
        try:
            # Plain .sql present: copy, unmount, restore, clean up.
            os.stat(path+filename);
            os.system('cp '+path+filename+' /tmp/ && umount /etc/domoleaf/mnt && cd /tmp/');
            os.system('mysql --defaults-file=/etc/mysql/debian.cnf domoleaf < /tmp/'+filename);
            os.remove('/tmp/'+filename);
            return;
        except Exception as e:
            # Otherwise look for the compressed archive and extract it in /tmp.
            try:
                filename += '.tar.gz';
                os.stat(path+filename);
                os.system('cp '+path+filename+' /tmp/ && umount /etc/domoleaf/mnt && cd /tmp/ && tar -xzf '+filename);
            except Exception as e:
                self.logger.error("The database file to restore does not exists.");
                self.logger.error(e);
                os.system('umount /etc/domoleaf/mnt');
                return;
        os.system('umount /etc/domoleaf/mnt');
        os.system('mysql --defaults-file=/etc/mysql/debian.cnf domoleaf < /tmp/'+filename.split('.tar.gz')[0]);
        os.remove('/tmp/'+filename);
        os.remove('/tmp/'+filename.split('.tar.gz')[0]);

    def backup_db_create_usb(self, json_obj, connection, db):
        """
        Dump the 'domoleaf' database to backup/domoleaf_backup_<unix-ts>.sql.tar.gz
        on the mounted USB partition, then remove the uncompressed dump and
        unmount the stick.
        """
        sdx1 = glob.glob('/dev/sd?1')[0];
        if not (os.path.exists(sdx1)):
            return;
        os.system('mount '+sdx1+' /etc/domoleaf/mnt');
        path = '/etc/domoleaf/mnt/backup/';
        filename = 'domoleaf_backup_';
        os.system('mkdir -p '+path);
        # Timestamp without the fractional part names the backup file.
        t = str(time.time());
        if '.' in t:
            t = t.split('.')[0];
        filename += t+'.sql';
        os.system("mysqldump --defaults-file=/etc/mysql/debian.cnf domoleaf > "+path+filename);
        os.system('cd '+path+' && tar -czf '+filename+'.tar.gz'+' '+filename);
        os.system('rm '+path +filename);
        os.system('umount /etc/domoleaf/mnt');

    def monitor_knx(self, json_obj, connection, db):
        """
        Callback called each time a monitor_knx packet is received.
        Updates room_device_option values in the database and check scenarios.
        """
        daemon_id = self.sql.update_knx_log(json_obj, db);
        # doList: device options affected by this KNX event; truthy means
        # scenarios may have to react.
        doList = self.knx_manager.update_room_device_option(daemon_id, json_obj, db);
        if doList:
            self.scenario.setValues(self.get_global_state(db), self.trigger, self.schedule, connection, doList);
            self.scenario.start();
        connection.close();

    def knx_write_short(self, json_obj, connection, db):
        """
        Callback called each time a knx_write_short packet is received.
        Updates room_device_option values in the database.
        """
        daemons = self.sql.get_daemons(db);
        slave_name = self.get_slave_name(json_obj, daemons);
        if slave_name is None:
            connection.close();
            return None;
        dev = {}
        dev["addr_dst"] = json_obj['data']['addr']
        slave_name = slave_name.split('.')[0];
        self.knx_manager.send_knx_write_short_to_slave(json_obj, dev, slave_name);
        connection.close();
        return None;

    def knx_write_long(self, json_obj, connection, db):
        """
        Callback called each time a knx_write_long packet is received.
        Updates room_device_option values in the database.
        """
        daemons = self.sql.get_daemons(db);
        slave_name = self.get_slave_name(json_obj, daemons);
        if slave_name is None:
            connection.close();
            return None;
        dev = {}
        dev["addr_dst"] = json_obj['data']['addr']
        slave_name = slave_name.split('.')[0];
        self.knx_manager.send_knx_write_long_to_slave(json_obj, dev, slave_name);
        connection.close();
        return None;

    def knx_read(self, json_obj, connection, db):
        """
        Callback called each time a knx_read packet is received.
        """
        daemons = self.sql.get_daemons(db);
        slave_name = self.get_slave_name(json_obj, daemons);
        if slave_name is None:
            return None;
        slave_name = slave_name.split('.')[0];
        self.knx_manager.send_knx_read_request_to_slave(slave_name, json_obj);
        connection.close();

    def monitor_ip(self, json_obj, connection, db):
        """
        Callback called each time a monitor_ip packet is received.
        A new local network scan is performed and the result stored in the database
        """
        self.scanner.scan();
        self.sql.insert_hostlist_in_db(self.scanner._HostList, db);
        self.hostlist = self.scanner._HostList;
        connection.close();

    def monitor_bluetooth(self, json_obj, connection, db):
        """
        TODO: bluetooth monitoring is not implemented yet; this stub only
        closes the incoming connection.
        """
        connection.close();
        return None;

    def monitor_enocean(self, json_obj, connection, db):
        """
        Callback called each time a monitor_enocean packet is received.
        Stores the data in enocean_log table.
        """
        daemon_id = self.sql.update_enocean_log(json_obj, db);
        doList = self.enocean_manager.update_room_device_option(daemon_id, json_obj, db);
        connection.close();
        if doList:
            # NOTE(review): the connection is already closed when handed to
            # setValues here (monitor_knx closes it afterwards) -- confirm.
            self.scenario.setValues(self.get_global_state(db), self.trigger, self.schedule, connection, doList);
            self.scenario.start();
        return None;

    def send_to_device(self, json_obj, connection, db):
        """
        Retrieves the good device in the database and builds the request to send.

        Loads the device for (room_device_id, option_id), resolves the host
        owning it, then dispatches through self.functions using the device's
        'function_writing' id. Errors from the handler are logged, not raised.
        """
        hostname = '';
        dm = DeviceManager(int(json_obj['data']['room_device_id']), int(json_obj['data']['option_id']), DEBUG_MODE);
        dev = dm.load_from_db(db);
        if dev is None:
            connection.close();
            return ;
        if 'daemon_name' in dev:
            # Find the host that matches the device's owning daemon.
            for host in self.hostlist:
                if dev['daemon_name'] == host._Hostname:
                    hostname = host._Hostname;
                    break;
        function_writing = int(dev['function_writing']);
        if (function_writing > 0):
            try:
                # Dispatch to the write handler registered in __init__.
                self.functions[function_writing](json_obj, dev, hostname);
            except Exception as e:
                self.logger.error(e);
        connection.close();

    def upnp_audio(self, json_obj, dev, hostname):
        """Send a UPnP audio action to the device described by ``dev``."""
        controller = UpnpAudio(dev['addr'], int(dev['plus1']));
        controller.action(json_obj);

    def get_ip_ifname(self, ifname):
        """
        Retrieves the IPv4 address of the given network interface name
        (e.g. "eth0"), or None on failure.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);
        try:
            # 0x8915 is the SIOCGIFADDR ioctl: returns the interface address;
            # bytes 20..24 of the result hold the packed IPv4 address.
            res = socket.inet_ntoa(fcntl.ioctl(s.fileno(),
                                               0x8915,
                                               struct.pack('256s', bytes(ifname, 'utf-8')))[20:24]);
            return res;
        except Exception as e:
            frameinfo = getframeinfo(currentframe());
            self.logger.error('in get_ip_ifname: '+str(e));
            return None;

    def cron_upnp(self, json_obj, connection, db):
        """
        Callback called each time a cron_upnp packet is received.
        Opens or closes HTTP/HTTPS port forwardings via upnpc, mapping the
        external port stored in configuration to local port 80 or 443.
        """
        local_ip = self.get_ip_ifname("eth0");
        if local_ip is None:
            connection.close();
            return None;
        query = "SELECT configuration_id, configuration_value FROM configuration";
        # BUGFIX: every other call site passes the db handle to
        # mysql_handler_personnal_query; it was missing here.
        res = self.sql.mysql_handler_personnal_query(query, db);
        actions = json_obj['data'];
        for act in actions:
            if act['action'] == 'open':
                for r in res:
                    if int(r[0]) == int(act['configuration_id']):
                        # configuration_id 1 -> HTTP (80), 2 -> HTTPS (443).
                        if int(r[0]) == 1:
                            call(["upnpc", "-a", local_ip, str(r[1]), "80", act['protocol']]);
                        elif int(r[0]) == 2:
                            call(["upnpc", "-a", local_ip, str(r[1]), "443", act['protocol']]);
            elif act['action'] == 'close':
                for r in res:
                    if int(r[0]) == int(act['configuration_id']):
                        call(["upnpc", "-d", str(r[1]), act['protocol']]);

    def reload_camera(self, json_obj, connection, db):
        """
        Generation of the file devices.conf located in /etc/domoleaf by default.

        Writes one nginx proxy "location" block per camera device
        (protocol_id = 6) with a valid IP, then restarts nginx.
        """
        with open(CAMERA_CONF_FILE, 'w') as camera_file:
            query = "SELECT room_device_id, addr, plus1 FROM room_device WHERE protocol_id = 6";
            res = self.sql.mysql_handler_personnal_query(query, db);
            for r in res:
                ip = str(r[1]);
                if not r[1] or not utils.is_valid_ip(ip):
                    continue;
                # Append the optional port only when plus1 is numeric.
                if str(r[2]).isdigit():
                    tail = ":"+str(r[2])+"/;\n}\n\n";
                else:
                    tail = ":/;\n}\n\n";
                camera_file.write("location /device/"+str(r[0]));
                camera_file.write("/ {\n")
                camera_file.write("\tproxy_buffering off;\n")
                camera_file.write("\tproxy_pass http://"+ip);
                camera_file.write(tail);
        call(["service", "nginx", "restart"]);

    def reload_d3config(self, json_obj, connection, db):
        """
        Loads port config. Reading in database and storing.
        """
        query = "SELECT configuration_id, configuration_value FROM configuration";
        res = self.sql.mysql_handler_personnal_query(query, db);
        for r in res:
            self.d3config[str(r[0])] = r[1];

    def check_slave(self, json_obj, connection, db):
        """
        Asks "check_slave" to the slave described in json_obj and waits for answer.

        Looks up the slave daemon row, resolves its IP from the scanned host
        list, sends an AES-CBC encrypted "check_slave" packet, authenticates
        the reply by comparing the returned aes_pass with the locally stored
        slave key, then records validation/version/interfaces in the database.
        """
        query = ''.join(["SELECT serial, secretkey FROM daemon WHERE daemon_id=", str(json_obj['data']['daemon_id'])]);
        res = self.sql.mysql_handler_personnal_query(query, db);
        # Exactly one daemon row must match the requested id.
        if res is None or not res:
            self.logger.error('in check_slave: No daemon for id '+str(json_obj['data']['daemon_id']));
            connection.close();
            return ;
        elif len(res) > 1:
            self.logger.error('in check_slave: Too much daemons for id '+str(json_obj['data']['daemon_id']));
            connection.close();
            return ;
        hostname = res[0][0];
        self_hostname = socket.gethostname();
        if hostname == self_hostname:
            # The "slave" is this very machine: talk to ourselves locally.
            ip = '127.0.0.1';
        else:
            # Resolve the slave IP from the host list built by the network scan.
            ip = '';
            for h in self.hostlist:
                if hostname in h._Hostname.upper():
                    ip = h._IpAddr;
        if not ip:
            self.logger.error('in check_slave: '+hostname+' not in hostlist. Try perform network scan again.');
            connection.close();
            return ;
        port = self._parser.getValueFromSection('connect', 'port');
        sock = socket.create_connection((ip, port));
        # Strip the domain part; only the short hostname is sent.
        if '.' in self_hostname:
            self_hostname = self_hostname.split('.')[0];
        aes_IV = AESManager.get_IV();
        aes_key = self.get_secret_key(hostname);
        obj_to_send = ''.join(['{"packet_type": "check_slave", "sender_name": "', self_hostname, '"}']);
        encode_obj = AES.new(aes_key, AES.MODE_CBC, aes_IV);
        # Space-pad up to the 16-byte AES block size; the IV is prepended in clear.
        spaces = 16 - len(obj_to_send) % 16;
        sock.send(bytes(aes_IV, 'utf-8') + encode_obj.encrypt(obj_to_send + (spaces * ' ')));
        rlist, wlist, elist = select.select([sock], [], [], SELECT_TIMEOUT * 10);
        val = '0';
        version = '';
        interface_knx = '';
        interface_enocean = '';
        # NOTE(review): recv is issued even when select timed out (rlist is
        # never checked), so this call can block — confirm intended.
        data = sock.recv(4096);
        if data:
            # Reply layout: 16-byte IV in clear, then AES-CBC ciphertext.
            decrypt_IV = data[:16].decode();
            decode_obj = AES.new(res[0][1], AES.MODE_CBC, decrypt_IV);
            data2 = decode_obj.decrypt(data[16:]).decode();
            resp = json.JSONDecoder().decode(data2);
            # The slave proves its identity by echoing the shared aes_pass.
            if str(self.aes_slave_keys[hostname]) == str(resp['aes_pass']):
                val = '1';
                version = resp['version'];
                interface_knx = resp['interface_knx'];
                interface_enocean = resp['interface_enocean'];
            connection.send(bytes(version, 'utf-8'));
        connection.close();
        # Persist the validation result and the reported interfaces.
        query = ''.join(['UPDATE daemon SET validation=', val, ', version="', version, '" WHERE serial="', hostname, '"']);
        self.sql.mysql_handler_personnal_query(query, db);
        query = ''.join(['UPDATE daemon_protocol SET interface="', interface_knx, '" WHERE daemon_id="', str(json_obj['data']['daemon_id']), '" AND protocol_id="1"']);
        self.sql.mysql_handler_personnal_query(query, db);
        query = ''.join(['UPDATE daemon_protocol SET interface="', interface_enocean, '" WHERE daemon_id="', str(json_obj['data']['daemon_id']), '" AND protocol_id="2"']);
        self.sql.mysql_handler_personnal_query(query, db);
        sock.close();

    def get_secret_key(self, hostname):
        """
        Retrieves the secretkey of 'hostname' in the database.
        Returns it as a string, or None when no row matches.
        """
        query = ''.join(['SELECT serial, secretkey FROM daemon WHERE serial = \'', hostname, '\'']);
        rows = self.sql.mysql_handler_personnal_query(query);
        match = next((row for row in rows if row[0] == hostname), None);
        if match is not None:
            return str(match[1]);

    def send_mail(self, json_obj, connection, db):
        """
        Callback called each time a send_mail packet is received.
        The parameters are stored in 'json_obj'.

        SMTP settings come from self.d3config: '5' sender address, '6' sender
        name, '7' host, '8' security mode, '9' port, '10' username,
        '11' password.  Sends 'Error' back on the client connection when
        anything fails; closes the connection in every case.
        """
        try:
            from_addr = formataddr((self.d3config['6'], self.d3config['5']));
            host = self.d3config['7'];
            secure = self.d3config['8'];
            port = self.d3config['9'];
            username = self.d3config['10'];
            password = self.d3config['11'];
            msg = MIMEMultipart();
            msg['Subject'] = json_obj['data']['object'];
            msg['From'] = from_addr;
            msg['To'] = json_obj['data']['destinator'];
            msg.attach(MIMEText(json_obj['data']['message']));
            server = smtplib.SMTP(host, port);
            # NOTE(review): d3config values come from the database and may be
            # strings; confirm '8' is really an int so this comparison fires.
            if (secure == 2):
                server.ehlo();
                server.starttls();
                server.ehlo();
            # BUGFIX: authenticate only when credentials are configured.  The
            # old code logged in only when BOTH username and password were
            # empty, and passed the sender address + empty username as
            # credentials.
            if username and password:
                server.login(username, password);
            server.sendmail(from_addr, json_obj['data']['destinator'], msg.as_string());
            server.quit();
            connection.close();
        except Exception as e:
            self.logger.error('Error for sending mail');
            self.logger.error(e);
            connection.send(bytes('Error', 'utf-8'));
            connection.close();

    def modif_datetime(self, json_obj, connection, db):
        """
        Sets the system date and time from the two values in json_obj['data'].

        SECURITY FIX: the previous implementation concatenated packet data
        into an os.system() shell string, allowing shell injection from the
        network peer.  An argument list with no shell removes that vector.
        """
        call(['date', '--set', json_obj['data'][0]]);
        call(['date', '--set', json_obj['data'][1]]);

    def get_slave_name(self, json_obj, daemons):
        """
        Retrieves the hostname of the daemon described by 'json_obj' in the
        'daemons' list.

        daemons rows are indexed as (daemon_id, ..., hostname).  Returns the
        hostname string, or None when the daemon id is unknown or the device
        address is not of the a/b/c form (exactly two '/' separators).
        """
        daemon_found = False;
        slave_name = '';
        for d in daemons:
            if int(json_obj['data']['daemon']) == int(d[0]):
                daemon_found = True;
                slave_name = str(d[2]);
                break;
        if daemon_found is False:
            # Unused getframeinfo/currentframe locals removed (dead code).
            self.logger.error('in get_slave_name: '+str(json_obj['data']['daemon']));
            return None;
        if str(json_obj['data']['addr']).count('/') != 2:
            self.logger.error('in get_slave_name: '+str(json_obj['data']['addr']));
            return None;
        return slave_name;

    def reload_web_server(self):
        """
        Call "service reload nginx"
        """
        self.logger.debug('Reloading web server...');
        reload_cmd = ["service", "nginx", "reload"];
        call(reload_cmd);
        self.logger.debug('[ OK ] Done reloading web server.');

    def smartcmd_launch(self, json_obj, connection, db):
        """Spawns a Smartcommand thread for the smartcommand id carried in json_obj['data']."""
        smartcmd = Smartcommand(self, int(json_obj['data']));
        smartcmd.setValues(connection);
        smartcmd.start();

    def triggers_list_update(self, json_obj, connection, db):
        """Callback: reloads the triggers list from the database."""
        self.trigger.update_triggers_list(db);

    def schedules_list_update(self, json_obj, connection, db):
        """Callback: reloads the schedules list from the database."""
        self.schedule.update_schedules_list(db);

    def scenarios_list_update(self, json_obj, connection, db):
        """Callback: reloads the scenarios list from the database."""
        self.scenario.update_scenarios_list(db);

    def check_schedules(self, json_obj, connection, db):
        """Callback: runs check_all_schedules, passing it the client connection."""
        self.schedule.check_all_schedules(connection);

    def launch_calc_logs(self, json_obj, connection, db):
        """Callback: runs log aggregation; failures are logged, never raised to the caller."""
        try:
            self.calcLogs.sort_logs(connection, db);
        except Exception as e:
            self.logger.error(e);

    def get_global_state(self, db):
        """
        Returns the room_device_option rows whose opt_value is truthy, or the
        empty string '' when there are none.
        """
        query = 'SELECT room_device_id, option_id, opt_value FROM room_device_option';
        rows = self.sql.mysql_handler_personnal_query(query, db);
        with_value = [row for row in rows if row[2]];
        return with_value if with_value else '';

    def send_tech(self, json_obj, connection, db):
        """Fills json_obj['info'] with the configured http/ssl values then forwards the packet."""
        http_rows = self.sql.mysql_handler_personnal_query(
            'SELECT configuration_value FROM configuration WHERE configuration_id=1', db);
        ssl_rows = self.sql.mysql_handler_personnal_query(
            'SELECT configuration_value FROM configuration WHERE configuration_id=2', db);
        json_obj['info']['http'] = http_rows[0][0];
        json_obj['info']['ssl'] = ssl_rows[0][0];
        self.send_request(json_obj, connection, db);

    def send_request(self, json_obj, connection, db):
        """
        Forwards 'json_obj' to the configured admin server, but only when the
        'commercial' flag in the greenleaf config section equals "1".
        """
        if self._parser.getValueFromSection('greenleaf', 'commercial') == "1":
            admin_addr = self._parser.getValueFromSection('greenleaf', 'admin_addr')
            hostname = socket.gethostname()
            GLManager.SendRequest(str(json_obj), admin_addr, self.get_secret_key(hostname))

    def send_interfaces(self, json_obj, connection, db):
        """
        Forwards the KNX/EnOcean interface configuration in json_obj to the
        slave daemon identified by json_obj['data']['daemon_id'], then relays
        '1' to the client connection when the slave's reply authenticates
        against the stored AES key.
        """
        query = ''.join(["SELECT serial, secretkey FROM daemon WHERE daemon_id=", str(json_obj['data']['daemon_id'])]);
        res = self.sql.mysql_handler_personnal_query(query, db);
        # Exactly one daemon row must match the requested id.
        if res is None or not res:
            self.logger.error('in send_interfaces: No daemon for id '+str(json_obj['data']['daemon_id']));
            connection.close();
            return ;
        elif len(res) > 1:
            self.logger.error('in send_interfaces: Too much daemons for id '+str(json_obj['data']['daemon_id']));
            connection.close();
            return ;
        hostname = res[0][0];
        # Resolve the slave IP from the host list built by the network scan.
        ip = '';
        for h in self.hostlist:
            if hostname in h._Hostname.upper():
                ip = h._IpAddr;
        if not ip:
            self.logger.error('in send_interfaces: '+hostname+' not in hostlist. Try perform network scan again.');
            connection.close();
            return ;
        port = self._parser.getValueFromSection('connect', 'port');
        sock = socket.create_connection((ip, port));
        self_hostname = socket.gethostname();
        # Strip the domain part; only the short hostname is sent.
        if '.' in self_hostname:
            self_hostname = self_hostname.split('.')[0];
        aes_IV = AESManager.get_IV();
        aes_key = self.get_secret_key(hostname);
        obj_to_send = json.JSONEncoder().encode(
            {
                "packet_type": "send_interfaces", 
                "sender_name": self_hostname,
                "interface_knx": json_obj['data']['interface_knx'],
                "interface_EnOcean": json_obj['data']['interface_EnOcean'],
                "interface_arg_knx": json_obj['data']['interface_arg_knx'],
                "interface_arg_EnOcean": json_obj['data']['interface_arg_EnOcean'],
                "daemon_knx": json_obj['data']['daemon_knx']
            }
        );
        encode_obj = AES.new(aes_key, AES.MODE_CBC, aes_IV);
        # Space-pad up to the 16-byte AES block size; the IV is prepended in clear.
        spaces = 16 - len(obj_to_send) % 16;
        sock.send(bytes(aes_IV, 'utf-8') + encode_obj.encrypt(obj_to_send + (spaces * ' ')));
        rlist, wlist, elist = select.select([sock], [], [], SELECT_TIMEOUT * 300);
        re = '';
        # NOTE(review): recv is issued without checking rlist, so this call
        # can block if the slave never answers — confirm intended.
        data = sock.recv(4096);
        if data:
            # Reply layout: 16-byte IV in clear, then AES-CBC ciphertext.
            decrypt_IV = data[:16].decode();
            host = None;
            for h in self.hostlist:
                if h._IpAddr == ip:
                    host = h;
            decode_obj = AES.new(res[0][1], AES.MODE_CBC, decrypt_IV);
            data2 = decode_obj.decrypt(data[16:]).decode();
            resp = json.JSONDecoder().decode(data2);
            hostname = host._Hostname;
            if '.' in host._Hostname:
                hostname = host._Hostname.split('.')[0];
            # The slave proves its identity by echoing the shared aes_pass.
            if str(self.aes_slave_keys[hostname]) == str(resp['aes_pass']):
                re = '1';
            connection.send(bytes(re, 'utf-8'));
        connection.close();
        sock.close();

    def shutdown_d3(self, json_obj, connection, db):
        """
        Asks "shutdown_d3" to the slave described in json_obj for shutdown daemon.

        Fire-and-forget: the AES-CBC encrypted packet is sent and both
        sockets are closed without waiting for a reply.
        """
        query = ''.join(["SELECT serial, secretkey FROM daemon WHERE daemon_id=", str(json_obj['data']['daemon_id'])]);
        res = self.sql.mysql_handler_personnal_query(query, db);
        # Exactly one daemon row must match the requested id.
        if res is None or not res:
            self.logger.error('in shutdown_d3: No daemon for id '+str(json_obj['data']['daemon_id']));
            connection.close();
            return ;
        elif len(res) > 1:
            self.logger.error('in shutdown_d3: Too much daemons for id '+str(json_obj['data']['daemon_id']));
            connection.close();
            return ;
        hostname = res[0][0];
        # Resolve the slave IP from the host list built by the network scan.
        ip = '';
        for h in self.hostlist:
            if hostname in h._Hostname.upper():
                ip = h._IpAddr;
        if not ip:
            self.logger.error('in shutdown_d3: '+hostname+' not in hostlist. Try perform network scan again.');
            connection.close();
            return ;
        port = self._parser.getValueFromSection('connect', 'port');
        sock = socket.create_connection((ip, port));
        self_hostname = socket.gethostname();
        # Strip the domain part; only the short hostname is sent.
        if '.' in self_hostname:
            self_hostname = self_hostname.split('.')[0];
        aes_IV = AESManager.get_IV();
        aes_key = self.get_secret_key(hostname);
        obj_to_send = ''.join(['{"packet_type": "shutdown_d3", "sender_name": "', self_hostname, '"}']);
        encode_obj = AES.new(aes_key, AES.MODE_CBC, aes_IV);
        # Space-pad up to the 16-byte AES block size; the IV is prepended in clear.
        spaces = 16 - len(obj_to_send) % 16;
        sock.send(bytes(aes_IV, 'utf-8') + encode_obj.encrypt(obj_to_send + (spaces * ' ')));
        connection.close();
        sock.close();

    def reboot_d3(self, json_obj, connection, db):
        """
        Asks "reboot_d3" to the slave described in json_obj for reboot daemon.

        Fire-and-forget: the AES-CBC encrypted packet is sent and both
        sockets are closed without waiting for a reply.
        """
        query = ''.join(["SELECT serial, secretkey FROM daemon WHERE daemon_id=", str(json_obj['data']['daemon_id'])]);
        res = self.sql.mysql_handler_personnal_query(query, db);
        # Exactly one daemon row must match the requested id.
        if res is None or not res:
            self.logger.error('in reboot_d3: No daemon for id '+str(json_obj['data']['daemon_id']));
            connection.close();
            return ;
        elif len(res) > 1:
            self.logger.error('in reboot_d3: Too much daemons for id '+str(json_obj['data']['daemon_id']));
            connection.close();
            return ;
        hostname = res[0][0];
        # Resolve the slave IP from the host list built by the network scan.
        ip = '';
        for h in self.hostlist:
            if hostname in h._Hostname.upper():
                ip = h._IpAddr;
        if not ip:
            self.logger.error('in reboot_d3: '+hostname+' not in hostlist. Try perform network scan again.');
            connection.close();
            return ;
        port = self._parser.getValueFromSection('connect', 'port');
        sock = socket.create_connection((ip, port));
        self_hostname = socket.gethostname();
        # Strip the domain part; only the short hostname is sent.
        if '.' in self_hostname:
            self_hostname = self_hostname.split('.')[0];
        aes_IV = AESManager.get_IV();
        aes_key = self.get_secret_key(hostname);
        obj_to_send = ''.join(['{"packet_type": "reboot_d3", "sender_name": "', self_hostname, '"}']);
        encode_obj = AES.new(aes_key, AES.MODE_CBC, aes_IV);
        # Space-pad up to the 16-byte AES block size; the IV is prepended in clear.
        spaces = 16 - len(obj_to_send) % 16;
        sock.send(bytes(aes_IV, 'utf-8') + encode_obj.encrypt(obj_to_send + (spaces * ' ')));
        connection.close();
        sock.close();

    def wifi_update(self, json_obj, connection, db):
        """
        Send "wifi_update" to the slave described in json_obj for update the wifi configuration.

        Sends an AES-CBC encrypted packet with ssid/password/security/mode to
        the slave, then relays '1' back to the client when the slave's reply
        authenticates against the stored AES key.
        """
        query = ''.join(["SELECT serial, secretkey FROM daemon WHERE daemon_id=", str(json_obj['data']['daemon_id'])]);
        res = self.sql.mysql_handler_personnal_query(query, db);
        # Exactly one daemon row must match the requested id.
        if res is None or not res:
            self.logger.error('in wifi_update: No daemon for id '+str(json_obj['data']['daemon_id']));
            connection.close();
            return ;
        elif len(res) > 1:
            self.logger.error('in wifi_update: Too much daemons for id '+str(json_obj['data']['daemon_id']));
            connection.close();
            return ;
        hostname = res[0][0];
        # Resolve the slave IP from the host list built by the network scan.
        ip = '';
        for h in self.hostlist:
            if hostname in h._Hostname.upper():
                ip = h._IpAddr;
        if not ip:
            self.logger.error('in wifi_update: '+hostname+' not in hostlist. Try perform network scan again.');
            connection.close();
            return ;
        port = self._parser.getValueFromSection('connect', 'port');
        sock = socket.create_connection((ip, port));
        self_hostname = socket.gethostname();
        if '.' in self_hostname:
            self_hostname = self_hostname.split('.')[0];
        aes_IV = AESManager.get_IV();
        aes_key = self.get_secret_key(hostname);
        # BUGFIX: the packet previously carried the literal string "******"
        # as the password instead of the value from the request payload.
        obj_to_send = ''.join(['{"packet_type": "wifi_update", "sender_name": "', str(self_hostname),
              '", "ssid": "', str(json_obj['data']['ssid']),
              '", "password": "', str(json_obj['data']['password']),
              '", "security": "', str(json_obj['data']['security']),
              '", "mode": "', str(json_obj['data']['mode']), '"}']);
        encode_obj = AES.new(aes_key, AES.MODE_CBC, aes_IV);
        # Space-pad up to the 16-byte AES block size; the IV is prepended in clear.
        spaces = 16 - len(obj_to_send) % 16;
        sock.send(bytes(aes_IV, 'utf-8') + encode_obj.encrypt(obj_to_send + (spaces * ' ')));
        rlist, wlist, elist = select.select([sock], [], [], SELECT_TIMEOUT * 300);
        re = '';
        for s in rlist:
            data = sock.recv(4096);
            if not data:
                continue;
            # Reply layout: 16-byte IV in clear, then AES-CBC ciphertext.
            decrypt_IV = data[:16].decode();
            host = None;
            for h in self.hostlist:
                if h._IpAddr == ip:
                    host = h;
            decode_obj = AES.new(res[0][1], AES.MODE_CBC, decrypt_IV);
            data2 = decode_obj.decrypt(data[16:]).decode();
            resp = json.JSONDecoder().decode(data2);
            hostname = host._Hostname;
            if '.' in host._Hostname:
                hostname = host._Hostname.split('.')[0];
            # The slave proves its identity by echoing the shared aes_pass.
            if str(self.aes_slave_keys[hostname]) == str(resp['aes_pass']):
                re = '1';
            connection.send(bytes(re, 'utf-8'));
        connection.close();
        sock.close();
    
    def remote_sql(self, json_obj, connection):
        """
        Execute sql command from configurator

        Runs each non-empty, semicolon-separated statement from
        json_obj['data'], then closes the client connection.
        NOTE(review): this executes arbitrary SQL from the peer by design —
        confirm access to this packet type is restricted.
        """
        handler = MasterSql();
        for statement in json_obj['data'].split(';'):
            if statement != '':
                handler.mysql_handler_personnal_query(statement);
        connection.close();
        return;
示例#39
0
 def __init__(self, size, data=None):
     """
     Builds a population of 'size' schedules, each freshly initialized.

     BUGFIX: the original assigned 'self._data = data' where 'data' was an
     undefined name (NameError at runtime); it is now an optional,
     backward-compatible parameter defaulting to None.
     """
     self._size = size
     self._data = data
     self._schedules = []
     for i in range(0, size):
         self._schedules.append(Schedule.Schedule().initialize())
示例#40
0
    def __init__(self, log_flag):
        """
        Master daemon bootstrap: loads configuration, wires the managers and
        helper objects, and builds the two dispatch tables used at runtime.

        log_flag -- verbosity flag forwarded to the Logger.
        """
        self.logger = Logger(log_flag, LOG_FILE);
        self.logger.info('Started Domoleaf Master Daemon');
        self.d3config = {};
        self.aes_slave_keys = {};
        self.aes_master_key = None
        self.connected_clients = {};
        self.sql = MasterSql();
        self._parser = DaemonConfigParser(MASTER_CONF_FILE);
        # MySQL credentials read from the master configuration file.
        self.db_username = self._parser.getValueFromSection(MASTER_CONF_MYSQL_SECTION, MASTER_CONF_MYSQL_USER_ENTRY);
        self.db_passwd = self._parser.getValueFromSection(MASTER_CONF_MYSQL_SECTION, MASTER_CONF_MYSQL_PASSWORD_ENTRY);
        self.db_dbname = self._parser.getValueFromSection(MASTER_CONF_MYSQL_SECTION, MASTER_CONF_MYSQL_DB_NAME_ENTRY);
        self.get_aes_slave_keys(0);
        # Regenerate the nginx camera config and load the port configuration.
        self.reload_camera(None, None, 0);
        self._scanner = Scanner();
        # The master itself is always the first entry of the host list.
        self.hostlist = [];
        self.hostlist.append(Host('', '127.0.0.1', socket.gethostname().upper()));
        self.knx_manager = KNXManager(self.aes_slave_keys);
        self.enocean_manager = EnOceanManager(self.aes_slave_keys);
        self.reload_d3config(None, None, 0);
        self.trigger = Trigger(self);
        self.scenario = Scenario(self);
        self.schedule = Schedule(self);
        self.calcLogs = CalcLogs(self);

        # Dispatch table: device 'function_writing' id -> device handler.
        self.functions = {
              1 : self.knx_manager.send_knx_write_short_to_slave,
              2 : self.knx_manager.send_knx_write_long_to_slave,
              3 : self.knx_manager.send_knx_write_speed_fan,
              4 : self.knx_manager.send_knx_write_temp,
              5 : IP_IRManager().send_to_gc,
              6 : self.knx_manager.send_on,
              7 : self.knx_manager.send_to_thermostat,
              8 : self.knx_manager.send_clim_mode,
              9 : HttpReq().http_action,
             10 : self.upnp_audio,
             11 : self.knx_manager.send_knx_write_percent,
             12 : self.knx_manager.send_off,
             13 : self.knx_manager.send_knx_write_short_to_slave_r,
        };
        # Dispatch table: incoming packet type -> callback method.
        self.data_function = {
            DATA_MONITOR_KNX                  : self.monitor_knx,
            DATA_MONITOR_IP                   : self.monitor_ip,
            DATA_MONITOR_ENOCEAN              : self.monitor_enocean,
            DATA_MONITOR_BLUETOOTH            : self.monitor_bluetooth,
            DATA_KNX_READ                     : self.knx_read,
            DATA_KNX_WRITE_S                  : self.knx_write_short,
            DATA_KNX_WRITE_L                  : self.knx_write_long,
            DATA_SEND_TO_DEVICE               : self.send_to_device,
            DATA_CRON_UPNP                    : self.cron_upnp,
            DATA_SEND_MAIL                    : self.send_mail,
            DATA_MODIF_DATETIME               : self.modif_datetime,
            DATA_CHECK_SLAVE                  : self.check_slave,
            DATA_RELOAD_CAMERA                : self.reload_camera,
            DATA_RELOAD_D3CONFIG              : self.reload_d3config,
            DATA_BACKUP_DB_CREATE_LOCAL       : self.backup_db_create_local,
            DATA_BACKUP_DB_REMOVE_LOCAL       : self.backup_db_remove_local,
            DATA_BACKUP_DB_LIST_LOCAL         : self.backup_db_list_local,
            DATA_BACKUP_DB_RESTORE_LOCAL      : self.backup_db_restore_local,
            DATA_CHECK_USB                    : self.check_usb,
            DATA_BACKUP_DB_CREATE_USB         : self.backup_db_create_usb,
            DATA_BACKUP_DB_REMOVE_USB         : self.backup_db_remove_usb,
            DATA_BACKUP_DB_LIST_USB           : self.backup_db_list_usb,
            DATA_BACKUP_DB_RESTORE_USB        : self.backup_db_restore_usb,
            DATA_SMARTCMD_LAUNCH              : self.smartcmd_launch,
            DATA_TRIGGERS_LIST_UPDATE         : self.triggers_list_update,
            DATA_SCHEDULES_LIST_UPDATE        : self.schedules_list_update,
            DATA_SCENARIOS_LIST_UPDATE        : self.scenarios_list_update,
            DATA_CHECK_ALL_SCHEDULES          : self.check_schedules,
            DATA_CALC_LOGS                    : self.launch_calc_logs,
            DATA_CHECK_UPDATES                : self.check_updates,
            DATA_UPDATE                       : self.update,
            DATA_SEND_ALIVE                   : self.send_request,
            DATA_SEND_TECH                    : self.send_tech,
            DATA_SEND_INTERFACES              : self.send_interfaces,
            DATA_SHUTDOWN_D3                  : self.shutdown_d3,
            DATA_REBOOT_D3                    : self.reboot_d3,
            DATA_WIFI_UPDATE                  : self.wifi_update,
            DATA_REMOTE_SQL                   : self.remote_sql
        };
示例#41
0
    def greedy_scheduler(self, cost_threshold):
        """
        Greedy schedule search: start every app at its cheapest setting, then
        repeatedly apply the upgrade with the largest cost/benefit ratio that
        improves the metric and still fits 'cost_threshold'.  Returns the
        average metric of the final schedule (via set_schedule_values).
        """
        # Makes schedule with optimal choices for num_frozen and target_fps
        # Sets self.schedule, self.num_frozen_list, self.target_fps_list

        cost_benefits = self.get_cost_benefits()
        target_fps_options = range(1, self.stream_fps + 1)

        # Seed: lowest fps and the max num_frozen are taken as the cheapest
        # starting configuration for every app.
        current_schedule = []
        for app in self.apps:
            num_frozen_options = app["accuracies"].keys()
            cheapest_target_fps = min(target_fps_options)
            cheapest_num_frozen = max(num_frozen_options)
            current_schedule.append(
                Schedule.ScheduleUnit(app, cheapest_target_fps,
                                      cheapest_num_frozen))

        ## Make moves in order of maximal cost/benefit
        ## which decrease the metric and fit the budget
        updated = True  # Stopping condition
        while (updated):
            updated = False
            # Get next best change to schedule
            # Upgrade is (target_fps, #frozen) with larger
            # cost and largest cost/benefit across all apps
            max_cost_benefit = 0
            best_new_unit = -1
            for unit in current_schedule:
                cur_target_fps = unit.target_fps
                cur_num_frozen = unit.num_frozen
                app_id = unit.app_id
                app = unit.app
                num_frozen_options = app["accuracies"].keys()
                cur_metric = self.get_metric(app, cur_num_frozen,
                                             cur_target_fps)

                for potential_target_fps in target_fps_options:
                    for potential_num_frozen in sorted(num_frozen_options):
                        # Skip if it is not a change
                        u_apps = [
                            u for u in current_schedule if u.app_id == app_id
                        ]
                        if (u_apps[0].num_frozen == potential_num_frozen and
                                u_apps[0].target_fps == potential_target_fps):
                            continue

                        cost_benefit_tup = \
                            cost_benefits[app_id][potential_num_frozen][potential_target_fps]
                        cost_benefit = cost_benefit_tup[1] / float(
                            cost_benefit_tup[0])
                        potential_metric = self.get_metric(
                            app, potential_num_frozen, potential_target_fps)
                        # Candidate must both improve the metric and beat the
                        # best cost/benefit ratio found so far this round.
                        if potential_metric < cur_metric and cost_benefit > max_cost_benefit:

                            # Check that move its within budget
                            potential_unit = Schedule.ScheduleUnit(
                                app, potential_target_fps,
                                potential_num_frozen)
                            # Rebuild the whole schedule with the candidate
                            # unit substituted for this app's current unit.
                            potential_schedule = []
                            for c_unit in current_schedule:
                                if c_unit.app_id == potential_unit.app_id:
                                    potential_schedule.append(potential_unit)
                                else:
                                    copy_unit = Schedule.ScheduleUnit(
                                        c_unit.app, c_unit.target_fps,
                                        c_unit.num_frozen)
                                    potential_schedule.append(copy_unit)
                            potential_sched_cost = scheduler_util.get_cost_schedule(
                                potential_schedule, self.model.layer_latencies,
                                self.model.final_layer)

                            if potential_sched_cost <= cost_threshold:
                                cost = potential_sched_cost
                                max_cost_benefit = cost_benefit
                                best_new_unit = potential_unit
                                best_new_schedule = potential_schedule
                                updated = True

            if updated:
                current_schedule = best_new_schedule

        average_metric = self.set_schedule_values(current_schedule)

        return average_metric
示例#42
0
    def hifi_scheduler(self, cost_threshold):
        cost_benefits = self.get_cost_benefits()

        target_fps_options = range(1, self.stream_fps + 1)

        agg_func = operator.add
        # for max-min
        # agg_func = min
        dp = {}

        cc = Counter()

        def relax2(curr, best_by_budget, curr_cost, curr_goodness, c_unit,
                   threshold):
            # curr/best_by_budget: [(benefit, min_cost), (benefit_lower, min_cost_lower)]
            vals = []
            for prev_goodness, prev_budget, info in reversed(best_by_budget):
                new_budget = prev_budget + curr_cost
                # Pruning
                if new_budget > threshold:
                    break
                new_goodness = agg_func(prev_goodness, curr_goodness)
                new_budget = int(new_budget * 50) / 50.
                new_goodness = int(new_goodness * 1000) / 1000.
                # new_budget = round(new_budget, 1)
                # new_goodness = round(new_goodness, 3)
                # print (new_goodness, new_budget)
                vals.append((new_goodness, new_budget, {
                    'unit': c_unit,
                    'prev': info
                }))
                # vals.append((new_goodness, new_budget, {'schedule': info['schedule'] + [c_unit]}))
            if len(curr) == 0:
                return vals
            elif len(vals) == 0:
                return curr
            # ret = scheduler_util.make_monotonic(curr + vals)
            ret = scheduler_util.merge_monotonic(curr, list(reversed(vals)))
            # cc[(len(curr), len(vals), len(ret))] += 1
            return ret

        for i, app in enumerate(self.apps):
            num_frozen_options = sorted(app["accuracies"].keys())
            combos = itertools.product(target_fps_options, num_frozen_options)

            for c_fps, c_frozen in combos:
                c_cost, c_benefit = cost_benefits[
                    app["app_id"]][c_frozen][c_fps]
                c_benefit = 1. - c_benefit
                c_unit = Schedule.ScheduleUnit(app, c_fps, c_frozen)
                if i == 0:
                    stem = scheduler_util.SharedStem([(c_frozen, c_fps)],
                                                     self.model)
                    assert stem not in dp
                    if stem.cost + c_cost < cost_threshold:
                        dp[stem] = [(c_benefit, c_cost, {
                            'unit': c_unit,
                            'prev': None
                        })]
                        # dp[stem] = [(c_benefit, c_cost, {'schedule': [c_unit]})]
                else:
                    for stem, best_by_budget in dp_prev.iteritems():
                        new_stem = stem.relax(c_frozen, c_fps)
                        assert new_stem.cost >= stem.cost
                        result = relax2(dp.get(new_stem, []), best_by_budget,
                                        c_cost, c_benefit, c_unit,
                                        cost_threshold - new_stem.cost)
                        if len(result) > 0:
                            dp[new_stem] = result

            print '{} apps'.format(i + 1)
            print 'Unique stems:', len(dp)
            lens_budgets_by_stem = map(len, dp.values())
            budgets_by_stem = Counter(lens_budgets_by_stem)
            print 'Total DP values', sum(lens_budgets_by_stem)
            budgets = [y[1] for x in dp.values() for y in x]
            goodnesses = [y[0] for x in dp.values() for y in x]
            cnt_budgets = Counter(budgets)
            cnt_goodness = Counter(goodnesses)

            def bucket_stats(vals):
                ret = [Counter(map(int, vals))]
                return ret + [
                    Counter(map(lambda x: int(x * k) / k, vals))
                    for k in [10., 100., 1000., 10000.]
                ]

            cnt_budgets_buckets = bucket_stats(budgets)
            cnt_goodness_buckets = bucket_stats(goodnesses)
            print 'Unique budgets:', len(cnt_budgets)
            print 'Budget buckets by int, .1, .01, .001:', map(
                len, cnt_budgets_buckets)
            print 'Unique goodness scores', len(cnt_goodness)
            print 'Goodness buckets by int, .1, .01, .001:', map(
                len, cnt_goodness_buckets)
            print 'Budgets per stem', budgets_by_stem
            # print 'Budgets:', ', '.join(map('{:.0f}'.format, sorted(cnt_budgets.keys(), reverse=True)))
            # print 'Budgets:', sorted(map(int, cnt_budgets.keys()), reverse=True)
            print 'Budgets by ints:', cnt_budgets_buckets[0]
            # print 'Some budgets:', map('{:g}'.format, sorted(cnt_budgets.keys()))
            # print 'Num of DP values by budget', sorted(cnt_budgets.values(), reverse=True)
            # print 'Num of DP values by goodness', sorted(cnt_goodness.values(), reverse=True)
            # print 'curr, vals:', cc
            cc.clear()
            print

            dp_prev = dp
            dp = {}

        options = []
        for stem, best_by_budget in dp_prev.iteritems():
            options += [(goodness, budget + stem.cost, info)
                        for goodness, budget, info in best_by_budget
                        if budget + stem.cost <= cost_threshold]
        results = scheduler_util.make_monotonic(options)

        def extract_schedule(info_dct):
            schedule = [info_dct['unit']]
            while info_dct['prev'] is not None:
                info_dct = info_dct['prev']
                schedule.insert(0, info_dct['unit'])
            return schedule

        best_result = results[0]
        print 'Best:', best_result[:2]
        # best_schedule = best_result[2]['schedule']
        best_schedule = extract_schedule(best_result[2])
        print 'Schedule cost:', scheduler_util.get_cost_schedule(
            best_schedule, self.model.layer_latencies, self.model.final_layer)
        avg_metric = self.set_schedule_values(best_schedule)
        return avg_metric
示例#43
0
 def __init__(self, personnel_ID):
     """Initialize this person with an ID and an associated Schedule.

     Args:
         personnel_ID: identifier forwarded to the parent initializer and
             used to build this instance's Schedule.
     """
     # BUG FIX: the original called `super.__init__(personnel_ID)`, which
     # invokes __init__ on the builtin `super` type object itself and raises
     # a TypeError at runtime. `super()` must be *called* first.
     # NOTE(review): zero-argument super() requires Python 3; under Python 2
     # use super(ClassName, self) with the enclosing class name.
     super().__init__(personnel_ID)
     self.schedule = Schedule(personnel_ID)
示例#44
0
    def testObjectiveFunction(self):
        """Verify Schedule.makespan for known job orderings on three instances."""

        def check(instance, job_order, expected):
            # Build a schedule by adding jobs in the given order, then assert
            # that the resulting makespan matches the expected value.
            sched = Schedule(instance)
            for job in job_order:
                sched.addJob(job)
            self.assert_(sched.makespan == expected,
                         "Makespan: %i" % sched.makespan)

        # Testing the presentation instance
        instance = JSSPInstance("../jssp_instances/transparencia.txt")
        check(instance, [0, 1, 2], 30)
        check(instance, [0, 2, 1], 34)
        check(instance, [1, 0, 2], 29)
        check(instance, [2, 1, 0], 26)

        # Testing the Abz5 instance. Makespan value sent from e-mail list
        instance = JSSPInstance("../jssp_instances/abz5.txt")
        check(instance, [7, 0, 2, 9, 6, 8, 4, 5, 1, 3], 1544)

        # Prof. Valdisio said that this is the best known makespan: 1544.
        # But we found 1731.
        check(instance, [5, 9, 3, 4, 6, 2, 7, 0, 1, 8], 1731)

        # Testing the Car5 instance. Makespan value sent from e-mail list
        instance = JSSPInstance("../jssp_instances/Car5.txt")
        check(instance, [4, 3, 2, 0, 5, 1, 8, 6, 9, 7], 7822)
        return
示例#45
0
 def __init__(self, username):
     """Initialize this user with a username and an associated Schedule.

     Args:
         username: identifier forwarded to the parent initializer and used
             to build this instance's Schedule.
     """
     # BUG FIX: the original called `super.__init__(username)`, which invokes
     # __init__ on the builtin `super` type object itself and raises a
     # TypeError at runtime. `super()` must be *called* first.
     # NOTE(review): zero-argument super() requires Python 3; under Python 2
     # use super(ClassName, self) with the enclosing class name.
     super().__init__(username)
     self.schedule = Schedule(username)
示例#46
0
# Parse the raw process descriptions and set up the simulation state.
# NOTE(review): `text`, `alg`, `option`, `qFlag`, and `parseProcess` are
# defined earlier in the file, outside this excerpt — confirm their contracts
# there. Expected input format: first line is the process count N, followed
# by one process description per line.
Procs = text.split('\n')
N = int(Procs[0])
Procs = Procs[1:len(Procs)]

# Parse each description line into a process object.
P = []
for i in range(0, N):
    P.append(parseProcess(Procs[i]))

# Determine when the simulation ends: the latest time appearing in any
# process's schedule (presumably P[i].sched is a list of time intervals —
# TODO confirm against parseProcess).
lastTime = []
for i in range(0, N):
    lastTime.append(max(P[i].sched[-1]))
ENDTIME = max(lastTime)

mainSched = Schedule(P)
mem = Memory(alg)


# MAIN SIMULATION LOOP ********************
# Print a banner and the run parameters before entering the loop.
print( '-' * 80)
print('Starting Simulation...\n')
print('Simulation Parameters:')
print('Algorithm'.ljust(20) + 'Number of Procs'.ljust(20) + 'Quiet Mode'.ljust(20))
print(option.capitalize().ljust(20) + str(N).ljust(20) + str(qFlag).ljust(20))

currentTime = 0
promptTime = 0
procCount = 0
# NOTE(review): the body of this loop is truncated in this excerpt; only the
# qFlag guard is visible here.
while True:
    if not qFlag:
示例#47
0
文件: app.py 项目: jplamb/FantasyCFB
def con_print_schedule():
	"""Build a placeholder Schedule (blank team/year) and print it."""
	placeholder = Schedule(' ',' ')
	placeholder.print_schedule()
示例#48
0
def main():
    """Start the Qt application, run the Schedule Excel import, and enter
    the event loop."""
    application = QtGui.QApplication(sys.argv)
    importer = Schedule.import_excel()
    importer.saveFile1.test()
    application.exec_()