def s(self):
    """
    Build the coefficient matrix of the boundary-condition system.

    Rows are assembled in three groups:
      * rows 0 .. 2*M1-1                         -- first interface (layer 0),
      * rows 2*M1*j .. 2*M1*(j+1)-1, j=1..K-1    -- inner interfaces,
      * rows 2*M1*K ..                           -- last interface (layer K).

    Uses self.wf (coupling matrix W), self.psi1 / self.psi2 (per-layer
    matrices) and delta -- presumably the Kronecker delta, judging from how
    it is placed as identity blocks here; confirm against its definition.

    NOTE(review): the n/m loops cover M+1 indices per block while blocks are
    spaced M1 apart -- this presumes M1 == M + 1; confirm.

    :return: complex numpy array of shape
             [2*M1*K + M + M1 + 1, M1*(2*K - 1) + M + 2*M1 + 1]
    """
    result = np.zeros(
        [2 * self.M1 * self.K + self.M + self.M1 + 1,
         self.M1 * (2 * self.K - 1) + self.M + 2 * self.M1 + 1], complex)
    # --- first interface: identity / W / W blocks ---
    w = self.wf(0)
    for n in xrange(0, self.M + 1):
        for m in xrange(0, self.M + 1):
            result[n, m] = delta(n, m)
            result[n, m + self.M1] = - w[n, m]
            result[n, m + 2 * self.M1] = - w[n, m]
            result[n + self.M1, m] = - w.transpose()[n, m]
            result[n + self.M1, m + self.M1] = - delta(n, m)
            result[n + self.M1, m + 2 * self.M1] = delta(n, m)
    # --- inner interfaces (only when there is more than one layer) ---
    if self.K > 1:
        for j in xrange(1, self.K):
            w = self.wf(j)
            p1 = self.psi1(j)
            p2 = self.psi2(j)
            for n in xrange(0, self.M + 1):
                for m in xrange(0, self.M + 1):
                    result[2 * self.M1 * j + n, 2 * self.M1 * j + m - self.M1] = p1[n, m]
                    result[2 * self.M1 * j + n, 2 * self.M1 * j + m + 0] = p2[n, m]
                    result[2 * self.M1 * j + n, 2 * self.M1 * j + m + 1 * self.M1] = - w[n, m]
                    result[2 * self.M1 * j + n, 2 * self.M1 * j + m + 2 * self.M1] = - w[n, m]
                    result[2 * self.M1 * j + n + self.M1, 2 * self.M1 * j + m - self.M1] = \
                        np.dot(w.transpose(), p1)[n, m]
                    result[2 * self.M1 * j + n + self.M1, 2 * self.M1 * j + m + 0] = \
                        np.dot(- w.transpose(), p2)[n, m]
                    result[2 * self.M1 * j + n + self.M1, 2 * self.M1 * j + m + 1 * self.M1] = - delta(n, m)
                    result[2 * self.M1 * j + n + self.M1, 2 * self.M1 * j + m + 2 * self.M1] = delta(n, m)
    # --- last interface: columns anchored at M1*(2K - 1) ---
    w = self.wf(self.K)
    p1 = self.psi1(self.K)
    p2 = self.psi2(self.K)
    for n in xrange(0, self.M + 1):
        for m in xrange(0, self.M + 1):
            result[2 * self.M1 * self.K + n, 1 * self.M1 * (2 * self.K - 1) + m - 0] = p1[n, m]
            result[2 * self.M1 * self.K + n, 1 * self.M1 * (2 * self.K - 1) + m + 1 * self.M1] = p2[n, m]
            result[2 * self.M1 * self.K + n, 1 * self.M1 * (2 * self.K - 1) + m + 2 * self.M1] = - w[n, m]
            result[2 * self.M1 * self.K + n + self.M1, 1 * self.M1 * (2 * self.K - 1) + m - 0] = \
                np.dot(w.transpose(), p1)[n, m]
            result[2 * self.M1 * self.K + n + self.M1, 1 * self.M1 * (2 * self.K - 1) + m + 1 * self.M1] = \
                np.dot(- w.transpose(), p2)[n, m]
            # delta(m, n) -- argument order differs from the other calls;
            # harmless if delta is the (symmetric) Kronecker delta.
            result[2 * self.M1 * self.K + n + self.M1, 1 * self.M1 * (2 * self.K - 1) + m + 2 * self.M1] = - delta(m, n)
    return result
def getMaxReach(avePos):
    """Return the largest ut.delta distance from avePos to any strut endpoint.

    Scans every strut in the global STRUTS mapping; returns 0 when there are
    no struts at all.
    """
    farthest = 0
    for endpoints in STRUTS.values():
        for endpoint in endpoints:
            farthest = max(farthest, ut.delta(endpoint, avePos))
    return farthest
def getDistances():
    """Recompute the global DISTANCES list from SPANS.

    Each entry is ut.delta(span[0][1], span[1][1]) for the corresponding span.
    The original cleared the old list and then rebound the global to a brand
    new list, so any outside alias of DISTANCES was left holding an empty
    list. Updating the list in place keeps all aliases consistent.
    """
    distances = [ut.delta(span[0][1], span[1][1]) for span in SPANS]
    global DISTANCES
    # in-place replacement: same list object, new contents
    DISTANCES[:] = distances
def calcula_topographic_error(mapa):
    """Return the topographic error of the map: the fraction of objects whose
    best and second-best clusters lie farther than 2 apart (util.delta).

    :param mapa: object exposing `objetos`, each with `cluster` and `segundo`
                 attributes that carry a `point`.
    :return: float in [0, 1]; 0.0 for an empty map (instead of raising
             ZeroDivisionError as the original did).
    """
    tam = len(mapa.objetos)
    if tam == 0:
        return 0.0
    erro = 0
    for objeto in mapa.objetos:
        # distance between the best-matching and second-best cluster points
        distancia = util.delta(objeto.cluster.point, objeto.segundo.point)
        if distancia > 2:
            erro += 1
    # float() guards against silent truncation under Python 2 integer division
    return erro / float(tam)
def energy_E2(V):
    """Compute the scalar E2 energy over the 4-neighbour pixel pairs (P, Q).

    Pairs come from util.PQ_N4(I, P); each pair contributes
    delta(V_P, V_Q) * max(S_P, S_Q) / d, with d the hue distance in radians.
    """
    p_idx, q_idx = util.PQ_N4(I, P)
    v_p = V[p_idx[0], p_idx[1]]
    v_q = V[q_idx[0], q_idx[1]]
    s_p = S[p_idx[0], p_idx[1]]
    s_q = S[q_idx[0], q_idx[1]]
    h_p = H[p_idx[0], p_idx[1]]
    h_q = H[q_idx[0], q_idx[1]]
    value_term = util.delta(v_p, v_q)
    saturation_term = np.max((s_p, s_q), axis=0)
    hue_dist = np.deg2rad(util.deg_distance(h_p, h_q))
    contributions = np.multiply(np.multiply(value_term, saturation_term),
                                np.reciprocal(hue_dist))
    return np.sum(contributions)
def energy_E2(self, X, V, P):
    """Compute the E2 energy term for image X.

    :param X: HSV image as stored by OpenCV (H halved into [0, 180),
              S as bytes in [0, 255]).
    :param V: value-channel array, indexed like X.
    :param P: pixel set forwarded to PQ_N4.
    :return: scalar sum over 4-neighbour pairs of
             delta(V_P, V_Q) * max(S_P, S_Q) / deg_distance(H_P, H_Q).
    """
    # OpenCV stores H halved to fit a byte: [0, 180) --> [0, 360)
    H = X[:, :, 0].astype(np.int32) * 2
    # OpenCV stores S as [0, 255] --> rescale to [0, 1]
    S = X[:, :, 1].astype(np.float32) / 255.0
    P_set, Q_set = PQ_N4(X, P)
    V_P = V[P_set[0], P_set[1]]
    V_Q = V[Q_set[0], Q_set[1]]
    S_P = S[P_set[0], P_set[1]]
    S_Q = S[Q_set[0], Q_set[1]]
    H_P = H[P_set[0], P_set[1]]
    H_Q = H[Q_set[0], Q_set[1]]
    # Bug fix: the original read `util.delta(V_p, V_q)` -- lowercase names
    # that are never defined, so this line always raised NameError. The
    # upper-case slices above are the intended operands.
    delta = util.delta(V_P, V_Q)
    s_max = np.max((S_P, S_Q), axis=0)
    # NOTE(review): the distance is left in degrees here, while a sibling
    # implementation converts with np.deg2rad -- confirm which unit is
    # intended. Also np.reciprocal truncates on integer input; confirm
    # util.deg_distance returns floats.
    d = util.deg_distance(H_P, H_Q)
    e2 = np.multiply(np.multiply(delta, s_max), np.reciprocal(d))
    e2 = np.sum(e2)
    return e2
def __repr__(self):
    """Readable job summary: ID, submit/start/end times, run time, limit, procs."""
    # start/end fall back to the (falsy) flag value until that phase happened
    start = self.start_time if self.started else self.started
    finish = self.end_time if self.completed else self.completed
    return 'Job {} [{} -> {} -> {} : run {} limit {} proc {}]'.format(
        self.ID,
        delta(self.submit),
        delta(start),
        delta(finish),
        delta(self.run_time),
        delta(self.time_limit),
        self.proc,
    )
def __repr__(self):
    """Readable campaign summary: IDs, timing figures and job counts."""
    template = 'Camp {} {} [created {} work {} left {} : jobs {} {}]'
    fields = (
        self.ID,
        self.user.ID,
        delta(self.created),
        delta(self.workload),
        delta(self.time_left),
        len(self.active_jobs),
        len(self.completed_jobs),
    )
    return template.format(*fields)
def run(self):
    """Proceed with the simulation.

    Drives the event loop until all jobs are submitted and the event queue
    drains, then finalizes per-user results.

    :return: tuple (serialized results string, diagnostics object).
    """
    # Magic value taken from slurm/multifactor plugin.
    self._decay_factor = 1 - (0.693 / self._settings.decay)
    # Note:
    #   The CPU usage decay is always applied after each event.
    #   There is also a dummy `force_decay` event inserted into
    #   the queue to force the calculations in case the gap
    #   between consecutive events would be too long.
    # TODO: turn the apply-decay step into a background thread,
    #       the same way backfilling works?
    # TODO: that would require keeping a "temporary" usage somewhere
    #       separately and merging it into the main one only when
    #       that "thread" actually runs.
    self._force_period = 60 * 5
    self._initialize()
    sub_iter = sub_count = 0          # jobs pushed to the queue so far / in flight
    sub_total = len(self._block)
    end_iter = 0                      # jobs that finished (or were skipped)
    schedule = backfill = False
    # backfill immediately after each scheduling pass when no interval is set
    instant_bf = self._settings.bf_depth and not self._settings.bf_interval
    # the first job submission is the simulation 'time zero'
    prev_event = self._block[0].submit
    self._diag.prev_util["time"] = prev_event
    visual_update = 60  # notify the user about the progress
    # TODO: besides time-based updates, also report every 25% of progress
    next_visual = time.time() + visual_update
    while sub_iter < sub_total or not self._pq.empty():
        # We only need to keep two `new_job` events in the
        # queue at the same time (one to process, one to peek).
        while sub_iter < sub_total and sub_count < 2:
            self._pq.add(self._block[sub_iter].submit,
                         Events.new_job,
                         self._block[sub_iter])
            sub_iter += 1
            sub_count += 1
        # the queue cannot be empty here
        self._now, event, entity = self._pq.pop()
        if event != Events.force_decay:
            logging.debug("Time %s, event %s", delta(self._now), event)
        # Process the time skipped between events
        # before changing the state of the system.
        diff = self._now - prev_event
        if diff:
            self._virt_first_stage(diff)
            self._real_first_stage(diff)
        # The default flow is to redistribute the virtual
        # time and compute new campaign ends (and maybe do
        # a scheduling / backfilling pass in the between).
        virt_second = True
        campaigns = True
        if event == Events.new_job:
            # check if the job is runnable
            if self._manager.runnable(entity):
                self._new_job_event(entity)
                schedule = True
            else:
                # unrunnable jobs count straight towards completion
                self._diag.skipped += 1
                end_iter += 1
            sub_count -= 1
        elif event == Events.job_end:
            self._job_end_event(entity)
            end_iter += 1
            schedule = True
        elif event == Events.estimate_end:
            self._estimate_end_event(entity)
        elif event == Events.bf_run:
            backfill = True
        elif event == Events.campaign_end:
            # We need to redistribute the virtual time now,
            # so the campaign can actually end.
            self._virt_second_stage()
            virt_second = False  # already done
            campaigns = self._camp_end_event(entity)
        elif event == Events.force_decay:
            self._diag.forced += 1
            virt_second = False  # no need to do it now
            campaigns = False    # no change to campaign ends
        else:
            raise Exception("unknown event")
        # update event timer
        prev_event = self._now
        if not self._pq.empty():
            # We need to process all the events that happen at
            # the same time *AND* change the campaign workloads
            # before we can continue further.
            next_time, next_event, _ = self._pq.peek()
            if next_time == self._now and next_event < Events.bf_run:
                continue
        if virt_second:
            self._virt_second_stage()
        if schedule:
            scheduled_jobs = self._schedule(bf_mode=False)
            self._diag.sched_jobs += scheduled_jobs
            self._diag.sched_pass += 1
            self._update_util()  # must be after schedule
            schedule = False
            if instant_bf:
                backfill = True
        if backfill:
            backfilled_jobs = self._schedule(bf_mode=True)
            self._diag.bf_jobs += backfilled_jobs
            self._diag.bf_pass += 1
            self._update_util()  # must be after schedule
            backfill = False
        if campaigns:
            self._update_camp_estimates()
        # add periodically occurring events
        if event < Events.bf_run:
            self._next_backfill(self._now)
        elif event == Events.bf_run and backfilled_jobs:
            self._next_backfill(self._now + 1)
        if end_iter < sub_total:
            # There are still jobs in the simulation
            # so we need an accurate usage.
            assert not self._pq.empty(), "infinite loop"
            self._next_force_decay()
        # progress report
        if time.time() > next_visual:
            next_visual += visual_update
            comp = float(sub_iter + end_iter) / (2 * sub_total)
            msg = "Block {:2} scheduler {}: {} completed {:.2f}%"
            logging.info(
                msg.format(self._block.number, self._parts.scheduler,
                           time.strftime("%H:%M:%S"), comp * 100)
            )
            if self._waiting_jobs:
                top_prio = self._waiting_jobs[-1].proc
            else:
                top_prio = -1
            logging.info(
                "events {} {} | cpus {} {} | waiting jobs {} {}"
                " | stats {:.2f} {:.2f} {:.2f}".format(
                    sub_iter, end_iter,
                    self._stats.cpu_used, self._cpu_free,
                    len(self._waiting_jobs), top_prio,
                    (self._diag.avg_util["sum"] / self._diag.avg_util["period"]),
                    self._diag.sched_jobs / float(sub_iter),
                    self._diag.bf_jobs / float(sub_iter),
                )
            )
    self._finalize()
    # Results for each user should be in this order:
    # 1) job ends (this is done during simulation)
    # 2) camp ends
    # 3) user stats
    for u in self._users.itervalues():
        for i, c in enumerate(u.completed_camps):
            self._store_camp_ended(c)
        self._store_user_stats(u)
    # merge the results
    self._results.append(self._compressor.flush())
    self._results = "".join(self._results)
    return self._results, self._diag
def __repr__(self):
    """Debug view: time window, job ends, reservation starts, resources."""
    return '[{}, {}] last {} first {}\n\tavail {}\n\trsrvd {}'.format(
        delta(self.begin),
        delta(self.end),
        self.job_ends,
        self.rsrv_starts,
        self.avail,
        self.reserved,
    )
def run(self):
    """Proceed with the simulation.

    Drives the event loop until all jobs are submitted and the event queue
    drains, then finalizes per-user results.

    :return: tuple (serialized results string, diagnostics object).
    """
    # Magic value taken from slurm/multifactor plugin.
    self._decay_factor = 1 - (0.693 / self._settings.decay)
    # Note:
    #   The CPU usage decay is always applied after each event.
    #   There is also a dummy `force_decay` event inserted into
    #   the queue to force the calculations in case the gap
    #   between consecutive events would be too long.
    self._force_period = 60 * 5
    self._initialize()
    sub_iter = sub_count = 0          # jobs pushed to the queue so far / in flight
    sub_total = len(self._block)
    end_iter = 0                      # jobs that finished (or were skipped)
    schedule = backfill = False
    # backfill immediately after each scheduling pass when no interval is set
    instant_bf = (self._settings.bf_depth and
                  not self._settings.bf_interval)
    # the first job submission is the simulation 'time zero'
    prev_event = self._block[0].submit
    self._diag.prev_util['time'] = prev_event
    # time to notify the user about the simulation progress
    next_visual_update = time.time() + self._settings.update_time
    while sub_iter < sub_total or not self._pq.empty():
        # We only need to keep two `new_job` events in the
        # queue at the same time (one to process, one to peek).
        while sub_iter < sub_total and sub_count < 2:
            self._pq.add(
                self._block[sub_iter].submit,
                Events.new_job,
                self._block[sub_iter]
            )
            sub_iter += 1
            sub_count += 1
        # the queue cannot be empty here
        self._now, event, entity = self._pq.pop()
        if event != Events.force_decay:
            logging.debug('Time %s, event %s', delta(self._now), event)
        # Process the time skipped between events
        # before changing the state of the system.
        diff = self._now - prev_event
        if diff:
            self._virt_first_stage(diff)
            self._real_first_stage(diff)
        # The default flow is to redistribute the virtual
        # time and compute new campaign ends (and maybe do
        # a scheduling / backfilling pass in the between).
        virt_second = True
        campaigns = True
        if event == Events.new_job:
            # check if the job is runnable
            if self._manager.runnable(entity):
                self._new_job_event(entity)
                schedule = True
            else:
                # unrunnable jobs count straight towards completion
                self._diag.skipped += 1
                end_iter += 1
            sub_count -= 1
        elif event == Events.job_end:
            self._job_end_event(entity)
            end_iter += 1
            schedule = True
        elif event == Events.estimate_end:
            self._estimate_end_event(entity)
        elif event == Events.bf_run:
            backfill = True
        elif event == Events.campaign_end:
            # We need to redistribute the virtual time now,
            # so the campaign can actually end.
            self._virt_second_stage()
            virt_second = False  # already done
            campaigns = self._camp_end_event(entity)
        elif event == Events.force_decay:
            self._diag.forced += 1
            virt_second = False  # no need to do it now
            campaigns = False    # no change to campaign ends
        else:
            raise Exception('unknown event')
        # update event timer
        prev_event = self._now
        if not self._pq.empty():
            # We need to process all the events that happen at
            # the same time *AND* change the campaign workloads
            # before we can continue further.
            next_time, next_event, _ = self._pq.peek()
            if (next_time == self._now and
                    next_event < Events.bf_run):
                continue
        if virt_second:
            self._virt_second_stage()
        if schedule:
            scheduled_jobs = self._schedule(bf_mode=False)
            self._diag.sched_jobs += scheduled_jobs
            self._diag.sched_pass += 1
            self._update_util()  # must be after schedule
            schedule = False
            if instant_bf:
                backfill = True
        if backfill:
            backfilled_jobs = self._schedule(bf_mode=True)
            self._diag.bf_jobs += backfilled_jobs
            self._diag.bf_pass += 1
            self._update_util()  # must be after schedule
            backfill = False
        if campaigns:
            self._update_camp_estimates()
        # add periodically occurring events
        if event < Events.bf_run:
            self._next_backfill(self._now)
        elif event == Events.bf_run and backfilled_jobs:
            self._next_backfill(self._now + 1)
        if end_iter < sub_total:
            # There are still jobs in the simulation
            # so we need an accurate usage.
            assert not self._pq.empty(), 'infinite loop'
            self._next_force_decay()
        # progress report
        if time.time() > next_visual_update:
            completed = float(sub_iter + end_iter) / (2 * sub_total)
            self._log_progress(sub_iter, completed)
            next_visual_update += self._settings.update_time
    self._finalize()
    # Results for each user should be in this order:
    # 1) job ends (this is done during simulation)
    # 2) camp ends
    # 3) user stats
    for u in self._users.itervalues():
        for i, c in enumerate(u.completed_camps):
            self._store_camp_ended(c)
        self._store_user_stats(u)
    # merge the results
    self._results.append(self._compressor.flush())
    self._results = ''.join(self._results)
    return self._results, self._diag
def psi2(self, num):
    """Phase matrix for layer `num`.

    Entry (n, m) is exp(i * beta(num, n) * deltaL) * delta(n, m); with a
    Kronecker-style delta this yields a diagonal matrix of propagation
    phases.

    :param num: layer index passed through to self.beta
    :return: complex (M+1) x (M+1) numpy array
    """
    size = self.M + 1
    result = np.zeros([size, size], complex)
    for row in xrange(0, size):
        for col in xrange(0, size):
            phase = sp.e ** ((0 + 1j) * self.beta(num, row) * self.deltaL)
            result[row, col] = phase * delta(row, col)
    return result