def dispatch_data(self, doc_name, result):
    """Dispatch data to collectd."""
    if doc_name == "topicStats":
        for item in ['messagesIn', 'bytesOut', 'bytesIn', 'totalFetchRequests',
                     'totalProduceRequests', 'produceMessageConversions',
                     'failedProduceRequests', 'fetchMessageConversions',
                     'failedFetchRequests', 'bytesRejected']:
            result.pop(item, None)  # tolerate missing keys
        collectd.info("Plugin kafkatopic: Successfully sent topicStats: %s"
                      % result['_topicName'])
    elif doc_name == "kafkaStats":
        for item in ["messagesInPerSec", "bytesInPerSec", "bytesOutPerSec",
                     "isrExpandsPerSec", "isrShrinksPerSec", "leaderElectionPerSec",
                     "uncleanLeaderElectionPerSec", "producerRequestsPerSec",
                     "fetchConsumerRequestsPerSec", "fetchFollowerRequestsPerSec"]:
            result.pop(item, None)  # tolerate missing keys
        collectd.info("Plugin kafkatopic: Successfully sent %s doctype to collectd."
                      % doc_name)
        collectd.debug("Plugin kafkatopic: Values dispatched = %s" % json.dumps(result))
    elif doc_name == "consumerStats":
        collectd.info("Plugin kafkatopic: Successfully sent consumerStats of consumer group %s of topic %s"
                      % (result['_groupName'], result['_topicName']))
    else:
        collectd.info("Plugin kafkatopic: Successfully sent topic %s of partitionStats: %s."
                      % (result['_topicName'], result['_partitionNum']))
    utils.dispatch(result)
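# dispatch_data above, like the other collectd plugin snippets in this
# collection, delegates the actual write to a shared utils.dispatch(result)
# helper. A minimal, hedged sketch of what such a helper might look like,
# assuming the standard collectd Python plugin API (collectd.Values); the
# 'gauge' type and the '_plugin' metadata key are illustrative assumptions:
import collectd  # only importable inside the collectd Python plugin host

def dispatch(result):
    """Hypothetical sketch: emit each numeric field of result as a gauge."""
    metrics = collectd.Values(type='gauge')
    metrics.plugin = result.get('_plugin', 'unknown')
    for key, value in result.items():
        if key.startswith('_') or not isinstance(value, (int, float)):
            continue  # skip underscore-prefixed metadata and non-numeric fields
        metrics.type_instance = key
        metrics.dispatch(values=[value])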
def dispatch_data(self, top_stats_res):
    """Dispatch a list of result dictionaries to collectd."""
    collectd.info("Plugin topstats: Successfully sent to collectd.")
    collectd.debug("Plugin topstats: Values dispatched = " + json.dumps(top_stats_res))
    for result in top_stats_res:
        utils.dispatch(result)
def config_queue(request):
    """Manage queue weights: raise or lower a queue's weight.

    Example request: http://server/taskqueues/q01/config?action=queue_down
    """
    queue_id = request.matchdict['id']
    url_action = request.params.get('action', '')
    dispatcher_config = ztq_core.get_dispatcher_config()
    queue_weight = dispatcher_config['queue_weight']
    # adjust the weight according to the requested action
    if url_action == 'queue_down':
        queue_weight[queue_id] -= 1
        # the weight may not drop below 0
        if queue_weight[queue_id] < 0:
            queue_weight[queue_id] = 0
        # persist the new dispatch policy and re-dispatch
        ztq_core.set_dispatcher_config(dispatcher_config)
        utils.dispatch()
    elif url_action == 'queue_up':
        queue_weight[queue_id] += 1
        # the weight is capped at 10
        if queue_weight[queue_id] > 10:
            queue_weight[queue_id] = 10
        # persist the new dispatch policy and re-dispatch
        ztq_core.set_dispatcher_config(dispatcher_config)
        utils.dispatch()
    elif url_action == 'from_right':
        utils.dispatch_single_queue(queue_id, from_right=True)
    elif url_action == 'from_left':
        utils.dispatch_single_queue(queue_id, from_right=False)
    return HTTPFound(location='/taskqueues')
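# config_queue above is a Pyramid view: request.matchdict['id'] implies a
# route pattern with an {id} placeholder, matching the URL in the docstring.
# A hedged sketch of how the route might be registered (the route name and
# app factory are assumptions, not taken from the source):
from pyramid.config import Configurator

def make_app():
    config = Configurator()
    config.add_route('taskqueue_config', '/taskqueues/{id}/config')
    config.add_view(config_queue, route_name='taskqueue_config')
    return config.make_wsgi_app()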
def dispatch_data(self, doc_name, result):
    """Dispatch data to collectd."""
    collectd.info("Plugin kafkajmx: Successfully sent %s doctype to collectd."
                  % doc_name)
    collectd.debug("Plugin kafkajmx: Values dispatched = %s" % json.dumps(result))
    utils.dispatch(result)
def dispatch_data(self, doc_name, result):
    """Dispatch data to collectd."""
    collectd.info("Plugin tomcat: Successfully sent %s doctype to collectd."
                  % doc_name)
    collectd.debug("Plugin tomcat: Values dispatched for %s = %s"
                   % (doc_name, json.dumps(result)))
    utils.dispatch(result)
def dispatch_data(self, dict_disks_copy):
    """Dispatches dictionary to collectd."""
    for disk_name, disk_info in dict_disks_copy.items():
        # delete the raw readbyte, writebyte, readcount and writecount fields
        # before dispatching
        for field in (READBYTE, WRITEBYTE, READCOUNT, WRITECOUNT):
            disk_info.pop(field, None)
        collectd.info("Plugin disk_stat: Successfully sent to collectd.")
        collectd.debug("Plugin disk_stat: Values: " + json.dumps(disk_info))
        utils.dispatch(disk_info)
def dispatch_data(self, doc_name, result):
    """Dispatch data to collectd."""
    if doc_name == "zookeeperStats":
        for item in ("packetsSent", "packetsReceived"):
            result.pop(item, None)  # tolerate missing keys
    collectd.info("Plugin zookeeperjmx: Successfully sent %s doctype to collectd."
                  % doc_name)
    collectd.debug("Plugin zookeeperjmx: Values dispatched = %s" % json.dumps(result))
    utils.dispatch(result)
def dispatch_data(self, result, doc):
    """Dispatch data to collectd."""
    if doc == "haproxyStats":
        collectd.info("Plugin haproxy: Successfully sent %s doctype to collectd." % doc)
        collectd.debug("Plugin haproxy: Values dispatched = %s" % json.dumps(result))
        utils.dispatch(result)
    elif doc in ("frontendStats", "backendStats"):
        for pxname in result.keys():
            collectd.info("Plugin haproxy: Successfully sent %s of %s to collectd."
                          % (doc, pxname))
            collectd.debug("Plugin haproxy: Values dispatched = %s"
                           % json.dumps(result[pxname]))
            utils.dispatch(result[pxname])
def tool(request, appname):
    # if we are here, we have been given a particular appname, e.g. "keyinfo",
    # as part of the url
    context = {'applayout': appLayout[appname]}
    if request.method == 'POST':
        form = forms.Form(request.POST)
    elif request.method == 'GET':
        form = forms.Form(request.GET)
    else:
        form = forms.Form()
    if form.is_valid():
        loginfo(appname, context, request)
        context = dispatch(context, request, appname)
    #context['form'] = form
    context = setconstants(request, context, appname)
    loginfo(appname, context, request)
    # special case: the data endpoint returns JSON
    if appname == 'data':
        return HttpResponse(json.dumps(context['data']))
    else:
        return render(request, 'toolbox.html', context)
def start(self):
    rospy.loginfo(
        """\n\nCONTROLLER KEYMAP (HELP):
        wasd : control XY
        12   : control Z
        ijkl : control roll & pitch
        34   : control yaw
        q    : quit\n""")
    while True:
        pose = self.pose
        p = pose.position
        # https://github.com/ros/geometry/issues/109
        o = euler_from_quaternion((pose.orientation.x, pose.orientation.y,
                                   pose.orientation.z, pose.orientation.w))
        rospy.loginfo("Current position (mm):\n{}".format(p))
        rospy.loginfo("Current orientation (deg): {}".format(np.rad2deg(o)))
        gchr = _Getch()
        user_input = gchr()
        if user_input == 'q':
            break
        p, o = dispatch(user_input, p, o)
        pose = Pose(position=Point(*p),
                    orientation=Quaternion(*quaternion_from_euler(*o)))
        self.pose = pose
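# start() above reads single keypresses through a _Getch callable. The classic
# Unix recipe for this is shown here as a hedged sketch; the actual _Getch in
# the source may differ (e.g. it may also support Windows via msvcrt):
import sys
import termios
import tty

class _Getch:
    """Read a single character from stdin without waiting for Enter."""
    def __call__(self):
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)           # switch the terminal to raw mode
            ch = sys.stdin.read(1)   # blocking read of one character
        finally:
            # always restore the previous terminal settings
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch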
def _energy(self, z):
    logp = z.new_zeros(z.shape[0])
    for no in range(len(self.priorList)):
        _, z_ = dispatch(self.factorOutIList[no], self.factorOutJList[no], z)
        logp = logp + self.priorList[no]._energy(z_)
    return logp
def config_worker(request):
    """Configuration management for workers."""
    url_action = request.params.get('action', '')
    dispatcher_config = ztq_core.get_dispatcher_config()
    worker_weight = dispatcher_config['worker_weight']
    # the worker the request refers to
    worker_id = request.matchdict['id']
    # adjust the weight according to the requested action
    if url_action == 'stop_worker':
        # stop the worker
        worker_weight[worker_id] = 0
    elif url_action == 'enable':
        # enable the worker
        worker_weight[worker_id] = 5
    elif url_action == 'worker_down':
        # lower the worker's weight; the weight may not drop below 1
        worker_weight[worker_id] -= 1
        if worker_weight[worker_id] < 1:
            worker_weight[worker_id] = 1
    elif url_action == 'worker_up':
        # raise the worker's weight; the weight is capped at 10
        worker_weight[worker_id] += 1
        if worker_weight[worker_id] > 10:
            worker_weight[worker_id] = 10
    elif url_action == 'delete':
        # delete a worker that has not been enabled yet; deletion does not
        # trigger a re-dispatch
        if worker_id in worker_weight:  # the worker was never enabled
            worker_weight.pop(worker_id)
        workers_dict = ztq_core.get_worker_state()
        del workers_dict[worker_id]
        worker_job = ztq_core.get_job_state(worker_id)
        # copy the keys first: deleting from a dict while iterating it
        # raises a RuntimeError in Python 3
        for job_name in list(worker_job):
            del worker_job[job_name]
        ztq_core.set_dispatcher_config(dispatcher_config)
        return HTTPFound(location='/workerstatus')
    elif url_action == 'update':
        # send a 'report' command so the worker reports its own state
        worker_list = ztq_core.get_all_worker()
        for worker_name in worker_list:
            if worker_name == worker_id:
                utils.send_command(worker_name, 'report')
                time.sleep(1)
        return HTTPFound(location='/workerstatus')
    # persist the new dispatch policy and re-dispatch
    ztq_core.set_dispatcher_config(dispatcher_config)
    utils.dispatch()
    return HTTPFound(location='/workerstatus')
def inverse(self, z):
    batch_size = z.shape[0]
    inv_ldj = z.new_zeros(batch_size)
    for layer, indexI, indexJ in reversed(
            list(zip(self.layers, self.indexI, self.indexJ))):
        z, z_ = utils.dispatch(indexI, indexJ, z)
        z_ = utils.stackRGblock(z_)
        z_, log_prob = layer.inverse(z_)
        inv_ldj = inv_ldj + log_prob.view(batch_size, -1).sum(dim=1)
        z_ = utils.unstackRGblock(z_, batch_size)
        z = utils.collect(indexI, indexJ, z, z_)
    return z, inv_ldj
def forward(self, x):
    # dim(x) = (B, C, H, W)
    batch_size = x.shape[0]
    ldj = x.new_zeros(batch_size)
    for layer, indexI, indexJ in zip(self.layers, self.indexI, self.indexJ):
        x, x_ = utils.dispatch(indexI, indexJ, x)
        # dim(x_) = (B, C, num_RG_blocks, K*K)
        x_ = utils.stackRGblock(x_)
        # dim(x_) = (B*num_RG_blocks, C, K, K)
        x_, log_prob = layer.forward(x_)
        ldj = ldj + log_prob.view(batch_size, -1).sum(dim=1)
        x_ = utils.unstackRGblock(x_, batch_size)
        x = utils.collect(indexI, indexJ, x, x_)
    return x, ldj
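# forward/inverse above split a (B, C, H, W) tensor into a kept part and a
# selected sub-lattice with utils.dispatch, and later merge the transformed
# part back with utils.collect. A hedged sketch of one plausible
# implementation, assuming indexI/indexJ are LongTensors of row and column
# indices into the (H, W) lattice (the real helpers may differ in layout):
import torch

def dispatch(indexI, indexJ, x):
    """Return x plus the selected sub-lattice x[:, :, indexI, indexJ]."""
    x_ = x[:, :, indexI, indexJ]  # advanced indexing: shape (B, C, *indexI.shape)
    return x, x_

def collect(indexI, indexJ, x, x_):
    """Write the transformed sub-lattice back into a copy of x."""
    x = x.clone()  # avoid modifying the caller's tensor in place
    x[:, :, indexI, indexJ] = x_
    return x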
def logProbability(self, z, K=None, meanList=None, scaleList=None):
    if meanList is None or scaleList is None:
        raise Exception("no mean or scale passed")
    logp = z.new_zeros(z.shape[0])
    for no in range(len(self.factorOutIList)):
        _, z_ = dispatch(self.factorOutIList[no], self.factorOutJList[no], z)
        if no == len(self.factorOutIList) - 1:
            _logp = self.lastPrior._energy(z_)
        else:
            mean = meanList[no]
            scale = scaleList[no]
            assert mean.shape == scale.shape
            assert mean.shape == z_.shape
            _logp = -utils.logDiscreteLogistic(
                z_, mean, scale, self.decimal).reshape(z_.shape[0], -1).sum(-1)
        logp = logp + _logp
    return -logp
def dispatch_data(self, dict_nics):
    """Dispatches dictionary to collectd."""
    for if_name, if_info in dict_nics.items():
        collectd.info("Plugin nic_stats: Successfully sent to collectd.")
        collectd.debug("Plugin nic_stats: Values: " + json.dumps(if_info))
        utils.dispatch(if_info)
from django.conf.urls.defaults import *
from django.contrib import admin
from utils import dispatch
from starter import views
from django.views.generic.simple import direct_to_template
from django.conf import settings

admin.autodiscover()

urlpatterns = patterns('',
    (r'^admin/', include(admin.site.urls)),
)

if settings.DEBUG:
    urlpatterns += patterns('',
        (r'^static/(?P<path>.*)$', 'django.views.static.serve',
         {'document_root': settings.MEDIA_ROOT}),
    )

urlpatterns += patterns('starter',
    (r'^login/?$', dispatch(post=views.plogin, get=views.glogin)),
    (r'^logout/?$', 'views.vlogout'),
    (r'^account/?$', dispatch(post=views.paccount, get=views.gaccount)),
    (r'^(.*)', 'views.home'),
)
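# The login/account patterns above route by HTTP verb through a dispatch
# helper imported from utils. A hedged sketch of what such a helper might
# look like (the actual implementation in utils is not shown in the source):
from django.http import HttpResponseNotAllowed

def dispatch(**handlers):
    """Return a view that delegates to a handler keyed by HTTP method."""
    def view(request, *args, **kwargs):
        handler = handlers.get(request.method.lower())
        if handler is None:
            # 405 response listing the supported methods
            return HttpResponseNotAllowed([m.upper() for m in handlers])
        return handler(request, *args, **kwargs)
    return view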
def dispatch_data(self, dict_tcp):
    """Dispatches dictionary to collectd."""
    collectd.info("Plugin tcp_stats: Successfully sent to collectd.")
    collectd.debug("Plugin tcp_stats: Values: " + json.dumps(dict_tcp))
    utils.dispatch(dict_tcp)
def dispatch_data(self, dict_cpu_util):
    """Dispatches dictionary to collectd."""
    collectd.info("Plugin cpu_util: Successfully sent to collectd.")
    collectd.debug("Plugin cpu_util: Values dispatched = " + json.dumps(dict_cpu_util))
    utils.dispatch(dict_cpu_util)
        scores, num_param_servers, vocab_size, learning_rate, embedding_size,
        lstm_size
    )
    tf.contrib.learn.train(
        graph,
        output_dir,
        lstm.train_op,
        lstm.loss,
        global_step_tensor=lstm.global_step,
        supervisor_is_chief=is_chief,
        supervisor_master=target
    )

if __name__ == "__main__":
    parser = utils.base_parser()
    parser.add_argument(
        '--learning-rate',
        type=float,
        default=0.01
    )
    utils.dispatch(
        train,
        **parser.parse_args().__dict__
    )
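# utils.dispatch(train, **args) above presumably decides between local and
# distributed execution. A hedged sketch of such a dispatcher for TF 1.x,
# assuming the TF_CONFIG convention used by Cloud ML Engine; the keyword
# names (target, is_chief) mirror the call above but are otherwise guesses:
import json
import os
import tensorflow as tf

def dispatch(train_fn, **task_args):
    env = json.loads(os.environ.get('TF_CONFIG', '{}'))
    cluster_spec = env.get('cluster')
    task = env.get('task', {})
    if not cluster_spec:
        # no cluster definition: run a single local job as chief
        train_fn(target='', is_chief=True, **task_args)
        return
    cluster = tf.train.ClusterSpec(cluster_spec)
    server = tf.train.Server(cluster,
                             job_name=task['type'],
                             task_index=task['index'])
    if task['type'] == 'ps':
        server.join()  # parameter servers block and serve variables
    else:
        train_fn(target=server.target,
                 is_chief=(task['index'] == 0),
                 **task_args)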
def dispatch_data(self, linux_stats):
    """Dispatches dictionary to collectd."""
    collectd.info("Plugin linux_stats: Successfully sent to collectd.")
    collectd.debug("Plugin linux_stats: Values dispatched = " + json.dumps(linux_stats))
    utils.dispatch(linux_stats)
def dispatch_data(self, result):
    """Dispatch data to collectd."""
    collectd.info("Plugin jmeter: Values dispatched = %s" % json.dumps(result))
    utils.dispatch(result)
import mimetypes

import utils

if __name__ == "__main__":
    print('Start processing....')
    # Initialize mime types
    mimetypes.init()
    mimetypes.add_type('application/x-spss', '.sav')       # mimetype for SPSS files
    mimetypes.add_type('application/x-stata-dta', '.dta')  # mimetype for Stata files
    for entry in utils.walkdir('./test'):
        mimetype = mimetypes.guess_type(entry.path)[0]
        utils.dispatch(entry.path, mimetype)
    print('Exiting...')
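# The script above iterates utils.walkdir and reads entry.path, which suggests
# os.DirEntry objects. A hedged sketch of a recursive walkdir along those
# lines (the real helper may filter or order entries differently):
import os

def walkdir(path):
    """Yield os.DirEntry objects for every file below path, recursively."""
    for entry in os.scandir(path):
        if entry.is_dir(follow_symlinks=False):
            yield from walkdir(entry.path)  # descend into subdirectories
        else:
            yield entry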
def test_hierarchyPrior():

    class UniTestPrior(source.Source):
        def __init__(self, nvars, element, name="UniTestPrior"):
            super(UniTestPrior, self).__init__(nvars, 1.0, name)
            self.element = torch.nn.Parameter(torch.tensor(element),
                                              requires_grad=False)

        def sample(self, batchSize):
            return torch.ones([batchSize] + self.nvars).to(
                self.element).float() * self.element

        def _energy(self, z):
            return (torch.tensor([2])**self.element * np.prod(z.shape[2:]))

    length = 32
    channel = 3
    decimal = flow.ScalingNshifting(256, -128)

    p1 = source.DiscreteLogistic([channel, 256, 3], decimal,
                                 rounding=utils.roundingWidentityGradient)
    p2 = source.DiscreteLogistic([channel, 64, 3], decimal,
                                 rounding=utils.roundingWidentityGradient)
    p3 = source.DiscreteLogistic([channel, 16, 3], decimal,
                                 rounding=utils.roundingWidentityGradient)
    p4 = source.DiscreteLogistic([channel, 4, 3], decimal,
                                 rounding=utils.roundingWidentityGradient)
    p5 = source.MixtureDiscreteLogistic([channel, 1, 4], 5, decimal,
                                        rounding=utils.roundingWidentityGradient)

    P = source.HierarchyPrior(channel, length, [p1, p2, p3, p4, p5], repeat=1)

    x = P.sample(100)
    logp = P.logProbability(x)

    import math
    zparts = []
    for no in range(int(math.log(length, 2))):
        _, parts = utils.dispatch(P.factorOutIList[no], P.factorOutJList[no], x)
        zparts.append(parts)

    rcnX = torch.zeros_like(x)
    for no in range(int(math.log(length, 2))):
        part = zparts[no]
        rcnX = utils.collect(P.factorOutIList[no], P.factorOutJList[no],
                             rcnX, part)

    assert_allclose(x.detach(), rcnX.detach())

    length = 8
    p1 = UniTestPrior([channel, 16, 3], 1)
    p2 = UniTestPrior([channel, 4, 3], 2)
    p3 = UniTestPrior([channel, 1, 4], 3)

    Pp = source.HierarchyPrior(channel, length, [p1, p2, p3], repeat=2)

    x = Pp.sample(1)
    logp = Pp.logProbability(x)

    target = np.array([[3, 1, 2, 1, 3, 1, 2, 1],
                       [1, 1, 1, 1, 1, 1, 1, 1],
                       [2, 1, 2, 1, 2, 1, 2, 1],
                       [1, 1, 1, 1, 1, 1, 1, 1],
                       [3, 1, 2, 1, 3, 1, 2, 1],
                       [1, 1, 1, 1, 1, 1, 1, 1],
                       [2, 1, 2, 1, 2, 1, 2, 1],
                       [1, 1, 1, 1, 1, 1, 1, 1]])

    assert_allclose(x[0, 0].detach().numpy(), target)
    assert logp == -(16 * 3 * 2**1 + 4 * 3 * 2**2 + 1 * 4 * 2**3)

    p1 = UniTestPrior([channel, 16, 3], 1)
    p2 = UniTestPrior([channel, 4, 3], 2)
    p3 = UniTestPrior([channel, 1, 4], 3)

    Ppodd = source.HierarchyPrior(channel, length, [p1, p2, p3], repeat=1)

    x = Ppodd.sample(1)
    logp = Ppodd.logProbability(x)

    target = np.array([[3, 1, 2, 1, 3, 1, 2, 1],
                       [1, 1, 1, 1, 1, 1, 1, 1],
                       [2, 1, 2, 1, 2, 1, 2, 1],
                       [1, 1, 1, 1, 1, 1, 1, 1],
                       [3, 1, 2, 1, 3, 1, 2, 1],
                       [1, 1, 1, 1, 1, 1, 1, 1],
                       [2, 1, 2, 1, 2, 1, 2, 1],
                       [1, 1, 1, 1, 1, 1, 1, 1]])

    assert_allclose(x[0, 0].detach().numpy(), target)
    assert logp == -(16 * 3 * 2**1 + 4 * 3 * 2**2 + 1 * 4 * 2**3)
def dispatch_data(self, result):
    """Dispatch each details dictionary in result to collectd."""
    for details_type, details in result.items():
        utils.dispatch(details)