Example #1
    def get_records_in_order(self, max, base_enable, extra_attrs, filter, *args):
        assert self.vlv_enable  # init_vlv must be called first
        retval = []
        idx = 0
        page_size = self.vlv_page_size
        if max <= BASE_INFINITE: #request all records
            page_size = self.vlv_page_size
        elif max < self.vlv_page_size:
            page_size = max

        while True:
            vlv_filter = self._generate_vlv_filter(idx, page_size)
            search_filter = self._generate_filter(extra_attrs, base_enable, filter, vlv_filter, *args)
            results = self._do_get_action(search_filter)
            if isinstance(results, dict): #RS would return a dict if there was only one record
                results = [results]
            if results and isinstance(results, list):
                retval.extend(results)
            else:
                logger.warning('Unexpected data!')
                break #unexpected data
            if max > BASE_INFINITE and len(retval) >= max:
                break #collected the requested number of records
            elif len(results) < page_size: #a short page means no records remain
                break
            else:
                idx += 1
        return retval
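The loop above depends on a paging helper that is not shown. A minimal sketch of what _generate_vlv_filter might look like, assuming a simple 1-based offset/count window (only the name and arguments are taken from the call above; the body is an assumption):
    def _generate_vlv_filter(self, idx, page_size):
        # Hypothetical body: turn a zero-based page index into a 1-based
        # offset/count window; the real filter format depends on the server.
        offset = idx * page_size + 1
        return {'offset': offset, 'count': page_size}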
Example #2
File: base.py Project: lls3018/mdmi
    def _get_config(self, account_id):
        # TODO: the default values should not be hard-coded here
        credential = {
            'Host': '',
            'Port': 443,
            'aw-tenant-code': '',
            'Authorization': '',
            'Content-Type': 'application/json',
        }

        account = HostedAccount(account_id)
        # get mdmUrl, mdmUsername, mdmPassword, mdmToken from hosteddb
        c = account.get_airwatch_credential()

        try:
            credential['Host'] = self._strip_hostname(c['mdmUrl'])
            username = self._strip_value(c['mdmUsername'])
            # the AirWatch password stored in hosted is encrypted
            password = decrypt(self._strip_value(c['mdmPassword'], False))
            credential['Authorization'] = " ".join(['Basic', base64.b64encode(':'.join([username, password]))])
            credential['aw-tenant-code'] = self._strip_value(c['mdmToken'])
        except Exception as e:
            logger.warning('Error when getting airwatch credential: %s', e)
            raise Exception(e)

        return credential
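The Authorization header built above is standard HTTP Basic auth. The same construction in isolation, with hypothetical credentials (the snippet above is Python 2, where b64encode accepts str directly; on Python 3 the bytes round-trip below is needed):
import base64

username, password = 'admin', 's3cret'  # hypothetical values
token = base64.b64encode(':'.join([username, password]).encode()).decode()
authorization = ' '.join(['Basic', token])  # 'Basic YWRtaW46czNjcmV0'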
Example #3
  def _discover(self):
    assert self.state.IsInitialized()

    request = DiscoveryRequest()
    request.client_id = self.state.client_id

    # Adds all the resources we know about so that we can get
    # the safe capacities for them.
    for r in self.state.resource:
      request.resource_id.append(r.resource_id)

    # Sends the request to a random task in the server job.
    response = self.downstream_job.get_random_task().Discovery_RPC(request)

    # If the response has a master_bns field we store the reference
    # to the master. If not, there is no known master yet.
    if response.HasField('master_bns'):
      self.master = self.downstream_job.get_task_by_name(response.master_bns)
    else:
      self.master = None
      logger.warning('%s doesn\'t know who the master is.' %
                     self.state.client_id)
      Counter.get('client.discovery_failure').inc()

    # Goes through the response and stores all the safe capacities in the
    # client state.
    for safe in response.safe_capacity:
      self._find_resource(safe.resource_id).safe_capacity = safe.safe_capacity

    # Returns the server we just discovered to be the master.
    return self.master
Example #4
File: user.py Project: lls3018/mdmi
    def _retrieve_user(self, **kwargs):
        rs = self._process_once('retrieve')
        if not kwargs:
            logger.warning('the retrieve condition is empty or none, it will return all users in metanate')
            return rs

        # get a key and value from kwargs
        key, value = self._get_key_value_from_dict(kwargs)
        logger.info('metanate retrieve key and value are: %s - %s', key, value)
        if not key:
            logger.error('parameter did not contain "dn", "cn" or "objectguid"')
            raise MDMiInvalidParameterError(601, 'Invalid Parameter', 'parameter did not contain "dn", "cn" or "objectguid"')

        for r in rs.content:
            if isinstance(r, tuple) and len(r) == 2 and isinstance(r[1], dict):
                if key == 'dn':
                    if r[0] == value:
                        return r
                else:
                    r1 = self._convert_dict_key_to_lower(r[1])
                    if key in r1 and isinstance(r1[key], list) and value in r1[key]:
                        return r

        logger.error('cannot find user with condition: %s', kwargs)
        return None
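The key extraction helper is not shown here. A plausible sketch, assuming it simply returns the first accepted key present (the three accepted names come from the error message above; the body is an assumption):
    def _get_key_value_from_dict(self, kwargs):
        # Hypothetical body: pick the first accepted key, else (None, None)
        for key in ('dn', 'cn', 'objectguid'):
            if key in kwargs:
                return key, kwargs[key]
        return None, None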
Example #5
    def get_dataset_splits(self, normalize=False):
        dataset_func = self.get_dataset_combo if self.hparams.dataset == "combo" else self.get_dataset
        train_set = dataset_func(set="train")
        if self.test_set is not None:
            test_set = dataset_func(set=self.test_set, augment=self.hparams.augment)
        else:
            test_set = dataset_func(set="test", augment=self.hparams.augment)
        val_set = dataset_func(set="val", augment=False)

        if normalize:
            mean = 0.
            std = 0.
            loader = DataLoader(train_set, batch_size=self.hparams.bs, num_workers=self.hparams.workers, shuffle=False)
            for images, _ in loader:
                batch_samples = images.size(0)  # batch size
                # logger.debug(images.shape)
                images = images.view(batch_samples, images.size(1), -1)
                # logger.debug(images.shape)
                mean += images.mean(2).sum(0)
                std += images.std(2).sum(0)

            mean /= len(loader.dataset)
            std /= len(loader.dataset)
            logger.debug("Mean and stdev",mean,std)
            tf = transforms.Compose([
                transforms.Normalize(mean=(mean,), std=(std,))
            ])
            train_set.dataset.transform = tf
            test_set.dataset.transform = tf
            val_set.dataset.transform = tf

        logger.warning(f"{self.hparams.dataset} - train {len(train_set)} | val {len(val_set)} | test {len(test_set)}")
        return train_set, val_set, test_set
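The normalization pass above averages per-image channel statistics over the whole dataset. The same computation on a fake batch, stripped of the DataLoader plumbing:
import torch

images = torch.rand(8, 3, 32, 32)            # batch, channels, height, width
flat = images.view(images.size(0), images.size(1), -1)
mean = flat.mean(2).sum(0) / images.size(0)  # per-channel mean over the batch
std = flat.std(2).sum(0) / images.size(0)    # average of per-image stds, as above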
Example #6
def server(path):
    if path in ("judge", "ping", "compile_spj"):
        logger.warning("app reached" + path)
        print("app reached," + path)
        _token = request.headers.get("X-Judge-Server-Token")
        try:
            if _token != token:
                raise TokenVerificationFailed("invalid token")
            try:
                data = request.json
            except Exception:
                data = {}
            ret = {"err": None, "data": getattr(JudgeServer, path)(**data)}
        except (CompileError, TokenVerificationFailed, SPJCompileError,
                JudgeClientError) as e:
            logger.exception(e)
            ret = {"err": e.__class__.__name__, "data": e.message}
        except Exception as e:
            logger.exception(e)
            ret = {
                "err": "JudgeClientError",
                "data": e.__class__.__name__ + " :" + str(e)
            }
    else:
        ret = {"err": "InvalidRequest", "data": "404"}
    return Response(json.dumps(ret), mimetype='application/json')
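For context, a handler like this is typically registered as a catch-all route; a minimal sketch assuming a Flask app object (the route shape is an assumption, not taken from the project):
from flask import Flask

app = Flask(__name__)
app.add_url_rule('/<path>', 'server', server, methods=['POST'])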
Example #7
    def run_operation(self, operation):
        """Executes an operation given to it by the agent core.

        Returns:

            - Nothing

        """

        logger.debug("agent-id: {0}, agent-version: {1}"
                     .format(settings.AgentId, settings.AgentVersion))

        if not isinstance(operation, MonitOperation):
            operation = MonitOperation(operation.raw_operation)

        if operation.type == MonitOperationValue.MonitorData:
            monit_data = self.get_monit_data()

            operation.raw_result = json.dumps(monit_data)
            operation.urn_response = MonitUrn.get_monit_data_urn()
            operation.request_method = RequestMethod.POST

        else:
            logger.warning("Unknown operation %s. Ignoring." % operation.type)

        self._send_results(operation, retry=False)
Example #8
    def submit(self, problem_id, language, src_code):
        self.code_len = len(src_code.encode('utf-8'))
        problem_id = str(problem_id).upper()
        try:
            language = CF.LANGUAGE[str(language).upper()]
        except Exception as e:
            logger.exception(e)
            logger.error('language unrecognizable!')
            return False

        self.browser.open(CF.URL_SUBMIT)
        submit_form = self.browser.get_form(class_='submit-form')
        submit_form['submittedProblemCode'] = problem_id
        submit_form['source'] = src_code
        submit_form['programTypeId'] = language

        self.browser.submit_form(submit_form)

        if self.browser.url[-6:] != 'status':
            logger.warning(
                'Submit failed '
                '(probably because you have submitted the same file before).')
            return False

        return True
Example #9
    def start_ds_port_forward(self, instance_name='userstore', instance_nb=0):
        if not is_cluster_mode():
            ds_pod_name = '%s-%s' % (instance_name, instance_nb)
            ds_local_port = getattr(self, '%s%s_local_port' %
                                    (instance_name, instance_nb))
            cmd = self.helm_cmd + ' --namespace %s port-forward pod/%s %s:8080' % \
                  (tests_namespace(), ds_pod_name, ds_local_port)
            ds_popen = utils.cmd.run_cmd_process(cmd)

            duration = 60
            start_time = time.time()
            while time.time() - start_time < duration:
                soc = socket.socket()
                result = soc.connect_ex(("", ds_local_port))
                soc.close()
                if result != 0:
                    logger.warning(
                        'Port-forward for pod %s on port %s not ready, waiting 5s...'
                        % (ds_pod_name, ds_local_port))
                    time.sleep(5)
                else:
                    logger.info('Port-forward for pod %s on port %s is ready' %
                                (ds_pod_name, ds_local_port))
                    return ds_popen

            raise Exception(
                'Port-forward for pod %s on port %s not ready after %ss' %
                (ds_pod_name, ds_local_port, duration))
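The readiness loop above can be factored into a small probe; a minimal sketch using the same connect_ex check (the helper name is hypothetical):
import socket

def port_is_ready(port, host=''):
    # connect_ex returns 0 once something is listening on (host, port)
    soc = socket.socket()
    try:
        return soc.connect_ex((host, port)) == 0
    finally:
        soc.close()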
Example #10
def finish_upgrade_event(data, node, now, use_gold = 0):
    """进行升级关键事件
    1 启动升级事件
    2 关键节点升一级
    3 结束随机事件,获得功勋值
    """
    #节点上必须有合法的升级随机事件
    if node.event_type != NodeInfo.EVENT_TYPE_UPGRADE:
        logger.warning("Wrong event[type=%d]" % node.event_type)
        return False
    if not node.launch_event(now):
        return False

    if not map_business.upgrade_key_node(data, node, now, use_gold):
        return False

    #gain achievement points, scaled by the key node's level
    resource = data.resource.get()
    resource.update_current_resource(now)
    ac_base = data_loader.LuckyEventBasicInfo_dict[node.event_type].achievementBase
    ac_coe = data_loader.LuckyEventBasicInfo_dict[node.event_type].achievementCoefficient
    achievement = ac_base + ac_coe * (node.level - 1)
    resource.gain_achievement(achievement)

    return node.finish_event(now)
Example #11
    def use_vip_point_item(self, num):
        """使用vip物品
        Args:
            num[int] 使用的数量
        Returns:
            获得vip点数 
            计算失败返回 None
        """
        #check the item type before consuming, so a wrong item is not spent
        if not self.is_vip_point_item():
            logger.warning("Not vip point item[basic id=%d]" % self.basic_id)
            return None

        consume = self.consume(num)
        if not consume[0]:
            return None
        output_items = []
        output_items.append("[item=")
        output_items.append(utils.join_to_string(list(consume[1])))
        output_items.append("]")
        item = ''.join(output_items)
        log = log_formater.output_item(self, "use vip item",
                                       log_formater.VIP_ITEM, item)
        logger.notice(log)

        vip_point = data_loader.ItemBasicInfo_dict[self.basic_id].value * num
        return vip_point
Example #12
def trigger_custom_event(data, now, node_basic_id, event_type, change_nodes):
    """触发特定的事件
    可以指定发生事件的节点,事件的类型
    """
    logger.debug("Trigger custom event")
    map = data.map.get()
    node_id = NodeInfo.generate_id(data.id, node_basic_id)
    node = data.node_list.get(node_id)

    candidate = _calc_candidate_event(data, node, now)
    candidate.extend(_calc_candidate_event(data, node, now, False))
    if len(candidate) == 0:
        logger.warning("Node not prepared for event[node basic id=%d]" % node.basic_id)
        return False

    if event_type not in candidate:
        logger.warning("Node not matched event"
                "[node basic id=%d][event type=%d][available=%s]" %
                (node.basic_id, event_type, utils.join_to_string(candidate)))
        return False

    if not _node_arise_event(data, node_id, event_type, now,
            change_nodes, [], []):
        return False

    change_nodes.append(node)
    logger.debug("node trigger event[basic id=%d][event type=%d]" %
            (node.basic_id, node.event_type))

    map.update_next_luck_time(now)
    return True
Example #13
    def use_food_item(self, num):
        """使用粮包物品
        Args:
            item[ItemInfo out] 粮包物品
            num[int] 使用的数量
        Returns:
            获得粮草数量
            计算失败返回 None
        """
        #check the item type before consuming, so a wrong item is not spent
        if not self.is_food_item():
            logger.warning("Not food item[basic id=%d]" % self.basic_id)
            return None

        consume = self.consume(num)
        if not consume[0]:
            return None
        output_items = []
        output_items.append("[item=")
        output_items.append(utils.join_to_string(list(consume[1])))
        output_items.append("]")
        item = ''.join(output_items)
        log = log_formater.output_item(self, "use food item",
                                       log_formater.FOOD_ITEM, item)
        logger.notice(log)

        food = data_loader.ItemBasicInfo_dict[self.basic_id].value * num
        return food
Example #14
    def use_package_item(self, num):
        """使用vip物品
        Args:
            num[int] 使用的数量
        Returns:
            PackageBasicInfo 
            计算失败返回 None
        """
        #check the item type before consuming, so a wrong item is not spent
        if not self.is_package_item():
            logger.warning("Not package item[basic id=%d]" % self.basic_id)
            return None

        consume = self.consume(num)
        if not consume[0]:
            return None
        output_items = []
        output_items.append("[item=")
        output_items.append(utils.join_to_string(list(consume[1])))
        output_items.append("]")
        item = ''.join(output_items)
        log = log_formater.output_item(self, "use package item",
                                       log_formater.PACKAGE_ITEM, item)
        logger.notice(log)

        package_id = data_loader.ItemBasicInfo_dict[self.basic_id].value
        return data_loader.PackageBasicInfo_dict[package_id]
Example #15
    def run(self):
        """
        Execute requests and write to db for every page
        """
        self._prepare_tickers()

        while self.cur_page <= self.tot_pgs_api or self.tot_pgs_api == -1:
            query_dict = self._prepare_query_dict()
            self.stock_request.params = query_dict
            pg_req: Response = self.req_session.send(
                self.stock_request.prepare())
            try:
                pg_req.raise_for_status()
            except HTTPError as e:
                logger.error('Error retrieving news from StockNewsAPI: %s',
                             str(e))
                return
            resp_body = pg_req.json()
            if 'data' not in resp_body or len(resp_body['data']) == 0:
                # additional error check
                logger.warning('No articles found for specified tickers')
                break
            if 'total_pages' in resp_body and self.tot_pgs_api == -1:
                self.tot_pgs_api = resp_body['total_pages'] if resp_body['total_pages'] < self.max_req \
                    else self.max_req

            self.articles_enhancer.set_articles(resp_body['data'])
            self.articles_enhancer.enhance()
            paged_articles: List[Article] = self.articles_enhancer.articles
            self.upsert_msk(paged_articles)
            self.count += len(paged_articles)
            self.cur_page += 1
Example #16
    def judge(cls,
              language_config,
              src,
              max_cpu_time,
              max_memory,
              test_case_id,
              spj_version=None,
              spj_config=None,
              spj_compile_config=None,
              spj_src=None,
              output=False):
        # init
        compile_config = language_config.get("compile")
        run_config = language_config["run"]
        submission_id = uuid.uuid4().hex

        if spj_version and spj_config:
            spj_exe_path = os.path.join(
                SPJ_EXE_DIR,
                spj_config["exe_name"].format(spj_version=spj_version))
            # spj src has not been compiled
            if not os.path.isfile(spj_exe_path):
                logger.warning(
                    "%s does not exist, spj src will be recompiled",
                    spj_exe_path)
                cls.compile_spj(spj_version=spj_version,
                                src=spj_src,
                                spj_compile_config=spj_compile_config)

        with InitSubmissionEnv(
                JUDGER_WORKSPACE_BASE,
                submission_id=str(submission_id)) as submission_dir:
            if compile_config:
                src_path = os.path.join(submission_dir,
                                        compile_config["src_name"])

                # write source code into file
                with open(src_path, "w") as f:  # encoding="utf-8"
                    f.write(src)

                # compile source code, return exe file path
                exe_path = Compiler().compile(compile_config=compile_config,
                                              src_path=src_path,
                                              output_dir=submission_dir)
            else:
                exe_path = os.path.join(submission_dir, run_config["exe_name"])
                with open(exe_path, "w") as f:  #encoding="utf-8"
                    f.write(src)
            print("It's running client")
            judge_client = JudgeClient(run_config=language_config["run"],
                                       exe_path=exe_path,
                                       max_cpu_time=max_cpu_time,
                                       max_memory=max_memory,
                                       test_case_id=str(test_case_id),
                                       submission_dir=submission_dir,
                                       spj_version=spj_version,
                                       spj_config=spj_config,
                                       output=output)
            run_result = judge_client.run()

            return run_result
Example #17
  def create(self, template, server_level):
    # If the template specifies the algorithm, that's the one
    # we will use. If not there might be a default algorithm
    # specified in the configuration. If that is not available
    # we default to the "None" algorithm.
    if template.HasField('algorithm'):
      algo = template.algorithm
    else:
      algo = global_config.get_default_algorithm()

    if algo.name == 'Static':
      from algo_static import StaticAlgorithm
      a = StaticAlgorithm(algo, server_level)
    elif algo.name == 'None':
      from algo_none import NoneAlgorithm
      a = NoneAlgorithm(algo, server_level)
    elif algo.name == 'ProportionalShare':
      from algo_proportional import ProportionalShareAlgorithm
      a = ProportionalShareAlgorithm(algo, server_level)
    else:
      logger.warning('Unknown algorithm: %s' % algo.name)
      from algo_none import NoneAlgorithm
      a = NoneAlgorithm(algo, server_level)

    a.server_level = server_level

    return a
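The elif chain above can also be written as a table-driven lookup; a minimal sketch, where the module and class names are taken from the branches above and the helper itself is an assumption:
_ALGORITHMS = {
    'Static': ('algo_static', 'StaticAlgorithm'),
    'None': ('algo_none', 'NoneAlgorithm'),
    'ProportionalShare': ('algo_proportional', 'ProportionalShareAlgorithm'),
}

def load_algorithm(name):
  # Fall back to the None algorithm for unknown names, as the factory does.
  module_name, class_name = _ALGORITHMS.get(name, _ALGORITHMS['None'])
  return getattr(__import__(module_name), class_name)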
Example #18
    def _replacement(self):
        """进行换入换出
        在缓存储量超过警戒线,未达上限时执行
        """
        now = int(time.time())
        to_eliminate = []
        for id in self.db:
            if now - self.db[id].active_time >= self._BUFFER_EXPIRE_TIME:
                #evict inactive entries
                to_eliminate.append(id)

            #evict at most a fixed number per pass, to bound the time spent
            if len(to_eliminate) >= self._BUFFER_ELIMINATE_NUM:
                break

        for id in to_eliminate:
            del self.db[id]

        count = len(to_eliminate)
        if count == 0:
            logger.warning("Database replacement failed, no inactive item[active num=%d]" %
                    len(self.db))
        else:
            logger.trace("Database replacement succeed[active num=%d][eliminate num=%d]" %
                    (len(self.db), count))
Example #19
    def add_buff(self, buff_id, now):
        """添加 buff
        """
        buffs = utils.split_to_int(self.buffs)
        buffs_start_time = utils.split_to_int(self.buffs_start_time)

        #clear buffs that have already expired
        for index in range(0, len(buffs)):
            id = buffs[index]
            if id == 0:
                continue
            end_time = (buffs_start_time[index] +
                        data_loader.LegendCityBuffBasicInfo_dict[id].duration)
            if now >= end_time:
                buffs[index] = 0

        ok = False
        for index in range(0, len(buffs)):
            if buffs[index] == 0:
                buffs[index] = buff_id
                buffs_start_time[index] = now
                ok = True
                break

        if not ok:
            logger.warning("Not able to add legendcity buff")
            return False

        self.buffs = utils.join_to_string(buffs)
        self.buffs_start_time = utils.join_to_string(buffs_start_time)
        return True
Example #20
def update_all_data_graph(_, buttonsTimes, relayoutData, binningValue,
                          clickData, refPointValue, startFitValue,
                          endFitValue):
    global shadow_shape
    dff = get_activ(sf_trigger)
    layout = dict(shapes=[])

    buttonsTimes, lastClickedButton = load_button_times(buttonsTimes)

    zoomPointButtonTimes = buttonsTimes['zoom-point-button']
    if zoomStartPoint is not None:
        if zoomPointButtonTimes[0] > zoomPointButtonTimes[1]:
            endPoint = get_from_clicked(
                dff, buttonsTimes[lastClickedButton][0],
                sf_trigger).time.values[:scattergl_limit][-1]
            startPoint = zoomStartPoint[0]

            shadow_shape = create_shadow_shape(startPoint, endPoint)
            layout['shapes'].append(shadow_shape)

        elif zoomPointButtonTimes[0] == zoomPointButtonTimes[1]:
            layout['shapes'].append(shadow_shape)

    if startFitValue is not None:
        layout['shapes'].append(create_line(startFitValue, 'vertical'))
    if endFitValue is not None:
        layout['shapes'].append(create_line(endFitValue, 'vertical'))
    if (refPointValue is not None) and (refPointValue != ''):
        try:
            set_point = float(refPointValue)
        except ValueError:
            logger.warning('Wrong ref point format')
        else:
            layout['shapes'].append(create_line(set_point, 'horizontal'))

    dff = get_binned_xy(dff, binningValue, sf_trigger)

    relayout_xrange = []
    relayout_yrange = []
    if relayoutData:
        if 'xaxis.range' in relayoutData:
            relayout_xrange = relayoutData['xaxis.range']

        if 'yaxis.range' in relayoutData:
            relayout_yrange = relayoutData['yaxis.range']

    layout['xaxis'] = dict(range=relayout_xrange,
                           title='Time [JD - {}]'.format(start_date_int))
    layout['yaxis'] = dict(range=relayout_yrange, title='Counts')

    layout['showlegend'] = False

    fig = get_full_graph(dff.time, dff.counts, sf_trigger)
    fig['layout'] = layout

    if fit_func is not None:
        functionPlot = create_function_plot(df, fit_func)
        for plot in functionPlot:
            fig['data'].append(plot)

    return fig
Example #21
def _finish_related_event_failed(data, node, now, change_nodes, new_items, new_mails):
    """失败结束相关随机事件
    """
    logger.debug("finish related event after battle lose[event type=%d]" % node.event_type)
    if node.event_type == NodeInfo.EVENT_TYPE_DEFEND:
        #防御事件
        return defend_business.finish_defend_event_fail(
                data, node, now, change_nodes, new_items, new_mails)

    elif node.event_type == NodeInfo.EVENT_TYPE_JUNGLE:
        #jungle monster event
        return jungle_business.finish_jungle_event_fail(data, node, now, change_nodes)

    elif node.event_type == NodeInfo.EVENT_TYPE_SPY:
        #spy event
        return True #do nothing

    elif node.event_type == NodeInfo.EVENT_TYPE_DUNGEON:
        #dungeon event
        return True #do nothing

    elif node.event_type == NodeInfo.EVENT_TYPE_ARENA:
        #arena event
        return True #do nothing

    elif node.event_type == NodeInfo.EVENT_TYPE_WORLDBOSS:
        #world boss event
        return worldboss_business.finish_worldboss_event(data, node, now, change_nodes)

    else:
        logger.warning("Wrong event[type=%d]" % node.event_type)
        return False
Example #22
    def add_word(root, word):
        """添加word和pinyin"""
        node = root  # node就是一个Node()实例对象
        if len(word) < 2:
            return
        # flag = len(word) >= 3  # words of 3+ characters are keyed by their initials, e.g. 惠享存 --> h x c
        # flag = True
        pinyin_of_word = ""
        pinyin_list = dfa.get_pinyin_list(word)  # processed pinyin list
        if len(pinyin_list) != len(word):  # if the pinyin count differs from the character count, give up
            logger.warning("This method does not support words mixing Chinese and English: {}".format(word))
            return

        for i in range(len(word)):
            # temp = dfa.get_pinyin(word[i])[0] if flag else dfa.get_pinyin(word[i])
            temp = pinyin_list[i]
            pinyin_of_word += temp
            temp = temp[0]
            if node.children is None:  # no children yet
                node.children = {temp: Node()}  # key: initial letter, value: a Node() instance
            elif temp not in node.children:  # the initial is not present yet
                node.children[temp] = Node()
            node = node.children[temp]  # Node()
        if node.word:
            node.word.append(word)  # list of words ending at this node
            node.pinyin.append(pinyin_of_word)  # their pinyin, index-aligned with word
        else:
            node.word = [word]  # node.word is None, so build the list
            node.pinyin = [pinyin_of_word]
        node.isEnd = True  # True means word and pinyin are non-empty
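add_word relies on a Node with four attributes; a hypothetical definition consistent with how the method uses them:
class Node(object):
    def __init__(self):
        self.children = None  # dict mapping a pinyin initial to a child Node
        self.word = None      # list of words ending at this node
        self.pinyin = None    # full pinyin strings, index-aligned with word
        self.isEnd = False    # True once word and pinyin are populated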
Example #23
def _start_related_event(data, node, now):
    """启动相关的随机事件
    """
    logger.debug("start related event before battle[event type=%d]" % node.event_type)
    if node.event_type == NodeInfo.EVENT_TYPE_DEFEND:
        #defend event
        return defend_business.start_defend_event(data, node, now)

    elif node.event_type == NodeInfo.EVENT_TYPE_JUNGLE:
        #jungle monster event
        return jungle_business.start_jungle_event(data, node, now)

    elif node.event_type == NodeInfo.EVENT_TYPE_SPY:
        #spy event
        return spy_business.start_spy_event(data, node, now)

    elif node.event_type == NodeInfo.EVENT_TYPE_DUNGEON:
        #dungeon event
        return dungeon_business.start_dungeon_event(data, node, now)

    elif node.event_type == NodeInfo.EVENT_TYPE_ARENA:
        #arena event
        return arena_business.start_arena_event(data, node, now)

    elif node.event_type == NodeInfo.EVENT_TYPE_WORLDBOSS:
        #world boss event
        return worldboss_business.start_worldboss_event(data, node, now)

    else:
        logger.warning("Wrong event[type=%d]" % node.event_type)
        return False
Example #24
def test_loss(alpha, dist, debug=False):
    logger.warning(f"dist {dist}, alpha {alpha}")
    n_cls = 3
    input = torch.as_tensor([n_cls - 1])
    input = F.one_hot(input, num_classes=n_cls).float()
    # input = input.expand(2, 1, n_cls)
    logger.debug(input)
    ranks = [1, 2, 3]
    sord = SORDLoss(n_classes=n_cls,
                    ranks=ranks,
                    masking=True,
                    dist=dist,
                    alpha=alpha)
    kl = KLLoss(n_classes=n_cls, masking=True)

    # target = torch.tensor([0])
    # target = target.expand(2, 1, 1)
    input = torch.tensor(
        [[[[0.0]], [[1.0]], [[0.0]]], [[[0.0]], [[1.0]], [[0.0]]],
         [[[0.0]], [[1.0]], [[0.0]]]],
        requires_grad=True)
    target = torch.tensor([[[0]], [[1]], [[2]]])
    # logger.debug(target)
    # logger.debug("KL", kl(input, target, debug=True)/n_cls)
    soft, loss = sord(input, target, debug=debug, reduce=False)
    loss = loss / n_cls

    soft = soft.detach().cpu().numpy()
    entropy = scipy.stats.entropy(soft, axis=-1)
    logger.info(f"soft target {soft}")
    logger.info(f"entropy {entropy}")
Example #25
def finish_worldboss_event(data, node, now, change_nodes):
    """结束世界boss的战斗
    """
    #the node must carry a valid world boss random event
    if node.event_type != NodeInfo.EVENT_TYPE_WORLDBOSS:
        logger.warning("Wrong event[type=%d]" % node.event_type)
        return False

    worldboss = data.worldboss.get()
    if worldboss.is_killed():
        #if not node.launch_event(now, overtime = True):
        #    return False
        #if not node.finish_event(now, overtime = True):
        #    return False
        node.clear_event()

        #the node becomes inactive
        map = data.map.get()
        node.reset_dependency(map)

        change_nodes.append(node)

    return True
Example #26
def main(config):
    rec_predictor = RecPredictor(config)
    image_list = get_image_list(config["Global"]["infer_imgs"])

    batch_imgs = []
    batch_names = []
    cnt = 0
    for idx, img_path in enumerate(image_list):
        img = cv2.imread(img_path)
        if img is None:
            logger.warning(
                "Failed to read image file, skipping it. Path: {}".format(
                    img_path))
        else:
            img = img[:, :, ::-1]
            batch_imgs.append(img)
            img_name = os.path.basename(img_path)
            batch_names.append(img_name)
            cnt += 1

        if cnt % config["Global"]["batch_size"] == 0 or (idx + 1
                                                         ) == len(image_list):
            if len(batch_imgs) == 0:
                continue

            batch_results = rec_predictor.predict(batch_imgs)
            for number, result_dict in enumerate(batch_results):
                filename = batch_names[number]
                print("{}:\t{}".format(filename, result_dict))
            batch_imgs = []
            batch_names = []
    if rec_predictor.benchmark:
        rec_predictor.auto_logger.report()

    return
Example #27
File: node.py Project: hw233/test-2
    def _clear_exploit_hero(self, heroes):
        """清除参与开发的英雄
        """
        assert len(heroes) == EXPLOIT_HERO_COUNT

        heroes_id = utils.split_to_int(self.exploit_team)

        for hero in heroes:
            if hero is None:
                continue

            if hero.id not in heroes_id:
                logger.warning("Hero not match exploitation"
                               "[hero id=%d][node exploit heroes=%s]" %
                               (hero.id, node.exploit_team))
                return False

            index = heroes_id.index(hero.id)
            heroes_id[index] = NONE_HERO

        self.exploit_team = utils.join_to_string(heroes_id)

        if self.exploit_team != EMPTY_HEROES:
            logger.warning("Exploit heroes not clear")
            return False

        return True
Example #28
    def _save(self, state, name):
        if self.save_path is None:
            logger.warning('No path to save checkpoints.')
            return

        os.makedirs(self.save_path, exist_ok=True)
        torch.save(state, os.path.join(self.save_path, name))
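A typical call site for this helper; a minimal sketch, assuming a trainer with epoch, model, and optimizer in scope (the checkpoint keys are assumptions):
        state = {'epoch': epoch,
                 'model': model.state_dict(),
                 'optimizer': optimizer.state_dict()}
        self._save(state, 'checkpoint_{:03d}.pt'.format(epoch))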
Example #29
File: node.py Project: hw233/test-2
    def arise_event(self, event_type, event_arise_time):
        """节点上出现了随机事件
        """
        assert not self.is_event_arised()
        logger.debug("arise event[type=%d]" % event_type)

        if event_type == self.EVENT_TYPE_INVALID:
            logger.warning("Invalid event type[type=%d]" % event_type)
            return False
        elif event_type == self.EVENT_TYPE_SCOUT:
            self.status = self.NODE_STATUS_ENEMY
        else:
            if (event_type == self.EVENT_TYPE_ARENA
                    or event_type == self.EVENT_TYPE_DUNGEON
                    or event_type == self.EVENT_TYPE_WORLDBOSS):
                self.status = self.NODE_STATUS_ENEMY
            elif event_type == self.EVENT_TYPE_SEARCH:
                self.status = self.NODE_STATUS_ENEMY
                self.exploit_type = self.EXPLOITATION_TYPE_RANDOM_ITEM
            elif event_type == self.EVENT_TYPE_DEEP_MINING:
                self.status = self.NODE_STATUS_ENEMY
                self.exploit_type = self.EXPLOITATION_TYPE_ENCHANT_MATERIAL
            elif event_type == self.EVENT_TYPE_HERMIT:
                self.status = self.NODE_STATUS_ENEMY
                self.exploit_type = self.EXPLOITATION_TYPE_HERO_STAR_SOUL

            self.event_type = event_type
            self.event_arise_time = event_arise_time
            self.event_launch_time = 0

        return True
Example #30
    def use_energy_item(self, num):
        """使用政令符物品
        Args:
            num[int] 使用的数量
        Returns:
            获得政令值
            计算失败返回 None
        """
        #check the item type before consuming, so a wrong item is not spent
        if not self.is_energy_item():
            logger.warning("Not energy item[basic id=%d]" % self.basic_id)
            return None

        consume = self.consume(num)
        if not consume[0]:
            return None
        output_items = []
        output_items.append("[item=")
        output_items.append(utils.join_to_string(list(consume[1])))
        output_items.append("]")
        item = ''.join(output_items)
        log = log_formater.output_item(self, "use energy item",
                                       log_formater.ENERGY_ITEM, item)
        logger.notice(log)

        energy = data_loader.ItemBasicInfo_dict[self.basic_id].value * num
        return energy
Example #31
    def login(self):
        try:
            self.browser.open(CF.URL_LOGIN)
            enter_form = self.browser.get_form('enterForm')
        except Exception as e:
            logger.exception(e)
            logger.error("Open url failed.")
            return False

        enter_form['handle'] = self.user_id
        enter_form['password'] = self.password

        try:
            self.browser.submit_form(enter_form)
        except Exception as e:
            logger.exception(e)
            logger.error("Submit login form failed.")
            return False

        try:
            checks = list(
                map(lambda x: x.getText()[1:].strip(),
                    self.browser.select('div.caption.titled')))
            if self.user_id not in checks:
                logger.warning("Login failed, probably incorrect password.")
                return False
        except Exception as e:
            logger.exception(e)
            logger.error("Login status check failed.")
            return False

        return True
Example #32
    def use_evolution_item(self, consume_num):
        """使用突破石物品
        1 判断物品是不是突破石
        2 消耗掉将魂石,计算将魂石可以获得的将魂数量
        Args:
            consume_num[int] 消耗的数量
        Returns:
            计算失败返回 False
        """
        if not self.is_evolution_item():
            logger.warning("Not evolution item[basic id=%d]" % self.basic_id)
            return False

        consume = self.consume(consume_num)
        if not consume[0]:
            return False
        output_items = []
        output_items.append("[item=")
        output_items.append(utils.join_to_string(list(consume[1])))
        output_items.append("]")
        item = ''.join(output_items)
        log = log_formater.output_item(self, "use evolution item",
                                       log_formater.EVOLUTION_ITEM, item)
        logger.notice(log)
        return True
Example #33
File: base.py Project: lls3018/mdmi
 def __del__(self):
     try:
         if self.rest:
             del self.rest
     except Exception as e:
         logger.warning("error when del obj")
         raise Exception(e)
Example #34
    def use_starsoul_item(self, consume_num):
        """使用将魂石物品
        1 判断物品是不是将魂石
        2 判断将魂石是不是和英雄对应,如果传入的 hero 为None,不进行此判断
        3 消耗掉将魂石,计算将魂石可以获得的将魂数量
        Args:
            consume_num[int] 消耗的数量
        Returns:
            使用将魂石之后可以获得的英雄的 basic id 和将魂数量,返回元组(basic_id, num)
            计算失败返回 None
        """
        if not self.is_starsoul_item():
            logger.warning("Not starsoul item[basic id=%d]" % self.basic_id)
            return None
        consume = self.consume(consume_num)
        if not consume[0]:
            return None
        output_items = []
        output_items.append("[item=")
        output_items.append(utils.join_to_string(list(consume[1])))
        output_items.append("]")
        item = ''.join(output_items)
        log = log_formater.output_item(self, "use starsoul item",
                                       log_formater.STARSOUL_ITEM, item)
        logger.notice(log)

        corresponding_hero_basic_id = data_loader.ItemBasicInfo_dict[
            self.basic_id].value
        return (corresponding_hero_basic_id, consume_num)
Example #35
    def use_exp_item(self, consume_num):
        """使用英雄经验丹物品
        1 拥有的物品数量必须大于消耗的数量,消耗的数量必须大于0
        2 物品必须是经验丹
        3 消耗掉经验丹,计算可以获得的经验值
        Args:
            consume_num[int] 消耗的数量
        Returns:
            使用经验丹之后获得的经验值
            计算失败返回 None
        """
        if not self.is_hero_exp():
            logger.warning("Not hero exp item[basic id=%d]" % self.basic_id)
            return None
        consume = self.consume(consume_num)
        if not consume[0]:
            return None
        output_items = []
        output_items.append("[item=")
        output_items.append(utils.join_to_string(list(consume[1])))
        output_items.append("]")
        item = ''.join(output_items)
        log = log_formater.output_item(self, "use exp item",
                                       log_formater.EXP_ITEM, item)
        logger.notice(log)

        exp = data_loader.ItemBasicInfo_dict[self.basic_id].value * consume_num
        return exp, consume[1]
Example #36
    def default_handler_to_transfer_request_to_agent(self) -> flask.Response:
        """
        If a request should run on one instance only,
        we can transfer it to one agent directly
        :return: flask.Response
        """
        # Merge data from firestore `features` collection with data from task
        data_form = MultiDict()
        data_form.update(flask.request.form)
        if self.enable_feature_toggle:
            feature_dict = self.db.get_agent_feature()
            data_form.update(feature_dict)
        response = self.send_request_to_agent(flask.request.method,
                                              flask.request.path,
                                              flask.request.args, data_form)

        if response:
            response_json = {}
            try:
                response_json = response.json()
            except Exception as e:
                logger.warning(
                    f'{self.name}: it seems there is no content in response {response}. Message: {e}'
                )
            return flask.make_response(flask.jsonify(response_json),
                                       response.status_code)
        else:
            return flask.make_response(
                flask.jsonify(
                    {MESSAGE_KEY: 'Failed to get report from agents'}),
                HTTPStatus.INTERNAL_SERVER_ERROR)
Example #37
    def _update_query_info(self, union_response, data, req, timer):
        union_res = internal_union_pb2.InternalQueryUnionRes()
        union_res.ParseFromString(union_response)

        if union_res.status != 0:
            raise Exception("Query union res errro")

        res = union_pb2.QueryUnionRes()
        res.status = 0
        res.ret = union_res.ret

        if union_res.ret != union_pb2.UNION_OK:
            # res.ret was already set above
            logger.warning("User query union unexpected")

            if union_res.ret == union_pb2.UNION_NOT_MATCHED:
                union = data.union.get()
                if not union.leave_union(union.union_id, timer.now, False):
                    raise Exception("Leave union failed")

            defer = DataBase().commit(data)
            defer.addCallback(self._query_succeed, req, res, timer)
            return defer

        else:
            return self._patch_query_info(union_res, data, req, timer)
Example #38
 def modify(self, profile_id, data=None, method_name='install', http_method='POST'):
     resource = "/API/v1/mdm/profiles/{profileid}/{method}".format(profileid=profile_id, method=method_name)
     try:
         res = self.rest.do_access(resource, http_method, self.parse_param(data), headers=self.aw_headers)
     except Exception as e:  # covers MDMiHttpError too; both handlers were identical
         logger.warning("%s vpn profile exception: %s" % (method_name, e))
         return False
     
     return True
Example #39
 def service_remain_keys(self):
     logger.info('Client query remain keys!')
     sock = self.do_connect(defs.KEYPL_DEFAULT_TIMOUT)
     if not sock:
         logger.warning('Connect to key pool service failed!')
         return False
     try:
         data = self._send_cmd(sock, defs.KEYPL_CMDS['remain_keys'])
         return data.get('response')
     except Exception as e:
         logger.error('Client query remain keys exception: %s' % e)
         raise Exception(e)
Example #40
 def get_all(self):
     size = self.__queue.qsize()
     task_data_list = []
     
     for i in range(size):
         try:
             data = self.__queue.get_nowait()
             task_data_list.append(data)
         except Exception as e:
             logger.warning('error occurred in getting No.%d data from queue, queue size: %d, error: %s', i, size, e)
             break
     
     return task_data_list
Example #41
  def add_absolute(self, time, target, arg=None):
    if time < clock.get_time():
      logger.warning('Scheduling action in the past!')

    if isinstance(target, types.LambdaType) and arg is not None:
      logger.warning('Non-None argument ignored for lambda callback')

    # Adds the schedulable item (a target and argument tuple) to the
    # schedule.
    item = (target, arg)
    self.schedule.setdefault(time, list())
    self.schedule[time].append(item)

    return time
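A relative-time convenience wrapper is a natural companion; a minimal sketch on top of add_absolute, assuming the same clock module (the helper name is hypothetical):
  def add_relative(self, delay, target, arg=None):
    # Schedule `delay` time units from now.
    return self.add_absolute(clock.get_time() + delay, target, arg)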
Example #42
    def parse_info_updates(self, updates_data):
        """
        Parses the data provided from the command:
        "yum info updates -v"

        Returns:
            A list of dictionaries for each package.
        """
        #sys_arch = systeminfo.system_architecture()

        clean_pkg_data = self._clean_up_info_updates(updates_data)

        package_dicts = {}
        for pkg_data in clean_pkg_data:
            split_data = self._split_package_data(pkg_data)

            pkg_dict = self._dict_from_split_pkg_data(split_data)
            pkg_name = pkg_dict.get(PkgKeys.name, '')

            if not pkg_name:
                logger.warning(
                    "No name for dict: {0}".format(pkg_dict)
                )
                continue

            # Avoid duplicate dicts
            if pkg_name in package_dicts:
                continue

            pkg_dict[PkgKeys.lookup_name] = \
                self._get_severity_lookup_name(pkg_dict)

            full_version = pkg_dict[PkgKeys.version]
            if pkg_dict.get(PkgKeys.release, ''):
                full_version = '{0}-{1}'.format(
                    full_version, pkg_dict[PkgKeys.release]
                )

            pkg_dict[PkgKeys.full_version] = full_version

            package_dicts[pkg_name] = pkg_dict

            # Can't discriminate by arch because x86_64 arch machines
            # can have i386 packages installed, which need updates.
            # pkg_arch = pkg_dict.get(PkgKeys.arch, 'noarch')

            # if pkg_arch == 'noarch' or pkg_arch == sys_arch:
            #     package_dicts.append(pkg_dict)

        return package_dicts.values()
Example #43
 def _get_devinfo_from_aw(self, src_queue, dest_queue):
     aw_dev = AirwatchDevice(self.aw_account)
     list_udids = []
     while src_queue.qsize() > 0:
         devitems = src_queue.get(True)
         for dev in devitems:
             try:
                 udid = dev['attributes'].get('UDID')[0]
                 if udid and (udid != 'NA'):  # try get udid
                     list_udids.append(udid.lower()) #keep all string in lower
                 else:
                     logger.warning('Unknown device: %s' % repr(dev))
             except Exception as e:
                 logger.error('Unknown device, error info: %s' % repr(e))
Example #44
    def service_is_alive(self):
        logger.info('Client check service available!')
        sock = self.do_connect(defs.KEYPL_DEFAULT_TIMOUT)
        if not sock:
            logger.warning('Connect to key pool service failed!')
            return False

        try:
            data = self._send_cmd(sock, defs.KEYPL_CMDS['alive'])
            result = data.get('response')
            if result:
                return True
        except Exception as e:
            logger.error('Client check service status exception: %s' % e)
            return False
Example #45
    def _process_message(self, message):
        logger.debug('process message: {}'.format(message))

        if 'text' not in message:
            return
        for pattern, handler in self._commands:
            match = re.search(pattern, message['text'], re.UNICODE)
            if match:
                return handler(
                    Message(self, **message),
                    **match.groupdict()
                )
        logger.warning(
            'message [{}] does not match any command pattern'.format(message)
        )
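The dispatch above expects self._commands to hold (pattern, handler) pairs. A hypothetical registration, assuming handler methods on the same bot class:
        self._commands = [
            (r'^/start\b', self.on_start),
            (r'^/help\b', self.on_help),
        ]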
Example #46
    def stop_vm(self, vmname, timeout1=10, timeout2=2):

        logger.info("Stop vm/network {0}".format(vmname))

        vms = [vm for vm in self.vms.values()
                if vm.name == vmname or
                   vm.name.startswith(vmname + self.DOM_SEPARATOR)]

        vm_names = " ".join(vm.name for vm in vms)
        logger.debug("Found next vm's, which match name glob {0}".format(vm_names))

        for xvm in vms:
            conn = self.get_vm_conn(xvm.name)
            logger.debug("Stop vm {0}".format(xvm.name))
            
            try:
                vm = conn.lookupByName(xvm.name)
            except libvirt.libvirtError:
                logger.debug("vm {0} don't exists - skip it".format(xvm.name))
            else:
                logger.debug("Shutdown vm {0}".format(xvm.name))
                
                try:
                    vm.shutdown()
                except libvirt.libvirtError:
                    pass
                else:
                    for i in range(timeout1):
                        time.sleep(1)
                        try:
                            vm = conn.lookupByName(xvm.name)
                        except libvirt.libvirtError:
                            return

                logger.warning("VM {0} don't shoutdowned - destroy it".format(xvm.name))
                vm.destroy()
                for i in range(timeout2):
                    time.sleep(1)
                    try:
                        vm = conn.lookupByName(xvm.name)
                    except libvirt.libvirtError:
                        return

                logger.error("Can't stop vm {0}".format(xvm.name))
                raise CloudError("Can't stop vm {0}".format(xvm.name))
            conn.close()
Example #47
 def api_call(self, action, **data):
     url = self._api_url(action)
     resp = yield from aiohttp.request('POST', url, data=data)
     if resp.status == 200:
         return (yield from resp.json())
     elif resp.status == 502:  # telegram nginx bad gateway
         logger.warning(
             'telegram returns a 502 error from action {}'.format(action)
         )
         yield from asyncio.sleep(10)
     else:
         api_return = yield from resp.text()
         logger.critical(
             'could not process the return of telegram api: {}'.format(
                 api_return
             )
         )
Example #48
  def _discover(self):
    assert self.server_level > 0

    request = DiscoveryRequest()
    request.client_id = self.server_id

    # Sends the request to a random task in the server job.
    response = self.downstream_job.get_random_task().Discovery_RPC(request)

    # If the response has a master_bns field we store the reference
    # to the master. If not, there is no known master yet.
    if response.HasField('master_bns'):
      self.master = self.downstream_job.get_task_by_name(response.master_bns)
    else:
      self.master = None
      logger.warning('%s doesn\'t know who the master is.' % self.server_id)
      Counter.get('server.discovery_failure').inc()

    return self.master
Example #49
 def run(self):
     self._run = True
     while self._run:
         if self._queue.size() == 0:
             # If the wait queue is empty and the retry count is below `_retry`,
             # execute tasks from the FailQueue; otherwise push them to the DeadQueue.
             if self._retry > 0 and self._fail_queue.size() != 0:
                 task_id, task_raw, task, exc_info = self._fail_queue.dequeue()
                 retry_count = self._retry_count.get(task_id, 0)
                 # Retry count exceeded the limit.
                 if retry_count >= self._retry:
                     self._dead_queue.enqueue(task_id, exc_info)
                     logger.warning("Task #{0} is dead!".format(task_id))
                     continue
                 self._retry_count[task_id] = retry_count + 1
                 logger.info("Retry task: {0}".format(task.callable))
             # All queues are empty or nothing needs retrying;
             # report results.
             else:
                 if self.no_task_in_ten_seconds() and not self._empty:
                     self.report()
                     self._empty = True
                 time.sleep(1)
                 continue
         # Execute normal task.
         else:
             self._empty = False
             task_id, task_raw, task = self._queue.dequeue()
             logger.info("Try task: {0}".format(task.callable))
         try:
             result = task.execute()
         except Exception as e:
             err_msg = '{0}: {1}'.format(type(e).__name__, e.args)
             retry_count = self._retry_count.get(task_id, 0)
             if self._retry > 0 and retry_count < self._retry:
                 self._fail_queue.enqueue(task_id, task_raw, err_msg)
             else:
                 self._dead_queue.enqueue(task_id, err_msg)
             logger.error("Execute task: #{0} got {1}".format(task_id, err_msg))
         else:
             self._success_queue.enqueue(task_id, result)
             logger.info("Task #{0} done! Got: {1}".format(task_id, result))
         self.reset_time()
Example #50
    def __init__(self, mothership, args):
        """
        Carefully initialize the connection to the Mothership and register all
        methods defined by the subclass.
        """
        self.initialize(args)

        self.shuffle_keys = set()  # Contains the local set of shuffle keys
        self.stopped = False

        self.Port = randint(40000, 65535)  # valid TCP ports top out at 65535
        self.mothership = mothership
    
        try:
            self.hostname = socket.gethostname()

            # Ensure that we can set up a server or we die trying
            down = True
            while down:
                try:
                    logger.info( '%s:%d notifying %s of startup' % (socket.gethostname(), self.Port, mothership) )
                    sendto(socket.gethostname()+' '+str(self.Port)+' UP', (mothership, ClientRegistry.Port))
                    rpcserver = xmlrpcserver.XmlRpcHTTPServer((socket.gethostname(),self.Port))
                    down = False
                except socket.error:
                    logger.warning("Couldn't connect to mothership " + mothership)
                    self.Port = randint(40000, 65535)
                    time.sleep(1)
            
            # to register an object, do rpcserver.register('rpcname',object)
            # to register a method, do rpcserver.register('rpcname',method)
            rpcserver.register('map',self.rpc_map)
            rpcserver.register('shuffle',self.rpc_shuffle)
            rpcserver.register('terminate',self.rpc_terminate)

            logger.info( 'Client setup complete.' )

            while not self.stopped:
                rpcserver.handle_request()
            # rpcserver.serve_forever()
        finally: # cleanup no matter what happens
            self.terminate()
Example #51
    def run(self):
        logger.info('Devices\' information sync with airwatch started at %s under account %d!' % (str(time.time()), self.aw_account))
        try:
            logger.info('Step 1: Get device information from RS.')
            rs_retval = self._get_devinfo_from_rs()
            logger.debug('Account %d includes %d device needed sync with airwatch!' % (self.aw_account, rs_retval))
            if rs_retval <= 0:
                # report
                return  # exit, no work to do

            logger.info('Step 2: Get device information from airwatch.')
            aw_retval = self._get_devinfo_from_aw(self.devinfo_original_queue, self.devinfo_refresh_queue)
            logger.debug('Account %d, get %d device information from airwatch!' % (self.aw_account, aw_retval))
            if (aw_retval != rs_retval):
                #devices that do not exist in airwatch need to be updated to unenrolled status
                logger.warning('Account %d, Original device number (%d) and refresh device number (%d) NOT match!' % (self.aw_account, rs_retval, aw_retval))

            logger.info('Step 3: Sync device information to RS.')
            sync_retval = self._sync_devinfo()
            logger.debug('Account %d, sync %d device information with RS finished!' % (self.aw_account, sync_retval))
            if (aw_retval != sync_retval):
                #caused by failures while updating RS
                logger.warning('Account %d, Refresh device number (%d) and Sync device number (%d) NOT match!' % (self.aw_account, aw_retval, sync_retval))

            # Step 4: Update the defect devices to be unenroll.
            if self.m_defect_devs:
                defect_cnt = len(self.m_defect_devs)
                logger.info('Step 4: Set %d devices to be "unenroll" status!' % defect_cnt)
                ok_cnt = self._unenroll_dev_status(self.m_defect_devs)
                if defect_cnt != ok_cnt:
                    logger.warning('Account %d, Set %d devices to be "unenroll status failed!"' % (self.aw_account, (defect_cnt - ok_cnt)))
            # Step 5: Report
        except Exception as e:
            logger.error(repr(e))
Example #52
    def _retrieve_group(self, sync_source='MDM', **kwargs):
        rs = self._process_once('retrieve', 'Groups', sync_source)

        if not kwargs:
            logger.warning('the retrieve condition is empty or none, it will return all groups in metanate')
            return rs

        # get a key and value from kwargs
        key, value = self._get_key_value_from_dict(kwargs)
        if not key:
            logger.error('parameter did not contain "dn", "cn" or "objectguid"')
            raise MDMiInvalidParameterError(601, 'Invalid Parameter', 'parameter did not contain "dn", "cn" or "objectguid"')

        for r in rs.content:
            if isinstance(r, tuple) and len(r) == 2 and isinstance(r[1], dict):
                if key == 'dn':
                    if r[0].lower() == value.lower():
                        return r
                else:
                    r1 = self._convert_dict_key_to_lower(r[1])
                    if key in r1:
                        if isinstance(r1[key], list):
                            if key == 'objectguid':
                                if value in r1[key]:
                                    return r
                            else: # cn is case insensitive
                                r2 = [i.lower() if isinstance(i, basestring) else i for i in r1[key]]
                                if value.lower() in r2:
                                    return r
                        elif isinstance(r1[key], basestring):
                            if key == 'objectguid':
                                if r1[key] == value:
                                    return r
                            else: # cn is case insensitive
                                if r1[key].lower() == value.lower():
                                    return r

        logger.error('cannot find group with condition: %s', kwargs)
        return None
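The method above leans on two helpers that this snippet does not show. A minimal sketch of what they might look like; the accepted key names are taken from the error message above, everything else is an assumption:

    _ACCEPTED_KEYS = ('dn', 'cn', 'objectguid')  # assumed from the error text

    def _get_key_value_from_dict(self, kwargs):
        # Return the first accepted (key, value) pair, key lower-cased so
        # the comparisons above work; (None, None) when nothing matches.
        for k, v in kwargs.items():
            if k.lower() in self._ACCEPTED_KEYS:
                return k.lower(), v
        return None, None

    def _convert_dict_key_to_lower(self, d):
        # Shallow copy with lower-cased keys for case-insensitive lookup.
        return dict((k.lower(), v) for k, v in d.items())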
Ejemplo n.º 53
0
  def process_capacity_response(self, response):
    for resp in response.resource:
      assert resp.gets.capacity >= 0

      resource = self.find_resource(resp.resource_id)
      n = sum_leases(resource)

      if resp.gets.capacity < n:
        logger.warning(
            '%s shortfall for %s: getting %lf, but has %lf outstanding leases' %
            (self.get_server_id(), resource.resource_id,
             resp.gets.capacity, n))
        Counter.get('server_capacity_shortfall').inc()
        Gauge.get('server.%s.shortfall' %
                  self.get_server_id()).set(resp.gets.capacity - n)

      resource.has.CopyFrom(resp.gets)

      # Schedules an action at the expiry time to clear out the lease.
      # Binds resource_id as a default argument: a plain closure would
      # capture the loop variable and always fire for the last resource.
      scheduler.add_absolute(
          resource.has.expiry_time,
          lambda rid=resource.resource_id: self._maybe_lease_expired(rid))
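The default-argument binding in the lambda above matters any time callbacks are created inside a loop. A self-contained Python 2 illustration of the difference, independent of this codebase:

    callbacks_late = [lambda: i for i in range(3)]
    callbacks_bound = [lambda i=i: i for i in range(3)]

    print [f() for f in callbacks_late]   # [2, 2, 2] - every closure sees the final i
    print [f() for f in callbacks_bound]  # [0, 1, 2] - value bound at creation time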
Ejemplo n.º 54
0
    def _pool_generator(self):
        """
        main generator engine. running in endless loop since generator is started.
        handling connection creating and managing process,
        checks status after each action and making queue from users in a case of pool overflow.
        """
        logger.info('*INITIAL STATUS: actual=%s, used=%s, max=%s, limit=%s'
                    % (self.actual_conn, self.used, self.max_size, self.limit))

        while self.actual_conn < self.max_size:

            if self.actual_conn == 0 and self.used == 0:
                self._create_connection()
                logger.debug('creating an INITIAL connection %s'
                             % self.CONNECTIONS[-1])
                logger.info('STATUS: actual=%s, used=%s, limit=%s'
                            % (self.actual_conn, self.used, self.limit))

            elif self.actual_conn == 0 and self.used < self.max_size:
                self._create_connection()
                logger.warning('creating a new connection %s'
                               % self.CONNECTIONS[-1])
                logger.info('STATUS: actual=%s, used=%s, limit=%s'
                            % (self.actual_conn, self.used, self.limit))

            elif self.actual_conn > 0 and self.used < self.max_size:
                logger.debug('gave the user an existing free connection.')
                self.actual_conn -= 1
                self.used += 1
                self.limit = self.max_size - self.actual_conn - self.used
                logger.info('*FINISH STATUS: actual=%s, used=%s, limit=%s'
                            % (self.actual_conn, self.used, self.limit))
                # list.append() returns None, so yielding it directly hands
                # the caller nothing; pop, register, then yield the connection.
                conn = self.CONNECTIONS.pop()
                self.OUTPUT.append(conn)
                yield conn

            else:
                logger.warning('SORRY! ALL %s CONNECTIONS ARE USED AT THE MOMENT!' % self.used)
                print '|RETRY in 3 seconds|'
                print '|loop is waiting for timed-out connections to close|'
                time.sleep(3)
                expired = [x for x in self.CONNECTIONS
                           if time.time() - x.created_time > x.time_limit]
                if expired:
                    for conn in expired:
                        conn.close()
                        self.CONNECTIONS.remove(conn)
                        logger.warning('KILLED connection created at %f' % conn.created_time)
                else:
                    logger.info('all connections are busy, retry in 3 sec')
                yield self.queue('putting user data(reference) to queue')
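The retry branch above evicts connections whose lifetime exceeded time_limit. The same pattern in isolation, with a stub connection class so it can run on its own (all names here are illustrative, not from the pool code):

    import time

    class StubConn(object):
        def __init__(self, time_limit):
            self.created_time = time.time()
            self.time_limit = time_limit

        def close(self):
            print 'closing connection created at %f' % self.created_time

    conns = [StubConn(0.0), StubConn(60.0)]   # the first one is already expired
    expired = [c for c in conns if time.time() - c.created_time > c.time_limit]
    for c in expired:
        c.close()
        conns.remove(c)
    print '%d connection(s) left' % len(conns)  # -> 1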
Ejemplo n.º 55
0
def start_sub_process(is_first=True):
    global socket_process
    global dispatch_process
    global schedule_process
    #global service_status
    global sock_file
    global to_stop

    if to_stop:
        return

    if not is_first and not socket_process.is_alive():
        logger.warning("socket process was stopped, try to start it")
    if is_first or not socket_process.is_alive():
        socket_queue = Queue()
        socket_process = SocketProcess(sock_file, socket_queue)
        socket_process.start()

        try:
            status = socket_queue.get(timeout=3)
            if status != 0:
                stop_children()
                return
        except Exception:
            logger.error('Cannot start socket process at this time')
            stop_children()
            return

    if not is_first and not dispatch_process.is_alive():
        logger.warning("dispatch process was stopped, try to start it")
    if is_first or not dispatch_process.is_alive():
        dispatch_queue = Queue()
        dispatch_process = DispatchProcess(dispatch_queue)
        dispatch_process.start()

    if not is_first and not schedule_process.is_alive():
        logger.warning("schedule process was stopped, try to start it")
    if is_first or not schedule_process.is_alive():
        schedule_process = ScheduleProcess(dispatch_process)
        schedule_process.start()
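Because the checks above only restart children whose is_alive() has gone False, the function is safe to call repeatedly. A sketch of a supervising loop built on that property; the function name and interval are assumptions, and it reuses the module's existing time import and to_stop flag:

def watchdog(interval=30):
    # Hypothetical supervisor: the first call boots everything, later
    # calls only revive children that have died in the meantime.
    start_sub_process(is_first=True)
    while not to_stop:
        time.sleep(interval)
        start_sub_process(is_first=False)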
Ejemplo n.º 56
0
def prepare_guest_debian(disk_path, hostname, passwords, eth_devs, format=None, apt_proxy_ip=None):

    logger.info("Prepare image for " + hostname)
    if format == 'lxc':
        gfs = LocalGuestFS(disk_path)
        gfs.rm('/etc/init/udev.conf')

        interfaces = []
        for dev, (hw, ip, sz, gw) in eth_devs.items():
            if ip == 'dhcp':
                interfaces.append("dhclient {0}".format(dev))
            else:
                interfaces.append("ifconfig {0} {1}/{2} up".format(dev, ip, sz))
        gfs.write('/etc/init/lxc_lan.conf', ifconfig_script.format("\n".join(interfaces)))
    else:
        gfs = guestfs.GuestFS()
        gfs.add_drive_opts(disk_path, format=format)
        logger.debug("Launch libguestfs vm")
        gfs.launch()
        logger.debug("ok")

        os_devs = gfs.inspect_os()
        if len(os_devs) > 1:
            msg = "Two or more bootable partitions - disk prepare impossible " + disk_path
            logger.error(msg)
            raise CloudError(msg)

        # for dev, fs_type in  gfs.list_filesystems():
        #     logger.debug("Fount partition {0} with fs type {1}".format(dev, fs_type))

        #     # TODO: add lvm support
        #     if fs_type in 'ext2 ext3 reiserfs3 reiserfs4 xfs jfs btrfs':
        #         gfs.mount(dev, '/')
        #         if gfs.exists('/etc'):
        #             logger.debug("Fount /etc on partition {0} - will work on it".format(dev))
        #             break
        #         gfs.umount(dev)
        #         logger.debug("No /etc dir found - continue")

        if len(os_devs) == 1:
            # Mount every filesystem of the detected OS at its mountpoint
            # (sorted so '/' is mounted before '/boot', '/var', etc.).
            mounts = sorted(gfs.inspect_get_mountpoints(os_devs[0]))
            for mpoint, dev in mounts:
                gfs.mount(dev, mpoint)
        else:
            # Inspection found no OS; there is nothing sensible to mount.
            msg = "No bootable partition found in image " + disk_path
            logger.error(msg)
            raise CloudError(msg)

        if not gfs.exists('/etc'):
            msg = "Can't find /etc dir in image " + disk_path
            logger.error(msg)
            raise CloudError(msg)

    logger.debug("Launch ok. Set hostname")
    #hostname
    gfs.write('/etc/hostname', hostname)

    #set device names
    logger.debug("Set device names and network imterfaces")
    templ = 'SUBSYSTEM=="net", DRIVERS=="?*", ATTR{{address}}=="{hw}", NAME="{name}"'
    rules_fc = []
    interfaces = ["auto lo\niface lo inet loopback"]

    for dev, (hw, ip, sz, gw) in eth_devs.items():
        rules_fc.append(templ.format(hw=hw, name=dev))
        interfaces.append("auto " + dev)

        if ip == 'dhcp':
            interfaces.append("iface {0} inet dhcp".format(dev))
        else:
            interfaces.append("iface {0} inet static".format(dev))
            interfaces.append("    address " + ip)
            network = int2ip(ip2int(ip) & ip2int(netsz2netmask(sz)))
            interfaces.append("    network " + network)
            interfaces.append("    netmask " + netsz2netmask(sz))

    gfs.write('/etc/udev/rules.d/70-persistent-net.rules', "\n".join(rules_fc))
    # gfs.write('/etc/network/interfaces', "\n".join(interfaces))
    gfs.write('/etc/network/interfaces.d/eth0', "\n".join(interfaces))

    # update passwords
    logger.debug("Update passwords")

    chars = "".join(chr(i) for i in range(ord('a'), ord('z') + 1))
    chars += "".join(chr(i) for i in range(ord('A'), ord('Z') + 1))
    chars += "".join(chr(i) for i in range(ord('0'), ord('9') + 1))

    hashes = {}
    for login, passwd in passwords.items():
        salt = "".join(random.choice(chars) for _ in range(8))
        hashes[login] = crypt.crypt(passwd, "$6$" + salt)

    new_shadow = []
    need_logins = set(hashes)

    for ln in gfs.read_file('/etc/shadow').split('\n'):
        ln = ln.strip()
        if ln != '' and ln[0] != '#':
            login = ln.split(':', 1)[0]
            if login in hashes:
                sh_templ = "{login}:{hash}:{rest}"
                sh_line = sh_templ.format(login=login,
                                          hash=hashes[login],
                                          rest=ln.split(':', 2)[2])
                new_shadow.append(sh_line)
                need_logins.remove(login)
            else:
                # Keep entries we are not updating; dropping them would
                # lock every other account out of the system.
                new_shadow.append(ln)
        else:
            new_shadow.append(ln)

    for login in need_logins:
        new_sh_templ = "{login}:{hash}:{rest}"
        new_sh_line = new_sh_templ.format(login=login,
                                          hash=hashes[login],
                                          rest="0:0:99999:7:::")
        new_shadow.append(new_sh_line)

    gfs.write('/etc/shadow', "\n".join(new_shadow))

    # add new users to passwd
    ids = []
    logins = []
    passwd = gfs.read_file('/etc/passwd')
    for ln in passwd.split('\n'):
        ln = ln.strip()
        if ln != '' and ln[0] != '#':
            logins.append(ln.split(':', 1)[0])
            # Store uid/gid as ints; the max() below compares against 65000.
            ids.append(int(ln.split(':')[2]))
            ids.append(int(ln.split(':')[3]))

    add_lines = []
    try:
        mid = max(i for i in ids if i < 65000)
    except ValueError:
        mid = 0
    mid += 1024

    for login in set(hashes) - set(logins):
        home = '/home/' + login
        add_lines.append(":".join([login, 'x', str(mid), str(mid), "", home, '/bin/bash']))
        if not gfs.exists(home):
            gfs.mkdir_p(home)
        mid += 1

    if add_lines != []:
        gfs.write('/etc/passwd', passwd.rstrip() + "\n" + "\n".join(add_lines))

    # if apt_proxy_ip is not None:
    #     logger.debug("Set apt-proxy to http://{0}:3142".format(apt_proxy_ip))
    #     fc = 'Acquire::http {{ Proxy "http://{0}:3142"; }};'.format(apt_proxy_ip)
    #     gfs.write('/etc/apt/apt.conf.d/02proxy', fc)

    logger.debug("Update hosts")

    hosts = gfs.read_file('/etc/hosts')

    new_hosts = ["127.0.0.1 localhost\n127.0.0.1 " + hostname]
    for ln in hosts.split('\n'):
        if not ln.strip().startswith('127.0.0.1'):
            new_hosts.append(ln)

    gfs.write('/etc/hosts', "\n".join(new_hosts))

    # allow ssh passwd auth
    if gfs.is_file('/etc/ssh/ssh_config'):
        name = '/etc/ssh/ssh_config'
    elif gfs.is_file('/etc/ssh/sshd_config'):
        name = '/etc/ssh/sshd_config'
    else:
        logger.warning("Both '/etc/ssh/sshd_config' and '/etc/ssh/ssh_config' are absent. Skip ssh config patching")
        name = None

    if name is not None:
        # Read and write the file actually found above instead of a
        # hardcoded path.
        sshd_conf = gfs.read_file(name)
        sshd_conf_lines = sshd_conf.split("\n")
        for pos, ln in enumerate(sshd_conf_lines):
            if "PasswordAuthentication" in ln:
                sshd_conf_lines[pos] = "PasswordAuthentication yes"
                break
        else:
            sshd_conf_lines.append("PasswordAuthentication yes")
        gfs.write(name, "\n".join(sshd_conf_lines))
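The static-interface branch above calls ip2int, int2ip and netsz2netmask, which are defined elsewhere. A plausible sketch of those helpers, assuming they do the usual dotted-quad conversions:

import socket
import struct

def ip2int(ip):
    # '10.0.0.1' -> 167772161
    return struct.unpack('!I', socket.inet_aton(ip))[0]

def int2ip(n):
    # 167772161 -> '10.0.0.1'
    return socket.inet_ntoa(struct.pack('!I', n))

def netsz2netmask(sz):
    # 24 -> '255.255.255.0'
    return int2ip((0xFFFFFFFF << (32 - int(sz))) & 0xFFFFFFFF)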
Ejemplo n.º 57
0
    Get the monitor warning threshold from the configuration file according to the given name.
    If the configuration file does not exist, return the default value.
    """

    if os.path.isfile(MONITOR_CONFIG_PATH):
        file = open(MONITOR_CONFIG_PATH)
        try:
            thresholds = json.loads(file.read())
            return thresholds['thresholds'][name][key]
        except Exception, e:
            logger.error('get %s monitor threshold error: %s' % (name, repr(e)))
        finally:
            file.close()
    else:
        logger.warning('monitor configuration file does not exist')
    # Fall back to the default when the file is missing or unreadable.
    return 10

def count_task_by_num(name, success_num=0, fail_num=0):
    """
    Record tasks in monitor configuration file according to the given name, and success and failure num.
    """

    monitor_ini = '{path}/mdmi_monitor_{service}.ini'.format(path=MONITOR_COUNTER_PATH, service=name)
    conf = ConfigParser.SafeConfigParser()
    # create configuration file if it does not exist 
    try:
        file = open(monitor_ini, 'r+')
    except IOError, e:
        file = open(monitor_ini, 'a+')
Ejemplo n.º 58
0
 def _404(self, parameters):
     logger.warning("Web, 404: %s\nParameters: %s" % (
         self.url, parameters
     ))
     return self._response("Error", 404)
Ejemplo n.º 59
0
  def GetCapacity_RPC(self, request):
    assert request.IsInitialized()
    assert self.state.is_initialized()

    # If this server is not the master it cannot handle this request.
    # The client should do a new Discovery.
    if not self.is_master():
      self.state.assert_clean()
      logger.info('%s getting a GetCapacity request when not master' %
                  self.server_id)
      Counter.get('server.GetCapacity_RPC.not_master').inc()

      return None

    timer = Gauge.get('server.GetCapacity_RPC.latency')
    timer.start_timer()
    logger.debug(request)
    now = clock.get_time()

    # Cleanup the state. This removes resources and clients with expired
    # leases and such.
    self.state.cleanup()

    # A set of resources that we need to skip in step 2 (the actual
    # handing out of capacity).
    resources_to_skip = set()

    # First step: Go through the request and update the state with the
    # information from the request.
    for req in request.resource:
      # Finds the resource and the client state for this resource.
      (resource, cr) = self.state.find_client_resource(
          request.client_id,
          req.resource_id)

      # If this resource does not exist we don't need to do anything
      # right now.
      if resource:
        assert cr

        # Checks whether the last request from this client was at least
        # _kMinimumInterval seconds ago.
        if cr.HasField('last_request_time') and now - cr.last_request_time < _kMinimumInterval:
          logger.warning(
              '%s GetCapacity request for resource %s within the %d second '
              'threshold' %
              (self.server_id, req.resource_id, _kMinimumInterval))
          resources_to_skip.add(req.resource_id)
        else:
          # Updates the state with the information in the request.
          cr.last_request_time = now
          cr.priority = req.priority
          cr.wants = req.wants

          if req.HasField('has'):
            cr.has.CopyFrom(req.has)
          else:
            cr.ClearField('has')

    # Creates a new response object in which we will insert the responses for
    # the resources contained in the request.
    response = GetCapacityResponse()

    # Step 2: Loop through all the individual resource requests in the request
    # and hand out capacity.
    for req in request.resource:
      # If this is a resource we need to skip, let's skip it.
      if req.resource_id in resources_to_skip:
        continue

      # Finds the resource and the client state for this resource.
      (resource, cr) = (
          self.state.find_client_resource(
              request.client_id,
              req.resource_id))

      # Adds a response proto to the overall response.
      resp = response.response.add()
      resp.resource_id = req.resource_id

      # If this is an unknown resource just give the client whatever it
      # is asking for.
      if not resource:
        assert not cr

        logger.warning(
            '%s GetCapacity request for unmanaged resource %s' %
            (self.server_id, req.resource_id))
        resp.gets.expiry_time = now + _kDefaultLeaseTimeForUnknownResources
        resp.gets.capacity = req.wants
      else:
        # Sets the safe capacity in the response if there is one
        # configured for this resource.
        if resource.template.HasField('safe_capacity'):
          resp.safe_capacity = resource.template.safe_capacity

        # Finds the algorithm implementation object for this resource.
        algo = AlgorithmImpl.create(resource.template, self.server_level)

        # If the resource is in learning mode we just return whatever the client
        # has now and create a default lease.
        if resource.learning_mode_expiry_time >= now:
          if cr.HasField('has'):
            has_now = cr.has.capacity
          else:
            has_now = 0

          cr.has.CopyFrom(algo.create_lease(resource, has_now))
          Counter.get('server.learning_mode_response').inc()
        else:
          # Otherwise we just run the algorithm. This will update the
          # client state object.
          algo.run_client(resource, cr)
          Counter.get('server.algorithm_runs').inc()

        # Copies the output from the algorithm run into the response.
        resp.gets.CopyFrom(cr.has)

      assert resp.IsInitialized()
      logger.info(
          '%s for %s resource: %s wants: %lf gets: %lf lease: %d refresh: %d' %
          (self.server_id, request.client_id, req.resource_id, req.wants,
           resp.gets.capacity, resp.gets.expiry_time - now,
           resp.gets.refresh_interval))

    assert response.IsInitialized()

    timer.stop_timer()

    return response
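The handler above relies on two module-level tuning constants that are defined outside this snippet. Purely illustrative definitions so the control flow is easier to follow; the real values are not shown in the source:

# Assumed tuning constants (values illustrative, not from the source):
_kMinimumInterval = 5                        # min seconds between requests per client
_kDefaultLeaseTimeForUnknownResources = 300  # lease seconds for unmanaged resources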
Ejemplo n.º 60
0
  def gather_reporting_data(self, resource_id):
    logger.info('Gathering reporting data')
    now = clock.get_time()

    # Adds a record to the data set for this timestamp.
    self.data[now] = dict()
    self.summaries[now] = dict()

    # Adds a summary record for the clients
    p = ReportingData()
    p.total_wants = 0
    p.total_has = 0
    self.summaries[now]['clients'] = p
    self.all_summaries.add('clients')

    # Step 1: Goes through all the clients in the system, gets their
    # reporting data and adds it to the data set.
    for client in Client.all_clients():
      client_id = client.get_client_id()
      self.all_clients.add(client_id)
      data = client.get_reporting_data(resource_id)

      if data:
        self.data[now][client_id] = data
        logger.debug('%s: %s' % (client_id, str(data)))
        p.total_wants += data.wants
        p.total_has += data.has
      else:
        logger.warning('No reporting data received from %s' % client_id)

    # Step 2: Find the master server of every job, get its reporting data
    # and add it to the data set.
    for job in ServerJob.all_server_jobs():
      current_master = job.get_master()

      # If this job does not have a master then there is nothing to do.
      if not current_master:
        continue

      job_name = job.get_job_name()
      self.all_server_jobs.add(job_name)
      data = current_master.get_reporting_data(resource_id)

      if data:
        self.data[now][job_name] = data
        logger.debug('%s: %s' % (job_name, str(data)))
        key = 'level %d' % current_master.get_server_level()
        self.all_summaries.add(key)

        if key not in self.summaries[now]:
          p = ReportingData()
          p.total_wants = 0
          p.total_has = 0
          p.total_leases = 0
          p.total_outstanding = 0
          self.summaries[now][key] = p
        else:
          p = self.summaries[now][key]

        p.total_wants += data.wants
        p.total_has += data.has
        p.total_leases += data.leases
        p.total_outstanding += data.outstanding
      else:
        logger.warning(
            'No reporting data received from %s' %
            current_master.get_server_id())