Example #1
    def post(self):
        class_id = self.get_body_argument('class_id', None)
        title = self.get_body_argument('title', None)
        image_url = self.get_body_argument('image_url', None)
        note = self.get_body_argument('note', None)
        content = self.get_body_argument('content', None)

        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        data = {
            'class_id': class_id,
            'title': title,
            'image_url': image_url,
            'note': note,
            'content': content,
            'author': 'LiJiaF',
            'create_date': now,
            'write_date': now
        }

        log.info('Adding article: ' + json.dumps(data))

        session = DBSession()
        try:
            new_article = Article(**data)
            session.add(new_article)
            session.commit()
        except Exception as e:
            log.error(e)
            return self.finish(json.dumps({'code': -1, 'msg': 'failed to add article'}))
        finally:
            session.close()

        return self.finish(json.dumps({'code': 0, 'msg': 'article added'}))
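A minimal sketch of how a handler like this might be mounted in a Tornado application; the handler class name, URL, and port below are illustrative, while Article and DBSession come from the surrounding project:

import tornado.ioloop
import tornado.web

def make_app():
    # 'ArticleAddHandler' is a hypothetical name for the class defining post() above.
    return tornado.web.Application([
        (r'/article/add', ArticleAddHandler),
    ])

if __name__ == '__main__':
    make_app().listen(8888)
    tornado.ioloop.IOLoop.current().start()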
Example #2
 def terminate(self):
     log.info('Terminating SS service...')
     self.set_sys_proxy('off')
     if self.pac_server and self.pac_server.daemon_thread.is_alive():
         self.pac_server.terminate()
     if self.ss_client_pipe:
         self.ss_client_pipe.terminate()
Example #3
def write_active_beamlets(fname, beamlets):
    log.info("Writing fluences for active beamlets for beam to file %s" % fname)
    with open(fname, 'w') as f:
        for t in range(beamlets.size):
            if beamlets.active[t] >= 0:
                f.write('%g\n' % beamlets.fluence[t])
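For illustration, a stand-in beamlets object with the three attributes the function reads (size, active, fluence) can be stubbed as follows; the real class presumably comes from the surrounding treatment-planning code:

from types import SimpleNamespace
import numpy as np

beamlets = SimpleNamespace(
    size=3,
    active=np.array([0, -1, 2]),          # negative entries are skipped
    fluence=np.array([0.5, 0.0, 1.25]))   # one fluence value per beamlet
write_active_beamlets('fluences.txt', beamlets)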
Example #4
def update_invalid_node_status():
    # flask_app = create_app('development')
    # flask_context = flask_app.app_context()
    # flask_context.push()

    self_ip = get_host_ip()
    node_list = db.session.query(Node).filter(Node.ip_addr == self_ip).all()

    if len(node_list) == 0:
        # flask_context.pop()
        return
    else:
        for node in node_list:
            Node.query.filter_by(id=node.id).update(
                {"status": NODE_STATUS_INVALID})
            try:
                db.session.commit()
            except ProgrammingError as e:
                log.error(e)
            except IntegrityError as e:
                db.session.rollback()
                log.error(e)
            except DataError as e:
                log.error(e)
            else:
                log.info(
                    f"Update the node[{node.id}]'s status to {NODE_STATUS_INVALID}"
                )
Example #5
 def login_app(self, mobile):
     """
     登录APP
     :return:
     """
     self.ca.open_app()
     sleep(5)
     currentActivity = "com.dnkj.chaseflower.ui.login.activity.LoginHomeActivity"
     if currentActivity == self.adb.getCurrentActivity():
         self.adb.touchByRatio(ratioWidth=0.5, ratioHigh=0.95)
         loginactivity = "com.dnkj.chaseflower.ui.login.activity.LoginActivity"
         sleep(2)
         if loginactivity == self.adb.getCurrentActivity():
             self.adb.touchByRatio(ratioWidth=0.5, ratioHigh=0.36)
             self.adb.sendText(mobile)
             self.adb.touchByRatio(ratioWidth=0.5, ratioHigh=0.48)
             self.adb.sendText('8888')
             if self.device_id in ('CLB7N18709015438', ''):
                 self.adb.touchByRatio(ratioWidth=0.5, ratioHigh=0.8088)
             elif self.device_id in ('621QACQS55GQQ', ''):
                 self.adb.touchByRatio(ratioWidth=0.5, ratioHigh=0.59)
             else:
                 self.adb.touchByRatio(ratioWidth=0.5, ratioHigh=0.56)
             sleep(2)
         self.adb.touchByRatio(ratioWidth=0.69, ratioHigh=0.92)
         if self.adb.getCurrentActivity(
         ) == "com.dnkj.chaseflower.ui.weather.activity.WeatherHomeActivity":
             log.info("login successfully")
     elif self.adb.getCurrentActivity(
     ) == "com.dnkj.chaseflower.ui.weather.activity.WeatherHomeActivity":
         log.info("already logged")
Example #6
    def get_group_template_id(self):
        """
        Method used to get the group template id. Used to associate a template to the templates group.

        :return: returns the template group id
        """
        group_template_id = None
        payload = {"jsonrpc": "2.0",
                   "method": "hostgroup.get",
                   "params": {
                       "output": "extend",
                       "filter": {
                           "name": [
                               "Templates"
                           ]
                       }
                   },
                   "auth": self.api_auth,
                   "id": 1
        }
        response = self.contact_zabbix_server(payload)
        
        for item in response['result']:
            group_template_id = item['groupid']
        log.info("Getting Template Group id ... \n Template Group id:%s" %group_template_id)
        return group_template_id
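contact_zabbix_server is not shown in this example; a minimal sketch, assuming the standard Zabbix JSON-RPC endpoint and the requests library (the URL is a placeholder), might be:

import requests

def contact_zabbix_server(payload, url='http://zabbix.example.com/api_jsonrpc.php'):
    # POST the JSON-RPC payload and return the parsed reply.
    r = requests.post(url, json=payload,
                      headers={'Content-Type': 'application/json-rpc'})
    r.raise_for_status()
    return r.json()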
Example #7
def upload_this_node_info():
    # flask_app = create_app('development')
    # flask_context = flask_app.app_context()
    # flask_context.push()

    # delete_the_node_info_conflict_with_me()
    node_info = generate_node_info()

    info_db = Node(ip_addr=node_info.get("ip_addr", None),
                   host_name=node_info.get("host_name", None),
                   node_type=node_info.get("node_type", None),
                   max_ps=node_info.get("max_ps", 10),
                   cur_ps=node_info.get("cur_ps", 0),
                   status=node_info.get("status", 0),
                   ps_id=node_info.get("ps_id", 0))

    db.session.add(info_db)
    try:
        db.session.commit()
    except ProgrammingError as e:
        log.error(e)
    except IntegrityError as e:
        db.session.rollback()
        log.error(e)
    except DataError as e:
        log.error(e)
    else:
        print(f"Get the node id: {info_db.id}")
        log.info(f"Upload new node id: {info_db.id}")
        node_running_info.update({"node_id": info_db.id})
        print(node_running_info)
Example #8
    def em(self, corpus, iterations, parser_class = Parser, mode = "forward"):
        """
        Run EM training on the provided corpus for a given number of iterations.
        Mode can be "forward" (parse first RHS, normalize weights by 
        LHS + second RHS if any), or synchronous" (parse both sides at the same
        time, weights normalized by LHS only)
        """
        normalization_groups = {}
      
        if mode == "synchronous" or isinstance(corpus[0],tuple) :
            for r in self:             
                normalization_groups[r] = self[r].symbol
            bitext = True
        elif mode == "forward":
            if type(self[self.keys()[0]].rhs2) is list:
                for r in self:             
                    normalization_groups[r] = (self[r].symbol, tuple(self[r].rhs2))
            else:
                for r in self:
                    normalization_groups[r] = (self[r].symbol, self[r].rhs2) 
        
            bitext = False 
        self.normalize_by_groups(normalization_groups)

        for i in range(iterations):
            ll = self.em_step(corpus, parser_class, normalization_groups, bitext = bitext)
            log.info("Iteration %d, LL=%f" % (i, ll))
Example #9
 def break_clusters(self, clusters, *args, **kwargs):
     log.debug('Breaking clusters with:\n{}'.format(str(self)))
     result = []
     for i, cluster in enumerate(clusters):
         if self.to_break(cluster):
             try:
                 sub_clusters = self.break_cluster(cluster, *args, **kwargs)
                 if not sub_clusters:
                     log.warn('Cluster {} not broken'.format(cluster.id))
                     result.append(cluster)
                 else:
                     log.info(
                         'Breaking cluster {} into {} sub_clusters'.format(
                             cluster.id, len(sub_clusters)))
                     result.extend(sub_clusters)
             except (lpinterface.NoSolutionsError, UnboundLocalError,
                     TypeError, ValueError) as e:
                 log.error(
                     'Cluster breaking failed for cluster {} - see log'.
                     format(cluster.id))
                 log.debug(sys.exc_info())
                 result.append(cluster)
         else:
             result.append(cluster)
     return result
Example #10
 def get_device_list(self):
     log.info("Gets the device ID that is now connected")
     devicestr = list(os.popen('adb devices').readlines())
     devicestr.pop()
     devicestr.pop(0)
     devicelist = [re.findall(r'^\w*\b', i)[0] for i in devicestr]
     return devicelist
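The two pop() calls assume the usual adb devices output: a header line first and a blank line last, for example:

List of devices attached
CLB7N18709015438        device
621QACQS55GQQ           device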
Example #11
    def nova_callback(self, ch, method, properties, body):
        """
        Method used by method nova_amq() to filter messages by type of message.

        :param ch: refers to the head of the protocol
        :param method: refers to the method used in callback
        :param properties: refers to the proprieties of the message
        :param body: refers to the message transmitted
        """
        payload = json.loads(body)
        try:
            tenant_name = payload['_context_project_name']
            type_of_message = payload['event_type']
            
            if type_of_message == 'compute.instance.create.end':
                instance_id = payload['payload']['instance_id']
                instance_name = payload['payload']['hostname']
                self.zabbix_handler.create_host(instance_name, instance_id, tenant_name)
                log.info("Creating a host in Zabbix Server :%s" %(instance_name+"-"+instance_id))

            elif type_of_message == 'compute.instance.delete.end':
                host = payload['payload']['instance_id']
                try:
                    host_id = self.zabbix_handler.find_host_id(host)
                    self.zabbix_handler.delete_host(host_id)
                    log.info("Deleting host from Zabbix Server %s " %host_id )
                except Exception,e:
                    log.error(str(e))    # TODO
        except Exception, e:
            print e
            log.error(str(e))    # TODO
Example #12
 def query_ce_se(self):
     log.debug("Querying the following MyOSG URL: %s" % \
         self.resource_group_url)
     fd = urllib2.urlopen(self.resource_group_url)
     dom = parse(fd)
     ses = set()
     ces = set()
     for service_dom in dom.getElementsByTagName("Service"):
         service_type = None
         for name_dom in service_dom.getElementsByTagName("Name"):
             try:
                 service_type = str(name_dom.firstChild.data).strip()
             except:
                 pass
         uri = None
         for uri_dom in service_dom.getElementsByTagName("ServiceUri"):
             try:
                 uri = str(uri_dom.firstChild.data).strip()
             except:
                 pass
         if uri and service_type:
             if service_type == 'SRMv2':
                 ses.add(uri)
             elif service_type == 'CE':
                 ces.add(uri)
     log.debug("OIM returned the following CEs: %s." % ", ".join(ces))
     log.debug("OIM returned the following SEs: %s." % ", ".join(ses))
     log.info("OIM returned %i CEs and %i SEs" % (len(ces), len(ses)))
     self.ces_results, self.ses_results = ces, ses
     return len(ces), len(ses)
Example #13
 def set_node(self, node):
     log.info('Setting node "{}"'.format(node['alias']))
     url = self.url('set_net_accel') + \
         '?id={}&type=other'.format(node['id'])
     r = self.session.get(url)
     r.raise_for_status()
     return not r.json()['code']
Example #14
    def get(self):
        article_id = self.get_argument('article_id', None)

        log.info('Fetching article: article_id %s' % article_id)

        session = DBSession()
        article = session.query(Article).filter_by(id=article_id).first()
        if not article:
            return self.finish(json.dumps({'code': -1, 'msg': 'article not found'}))

        result = {
            'id': article.id,
            'class_id': article.class_id,
            'image_url': (domain_name + article.image_url) if article.image_url else '',
            'title': article.title,
            'note': article.note,
            'content': article.content
        }

        return self.finish(json.dumps({'code': 0, 'data': result}))
Example #15
 def query_missing(self):
     now = time.time()
     log.info("Querying %i missing data entries." % len(self.missing))
     for mtime in self.missing:
         starttime = mtime
         endtime = mtime + datetime.timedelta(0, 3600)
         results = self.query_transfers(starttime, endtime)
         if not results:
             log.warning("No transfer results found for %s." % starttime)
         for result in results:
             res_time, count, volume_mb = result
             res_time = float(res_time)
             starttime = self._timestamp_to_datetime(res_time)
             if now-res_time >= 3600:
                 endtime = self._timestamp_to_datetime(res_time+3600)
             else:
                 endtime = self._timestamp_to_datetime(now)
             if res_time > now:
                 continue
             td = TransferData()
             td.starttime = starttime
             td.endtime = endtime
             td.count = count
             td.volume_mb = volume_mb
             self.data[starttime] = td
             log.debug("Successfully parsed results for %s." % starttime)
             self.save_cache()
Example #16
    def query_jobs(self):
        params = self.get_params()

        response = gracc_query_jobs(self.es, jobs_summary_index, **params)

        results = response.aggregations.EndTime.buckets

        all_results = [ (x.Records.value or x.doc_count,
                         x.CoreHours.value,
                         x.key / 1000) for x in results ]

        log.info("GRACC returned %i results for daily jobs" % len(all_results))
        log.debug("Job result dump:")
        for count, hrs, epochtime in all_results:
            time_tuple = time.gmtime(epochtime)
            time_str = time.strftime("%Y-%m-%d %H:%M", time_tuple)
            log.debug("Day %s: Jobs %i, Job Hours %.2f" %
                (time_str, count, hrs))
        count_results = [i[0] for i in all_results]
        hour_results = [i[1] for i in all_results]
        num_results = int(self.cp.get("GRACC", "days"))
        count_results = count_results[-num_results-1:-1]
        hour_results = hour_results[-num_results-1:-1]
        self.count_results, self.hour_results = count_results, hour_results
        return count_results, hour_results
Example #17
def save_voxel_to_D_row(v2Drow, fname):
    log.info("Saving total roimarks to cache file: %s" % fname)
    bnint = struct.pack("i", np.prod(v2Drow.shape))  # element count packed as a C int
    with open(fname, "wb") as fout:
        fout.write(bnint)
        v2Drow.tofile(fout)
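A matching reader, sketched under the assumption that the file holds exactly one C int (the element count) followed by the raw array bytes, and that the caller knows the dtype used when writing:

import struct
import numpy as np

def load_voxel_to_D_row(fname, dtype=np.float64):
    # Hypothetical counterpart to save_voxel_to_D_row above.
    with open(fname, 'rb') as fin:
        (n,) = struct.unpack('i', fin.read(struct.calcsize('i')))
        return np.fromfile(fin, dtype=dtype, count=n)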
Example #18
    def deploy(self, nodes):
        '''Warning: deploying will delete all old SS nodes in the router'''

        log.info('Deploying new SS nodes')

        profiles = []
        for i, node in enumerate(nodes, 1):
            profiles.append(self.format_node(i, node))
        profiles = '\n\n'.join(profiles)
        new_config = '\n\n'.join([self.base_config, profiles])

        s = None
        for line in new_config.split('\n'):
            s = '>>' if s else '>'
            log.info('... \t{}'.format(line))
            self.clinet.exec_command('echo "{l}" {s} {p}.tmp'.format(
                l=line,
                s=s,
                p=self.config['config_path']))
        self.clinet.exec_command(
            'mv {p} {p}.bak'.format(p=self.config['config_path']))
        self.clinet.exec_command(
            'mv {p}.tmp {p}'.format(p=self.config['config_path']))
Example #19
    def em(self, corpus, iterations, parser_class=Parser, mode="forward"):
        """
        Run EM training on the provided corpus for a given number of iterations.
        Mode can be "forward" (parse first RHS, normalize weights by 
        LHS + second RHS if any), or synchronous" (parse both sides at the same
        time, weights normalized by LHS only)
        """
        normalization_groups = {}

        if mode == "synchronous" or isinstance(corpus[0], tuple):
            for r in self:
                normalization_groups[r] = self[r].symbol
            bitext = True
        elif mode == "forward":
            if type(self[self.keys()[0]].rhs2) is list:
                for r in self:
                    normalization_groups[r] = (self[r].symbol,
                                               tuple(self[r].rhs2))
            else:
                for r in self:
                    normalization_groups[r] = (self[r].symbol, self[r].rhs2)

            bitext = False
        self.normalize_by_groups(normalization_groups)

        for i in range(iterations):
            ll = self.em_step(corpus,
                              parser_class,
                              normalization_groups,
                              bitext=bitext)
            log.info("Iteration %d, LL=%f" % (i, ll))
Example #20
    def __init__(self, section=None, **config):
        super(HiWiFi, self).__init__(section, ints=['port'], **config)

        log.info('ssh {0[username]}@{0[hostname]} -p {0[port]}'.format(
            self.config))

        self.clinet = paramiko.SSHClient()
        self.clinet.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.clinet.connect(
            hostname=self.config['hostname'],
            username=self.config['username'],
            password=self.config['password'],
            port=self.config['port'])

        log.info('Getting base SS config')
        stdin, stdout, stderr = self.clinet.exec_command(
            'cat {}'.format(self.config['config_path']))
        current_config = ''.join(stdout)
        m = re.search(
            r'^config\s+interface\s+\'ssgoabroad\'$\n(?:^\s*option\s+.*$\n?)*',
            current_config, re.M)

        if m:
            self.base_config = m.group()
        else:
            self.base_config = ''
Example #21
def stop_node(node_list, platon_dpos):
    auto = AutoDeployPlaton()
    # exclude nodes that must stay up (original indices 0, 1 and 10, presumably)
    del node_list[0]
    del node_list[9]
    del node_list[0]
    while True:
        time.sleep(random.randrange(100, 5000))
        verfier_info_list = platon_dpos.GetVerifiersList()
        num = len(verfier_info_list)
        if num <= 1:
            continue
        f = int((num - 1) / 3)
        if f < 2:
            continue
        stop_node_list = random.sample(node_list, random.randrange(1, f))
        q.put(1)
        log.info("关闭节点:{}".format(stop_node_list))
        number = platon_dpos.web3.eth.blockNumber
        sleep_time = get_sleep_time(number)
        auto.kill_of_list(stop_node_list)
        if sleep_time > 21:
            time.sleep(random.randrange(1, sleep_time - 20))
        log.info("恢复节点:{}".format(stop_node_list))
        auto.restart_list(stop_node_list)
        q.get()
Example #22
def update_task_info(task_id, field, value):
    # flask_app = create_app('development')
    # flask_context = flask_app.app_context()
    # flask_context.push()

    SpiderTask.query.filter_by(id=task_id).update({field: value})

    try:
        db.session.commit()
    except ProgrammingError as e:
        log.error(e)
    except IntegrityError as e:
        db.session.rollback()
        log.error(e)
    except DataError as e:
        log.error(e)
    else:
        log.info(
            f"Update spider_task: id: {task_id}, field: {field}, value: {value}"
        )
        print(
            f"Update spider_task: id: {task_id}, field: {field}, value: {value}"
        )
Example #24
def process_pair(pair, image_dir: Path, curr_idx, len_pairs):
    """Pair is a list of two parsed filenames (see the function parse_filename below).
    Given these two files, use Scene to load the appropriate channels.
    Then save these original channels (.png and optionally .nc files).
    Then resample (colocate) to make the sensor channels match up.
    Then save these colocated channels.
    Crop the NaN edges, tag with meta information (which files were used as input),
    and finally save the numpy arrays (so we don't need to recompute next time)."""
    log.info(f'{rgb(255,0,0)}Processing{reset} timestep {bold}{curr_idx + 1}/{len_pairs}{reset}')
    dt = pair[0]["datetime"]
    log.info(f'Colocating {blue}{dt}{reset}')
    scn = Scene(reader='viirs_sdr', filenames=[f['path'] for f in pair])
    scn.load(all_channels + lat_long_both + lunar_data)
    #save_datasets(scn, 'ORIGINAL_', str(image_dir))

    log.info(f'Resampling {blue}{dt}{reset}')
    resample_scn = scn.resample(scn['DNB'].attrs['area'], resampler='nearest')

    log.info(f'Saving images {blue}{dt}{reset}')
    t = time.time()
    save_datasets(resample_scn, 'COLOCATED_', str(image_dir))
    log.debug(f'Saving images took {rgb(255,0,0)}{time.time() - t:.2f}{reset} seconds')

    log.info(f'Cropping nan edges of {blue}{dt}{reset}')
    t = time.time()
    data = crop.crop_nan_edges(resample_scn)
    log.debug(f'Cropping nan edges took {rgb(255,0,0)}{time.time() - t:.2f}{reset} seconds')

    data['channels'] = list(data)
    data['filenames'] = [f['filename'] for f in pair]
    data["datetime"] = dt
    return data
Example #25
    def define_item(self, template_id, item, value_type):
        """
        Method used to define the items parameters

        :param template_id:
        :param item:
        :param value_type:
        :return: returns the json message to send to zabbix API
        """
        log.info("Creating item :%s" %item)
        payload = {"jsonrpc": "2.0",
                   "method": "item.create",
                   "params": {
                       "name": item,
                       "key_": item,
                       "hostid": template_id,
                       "type": 2,
                       "value_type": value_type,
                       "history": "90",
                       "trends": "365",
                       "units": "",
                       "formula": "1",
                       "lifetime": "30",
                       "delay": 10
                   },
                   "auth": self.api_auth,
                   "id": 1}

        return payload
Example #26
 def vote_ticket(self):
     '''Cast votes'''
     platon_dpos = PlatonDpos(self.node_list[1]["url"],
                              self.address_vote,
                              self.pwd,
                              abi=self.abi,
                              privatekey=self.private_vote)
     while True:
         node_dict = random.sample(self.node_dict.keys(),
                                   random.randrange(5, self.node_number))
         log.info("选出的投票节点:{}".format(node_dict))
         candidate_list = self.get_candidate_list()
         # log.info("可投票节点:{}".format(candidate_list))
         for node in node_dict:
             try:
                 if self.node_dict[node]["id"] not in candidate_list:
                     # log.info("Not votable: {}".format(self.node_dict[node]["id"]))
                     continue
                 allowed = random.randrange(1, self.allowed * 3)
                 vote_info, _ = platon_dpos.VoteTicket(
                     allowed, self.ticket_price, self.node_dict[node]['id'],
                     self.address_vote, allowed * self.ticket_price)
                 assert 'success' in vote_info['VoteTicketEvent'][
                     0], 'Vote failed, result: {}'.format(vote_info)
                 log.info("Vote succeeded: node {}, votes: {}".format(node, allowed))
             except Exception as e:
                 log.error("Voting node {}: {}".format(node, e))
         time.sleep(random.randrange(20, 200))
Example #27
    def send_order(self, direction, action, type, symbol, price, amount, client_order_id=None):
        """提交委托"""
        exchange_symbol = self.__trans_symbol(symbol)

        binance_side = self._trans_side(direction, action)
        if binance_side is None:
            return
        binance_type = self.__trans_type(type)
        if binance_type is None:
            return

        log.info('send order: pair(%s), side(%s), type(%s), price(%f), amount(%f)' % (exchange_symbol, binance_side, binance_type, price, amount))
        ret = self.__client.create_order(symbol=exchange_symbol, side=binance_side, type=binance_type,
            timeInForce=TIME_IN_FORCE_GTC, price=price, quantity=amount)
        log.debug(ret)
        try:
            if ret['orderId']:
                # if ret['fills']:
                # self.debug('Return buy order ID: %s' % ret['orderId'])
                return ret['orderId']
            else:
                # self.debug('Place order failed')
                return None
        except Exception:
            # self.debug('Error result: %s' % ret)
            return None
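The __client used here looks like a python-binance client (TIME_IN_FORCE_GTC comes from its enums module); if so, it would typically be set up roughly like this (key and secret are placeholders):

from binance.client import Client
from binance.enums import TIME_IN_FORCE_GTC

client = Client('YOUR_API_KEY', 'YOUR_API_SECRET')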
Example #28
 def test_verifiers(self):
     '''
     @Description: verifiers are the top four of the candidate list; lucky-ticket rewards are paid out
     @param {type} @@@@
     @return: @@@@
     '''
     before_reward = self.platon_dpos1.eth.getBalance(self.new_address)
     candidate_list = self.platon_dpos1.GetCandidateList()
     log.info("新的的入围节点列表:{}".format(candidate_list))
     candidate_id = [i['CandidateId'] for i in candidate_list]
     block_number = self.platon_dpos1.eth.blockNumber
     sleep_time = get_sleep_time(block_number)
     time.sleep(sleep_time)
     verfier_info_list = self.platon_dpos1.GetVerifiersList()
     log.info("验证人列表:{}".format(verfier_info_list))
     assert len(verfier_info_list) > 0,"查询结果异常,验证人列表为空。"
     verfier_list = [info['CandidateId'] for info in verfier_info_list]
     assert verfier_list == candidate_id[:4], "验证人没有取候选人的前四,验证人id:{}\n候选人id:{}".format(verfier_list,
                                                                                         candidate_list[:4])
     status = 1
     for verfier in verfier_list:
         if self.nodeid_dict['node4'] in verfier:
             status = 0
     assert status, 'A node whose votes do not reach the threshold must not become a verifier'
     # check that the lucky-ticket reward was paid out
     after_reward = self.platon_dpos1.eth.getBalance(self.new_address)
     assert after_reward > before_reward, 'Verifier reward not paid; balance before: {}, balance after: {}'.format(
         before_reward, after_reward)
Example #29
 def test_init_token(self):
     '''
     Verify the initial token balances of the built-in accounts after chain initialization
     :return:
     '''
     url = CommonMethod.link_list(self)
     platon_ppos = Ppos(url, self.address, self.chainid)
     FOUNDATION = platon_ppos.eth.getBalance(
         Web3.toChecksumAddress(conf.FOUNDATIONADDRESS))
     FOUNDATIONLOCKUP = platon_ppos.eth.getBalance(
         Web3.toChecksumAddress(conf.FOUNDATIONLOCKUPADDRESS))
     STAKING = platon_ppos.eth.getBalance(
         Web3.toChecksumAddress(conf.STAKINGADDRESS))
     INCENTIVEPOOL = platon_ppos.eth.getBalance(
         Web3.toChecksumAddress(conf.INCENTIVEPOOLADDRESS))
     DEVELOPERS = platon_ppos.eth.getBalance(
         Web3.toChecksumAddress(conf.DEVELOPERSADDRESS))
     token_init_total = conf.TOKENTOTAL
     if self.initial_amount['FOUNDATION'] != FOUNDATION:
         log.info("Foundation initial amount {} is incorrect".format(FOUNDATION))
     elif self.initial_amount['FOUNDATIONLOCKUP'] != FOUNDATIONLOCKUP:
         log.info("Foundation lockup initial amount {} is incorrect".format(FOUNDATIONLOCKUP))
     elif self.initial_amount['STAKING'] != STAKING:
         log.info("Staking account initial amount {} is incorrect".format(STAKING))
     elif self.initial_amount['INCENTIVEPOOL'] != INCENTIVEPOOL:
         log.info("Incentive pool initial amount {} is incorrect".format(INCENTIVEPOOL))
     elif self.initial_amount['DEVELOPERS'] != DEVELOPERS:
         log.info("Developers (reserved) account initial amount {} is incorrect".format(DEVELOPERS))
     reality_total = FOUNDATION + FOUNDATIONLOCKUP + STAKING + INCENTIVEPOOL + DEVELOPERS
     assert token_init_total == reality_total, "initial issuance total {} is incorrect".format(reality_total)
Example #30
def normalize(db_path: Path):
    """Load case.npz, calculate normalized channels,
    then save all channel data out to case_norm.npz."""
    case_file = db_path.parent / 'case.npz'
    log.info(f'Loading {blue}{case_file.name}{reset}')
    with np.load(case_file) as f:
        case = dict(f)
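The snippet is cut off here; for context, a case.npz in this layout could have been produced with np.savez, one named array per channel (channel names and shapes below are illustrative):

import numpy as np

np.savez('case.npz',
         DNB=np.zeros((512, 512)),    # placeholder channel data
         M13=np.zeros((512, 512)))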
Example #31
    def check_instances(self):
        """
        Method used to verify existence of an instance / host

        """
        log.info("Check platform all VMs ...")
        servers = None
        tenant_id = None
        for item in self.group_list:
            tenant_name = item[0]
            if tenant_name == 'admin':
                tenant_id = item[1]

        auth_request = urllib2.Request(
            "http://" + self.nova_api_host + ":" + self.nova_api_port + "/v2/" + tenant_id +
            "/servers/detail?all_tenants=1")

        auth_request.add_header('Content-Type', 'application/json;charset=utf8')
        auth_request.add_header('Accept', 'application/json')
        auth_request.add_header('X-Auth-Token', self.token)
        try:
            auth_response = urllib2.urlopen(auth_request)
            servers = json.loads(auth_response.read())
        except urllib2.HTTPError, e:
            if e.code == 401:
                print '401'
                print '401:Check your keystone credentials\nToken refused!'
            elif e.code == 404:
                print '404:not found'
            elif e.code == 503:
                print '503:service unavailable'
            else:
                print 'unknown error: '
Example #32
def postprocess(args):
    npz_path = Path(args.npz_path).resolve()
    model_path = Path(args.model_path).resolve()
    nick = args.nick
    log.info("starting MLR postprocessing")
    out_dir = ensure_outputdir(model_path, npz_path, nick)
    f = np.load(npz_path)
    shape = f['DNB_norm'].shape
    # make predictor/predictand arrays and flatten for use
    X = np.stack([f[key].flatten() for key in predict_channels], axis=-1)
    Y = np.stack([f[key].flatten() for key in predictand_channels], axis=-1)

    with open(model_path, 'rb') as g:
        model = pickle.load(g)
    MLR_truths = model.predict(X)
    metdict, denormed = {}, {}
    for i, c in enumerate(predictand_channels):
        Ycol = Y[:, i]
        MLRcol = MLR_truths[:, i]
        process_channel(Ycol, MLRcol, c, out_dir, nick, metdict, shape,
                        denormed)

    p = predictand_channels[0] if len(predictand_channels) == 1 else 'ALL'
    np.savez(out_dir / f'{nick}_MLR_{p}_denormed_true_pred.npz', **denormed)
    with open(out_dir / f'{nick}_MLR_{p}_postprocess_eventlog.txt', 'w') as f:
        x = Path(npz_path).resolve().parent.name
        print(datetime.now(), file=f)
        print(x, file=f)
        print(nick, file=f)
        pprint.pprint(metdict, stream=f)
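The pickled model is used only through .predict, so any scikit-learn regressor fits the interface; a hypothetical way the model_path file could have been produced (X_train/Y_train stand for the stacked, flattened channel arrays):

import pickle
from sklearn.linear_model import LinearRegression

model = LinearRegression().fit(X_train, Y_train)  # multi-output linear regression
with open('mlr_model.pkl', 'wb') as g:
    pickle.dump(model, g)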
Example #33
    def get_tenants(self):
        """
        Method used to get a list of tenants from keystone

        :return: list of tenants
        """
        log.info("Initnating you tenants ....")
        tenants = None
        auth_request = urllib2.Request('http://' + self.keystone_host + ':'+self.keystone_admin_port+'/v2.0/tenants')
        auth_request.add_header('Content-Type', 'application/json;charset=utf8')
        auth_request.add_header('Accept', 'application/json')
        auth_request.add_header('X-Auth-Token', self.token)

        try:
            auth_response = urllib2.urlopen(auth_request)
            tenants = json.loads(auth_response.read())
        except urllib2.HTTPError, e:
            if e.code == 401:
                print '401'
                print '401:Check your keystone credentials\nToken refused!'
            elif e.code == 404:
                print '404:not found'
            elif e.code == 503:
                print '503:service unavailable'
            else:
                print 'unknown error: '
Example #34
    def check_host_groups(self):
        """
        This method checks if some host group exists

        """
        for item in self.group_list:
            tenant_name = item[0]
            payload = {
                "jsonrpc": "2.0",
                "method": "hostgroup.exists",
                "params": {
                    "name": tenant_name
                },
                "auth": self.api_auth,
                "id": 1
            }
            response = self.contact_zabbix_server(payload)
            if response['result'] is False:
                log.warn("Host Group %s does not existed, Creating ... " %tenant_name)
                payload = {"jsonrpc": "2.0",
                           "method": "hostgroup.create",
                           "params": {"name": tenant_name},
                           "auth": self.api_auth,
                           "id": 2}
                self.contact_zabbix_server(payload)
            else:
                log.info("Host Group %s has already existed ..." %tenant_name)
Example #35
def Cancel(config, jobid):
    """
    Cancel a job. The TERM signal is sent to allow the process to terminate
    gracefully within 5 seconds, followed by a KILL signal.

    :param str config: path to arc.conf
    :param str jobid: local job ID
    :return: ``True`` if successfully cancelled, else ``False``
    :rtype: :py:obj:`bool`
    """

    debug('----- starting forkCancel.py -----', 'fork.Cancel')

    configure(config)
    if Config.remote_host:
        ssh_connect(Config.remote_host, Config.remote_user, Config.private_key)

    info('Killing job with pid %s' % jobid, 'fork.Cancel')
    if not Config.remote_host:
        import signal
        try:
            pid = int(jobid)  # the local job ID is the process PID
            os.kill(pid, signal.SIGTERM)
            time.sleep(5)
            os.kill(pid, signal.SIGKILL)
        except OSError:
            # Job already died or terminated gracefully after SIGTERM
            pass
        except:
            return False
    else:
        args = 'kill -s TERM %s; sleep 5; kill -s KILL %s' % (jobid, jobid)
        handle = execute_remote(args)

    debug('----- exiting forkCancel.py -----', 'fork.Cancel')
    return True
Example #36
    def __init__(self,
                 blasr=blasr_wrapper.BlasrWrapper(),
                 consensus_builder=None,
                 allele_db=None,
                 filter_function=None,
                 result_filter=None):
        if allele_db:
            self.allele_db = allele_db
        else:
            # import create_allele_db
            self.allele_db = '../database/Complete.Human.IGHV_IMGT.Feb2018.Corey.linear.modified.fasta'  # TODO implement either fetching the default database file (instead of hard coded) or make a getter that creates a temporary fasta of the allele_db loaded from create allele_db
        log.info('Using the following allele database:\n' + self.allele_db)
        self.description.append('Allele database: ' + self.allele_db)

        if blasr:
            self.blasr = blasr
        else:
            import blasr_wrapper
            self.blasr = blasr_wrapper.BlasrWrapper()
        if filter_function:
            log.info('Setting blasr result filter function')
            self.filter_function = filter_function
        else:
            self.filter_function = lambda y: y.score  # key that returns selection criteria to choose the best hit for the blasr mapping of consensus to allele database. Results will be sorted in ascending order using this key
        if consensus_builder:
            self.consensus_builder = consensus_builder
Example #37
    def place(self, order_params):
        """
        This function actually sends a request to GDAX and places the order.

        dict order_params: side + order_type specific arguments.

        Returns the order ID (if the order went through) and the full response object.
        """

        # Prevent multiple invocations with the same OID.
        if self.oid() is not None:
            return self.oid()

        # Common params across all orders
        # https://docs.gdax.com/?python#orders
        data = {
                'side': self.__side,
                'type': self.__order_type,
                'product_id': self.__product,
            }
        data.update(order_params)

        log.info('placing ORDER')
        self.__resp = httpapi.post(
                common.api_url + 'orders',
                data=json.dumps(data),
                auth=common.auth,
            )

        return self.oid(), self.__resp
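order_params carries the side- and type-specific fields; for a GDAX-style limit order they might look like this (values illustrative, field names per the GDAX docs linked above):

order_params = {
    'price': '1000.00',       # quote currency per unit
    'size': '0.01',           # amount in base currency
    'time_in_force': 'GTC',
}
order_id, resp = order.place(order_params)  # 'order' is an instance of the class above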
Example #38
    def get_next_consensus_wheel(self, number=1, file=conf.PLATON_CONFIG_PATH):
        '''
        Get the next consensus round
        :param :
        :return:
        '''
        data = LoadFile(file).get_data()
        Interval = data['EconomicModel']['Common']['Interval']
        PerRoundBlocks = data['EconomicModel']['Common']['PerRoundBlocks']
        ValidatorCount = data['EconomicModel']['Common']['ValidatorCount']
        ConsensusSize = Interval * PerRoundBlocks * ValidatorCount
        ConsensusSize = ConsensusSize * number
        url = CommonMethod.link_list(self)
        platon_ppos = Ppos(url, self.address, self.chainid)
        current_block = platon_ppos.eth.blockNumber
        differ_block = ConsensusSize - (current_block % ConsensusSize)
        current_end_block = current_block + differ_block
        log.info('Current block height: {}, end block of the next consensus round: {}'.format(
            current_block, current_end_block))

        while True:
            time.sleep(self.time_interval)
            current_block = platon_ppos.eth.blockNumber
            differ_block = ConsensusSize - (current_block % ConsensusSize)
            log.info('Current block height: {}, blocks remaining: {}'.format(current_block, differ_block))
            if current_block > current_end_block:
                break
Example #39
 def download(self, url):
     resp = requests.get(url, headers={'User-Agent': ua.get_ua()})
     if resp.status_code == 200:
         log.info('GET %s 200 OK' % url)
         self.parse(resp.text)
     else:
         log.error('Error %s %s ' % (url, resp.status_code))
Example #40
def airflow_db_ready():
    from sqlalchemy.orm import Session
    from airflow import settings
    from airflow.utils import db

    def check_db():
        with db.create_session() as session:
            db.check(session)

    last_ex = None
    for i in range(ZARIFLOW_DB_WAIT_TRIES):
        try:
            check_db()
            log.info("DB ready!")
            # flush log
            sleep(0.010)
            return True
        except Exception as ex:
            log.info(
                f"DB not ready, waiting {ZARIFLOW_DB_WAIT_INTERVAL} [s] before reattempt ({i + 1}/{ZARIFLOW_DB_WAIT_TRIES})"
            )
            last_ex = ex
            sleep(ZARIFLOW_DB_WAIT_INTERVAL)

    raise Exception("Database not ready", last_ex)
Example #41
    def input(self, fname=None, subfolder=None, check=True):
        """ Jeżeli plik fname istnieje w folderze rootfolder i podfolderze subfolder,
        to jest bezpośrednio do niego zwracany uchwyt. Jak nie istnieje to plik jest kopiowany na podstawie konfiguracji
        rassdataconfig.

        :param fname: nazwa pliku
        :param subfolder: nazwa podkatalogu w folderze input
        :return: pełna ścieżka do pliku
        """
        file = self._get_folder("input", fname, subfolder)
        
        if check:
            if not os.path.isfile(file):
                if fname in self.data["files"]:
                    sourcefile = self.data["files"][fname]["sourcefile"]
                    if os.path.exists(sourcefile) and os.path.isfile(sourcefile):
                        shutil.copy(sourcefile, file)
                    else:
                        log.error("The sourcefile %s defined in %s file for %s does not exist." % (sourcefile, self.rassdata_configfile, fname))

            if not os.path.isfile(file) and not os.path.isdir(file):
                log.info("The file %s does not exist and is not defined in %s file." % (file, self.rassdata_configfile))
                file = "File %s does not exist and is not defined in rassdata.json" % fname

        return file
Example #42
def rebuild(prediction, dims_desired):
    output = np.zeros(dims_desired)
    patches = all_patches(256, *dims_desired)
    log.info(f"patches leng is {len(patches)} and predict is {len(prediction)}")
    for i in range(len(patches)): 
        output[patches[i]]=prediction[i]
    return output
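all_patches is not shown; a minimal sketch consistent with the indexing above — non-overlapping size-by-size tiles expressed as (row-slice, column-slice) pairs — could be:

def all_patches(size, rows, cols):
    # Hypothetical helper: tile an (rows, cols) grid into size x size windows.
    return [(slice(r, r + size), slice(c, c + size))
            for r in range(0, rows, size)
            for c in range(0, cols, size)]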
Example #44
 def test_candidate_cap(self):
     '''
     @Description: candidate list capacity
     @param {type} @@@@
     @return: @@@@
     '''
     status = 0
     candidate_info = self.platon_dpos1.GetCandidateDetails(
         self.nodeid_dict['node5'])
     result_1 = self.platon_dpos1.CandidateDeposit(self.nodeid_dict['node1'], self.new_address, self.fee,
                                                   self.ip_dict['node1'],
                                                   self.port_dict['node1'], self.extra, value=131)
     self.check_event(result_1)
     result_2 = self.platon_dpos1.CandidateDeposit(self.nodeid_dict['node2'], self.new_address, self.fee,
                                                   self.ip_dict['node2'],
                                                   self.port_dict['node2'], self.extra, value=135)
     self.check_event(result_2)
     result_3 = self.platon_dpos1.CandidateDeposit(self.nodeid_dict['node3'], self.new_address, self.fee,
                                                   self.ip_dict['node3'],
                                                   self.port_dict['node3'], self.extra, value=135)
     self.check_event(result_3)
     result_4 = self.platon_dpos1.CandidateDeposit(self.nodeid_dict['node4'], self.new_address, self.fee,
                                                   self.ip_dict['node4'],
                                                   self.port_dict['node4'], self.extra, value=140)
     self.check_event(result_4)
     candidate_list = self.platon_dpos1.GetCandidateList()
     log.info("入围节点列表:{}".format(candidate_list))
     assert candidate_info not in candidate_list, "node5还在候选人列表中"
Example #45
 def split_result_database_str(output):
     databases = output.replace('"', '').split(os.linesep)
     if 'DATABASE_NAME' in databases:
         databases.remove('DATABASE_NAME')
     databases = [name.lower() for name in databases]
     log.info('split_result_database_str is:{}'.format(databases))
     return databases
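Illustrative behaviour, assuming a quoted one-column database dump with its DATABASE_NAME header:

import os

output = os.linesep.join(['"DATABASE_NAME"', '"Sales"', '"HR"'])
split_result_database_str(output)   # -> ['sales', 'hr']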
Example #46
 def ping_timer(self):
     while not self._stop.isSet():
         if self.job.is_done():
             self.session.close()
             self.stop()
         else:
             info(self.ping())
         time.sleep(PING_INTERVAL)
Example #47
def cmd_args_parse():
    # Expected: script_name secondary_ir lineno
    log.info("Command args: " + str(sys.argv))
    if len(sys.argv) != 3:  # script + input name + line number
        log.error("Command args number: " + str(len(sys.argv)))
        sys.exit("Improper arguments")
    # return [input_file_name, lineno]
    return [sys.argv[1], int(sys.argv[2])]
Example #48
    def first_run(self):
        log.info("Platform first Run, Now Collecting info ,Wait ....")
        self.api_auth = self.get_zabbix_auth()
        self.proxy_id = self.get_proxy_id()
        self.template_id = self.get_template_id()

        tenants = self.get_tenants()
        self.group_list = self.host_group_list(tenants)
        self.check_host_groups()
        self.check_instances()
Example #49
    def __init__(self, rabbit_host, rabbit_user, rabbit_pass, zabbix_handler):

        """
        TODO
        :type self: object
        """
        self.rabbit_host = rabbit_host
        self.rabbit_user = rabbit_user
        self.rabbit_pass = rabbit_pass
        self.zabbix_handler = zabbix_handler
        log.info('Nova listener started ... ')
Example #50
def runbd(lmc, l, v, tpsolver):
    tmp = tempfile.NamedTemporaryFile(delete=False, dir="lmcs")
    tmp.write(lmc)
    tmp.close()
    vstring = "-v" if v else ""
    log.info("./bdsolver -l " + str(l) + " " + vstring + " -tpsolver " + tpsolver + " file: " + tmp.name)

    try:
        return subprocess.check_output(["./bdsolver", "-l", str(l), vstring, "-tpsolver", tpsolver, tmp.name])
    except Exception, e:
        return str(e) + "<br><br> Maybe there is something wrong with your lmc or lambda value? Are you sure the lmc is stochastic?"
Example #51
    def set_sys_proxy(self, mode):
        pipe = subprocess.Popen(
            [self.HELPER_PATH, mode],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE)
        r = pipe.wait()
        if r or self.sys_proxy_status != mode:
            raise Exception('Failed to modify system proxy.')

        if mode != 'off':
            log.info('Proxy mode set to "{}".'.format(mode))
Example #52
    def __init__(self, rabbit_host, rabbit_user, rabbit_pass, zabbix_sender, zabbix_eventlet_pool):

        """
        TODO
        :type self: object
        """
        self.rabbit_host = rabbit_host
        self.rabbit_user = rabbit_user
        self.rabbit_pass = rabbit_pass
        self.zabbix_sender = zabbix_sender
        self.pool = zabbix_eventlet_pool
        log.info("Ceilometers listener started ...")
Example #53
    def host_group_list(self, tenants):
        """
        Method to "fill" an array of hosts

        :param tenants: receive an array of tenants
        :return: parsed list of hosts [[tenant_name1, uuid1], [tenant_name2, uuid2], ..., [tenant_nameN, uuidN],]
        """
        host_group_list = []
        for item in tenants['tenants']:
            if not item['name'] == 'service':
                host_group_list.append([item['name'], item['id']])
        log.info("Host Group need to Create...")
        return host_group_list
Example #54
 def query_sites(self):
     fd = urllib2.urlopen(self.resource_group_url)
     dom = parse(fd)
     sites = set()
     for site_dom in dom.getElementsByTagName("Site"):
         for name_dom in site_dom.getElementsByTagName("Name"):
             try:
                 sites.add(str(name_dom.firstChild.data))
             except:
                 pass
     log.debug("OIM returned the following sites: %s" % ", ".join(sites))
     log.info("OIM has %i registered sites." % len(sites))
     self.sites_results = sites
     return sites
Example #55
def configure():
    usage = "usage: %prog -c config_file"
    parser = optparse.OptionParser()
    parser.add_option("-c", "--config", help="PR Graph config file",
        dest="config", default="/etc/osg_display/osg_display.conf")
    parser.add_option("-q", "--quiet", help="Reduce verbosity of output",
        dest="quiet", default=False, action="store_true")
    parser.add_option("-d", "--debug", help="Turn on debug output",
        dest="debug", default=False, action="store_true")
    parser.add_option("-T", "--notimeout",
        help="Disable alarm timeout; useful for initial run",
        dest="notimeout", default=False, action="store_true")
    opts, args = parser.parse_args()

    if not opts.config:
        parser.print_help()
        print
        log.error("Must pass a config file.")
        sys.exit(1)

    log.handlers = []

    if not opts.quiet:
        handler = logging.StreamHandler(sys.stdout)
        log.addHandler(handler)

    for handler in log.handlers:
        formatter = logging.Formatter("%(asctime)s - %(levelname)s - " \
            "%(message)s")
        handler.setFormatter(formatter)

    if opts.debug:
        log.setLevel(logging.DEBUG)

    if not opts.quiet:
        log.info("Reading from log file %s." % opts.config)

    cp = ConfigParser.SafeConfigParser()
    cp.readfp(open(opts.config, "r"))

    cp.notimeout = opts.notimeout

    logging.basicConfig(filename=cp.get("Settings", "logfile"))

    for handler in log.handlers:
        formatter = logging.Formatter("%(asctime)s - %(levelname)s - " \
            "%(message)s")
        handler.setFormatter(formatter)

    return cp
Example #56
 def create_items(self, template_id):
     """
     Method used to create the items for measurements regarding the template
     :param template_id: receives the template id
     """
     log.info("Creating items ... ")
     items_list = self.vm_meters
     for item in items_list:
         if item == 'cpu':
             value_type = 3
         else:
             value_type = 0
         payload = self.define_item(template_id, item, value_type)
         self.contact_zabbix_server(payload)
Example #57
    def __init__(self, section=None, **config):
        super(GeeWan, self).__init__(section, ints=['timeout'], **config)

        log.info('Logging in Geewan router...')

        self.session = requests.session()
        self.session.headers.update({'User-Agent': 'Mozilla/5.0'})

        r = self.session.post(
            'http://{}/cgi-bin/luci'.format(self.config['hostname']),
            dict(username='******', password=self.config['password']))
        r.raise_for_status()

        m = re.search(r';stok=([\da-f]{32,})/', r.text)
        self.stok = m.group(1)
Example #58
 def deploy_new_nodes(self, nodes):
     for n in nodes:
         log.info('Deploying new node "{}"'.format(n.name))
         r = self.session.post(
             self.url('set_other_account'),
             dict(
                 type='ss',
                 alias='{0.name}: {0.test_result}'.format(n),
                 server=n.server,
                 server_port=n.port,
                 method=n.method,
                 password=n.password,
                 timeout=self.config['timeout']
             ))
         r.raise_for_status()
Example #59
    def get_nodes(self):
        log.info('Getting nodes from CloudSS...')
        url = self.url_product_details + self.product_ids[0]
        r = self.session.get(url)
        r.raise_for_status()
        # node list, encryption method, connection port, connection password
        method = self.find_value(r.content, '加密方式')
        port = self.find_value(r.content, '连接端口')
        password = self.find_value(r.content, '连接密码')
        hosts = self.find_value(r.content, '节点列表')
        hosts = re.findall(r'(?:[\w\-]+\.)+[\w\-]+', hosts)

        nodes = Nodes()
        nodes.get_nodes(hosts, port, password, method)
        return nodes
Example #60
    def get_zabbix_auth(self):
        """
        Method used to request a session ID form Zabbix API by sending Admin credentials (user, password)

        :return: returns an Id to use with zabbix api calls
        """
        payload = {"jsonrpc": "2.0",
                   "method": "user.login",
                   "params": {"user": self.zabbix_admin_user,
                              "password": self.zabbix_admin_pass},
                   "id": 2}
        response = self.contact_zabbix_server(payload)
        zabbix_auth = response['result']
        log.info("Getting zabbix auth ... \nauth:%s" %zabbix_auth)
        return zabbix_auth