Example #1
def connect_modem(self, timeout=18):
    logging.info('connecting to 4G ...')
    # poll every 5 s; the default of 18 polls gives a 90 s maximum wait
    while not os.path.exists('/dev/cdc-wdm0') and timeout > 0:
        logging.info('waiting while enabling 4G module ...')
        time.sleep(5)
        timeout -= 1
    if timeout <= 0:
        logging.error('failed to connect 4G due to hardware problem')
        return False
    time.sleep(1)
    self._p_modem = subprocess.Popen('/home/dst/ec20',
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT,
                                     shell=True)
    while True:
        for line in iter(self._p_modem.stdout.readline, ''):
            print line,
            if 'obtained, lease time' in line:
                logging.info('4G is connected')
                time.sleep(3)
                return True
            if 'QMUXError = 0xe' in line:  # no SIM card
                logging.error(
                    'failed to connect 4G due to no SIM card found')
                time.sleep(3)
                return False
Example #2
def adduser(username):
    if username == '':
        return -3
    cmdmkdir = 'hdfs dfs -mkdir /user/' + username
    cmdchown = 'hdfs dfs -chown ' + username + ' /user/' + username
    # the /user directory does not exist
    if checkuserdir() != 1:
        return -1
    # the user already exists
    elif checkuserisexist(username) != 1:
        return -2
    else:
        logging.info(cmdmkdir)
        statumk, resultr = commands.getstatusoutput(cmdmkdir)
        logging.info(cmdchown)
        statucm, resultn = commands.getstatusoutput(cmdchown)
        if statumk + statucm != 0:
            logging.error(resultr + '\n' + resultn)
        elif addusertopolicy(username) == 1:
            return 1
        # failed to add the user to the policy; delete the directory we created
        else:
            cmd = 'hdfs dfs -rm -r /user/' + username
            statu, result = commands.getstatusoutput(cmd)
            if statu != 0:
                logging.error('error while removing the user home directory: ' + result)
Example #3
def CreateMFTFileList(mft_file_or_dir):

    logging.debug('+++ CreateMFTFileList(' + mft_file_or_dir + ')')
        
    # Sanity checks
    if len(mft_file_or_dir) == 0:
        logging.error('CreateMFTFileList(mft_file_or_dir) - mft_file_or_dir param is empty.')
        return -1
    
    if not os.path.exists(mft_file_or_dir):
        logging.error('CreateMFTFileList(mft_file_or_dir) - mft_file_or_dir param path does not exist.')
        return -1
    
    lstMFTFiles = []

    # Verify a single mft file
    if os.path.isfile(mft_file_or_dir):
        res = IsMFTFile(mft_file_or_dir)
        if res != -1 and res != False:
            lstMFTFiles.append(mft_file_or_dir)
        else:
            logging.error('--- ' + mft_file_or_dir + ' is not a MFT file.')
            print '---', mft_file_or_dir, 'is not a MFT file.'
        return lstMFTFiles

    # Verify a dir of MFTDump output files
    dir_list = os.listdir(mft_file_or_dir)
    for afile in dir_list:
        res = IsMFTFile(mft_file_or_dir + '\\' + afile)
        if res != -1 and res != False:
            lstMFTFiles.append(mft_file_or_dir + '\\' + afile)
        else:
            print '---', mft_file_or_dir + '\\' + afile, 'is not a MFT file.'
    
    return lstMFTFiles
Example #4
    def _process_data(self, functions):
        self.ndata = len(functions)
        self.functions = functions
        if self.compute_invcdf:
            logging.erro("Not implemented yet")

        self.coeff_mat = self.get_spline_mat(functions)
        self.bary = np.mean(self.coeff_mat, axis=0)
Example #5
def generate_arrays_from_file(d,
                              ori_dir,
                              label_dir,
                              batch_size=batch_size,
                              is_infinite=True):
    while True:
        for name in os.listdir(d):
            video_path = os.path.join(d, name)
            if os.path.isdir(video_path):
                ori_path = os.path.join(video_path, ori_dir)
                label_path = os.path.join(video_path, label_dir)

                batch_counter = 0
                xs = []
                ys = []

                # there is a 'Output' dir in the ori_path
                # for img_name in os.listdir(ori_path):
                for img_name in os.listdir(label_path):
                    ori_img_path = os.path.join(ori_path, img_name)
                    label_img_path = os.path.join(label_path, img_name)

                    x = load_image(ori_img_path)
                    y = load_image(label_img_path)

                    xs.append(x)
                    ys.append(y)

                    del ori_img_path, label_img_path, x, y

                    batch_counter += 1
                    if batch_counter >= batch_size:
                        xs = np.array(xs, dtype=np.float32)
                        ys = np.array(ys, dtype=np.float32)

                        yield (xs, ys)

                        batch_counter = 0

                        del xs, ys
                        xs = []
                        ys = []

                if len(ys) != 0:
                    xs = np.array(xs, dtype=np.float32)
                    ys = np.array(ys, dtype=np.float32)

                    yield (xs, ys)

                del ori_path, label_path, batch_counter, xs, ys
            else:
                logging.error('the path %s is not a dir' % video_path)
                exit(1)

            del video_path

        if not is_infinite:
            break
Example #6
def run(self):
    global msgsForWeb
    while True:
        if not msgsForWeb.empty():
            self.tosend = msgsForWeb.get()
            logging.info("Message to send from Websocket thread: " + self.tosend)
            try:
                self.server.send_message_to_all(self.tosend)
            except Exception:
                logging.error("Unexpected error: %s", sys.exc_info()[0])
        time.sleep(0.1)
Example #7
def sendMail(messageText, emailSubject, **kwargs):
    """
    send textmessage from hardcoded email to some recipients over SMTP
    """
    msg = MIMEMultipart()
    send_from = "*****@*****.**"
    msg['From'] = send_from
    send_to = ["*****@*****.**"]#, "*****@*****.**"]
    msg['To'] = COMMASPACE.join(send_to)
    msg['Date'] = formatdate(localtime=True)
    msg['Subject'] = emailSubject
    try:
        msg.attach(MIMEText(messageText))
        smtpObj = smtplib.SMTP()
        smtpObj.connect()
        smtpObj.sendmail(send_from, send_to, msg.as_string())
        smtpObj.close()
    except Exception as e:
        logging.erro("Error: unable to send email: %s" % (e.__class__))
Example #8
def collect_range_details(baseUrl, headers, cert, subnetId):

    logging.info(f"Retrieving Details for subnetId '{subnetId}'")

    response = requests.get(baseUrl + "/subnets/" + subnetId,
                            verify=cert,
                            headers=headers)

    if response.status_code == 200:
        logging.info(f"fGet Subnet '{subnetId} ' was sucessfull.")
        body = response.json()

        subnetData = body["data"]

        subnetDetails = {
            "id": subnetData["id"],
            "name": subnetData["subnet"],
            "startIPAddress": subnetData["calculation"]["Min host IP"],
            "endIPAddress": subnetData["calculation"]["Max host IP"],
            "description": subnetData["description"],
            "ipVersion": subnetData["calculation"]["Type"],

            #"sectionId": rangeData["sectionId"],
            "subnetPrefixLength": subnetData["calculation"]["Subnet bitmask"],
            "dnsServerAddresses":
            subnetData["nameservers"]["namesrv1"].split(";"),
            "gatewayAddress": subnetData["gateway"]["ip_addr"],

            # "dnsSearchDomains": [subnetData["custom_dnsSearchDomain"]],

            # "domain": subnetData["custom_domain"],
        }

        return subnetDetails

    else:
        # nothing found
        logging.error(f"Error Retrieving Subnet '{subnetId}'.")
Example #9
def do_with_connect():
    try:
        logging.info('connection address: ' + str(addr))
        c.send('welcome to mds')
        time.sleep(20)
        c.close()
    except socket.error, msg:
        #print 'Error code:' + '\033[1;32;40m' +str(msg[0])+'\033[0m Error message:'+ '\033[1;31;40m' + msg[1] + '\033[0m'
        logging.error('send error:' + str(msg))


try:
    s = socket.socket()
except socket.error, msg:
    logging.error('\033[1;32;40m create socket failed:\033[m' + str(msg))
    sys.exit()
else:
    logging.info("the socket has been created")

host = '127.0.0.1'
port = 9999
try:
    s.bind((host, port))
    s.listen(105)
    logging.info('starting to listen on port 9999')
except socket.error, msg:
    logging.error(msg)
    sys.exit()
#else:
#  print "server has been start!"
Example #10
def clear(self):
    self.request_handler.clear_cookie("session_id")
    try:
        self.request_handler.redis.delete("sess_%s" % self.session_id)
    except Exception as e:
        logging.error(e)
Example #11
def fn_delete(name):
    ###################################################################
    #
    #
    # fn_delete -- Allows the admin to delete values in the table tb_student or the table tb_teacher
    #
    #
    # Parameters
    #
    # name -- The name of the user, student or teacher
    #
    #
    ###################################################################
    # Test comment
    if name == 'student':
        name_pt = 'Student'
    else:
        name_pt = 'Teacher'

    while True:
        opcao = input(
            'Choose an option: \n1 - Delete by id. \n2 - Delete by name. \n3 - Back to the menu. \nEnter: '
        )
        if opcao == '1':
            try:
                id_db = input('Enter the id of the {}: '.format(name_pt))
                sql_select = 'SELECT * FROM tb_{} WHERE id = {}'.format(
                    name, id_db)
                sql = 'DELETE FROM tb_{} WHERE id = {}'.format(name, id_db)
                stmt = ibm_db.exec_immediate(conn, sql_select)
                tuple_select = ibm_db.fetch_tuple(stmt)
                stmt = ibm_db.exec_immediate(conn, sql)
                logging.debug('Value deleted from tb_{}!! Value: {}'.format(
                    name, tuple_select))
                print('Value deleted successfully\n\n')
                break
            except Exception:
                logging.error('Id {} not deleted from tb_{}'.format(
                    id_db, name))
                print('Value not deleted!\n')
        elif opcao == '2':
            try:
                f_name = input('Enter the first name: ')
                l_name = input('Enter the last name: ')
                sql = "DELETE FROM tb_{} WHERE f_name = '{}' and l_name = '{}'".format(
                    name, f_name, l_name)
                sql_select = "SELECT * FROM tb_{} where f_name = '{}' and l_name = '{}'".format(
                    name, f_name, l_name)
                stmt = ibm_db.exec_immediate(conn, sql_select)
                tuple_select = ibm_db.fetch_tuple(stmt)
                stmt = ibm_db.exec_immediate(conn, sql)
                print('Value deleted successfully\n\n')
                logging.debug('Value deleted from tb_{}!! Value: {}'.format(
                    name, tuple_select))
                break
            except Exception:
                print('Value not deleted\n')
                logging.error(
                    'Value with f_name = {} and l_name = {} not deleted from tb_{}.'
                    .format(f_name, l_name, name))
        elif opcao == '3':
            break
        else:
            print('Invalid option.')
Example #12
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import CSRFProtect
from flask_session import Session

import redis
import logging
from logging.handlers import RotatingFileHandler
from ihome.utils.commons import ReConverter

# database
db = SQLAlchemy()

# create the redis connection object
redis_store = None

# set the logging level
'''
logging.error('error level')
logging.warning('warning level')
logging.info('info level')
logging.debug('debug level')
'''
logging.basicConfig(level=logging.DEBUG)  # debug level
# create a log handler: log file path, maximum size per log file, and maximum number of backup files
file_log_handler = RotatingFileHandler("logs/log",
                                       maxBytes=1024 * 1024 * 100,
                                       backupCount=10)
# log record format: log level, source filename, line number, message
formatter = logging.Formatter(
    '%(levelname)s %(filename)s:%(lineno)d %(message)s')
# set this format on the handler just created
file_log_handler.setFormatter(formatter)
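The snippet ends before the handler is registered anywhere; a minimal sketch of the likely final step, assuming the handler is meant to go on the root logger:

# attach the rotating file handler to the global (root) logger -- assumed completion, not shown in the snippet
logging.getLogger().addHandler(file_log_handler)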
Example #13
def parse_json(input_file, repos_dir):
    result = []
    json_content = json.load(input_file)['apps']
    n_errors = 0
    n_news = 0
    n_updated = 0
    off = 0
    off_in_disk = 0
    number_of_apps = len(json_content)

    for i, app in enumerate(json_content):

        package = app["package"]
        repo_url_str = app.get("source_repo").strip()
        destiny_folder = "{}/{}".format(repos_dir, package.replace(".", "-"))

        logging.info("{}/{} application".format(i + 1, number_of_apps))
        p = Path(destiny_folder)

        repo_on = is_online(repo_url_str)

        app['status'] = {
            'store_on':
            is_online(
                "https://play.google.com/store/apps/details?id={}".format(
                    package)),
            'fdroid_on':
            is_online("https://f-droid.org/en/packages/{}/".format(package)),
            'repo_on':
            repo_on
        }

        if repo_on:
            if p.exists() and p.is_dir():
                logging.info(
                    "Repository of application {}[{}] already exist!".format(
                        app['name'], package))
                try:
                    repo = Repo(str(p))
                    if repo_url_str.replace(
                            "https://",
                            "https://:@") != repo.remote('origin').url:
                        logging.warning(
                            "Repository {}[{}] exists with a different remote {} -> {}"
                            .format(app['name'], package, repo_url_str,
                                    repo.remote('origin').url))
                        repo.close()
                        logging.debug("Moving repo to avoid conflic")
                        process = subprocess.run("mv {} conflict/".format(
                            str(p)),
                                                 universal_newlines=True,
                                                 check=True,
                                                 shell=True,
                                                 stdout=PIPE,
                                                 stderr=PIPE)
                        if process.returncode != 0:
                            logging.error(process.stderr)
                        repo = clone_repo(repo_url_str, str(p))
                        n_news = n_news + 1
                    else:
                        n_updated = n_updated + 1
                        active_branch = repo.active_branch
                        origin = repo.remote('origin')
                        info = origin.pull(active_branch)
                        logging.info("Updating repo from {} {}".format(
                            origin.name, active_branch))
                        for pull_info in info:
                            logging.debug("Last commit found {}".format(
                                pull_info.commit))

                except InvalidGitRepositoryError as e:
                    logging.error(
                        "Existing repository {}[{}] is not a git repo".format(
                            app['name'], package))
                    n_errors = n_errors + 1
                    result.append(app)
                    continue

            else:
                repo = clone_repo(repo_url_str, str(p))
                n_news = n_news + 1
        else:
            if p.exists() and p.is_dir():
                logging.info(
                    "Repository is offline, but there is an old version on disk {}[{}]!"
                    .format(app['name'], package))
                try:
                    repo = Repo(str(p))
                    off_in_disk = off_in_disk + 1
                except InvalidGitRepositoryError as e:
                    logging.error(
                        "Existing repository {}[{}] is not a git repo".format(
                            app['name'], package))
                    off = off + 1
                    result.append(app)
                    continue
            else:
                logging.error(
                    "Repo {},{}-[{}] offline and no version found in disk".
                    format(app['name'], package, repo_url_str))
                off = off + 1
                result.append(app)
                continue

        info = get_info(repo)
        app = {**app, **info}
        repo.close()
        result.append(app)

    logging.info(
        "From {} apps, {} were cloned, {} were updated, {} offline in disk, {} offline and {} failed."
        .format(number_of_apps, n_news, n_updated, off_in_disk, off, n_errors))
    return result
Example #14
def _timeout_routing (agent):
  try:
    _agent = magent.instance ()

    try:
      reposInfo =  _agent.Ldap.search_s (_agent._base, ldap.SCOPE_SUBTREE,
            filterstr='(objectClass=Repository)') 
    except ldap.NO_SUCH_OBJECT as e:
      logging.error ("Repository not found")
      return True 
    except ldap.LDAPError as e:
      logging.error ("LDAP Error %s" % e)
      return True

    if len (reposInfo) == 0:
      logging.error ("Repository not found")
      return True

    data = reposInfo[0][1]

    lock = 0
    if data.has_key ('Lock'): 
      logging.info ("Lock defined %s", data.get('Lock'))
      lock = string.atoi (data.get('Lock')[0])
    else:
      logging.info ("Lock not defined")

    if (lock & UPDATE_LOCK):
      return True

    logging.debug ("check Repository Revision")
    if not data.has_key ('Revision'): 
      return True

    revision = data.get ('Revision')[0]
    logging.debug ("Repository Revision %s" % revision)

    val = _agent.get_attr ('updateRevision')
    if val is None:
      updateRevision = "0"
    else:
      updateRevision = val[0]
    logging.debug ("update Revision %s" % updateRevision)

    val = _agent.get_attr ('curRevision')
    if val is None:
      curRevision = []
    else:
      curRevision = val

    ''' package revision format
      curRevision = desktop-meta:1
      curRevision = nc-meta:1
      curRevision = clc-meta:2
    '''
    revisions = {}

    for l in curRevision:
      pkg, rev = l.split (':')
      revisions [pkg] = rev
    
    logging.debug ("check UPDATE_LOCK")
    if lock & UPGRADE_LOCK:
      return True

    if (updateRevision != revision):
      if not data.has_key('uri'):
        logging.error ( "repository uri not defined" )
        return True

      # update uri file
      try:
        source_list_fn = '/etc/apt/sources.list.d/cloudbot.list'
        fh = open (source_list_fn , 'w')
        
        for uri in data.get('uri'):
          fh.write (uri)
          fh.write ("\n")
        fh.close ()
      except IOError as e:
        logging.error ( "cannot open %s for write %s" % (source_list_fn, e) )
        return True

      retval = os.system ('apt-get update -y -qq')
      if  retval == 0:
        _agent.set_attr ('updateRevision', revision)
      else:
        logging.error ("apt-get update failed:%d" % retval)
        return True

    l = []

    
    for pkg, mask in _agent.capability.items ():
      if revisions.get(pkg) != revision:
        for enable in data.get('updateEnable', []):
          try:
            enable = int (enable)
          except ValueError as e:
            continue
          if enable & mask:
            l.append (pkg) 
          else:
            logging.info ("upgrade disabled: %s" % pkg)
          
    if len (l) == 0:
      return True
    logging.info ("update packages:%s" % l)

    cc = Cache ()
    ag = cc.actiongroup()
    with ag:
      us = set ()
      history = {}
      for pkg in l:
        upgrades = mark_upgrade_deep (cc, pkg, history)
        if upgrades is None:
          logging.error ('failed to get dependency of %s' % pkg)
        else:
          us.update(upgrades)
          revisions[pkg] = revision
      history = None

      if len (us):
        try:
          cc.commit ()
        except apt.cache.LockFailedException as e:
          logging.error ('apt cache lock failed: %s', e)
        except Exception as e:
          logging.error ( "unhandled exception:%s" % e)
      else:
        logging.warn ( "no upgrades" )

      try:
        logging.info ( "update package revision in LDAP:%s" % revisions)
        _agent.set_attr ('curRevision', [ "%s:%s" % (pkg, ver) for pkg,ver in revisions.items()])
      except ldap.LDAPError as e:
        logging.error ('failed to update curRevision in LDAP: %s' % e)
    cc = None

  except Exception as e:
    logging.fatal ("unhandled exception:%s" % e)
 
  return True