コード例 #1
0
def capture_temp_humid_from_pi(base_path, pi_ip_address, pi_id):
    """Poll the DHT sensor forever, writing each reading to a new text file.

    Args:
        base_path: Root directory for sensor output.
        pi_ip_address: Address of the Pi (currently unused).
        pi_id: Pi identifier, used as a sub-directory of base_path.
    """
    path = os.path.join(base_path, pi_id)
    # NOTE(review): `path` is computed but files are written to the CWD
    # below — confirm whether fname should be joined with `path`.

    # Run indefinitely
    while True:
        humidity, temperature_C = Adafruit_DHT.read_retry(sensor, pin)

        fname = datetime.now().strftime("%Y-%m-%d %H%M%S_TempHumid.txt")
        ###not sure what file type is best. Also this creates a new file for every reading,
        ###but not sure if we want to append to 1 file (or 1 per day?)

        # read_retry() yields None values when every attempt fails, so guard
        # before doing arithmetic on the reading (the original crashed here).
        if humidity is not None and temperature_C is not None:
            # Convert Celsius temperature to Fahrenheit
            temperature_F = temperature_C * 9 / 5.0 + 32
            # Choose which scale to report
            temperature = temperature_F
            #temperature = temperature_C

            try:
                # `with` guarantees the file is closed even on write errors.
                # The readings are floats, so format them instead of
                # concatenating (the original raised TypeError here).
                with open(fname, "w") as f:
                    f.write("{} {}".format(humidity, temperature))
            except Exception as e:
                # logging.critical is the call; CRITICAL is the level constant.
                logging.critical(
                    "Unable to take reading and write to disk. Error: {}. File: {}"
                    .format(e, fname))
        else:
            logging.critical(
                "Capture from pi failed for file: {}".format(fname))
        # Only try to take a reading every 10 seconds
        time.sleep(timeDelay)
コード例 #2
0
def tesa(a):
    """Exercise every logging level with the received value, then halve it.

    Args:
        a: A numeric value; it is logged at several levels and divided by 2.
    """
    logging.info("into the function tesa ")
    logging.critical("the function got the value " + str(a))
    logging.error("the function got the value " + str(a))
    logging.warning("the function got the value " + str(a))
    pdb.set_trace()  # NOTE(review): debugger breakpoint left in — intentional?
    logging.info("the function got the value " + str(a))
    logging.debug("the function got the value " + str(a))

    try:
        b = a / 2
    except Exception as ex:
        # logging.DEBUG/INFO/... are integer level constants, not callables;
        # the module-level logging functions are lowercase.
        logging.debug("an exception has occured DEBUG")
        logging.info("an exception has occuredINFO")
        logging.warning("an exception has occuredWARNING")
        logging.error("an exception has occuredERROR")
        logging.critical("an exception has occuredCRITICAL")
        logging.critical(str(ex))
        logging.error(str(ex))
        logging.warning(str(ex))
        logging.info(str(ex))
        logging.debug(str(ex))
コード例 #3
0
    def find_fandom_fic_cnt(self, ffnet_url):
        """Count the fics in a fandom by inspecting its last index page.

        Each full index page lists 25 fics and the final page may hold fewer,
        so the total is (pages - 1) * 25 + fics-on-last-page.

        Args:
            ffnet_url: Fandom path fragment used to build index-page URLs.

        Returns:
            Total fic count, or 0 when the site could not be fetched.
        """
        logging.debug('find-cnt')

        oDB = FanFicSql(self._Path)
        oDB.FilePath = self._Path
        logging.debug('DB: ' + self._Path)
        oUrl = FanfictionNetUrlBuilder(ffnet_url, "http://",
                                       "www.fanfiction.net/")
        fic_cnt = 0

        def _fetch(url):
            """urlopen with a single 60s-delayed retry; None on failure."""
            try:
                return urlopen(url)
            except Exception:
                print('sleep')
                time.sleep(60)
                try:
                    return urlopen(url)
                except Exception:
                    # logging.critical is the call; CRITICAL is the level int.
                    logging.critical("html = urlopen(sUrl) failed" + url)
                    print("ERROR")
                    return None

        sUrl = oUrl.generate_page_url(1)
        logging.debug('surl: ' + sUrl)
        html = _fetch(sUrl)
        if html is None:
            return fic_cnt
        bsObj = BeautifulSoup(html, "html5lib")
        icnt = self.get_fandom_length(bsObj)

        # Fetch the last page and count the fic entries on it.
        sUrl = oUrl.generate_page_url(icnt)
        html = _fetch(sUrl)
        if html is None:
            return fic_cnt
        bsObj = BeautifulSoup(html, "html5lib")
        nameList = bsObj.findAll("div", class_='z-list zhover zpointer ')
        last_pg_cnt = len(nameList)
        if icnt == 1:
            return last_pg_cnt
        # (icnt - 1) full pages of 25 plus the partial last page.
        icnt -= 1
        fic_cnt = icnt * 25
        fic_cnt += last_pg_cnt
        return fic_cnt
コード例 #4
0
    def is_oldest_fics_in_db(self, info):
        """Check whether every fic on the fandom's last index page is stored.

        Args:
            info: Object exposing FandomUrl for the fandom to check.

        Returns:
            True if all fics on the oldest page are already in the DB,
            False if any is missing, or 0 (falsy) when the first page
            could not be fetched.
        """
        logging.debug('find-cnt')

        oDB = FanFicSql(self._Path)
        oDB.FilePath = self._Path
        logging.debug('DB: ' + self._Path)
        oUrl = FanfictionNetUrlBuilder(info.FandomUrl, "http://",
                                       "www.fanfiction.net/")
        fic_cnt = 0
        sUrl = oUrl.generate_page_url(1)
        logging.debug('surl: ' + sUrl)
        try:
            html = urlopen(sUrl)
        except Exception:
            # One retry after a cool-down before giving up.
            print('sleep')
            time.sleep(60)
            try:
                html = urlopen(sUrl)
            except Exception:
                # logging.critical is the call; CRITICAL is the level constant.
                logging.critical("html = urlopen(sUrl) failed" + sUrl)
                print("ERROR")
                return fic_cnt
        bsObj = BeautifulSoup(html, "html5lib")
        icnt = self.get_fandom_length(bsObj)
        sUrl = oUrl.generate_page_url(icnt)
        # NOTE(review): this second fetch has no retry guard, unlike the
        # first — an exception here propagates to the caller. Confirm intended.
        html = urlopen(sUrl)
        bsObj = BeautifulSoup(html, "html5lib")
        fics = self.get_fic_from_page(bsObj)
        for fic in fics:
            if not oDB.is_fic_in_Db(fic.FFNetID):
                return False
        return True
コード例 #5
0
def FillClosePrice(market, tradingdate):
    """Copy closing prices from stockclose into d_eq008 via a raw SQL update.

    Args:
        market: Unused here; kept for caller compatibility.
        tradingdate: Unused here; kept for caller compatibility.
    """
    try:
        stock_order.objects.raw(
            'update d_eq008,stockclose set d_eq008.F_ClosePrice=stockclose.Close where (d_eq008.F_Status IS NULL or d_eq008.F_Status=\'1\') AND (stockclose.stocksymbol_id=concat(d_eq008.F_SKID,\'.\',d_eq008.F_Market) and stockclose.tDate=d_eq008.F_Date);'
        )
    except Exception:
        # logging.critical takes printf-style args; the original called the
        # CRITICAL level constant with a print-style comma, which raised.
        logging.critical("FillClosePrice error: %s", sys.exc_info()[0])
コード例 #6
0
 def connect(self):
     """Open a boto3 DynamoDB Table resource for self.table in self.region.

     Returns:
         The Table connection object, or False when the connection fails.
     """
     try:
         dydb = boto3.resource('dynamodb', region_name=self.region)
         conn = dydb.Table(self.table)
         return conn
     except Exception:
         print("Problema na conexao com DynamoDB")
         # logging.critical is the call; CRITICAL is the level constant.
         logging.critical("Problema na conexao com DynamoDB")
         return False
コード例 #7
0
def build_dep(bind,args):
    global instruction_granularity
    blocks = bind.blocks
    #bind.writetofile(args)
    if len(blocks) == 0:
        logging.critical("build_dep:NO BLOCKS FOUND")
        print "ENTER A VALID ASM"
        exit(-1)

    else:
        #Every block
        totalhazards = 0
        for alp in xrange(bind.countblocks()):
            logging.info("build_dep:alp="+str(alp))
            care,insts = parseinst(bind.getblock(alp))
            for i in insts:
                logging.info("build_dep:Returned:"+i.rawtxt)

            logging.info("analyzing block "+str(alp))
            analyze = 0                                 #analyze is if blocks have a hazard.
            issues = []                                 #issues is an array of arrays. Each element in this array
                                                        #has two members start index , end index.
                                                        #the start index + 1 value represents the LW
                                                        #The end index represents until where to check
                                                        #@dev: lim not checked , handled outside.
                                                        #an array of arrays because there might be more than one hazard
                                                        #in the same block.
            #Runs several times per block.if a caremuch issue exists
            for each in care:
                #If next instruction is valid
                if len(insts) > each+1:
                    #If dependency exists
                    if insts[each].dest in insts[each+1].dep:
                        if insts[each+1].branchmuch ==0:
                            print alp , insts[each].rawtxt , insts[each+1].rawtxt
                            analyze = 1
                            issues.append([])
                            issues[len(issues)-1].append(each)
                            issues[len(issues)-1].append(each+instruction_granularity-1)
                            totalhazards = totalhazards + 1
                        else:
                            logging.CRITICAL("BUILDDEP:AVOIDED EDGE CASE - LD , JMP or LD , BR")
                else:
                    logging.info( "\tNo optimizations possible in block "+str(alp))

            #Runs once per block
            if analyze == 1:
                lel = graphit(insts,issues)
                bind.overwriteblock(rawoverwrite(lel,alp,bind.getformatterstat()),alp)

        print "Total Possible Hazards "+str(totalhazards)
        logging.debug("Total Possible Hazards "+str(totalhazards))
    bind.writetofile(args)
コード例 #8
0
def connection_database(user, password, host, port, database):
    """Open a PostgreSQL connection and cursor via psycopg2.

    Args:
        user, password, host, port, database: standard connection parameters.

    Returns:
        (connection, cursor) on success, or None when the connection fails.
    """
    try:
        db = psycopg2.connect(user=user,
                              password=password,
                              host=host,
                              port=port,
                              database=database)
        cursor = db.cursor()
        logging.info('Successful connection')
        return db, cursor
    except Exception:
        # logging.critical is the call; CRITICAL is the level constant.
        logging.critical('Connection error')
        return None
コード例 #9
0
 def run(self):
     """Main daemon loop: geolocate, fetch the weather, write the status file.

     Retries on a shorter interval after failed updates; any unhandled
     exception writes an error-marker status file and stops the thread.
     """
     log_file = os.path.join(self.base_dir, "i3geoweather.log")
     handler = logging.handlers.RotatingFileHandler(log_file,
                                                    maxBytes=200000,
                                                    backupCount=2)
     logging.basicConfig(
         level=self.log_level,
         format='%(asctime)s %(levelname)s: %(message)s',
         handlers=(handler, ),
     )
     logging.debug("i3geoweather starting")
     self.read_caches()
     fname = os.path.join(self.base_dir, "i3geoweather.txt")
     while True:
         try:
             lat, lon = self.geolocate()
             # Keep the last known position when geolocation fails.
             if lat is not None and lon is not None:
                 self.latitude, self.longitude = (lat, lon)
             location, temp = self.get_weather(self.latitude,
                                               self.longitude)
             if location is not None and temp is not None:
                 self.location, self.temperature = (location, temp)
                 sleep = WAIT_SUCCESS
             else:
                 sleep = WAIT_FAILURE
             if self.location is not None and self.temperature is not None:
                 self.write_weather(fname, self.location, self.temperature)
             logging.debug("next update attempt in %d seconds" % sleep)
             time.sleep(sleep)
         except Exception:
             # logging.critical is the call; CRITICAL is the level constant.
             logging.critical("Unhandled exception")
             logging.critical(traceback.format_exc())
             # Replace the status file atomically with an error marker.
             fd, tmpname = tempfile.mkstemp()
             os.write(fd, "i3geoweather error\n".encode())
             os.close(fd)
             os.rename(tmpname, fname)
             logging.critical("i3geoweather stopping")
             self.stop()
             logging.critical("i3geoweather stopped")
             break
コード例 #10
0
def fetchReleaseAndPlatform(ctx, lines):
    """Parse the first line for a UCS HUU ISO name; record release/platform.

    Updates ctx.finalData / ctx.release / ctx.platform on a successful match.

    Args:
        ctx: Parsing context exposing finalData (dict) and current_lineNo.
        lines: Remaining input lines; only lines[0] is examined.

    Returns:
        (new_state, remaining_lines) tuple driving the caller's state machine.
    """
    try:
        line = lines[0]
    except IndexError:
        # No input left: report the release/platform as missing.
        return ("MISSING_REL_PLAT", lines)

    platform = ""
    release = ""

    # NOTE(review): the '.' before 'iso' is unescaped and matches any
    # character — confirm that is intended.
    z = re.search(r'ucs-(.*?)-huu-(.*?).iso', line)
    if z:

        platform = z.group(1)
        release = z.group(2)

        if release != "":
            if release in ctx.finalData:
                if platform != "":
                    if platform in ctx.finalData[release]:
                        logging.critical('duplicate REL=%s PLAT=%s, skipping',
                                         release, platform)
                        return ('DUPLICATE_REL_PLAT', lines[1:])
                    ctx.finalData[release][platform] = {}
                else:
                    logging.critical("Error fetching 'Platform'")
                    sys.exit()
            else:
                ctx.finalData[release] = {}
                if platform != "":
                    ctx.finalData[release][platform] = {}
                else:
                    logging.critical("Error fetching 'Platform'")
                    sys.exit()

        else:
            # logging.critical is the call; the original invoked the
            # CRITICAL level constant, which is not callable.
            logging.critical("Error fetching 'Release'")
            sys.exit()

    if platform != "" and release != "":
        ctx.release = release
        ctx.platform = platform
        logging.info('RELEASE %s PLATFORM %s line %s', release, platform,
                     ctx.current_lineNo)
        newState = "FETCH_COMPONENTS"
    else:
        newState = "FETCH_REL_PLAT"

    return (newState, lines[1:])
コード例 #11
0
ファイル: ui.py プロジェクト: Perf-Org-5KRepos/NetDash
def start_gui():
    """Initialize and start the tkinter GUI."""

    # Hopefully this will catch any startup errors tk might have
    try:
        root = tk.Tk()
    except tk.TclError as exc:
        # The original did `"..." + exc`, which raises TypeError, and called
        # the CRITICAL level constant; let logging format the exception.
        logging.critical("Could not start GUI: %s", exc)
        sys.exit(3)

    root.title("NetDash")
    app = App(root)
    root.mainloop()
コード例 #12
0
def run_game(num_times):
    """Play the browser game num_times + 1 rounds via the retry button.

    Args:
        num_times: How many additional rounds to play; must be >= 0.
    """
    # Validate up front — the original only logged (through a broken
    # logging.CRITICAL call) after already playing a round.
    if num_times < 0:
        logging.critical("num_times must be larger than 0")
        return

    print(num_times)

    button = browser.find_element_by_class_name('retry-button')

    # Keep making random moves until the retry button appears (game over).
    while not button.is_displayed():
        rand_udrl()

    button.click()

    if num_times > 0:
        run_game(num_times - 1)
コード例 #13
0
ファイル: flumecli.py プロジェクト: JamesOBenson/Home_Portal
def loadCredentials(config):
    """Load access/refresh tokens from config["tokenfile"] into config.

    Decodes the JWT access token (without verification) to populate
    config["user_id"]. Exits when no token file is configured.
    """
    if not config["tokenfile"]:
        # Log before exiting — the original logged after quit(), which
        # made the (also miscapitalized) logging call unreachable.
        logging.critical("You have to provide a token file.")
        quit("You have to provide a token file.")
    else:
        logging.debug(f"Reading token info from: <{config['tokenfile']}>")
        with open(config["tokenfile"], "r") as f:
            token = json.load(f)
        config["access_token"] = token["access_token"]
        config["refresh_token"] = token["refresh_token"]
        token = JWT()
        # Decode without signature verification just to read user_id.
        decoded_token = token.decode(config["access_token"],
                                     do_verify=False,
                                     algorithms="HS256")
        config["user_id"] = decoded_token["user_id"]
コード例 #14
0
ファイル: BlockChain.py プロジェクト: ivar2563/ivar_coin
 def start_up(self):
     """
     Will add the data from the json file to the linked list at startup
     """
     json_path = str(self.get_json_path())
     try:
         # Only load when the chain already has content on disk.
         # NOTE(review): `is not True` kept verbatim in case is_empty()
         # can return a non-bool sentinel — confirm.
         if self.is_empty() is not True:
             with open(json_path, "r") as fp:
                 loaded_json = json.load(fp)
                 for key, element in loaded_json.items():
                     string = element["Validated_string"]
                     di = {key: element}
                     # state=True marks the entry as already validated.
                     self.add(di, string, state=True)
     except json.JSONDecodeError as e:
         # logging.critical is the call; CRITICAL is the level constant.
         logging.critical(e)
コード例 #15
0
    def read(self, filePath):
        try:
            self.iniFile.read(filePath)
            if not self.iniFile.has_section(NBGMINIReader.COMMON):
                logging.CRITICAL('Cannot find the section %s!' % COMMON)
                return False
            if not self.iniFile.has_section(NBGMINIReader.REGIONS):
                logging.CRITICAL('Cannot find the section %s!' % REGIONS)
                return False
            if not self.iniFile.has_section(NBGMINIReader.FEATURE_TEST_LEVES):
                logging.CRITICAL('Cannot find the section %s!' %
                                 FEATURE_TEST_LEVES)
                return False
            waitTime = self.iniFile.getint(NBGMINIReader.COMMON, 'WaitTime')
            self.waitTime = waitTime / 1000.0
            ops = self.iniFile.options(NBGMINIReader.REGIONS)
            if not ops:
                logging.CRITICAL('Section [Regions] is empty!')
                return False
            regions = {}
            for item in ops:
                if item.find('map') != -1:
                    regions[item] = ''
            for item in ops:
                if item.find('input') != -1:
                    map_key = item.replace('input', 'map')
                    if regions.has_key(map_key):
                        regions[map_key] = item
            for k, v in regions.iteritems():
                mapPath = self.iniFile.get(NBGMINIReader.REGIONS, k).strip()
                inputPath = self.iniFile.get(NBGMINIReader.REGIONS, v).strip()
                if mapPath and inputPath:
                    self.regionList.append(Region(mapPath, inputPath))

            testLevels = self.iniFile.options(NBGMINIReader.FEATURE_TEST_LEVES)
            if not testLevels:
                logging.CRITICAL('Section [Feature Test Levels] is empty!')
                return False
            for level in testLevels:
                items = self.iniFile.get(NBGMINIReader.FEATURE_TEST_LEVES,
                                         level).split(',')
                data = []
                for i in items:
                    data.append(LevelDspProp(i))
                if data:
                    self.testLevels[level] = data
            return True
        except Exception, e:
            print(e)
            logging.CRITICAL('The ini file %s is invalid!' % filePath)
            return False
コード例 #16
0
ファイル: flumecli.py プロジェクト: JamesOBenson/Home_Portal
def obtainCredentials(config):
    """Request an OAuth access/refresh token pair from the Flume API.

    On success stores access_token, refresh_token and user_id in config
    and, when config["tokenfile"] is set, saves the tokens to that file.
    """
    logger.info("Getting auth token")
    if config["verbose"]:
        logging.info("Getting auth token")

    if (config["clientid"] and config["clientsecret"] and config["username"]
            and config["password"]):
        if config["verbose"]:
            logging.info("All required parameters passed for auth token")
        url = "https://api.flumetech.com/oauth/token"
        # Build the JSON body with json.dumps so the credentials are escaped
        # correctly (the original hand-concatenated string was mangled).
        payload = json.dumps({
            "grant_type": "password",
            "client_id": config["clientid"],
            "client_secret": config["clientsecret"],
            "username": config["username"],
            "password": config["password"],
        })
        headers = {"content-type": "application/json"}

        resp = requests.request("POST", url, data=payload, headers=headers)
        logging.info(f"Response from server: {resp.text}")
        dataJSON = json.loads(resp.text)

        if dataJSON["http_code"] == 200:
            logging.info("Got 200 response from auth token request.")
            config["access_token"] = dataJSON["data"][0]["access_token"]
            token = JWT()
            # Decode without signature verification just to read user_id.
            decoded_token = token.decode(config["access_token"],
                                         do_verify=False,
                                         algorithms="HS256")
            config["user_id"] = decoded_token["user_id"]
            config["refresh_token"] = dataJSON["data"][0]["refresh_token"]

            if config["tokenfile"]:
                outline = {}
                outline["access_token"] = config["access_token"]
                outline["refresh_token"] = config["refresh_token"]
                logging.info("Saving access and refresh token to : " +
                             config["tokenfile"])
                logging.debug(outline)
                # `with` guarantees the file is closed even on write errors.
                with open(config["tokenfile"], "w") as f:
                    f.write(json.dumps(outline))
        else:
            # logging.critical is the call; CRITICAL is the level constant.
            logging.critical("ERROR: Failed to obtain credentials")
コード例 #17
0
ファイル: views.py プロジェクト: stajama/DjangoPractice
def getHumblePic():
    '''Finds random pick from recent pictures on hubblesite.org. Returns link
    to image as a string. Returns None if there is a problem.'''

    x = requests.get('http://hubblesite.org/images/gallery')
    soup = bs4.BeautifulSoup(x.text, 'lxml')
    pickList = []
    for i in soup.find_all('a'):
        href = i.get('href')
        if href is None:
            continue
        if re.match(r'/image/\d{4}/', href) is not None:
            pickList.append("http://hubblesite.org" + href)
    if not pickList:
        # Guard: an empty list made random.randint(0, -1) raise ValueError.
        logging.critical("getHumblePic --- Houston, we have a problem...")
        return
    selected = random.choice(pickList)
    x = requests.get(selected)
    soup = bs4.BeautifulSoup(x.text, 'lxml')
    for i in soup.find_all('a'):
        href = i.get('href')
        # Guard: anchors without an href made the `in` test raise TypeError.
        if href and 'http://imgsrc.' in href:
            return href
    # logging.critical is the call; CRITICAL is the level constant.
    logging.critical("getHumblePic --- Houston, we have a problem...")
    return
コード例 #18
0
ファイル: photobooth.py プロジェクト: Borck/Photobooth
    def on_enter_Restart(self):
        """Restart the camera, restore the preview/logo, and go to Start."""
        logging.debug("now on_enter_Restart")
        logging.debug("restart Camera")

        self.camera.close()

        # Setup Camera
        try:
            self.camera = picamera.PiCamera()
        except Exception:
            # logging.critical is the call; CRITICAL is the level constant.
            logging.critical("error initializing the camera - exiting")
            raise SystemExit

        self.camera.resolution = (self.photo_w, self.photo_h)
        self.camera.hflip = self.flip_screen_h
        self.camera.vflip = self.flip_screen_v
        self.startpreview()

        # load the Logo of the Photobooth and display it
        self.overlayscreen_logo = self.overlay_image_transparency(self.screen_logo, 0, 5)

        self.to_Start()
コード例 #19
0
def download_mp3(download_url, s, path_filename):
    """
    Download a song from z1.fm.

    Args:
    download_url (str): url to load the mp3 from
    s (requests.Session): session with already stored headers for z1.fm
    path_filename (str): destination path for the mp3 file

    Returns:
    flag, if the down load was successful
    """
    # Get the link
    try:
        song_request_response = s.get(download_url)
    except Exception as e:
        logger.error(f"The following error was encountered: {e}")
        # Logger objects have no CRITICAL attribute; the method is lowercase.
        logger.critical(download_url)
        # Bail out — the original fell through and hit a NameError on the
        # unbound response below.
        return False

    with open(os.path.join(path_filename), "wb") as f:
        f.write(song_request_response.content)
    return True
コード例 #20
0
ファイル: tw_log.py プロジェクト: lmao420blazeit/tw_bot
def log(_level: str, _log: str) -> None:
    """Dispatch _log to the stdlib root logger at the named level.

    Args:
        _level: One of "debug", "info", "warning", "error", "critical".
        _log: Message to log. Unknown level names are silently ignored.
    """
    # logging.DEBUG/INFO/... are integer level constants, not callables —
    # dispatch to the lowercase module-level logging functions instead.
    handlers = {
        "debug": logging.debug,
        "info": logging.info,
        "warning": logging.warning,
        "error": logging.error,
        "critical": logging.critical,
    }
    handler = handlers.get(_level)
    if handler is not None:
        handler(_log)
コード例 #21
0
ファイル: main.py プロジェクト: b-eastwood/hvir-calc-py
def main():
    """Entry point: read survey data, process it, and write outputs and logs."""
    # Get the arguments from the command-line except the filename
    argv = sys.argv[1:]
    params = get_params(argv)
    print('Program started, input file: %s' % params['filepath'])

    # Configure log verbosity from the optional 'debug' parameter.
    if 'debug' in params.keys():
        if params['debug'] == '1':
            print('Setting debug level: Debug')
            logging.getLogger().setLevel(logging.DEBUG)
        else:
            print('Setting debug level: Warnings')
            logging.getLogger().setLevel(logging.WARNING)
    else:
        logging.getLogger().setLevel(logging.INFO)

    params['data_params'], type_dict = reader.get_data_settings(params['config_file'])
    header, raw_data = reader.get_data(params)
    type_selector, converters = reader.validate_data_format(params['data_params'], header)
    key_fails, failed_rows, surveys,quality_assessment, out_keys,meta, raw_surveys = data_processor.process_rows(raw_data, header, params, converters)
    # Pick the log format from the logfile extension.
    if 'logfile' in params:
        if params['logfile'].endswith('txt'):
            writer.write_log(logfiler.write_txt_log(params, key_fails, raw_data, failed_rows, meta), params['logfile'], 'Log:')
        elif params['logfile'].endswith('csv'):
            writer.write_log(logfiler.create_pbi_log(surveys,quality_assessment, meta['attribute_quality'], meta, failed_rows,params), params['logfile'], ['Key', 'Value'])
        else:
            # logging.critical is the call; the original invoked the
            # CRITICAL level constant, which raised TypeError here.
            logging.critical('Logfile not written, wrong file extension provided')
    out_header = surveys[0].keys()
    if params['outfile'].endswith('.csv'):
        writer.write_data(surveys, out_header, params,rounding=9,raw_surveys=raw_surveys)
        if len(quality_assessment) > 0:
            writer.write_data(quality_assessment, quality_assessment[0].keys(), params, sub_file='group_qual')
        if len(meta['attribute_quality']) > 0 and 'logfile' in params:
            writer.write_data(meta['attribute_quality'], meta['attribute_quality'][0].keys(), params,
                              sub_file='attr_qual')
    else:
        logging.critical("Your output filename: %s must end with .csv" % params['outfile'])
コード例 #22
0
}

uniq_names = get_uniq_names(NAMES_FILE)

logging.debug('by max_latitude: %s', by_coordinates.max_latitude())
logging.debug("by min_latitude: %s", by_coordinates.min_latitude())
logging.debug("by max_longitude: %s", by_coordinates.max_longitude())
logging.debug("by min_longitude: %s", by_coordinates.max_longitude())

logging.info('Creating the database connection')
try:
    db = Database()
    cursor = db.cursor()
    db.create_tables()
except Exception as e:
    logging.CRITICAL("Unable to create the database: %s", e)


for nationality in NATIONALITIES:
    for i in range(1000):
        name = random.choice(uniq_names)
        random_lat = random.uniform(
            NATIONAL_COORDINATES[nationality].min_latitude(),
            NATIONAL_COORDINATES[nationality].max_latitude()
            )
        random_long = random.uniform(
            NATIONAL_COORDINATES[nationality].min_longitude(),
            NATIONAL_COORDINATES[nationality].max_longitude()
            )

        cursor.execute(
コード例 #23
0
    url2 = url + str(year) + month + day
    return url2

def obtain_titles(list_of_urls):
    """Open each dated URL in Chrome and log every headline found on it."""
    driver = webdriver.Chrome()
    for url in list_of_urls:
        # The date is encoded at the tail of the URL: ...YYYY/MM/DD.
        dia = url[-2:]
        mes = url[-5: -3]
        ano = url[-10: -6]

        driver.get(url)

        lista_titulares = driver.find_elements_by_class_name('noticia')
        print('\n\n' + " [+] Los titulares del día " + str(dia) + " de " + str(mes) + " de " + str(ano) + '\n\n')

        for elemento in lista_titulares:
            logging.critical(' [!] ' + elemento.text)

        lista_titulares.clear()
        # sleep(2)

    driver.close()

# Build URLs for the first two days of September 2018, then print the titles.
for i in range(2):
    listaweb.append(create_url(i,month[8], year[2]))

obtain_titles(listaweb)

# logging.critical is the call; logging.CRITICAL is the level constant
# and is not callable.
logging.critical(' ---- End of the program ---- ')
コード例 #24
0
ファイル: emotibot.py プロジェクト: danielsig727/GooluluDuck
import logging
import json
import requests
import re
import os
import sys
import opencc

from slackbot import settings
from slackbot.bot import respond_to
from slackbot.bot import listen_to
from slackbot_utils import *

appid = os.getenv('EMOTIBOT_APPID', None)
if not appid:
    # logging.critical is the call; the CRITICAL level constant is not
    # callable, so the original raised TypeError instead of logging.
    logging.critical(
        'no Emotibot AppId is found. Make sure EMOTIBOT_APPID is set')
    sys.exit(1)

url = 'http://idc.emotibot.com/api/ApiKey/openapi.php'

userid_db = {}
userid_db_file = 'emotibot_userid_db.json'
userid_db_loaded = False


def emotibot_register():
    register_data = {"cmd": "register", "appid": appid}
    r = requests.post(url, params=register_data)
    response = json.dumps(r.json(), ensure_ascii=False)
    jsondata = json.loads(response)
    datas = jsondata.get('data')
コード例 #25
0
ファイル: siemapi.py プロジェクト: satori7/SIEM-API
    if completion == True:
        break

# Now that the query is done, get the results.
timestart = time.time()
fw = open("output.json", "w+")
print("Getting results...")
rows = 0
# Page through the query results: each iteration requests up to 1,000,000
# rows starting at `rows` and appends the raw JSON to the output file.
while True:
    getres = 'qryGetResults?startPos={}&numRows=1000000&reverse=false'.format(
        rows)
    data3 = {"resultID": resultID2}
    try:
        r3 = client.post(url + getres, headers=headers, json=data3)
    except requests.exceptions.RequestException as e:
        # logging.critical is the call; CRITICAL is the level constant.
        logging.critical(e)
        print(e)
        sys.exit(1)
    rows = rows + 10000
    # Write the results to a file
    fw.write(r3.text)
    print("Running for %s seconds." % (round(time.time() - timestart)))
    #TODO: This is really hacky. Find a better way of doing this (once I have another SIEM to work with).
#    fsize = os.stat('./output.json')
#    lastrow = str(r3.json()['columns'])
#    if fsize.st_size > 10000 and re.search('name', lastrow):
#        break

# Close the result so the ESM doesn't get jammed up
close = 'qryClose?resultID=' + resultID2
try:
コード例 #26
0
    def reindex_archive(self, ffnet_url, fandom_name, isXover, start_page_num):
        """Re-crawl a fandom's index pages from start_page_num and save fics.

        Args:
            ffnet_url: Fandom path fragment used to build index-page URLs.
            fandom_name: Fandom label; replaced by the scraped name unless
                indexing a crossover section.
            isXover: True when indexing a crossover section.
            start_page_num: First index page to crawl.

        Returns:
            Number of fics saved.
        """
        logging.debug('')
        self._is_xover = isXover
        self._Fandom = fandom_name
        oDB = FanFicSql(self._Path)
        oDB.FilePath = self._Path
        logging.debug('DB: ' + self._Path)

        logging.debug('lastDate: ' + str(0))
        oUrl = FanfictionNetUrlBuilder(ffnet_url, "http://",
                                       "www.fanfiction.net/")
        fic_cnt = 0
        sUrl = oUrl.generate_page_url(1)
        logging.debug('surl: ' + sUrl)
        html = urlopen(sUrl)
        bsObj = BeautifulSoup(html, "html5lib")
        if not isXover:
            self._Fandom = self.get_fandom(bsObj)
            print('fandom: ' + self._Fandom)
            logging.debug('Fandom: ' + self._Fandom)
        icnt = self.get_fandom_length(bsObj)
        logging.debug('Length: ' + str(icnt))
        icnt2 = 0
        # Fixed: _icnt was unbound if the first length lookup below failed.
        _icnt = 0
        for x in range(start_page_num, icnt):

            sUrl = oUrl.generate_page_url(x)
            logging.debug('surl: ' + sUrl)
            try:
                html = urlopen(sUrl)
            except Exception:
                print('sleep')
                time.sleep(60)
                try:
                    html = urlopen(sUrl)
                except Exception:
                    # logging.critical is the call; CRITICAL is the constant.
                    logging.critical("html = urlopen(sUrl) failed" + sUrl)
                    print("ERROR")
                    return fic_cnt

            bsObj = BeautifulSoup(html, "html5lib")
            try:
                _icnt = self.get_fandom_length(bsObj)
            except Exception:
                pass
            logging.debug('Length: ' + str(_icnt))
            # The page count can grow while crawling; remember the largest.
            if _icnt > 0:
                icnt2 = _icnt
            fic_list = self.get_fic_from_page(bsObj)
            fic_cnt += len(fic_list)
            self.save_fic_list(fic_list)
            logging.debug('fic count: ' + str(fic_cnt))
            print('page_num : ' + str(x))
            # Be polite to the server between page fetches.
            time.sleep(5)
        # Crawl any pages that appeared after the initial length estimate.
        if icnt2 > icnt:
            for a in range(icnt, icnt2):
                sUrl = oUrl.generate_page_url(a)
                html = urlopen(sUrl)
                bsObj = BeautifulSoup(html, "html5lib")
                fic_list = self.get_fic_from_page(bsObj)
                fic_cnt += len(fic_list)
                self.save_fic_list(fic_list)
                print('page_num: ' + str(a))
                time.sleep(5)
        return fic_cnt
import logging
logging.basicConfig(level=logging.INFO)


# 注入 500mb 的内存
def exhaust_mem(num=500, unit='MB', duration=3000):
    """Allocate roughly *num* units of memory and hold it for *duration* seconds.

    A unit of 'MB' allocates num mebibytes; any other unit value allocates
    num gibibytes. The payload string is kept alive by the local binding for
    the whole sleep, simulating sustained memory pressure.
    """
    bytes_per_unit = 1024 ** 2 if unit == 'MB' else 1024 ** 3
    payload = ' ' * (num * bytes_per_unit)  # held alive until the function returns
    time.sleep(duration)


# Inject a 500 MB memory leak
if __name__ == '__main__':
    try:
        # Spawn thread_num worker threads that each hold 500 MB.
        threads = []
        thread_num = 1
        for i in range(0, thread_num):
            thread = threading.Thread(target=exhaust_mem, args=(500, 'MB',))
            threads.append(thread)
        for t in threads:
            t.start()
        for t in threads:
            t.join()
    except Exception as e:
        logging.exception(e)
        # BUG FIX: logging.CRITICAL is the numeric level constant (50), not a
        # callable — invoking it raised TypeError. Use logging.critical().
        logging.critical("Error: unable to start thread")


コード例 #28
0
ファイル: photobooth.py プロジェクト: Borck/Photobooth
    def __init__(self):
        """Initialize the photobooth: GPIO buttons, camera, overlays and config.

        Raises SystemExit if the camera cannot be initialized.
        """
        # create the card objects
        self.layout = [PhotoCard(), PhotoCard()]

        self.initStateMachine()

        logging.debug("Read Config File")
        self.readConfiguration()

        # Buttons are wired active-low, hence pull-ups and FALLING-edge detect.
        logging.debug("Config GPIO")
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.pin_button_right, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(self.pin_button_left, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        # A negative pin number means "no flash connected" — TODO confirm against config.
        if self.pin_flash >= 0:
            GPIO.setup(self.pin_flash, GPIO.OUT)
        GPIO.add_event_detect(self.pin_button_right, GPIO.FALLING, callback=self.Button2pressed, bouncetime=500)
        GPIO.add_event_detect(self.pin_button_left, GPIO.FALLING, callback=self.Button1pressed, bouncetime=500)

        logging.debug("Set TimeStamp for Buttons")
        self.time_stamp_button1 = time.time()
        self.time_stamp_button2 = time.time()

        self.button1active = False
        self.button2active = False

        logging.debug("Setup Camera")
        # Setup Camera
        try:
            self.camera = picamera.PiCamera()
        except Exception:
            # BUG FIX: logging.CRITICAL is the int level constant (50), not a
            # function — calling it raised TypeError and hid this message.
            logging.critical("error initializing the camera - exiting")
            raise SystemExit

        self.camera.resolution = (self.photo_w, self.photo_h)
        self.camera.hflip = self.flip_screen_h
        self.camera.vflip = self.flip_screen_v
        self.startpreview()

        self.photonumber = 1

        self.cycleCounter = 0

        # load the Logo of the Photobooth and display it
        self.overlayscreen_logo = self.overlay_image_transparency(self.screen_logo, 0, 5)

        # find the USB Drive, if connected
        self.PhotoCopyPath = self.GetMountpoint()

        # path for saving photos on usb drive
        if self.PhotoCopyPath is not None:
            self.PhotoCopyPath = self.PhotoCopyPath + "/Fotos"
            logging.debug("Photocopypath = " + self.PhotoCopyPath)
            if not os.path.exists(self.PhotoCopyPath):
                logging.debug("os.mkdir(self.PhotoCopyPath)")
                os.mkdir(self.PhotoCopyPath)
        else:
            logging.debug("self.PhotoCopyPath not Set -> No USB Drive Found")

        # find the USB Drive with card layout configuration
        self.CardConfigFile = self.GetMountpoint()

        # read card config data: prefer the USB drive's card.ini, fall back to
        # the bundled default template.
        if self.CardConfigFile is not None:
            self.CardConfigFile = self.CardConfigFile + '/Fotobox/card.ini'
            logging.debug("Config file for Card creating:")
            logging.debug(self.CardConfigFile)

        else:
            self.CardConfigFile = os.path.join(REAL_PATH, 'Templates/Default/card.ini')
            logging.debug("Default Config file for Card creating:")
            logging.debug(self.CardConfigFile)

        # read card configuration if config exists
        if not os.path.exists(self.CardConfigFile):
            self.CardConfigFile = os.path.join(REAL_PATH, 'Templates/Default/card.ini')

        # load the Card Layout
        self.readCardConfiguration(self.CardConfigFile)

        # Start the Application
        self.on_enter_PowerOn()
コード例 #29
0
ファイル: __main__.py プロジェクト: Avanade/piControllerMenu
# Author: Thor Schueler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import logging
from controllerMenu import ControllerMenu

logging.basicConfig(level=logging.DEBUG)
try:
    if __name__ == '__main__':
        menu = ControllerMenu()
        menu.Run()
except Exception:
    # BUG FIX: logging.CRITICAL is the numeric level constant (50), not a
    # callable — invoking it raised TypeError, which replaced and hid the
    # original exception. logging.critical() logs it with the traceback.
    # Narrowed the bare except so KeyboardInterrupt/SystemExit still exit.
    logging.critical("Oops! Exception occured:", exc_info=True)
コード例 #30
0
ファイル: HtmlReport.py プロジェクト: japfei/permcrawl
def generate_reports_from_json(indir, outdir=""):
    """Build per-app analysis reports from the JSON files in *indir*.

    Returns a DataFrame with one row per app (the basic analysis overview).
    If *outdir* is specified, also writes per-app HTML reports plus an
    index.html summary there; on a write failure the errors are logged and
    None is returned implicitly.
    """
    # Deactivate the column-width limit.
    # BUG FIX: -1 is deprecated/removed in modern pandas and raises
    # ValueError; None is the supported "no limit" value.
    pd.set_option('display.max_colwidth', None)

    basic_reports = dict()
    analyzed_apps = 0
    analyzable_apps = 0

    for file in os.listdir(indir):
        if not file.endswith(".json"):
            continue
        try:
            # BUG FIX: the file was only closed on the success path; the
            # with-statement closes it even when parsing/reporting fails.
            with open(os.path.join(os.path.realpath(indir), file), "r") as json_file:
                # Format a single report for each JSON file
                app_dict = format_single_analysis(json.loads(json_file.read()))

            analyzed_apps += 1

            # If outdir specified create report in it
            report = generate_report(
                app_dict, ((outdir + "reports/") if outdir else ""))
            del app_dict

            basic_reports.update(report)

            try:
                d = pd.DataFrame(
                    basic_reports,
                    index=[
                        "Analyzable", "Target SDK", "Declared Permissions",
                        "Permissions asked up-front",
                        "Permissions educated up-front",
                        "Permissions asked in-context",
                        "Permissions educated in-context",
                        "Non-backtracable asked permissions",
                        "Non-backtracable educated permissions"
                    ]).T
                # For testing purposes (does not influence runtime heavily)
                x = d.to_html(escape=False)
                del d
                del x

            except Exception:
                # BUG FIX: logging.CRITICAL is the int level constant (50),
                # not a function — calling it raised TypeError. Use
                # logging.critical() and lazy %-formatting.
                logging.critical(
                    "Error when creating DataFrame caused by report: %s",
                    report)

            # Can only be a set with one element
            report_key = list(report.keys())[0]

            if report[report_key]["Analyzable"]:
                analyzable_apps += 1

        except Exception:
            print("Could not load %s" % file)

    # Change orientation s.t. app names form lines
    df = pd.DataFrame(basic_reports).T

    if outdir:
        # Write overall report to output directory
        with open(os.path.realpath(outdir) + "/index.html", "w") as index:
            try:
                report_html = df.to_html(escape=False)
                html = "<h1>Report from %s</h1>\n" \
                    "<p>Out of overall %d apps, %d were successfully analyzed.</p>\n" \
                    "<h2>Report Overview:</h2>\n" \
                    "%s" % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), analyzed_apps, analyzable_apps,
                           report_html)
                index.write(html)
                return df

            except Exception:
                (ex_type, ex_value, tb) = sys.exc_info()
                traceback.print_exception(ex_type, ex_value, tb)
                logging.error("Analyzed apps: %d" % analyzed_apps)
                logging.error("Error when writing the overall HTML Report")
    else:
        return df