import base64
import pickle
import time

from PIL import ImageFile

module_start = time.time()  # assumed: recorded before the heavy imports below, as in the original script

from dl_model import Net
import urllib3
module_end = time.time()
print("[TimeMeasurement] module loaded time : ", module_end - module_start)

from watcher import Watcher
watching = Watcher(td=True)

model, _ = watching.watch(pickle.load, [open('torch_model.pkl', 'rb')],
                          name='torch_loaded')
model = model.double()
print("[BreakTime] Torch no need model read time")

from watcher import Watcher
watching = Watcher(td=True)
http = urllib3.PoolManager()

ImageFile.LOAD_TRUNCATED_IMAGES = True


def image_decoder(baseImg):
    x = base64.b64decode(baseImg)  # base64 to bytes
    splited_x = x.split(b'\r\n')  # split bytes
    data_decoded_bytes = splited_x[4]
    print(splited_x)
    try:
        data_decoded_bytes.decode('utf-8')
        data_decoded_bytes = base64.b64decode(data_decoded_bytes)
        print(data_decoded_bytes)
    except ValueError:
        # not valid UTF-8 text (or bad base64), so the data is already raw bytes
        print("need not decode base64.")
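
A minimal call sketch for image_decoder, assuming the input is a base64-encoded multipart-style body whose fifth CRLF-separated segment carries the (possibly base64-encoded) image data, as the split above implies; the payload below is made up purely for illustration:

# Hypothetical payload: five CRLF-separated segments, image data at index 4.
sample_body = (b"--boundary\r\n"
               b"Content-Disposition: form-data\r\n"
               b"Content-Type: image/png\r\n"
               b"\r\n"
               b"iVBORw0KGgo=")  # base64 of the PNG signature, illustration only
image_decoder(base64.b64encode(sample_body))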
Code example #2
import urllib3
import json
import re
import os
from . import exception, _isstring

# Suppress InsecurePlatformWarning
urllib3.disable_warnings()

_pools = {
    'default': urllib3.PoolManager(num_pools=3,
                                   maxsize=10,
                                   retries=3,
                                   timeout=30),
}

_onetime_pool_spec = (urllib3.PoolManager,
                      dict(num_pools=1, maxsize=1, retries=3, timeout=30))


def _create_onetime_pool():
    cls, kw = _onetime_pool_spec
    return cls(**kw)


def _methodurl(req, **user_kw):
    token, method, params, files = req
    return 'https://api.telegram.org/bot%s/%s' % (token, method)


def _which_pool(req, **user_kw):
Code example #3
def write_to_walkup_db(data_dir='data',
                       fname='WalkUpMusic.csv',
                       overwrite=False,
                       music_lookup=False):
    # Get team IDs
    teams_df = pd.read_csv(
        'https://raw.githubusercontent.com/chadwickbureau/baseballdatabank/master/core/Teams.csv'
    )
    teams2_df = teams_df[teams_df.yearID == max(teams_df.yearID)].filter(
        items=['name', 'teamIDBR'])
    # Some of the shorthands don't match, so correct them by hand
    teams2_df = teams2_df.replace(['CHW', 'KCR', 'SDP', 'SFG', 'TBR', 'WSN'],
                                  ['CWS', 'KC', 'SD', 'SF', 'TB', 'WSH'])

    # Query MLB.com; each team has its own walk-up music page
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                               ca_certs=certifi.where())
    d = []
    url_prefix = 'https://www.mlb.com/entertainment/walk-up/'
    for idx, team in teams2_df.iterrows():
        print(team)
        url = url_prefix + team['teamIDBR']
        r = http.request('GET', url)
        sleep(2)  # wait 2 seconds between pages to give the data time to load
        html_decode = r.data.decode('utf-8')
        soup = BeautifulSoup(html_decode, 'html.parser')

        # For each player, find the corresponding song title and artist
        table_rows = soup.findAll('tr')
        for jdx, row in enumerate(table_rows):
            name = None
            song_artist = None
            song_title = None
            if row.find('p', class_="player-name") is not None:
                name = row.find('p', class_="player-name").string.strip()
                if ' - ' in name:
                    name = name.split(' - ')[0]

                # HTML is inconsistently written,
                # usually span has a class but sometimes...
                if row.find('span', class_="song-artist") is not None:
                    song_artist = row.find('span', class_="song-artist").string
                    song_title = row.find('span', class_="song-title").string
                    lang = 'en'  # english unless otherwise specified

                # Song title/artist is in one tag, and that needs to be parsed
                else:
                    div = row.find('div', class_="song-name")
                    if len(div.findAll('span')) == 0:
                        # If nothing in the span, the player has no walk-up music
                        continue
                    else:
                        song_artist = div.find('span').string
                        if len(div.findAll('span')) == 2:
                            # If there are two spans with song & artist in each
                            song_title = row.findAll('span')[1].string
                            lang = row.findAll('span')[1]['lang'].lower()
                        else:
                            # If there is only one span with artist outside of span
                            div_sib = div.find('span').next_sibling
                            lang = 'en'
                            if div_sib is not None:
                                song_title = div_sib.string.strip()
                            else:
                                song_title = song_artist
                                song_artist = None

                # Finally, add player's information to a dict
                d.append({
                    'Team_Name': team['name'],
                    'Team_ID': team['teamIDBR'],
                    'Player_Name': name,
                    'Song_Artist': song_artist,
                    'Song_Title': song_title,
                    'Language': lang,
                    'Date_Updated': datetime.datetime.today().date()
                })

    # Save to a .csv file (this will be the database for now until it gets too big)
    data_path = data_dir + '/' + fname
    if not os.path.isfile(data_path) or overwrite:
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        pd.DataFrame(d).to_csv(data_path, index=False)
    else:
        old_d = pd.read_csv(data_path)
        pd.concat([old_d, pd.DataFrame(d)], ignore_index=True).to_csv(data_path, index=False)

    return pd.DataFrame(d)
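
A usage sketch, assuming the module-level imports the function relies on (pandas as pd, urllib3, certifi, BeautifulSoup, sleep, datetime, os) are present as in the original file:

# Rebuild the walk-up-music CSV and keep the freshly scraped rows for a quick look.
walkup_df = write_to_walkup_db(data_dir='data', fname='WalkUpMusic.csv', overwrite=True)
print(walkup_df.head())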
Code example #4
def program2():
    http = urllib3.PoolManager()
    url = sehir2
    response = http.request('GET', url)
    soup = BeautifulSoup(response.data, 'lxml')

    dfff = pd.read_html(sehir2)
    saatler2 = []

    for table in soup.findAll('div', attrs={'class': 'race-block__time'}):
        for td in table.findAll('ul', attrs={'class': None}):
            for a in td.findAll('a', attrs={'class': None}):
                saatler2.append(a['href'])
    for b in range(0, len(saatler2)):
        df2 = pd.read_html(saatler2[b])
        tarih2 = []
        for table in soup.findAll(
                'table',
                attrs={'class': 'table-format-normal perfomance-border perfomance-table-style'}):
            for td in table.findAll('td', attrs={'class': None}):
                for a in td.findAll('a', attrs={'class': None}):
                    tarih2.append(a['href'])
        ss2 = []
        for t in range(0, len(tarih2)):
            df3 = pd.read_html(tarih2[t])

            o = list(df3[0]['H.P'].values)
            #o.sort()
            ss2.append(o)

        deneme2 = []  # initialise once per race time, before looping over the tables
        for j in range(0, len(df2)):

            for k in range(1, len(df2[j])):
                tot = len(df2[j]) + len(df2[j - 1])

                if j % 2 == 0:
                    continue
                if len(df2[j].values[0]) < 8:
                    continue

                if j == 1:
                    d = ({
                        'Tarih': df2[j].values[k][1:][0],
                        'Sehir': df2[j].values[k][2:][0],
                        'Şart:': df2[j].values[k][3:][0],
                        'Pist': df2[j].values[k][4:][0],
                        'PD': df2[j].values[k][5:][0],
                        'KG': df2[j].values[k][6:][0],
                        'Jokey': df2[j].values[k][7:][0],
                        'SN': df2[j].values[k][8:][0],
                        'S/K': df2[j].values[k][9:][0],
                        'Derece': df2[j].values[k][10:][0],
                        'Takı': df2[j].values[k][11:][0],
                        'D/800': df2[j].values[k][12:][0],
                        'Tabela & B. Farkları': df2[j].values[k][13:][0],
                        'AGF': df2[j].values[k][14:][0],
                        'H.P': df2[j].values[k][15:][0],
                        'İkr': df2[j].values[k][16:][0],
                        'yarış saati': b,
                        'Tablo': j,
                        'Geçmiş H.P': ss2[len(df2[j]):][k - 1]
                    })

                else:
                    d = ({
                    'Tarih': df2[j].values[k][1:][0],
                    'Sehir': df2[j].values[k][2:][0],
                    'Şart:': df2[j].values[k][3:][0],
                    'Pist': df2[j].values[k][4:][0],
                    'PD': df2[j].values[k][5:][0],
                    'KG': df2[j].values[k][6:][0],
                    'Jokey': df2[j].values[k][7:][0],
                    'SN': df2[j].values[k][8:][0],
                    'S/K': df2[j].values[k][9:][0],
                    'Derece': df2[j].values[k][10:][0],
                    'Takı': df2[j].values[k][11:][0],
                    'D/800': df2[j].values[k][12:][0],
                    'Tabela & B. Farkları': df2[j].values[k][13:][0],
                    'AGF': df2[j].values[k][14:][0],
                    'H.P': df2[j].values[k][15:][0],
                    'İkr': df2[j].values[k][16:][0],
                    'yarış saati': b,
                    'Tablo': j,
                    'Geçmiş H.P': ss2[tot:][k]
                })

                deneme2.append(d)

        deneme2 = pd.DataFrame(deneme2)
        ad2 = 'şehir2 yaris{0}.xlsx'.format(b)
        writer2 = pd.ExcelWriter(ad2)
        deneme2.to_excel(writer2, 'Sheet{0}'.format(b))
        writer2.save()
Code example #5
File: check_httpcode_status.py  Project: z-ge/zabbix
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import urllib3
import certifi
import sys

if len(sys.argv) < 2:
    print("Usage: %s web_url" % sys.argv[0])
    print("example: %s http://www.baidu.com" % sys.argv[0])
    sys.exit(1)

weburl = sys.argv[1]
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
try:
    r = http.request('GET', weburl, retries=2, timeout=3.0)
except urllib3.exceptions.HTTPError:
    print('Connection failed.')
else:
    print(r.status)
Code example #6
File: stock_1.py  Project: TheRaptors/Fund
def send_message(msg, robot=robot):
    headers = {'Content-Type': 'application/json'}
    data = json.dumps({"msgtype": "markdown", "markdown": {"content": msg}})
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    send = http.request(method='POST', url=robot, body=data, headers=headers)
    print(send.data.decode())
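
A usage sketch; robot is the webhook URL bound at module level in the original stock_1.py, and the message uses the markdown msgtype the function posts:

# Push a short markdown notice through the webhook configured above.
send_message("**Fund alert**\n> holdings rebalanced at market close")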
Code example #7
def submit(data):
    global __submit_pool
    if  "PRODUCTION" in os.environ:
        if __submit_pool is None:
            __submit_pool = urllib3.PoolManager(maxsize=32)
        
        res = None
        while True:
            try:
                v = __submit_pool.request(
                    "POST", "http://gather.service/data",
                    body=json.dumps(data, ensure_ascii=False).encode("utf-8"),
                    headers={
                        'Content-Type': 'application/json',
                        'Authorization': os.environ["SPIDER_NAME"] if "SPIDER_NAME" in os.environ else "unknown"
                    }
                )
                str_data = v.data.decode("utf-8")
                res = json.loads(str_data)
            except UnicodeDecodeError as e:
                # v.data is the raw response body; res is still None at this point
                print(v.data, e, "decode error retry after 5 seconds")
                time.sleep(5)
                continue
            except json.JSONDecodeError as e:
                print( str_data, e, "json decode error retry after 5 seconds")
                time.sleep(5)
                continue
            except urllib3.exceptions.MaxRetryError as e:
                print(e, "retry after 5 seconds")
                time.sleep(5)
                continue
            break
        return res
    else:
        from .checker import checkHTML
        for kw in ["lang", "src", "cat", "subcat", "body", "meta"]:
            if kw not in data:
                return {
                    "code": 2,
                    "msg": "failed to parse json",
                    "_": "required key `%s`" % kw
                }
        if data["lang"] not in ["zh", "en"]:
            return {
                "code": -1,
                "msg": "invalid language",
                "_": data["lang"]
            }
        v = checkHTML(data["body"])
        if v.ok:
            return {
                "code": 0,
                "msg": "ok",
                "_": v
            }
        else:
            return {
                "code": 3,
                "msg": "Invalid html format",
                "_": v
            }
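
A call sketch for submit; the required keys mirror the validation branch above, while the field values are placeholders:

# Outside of PRODUCTION this only exercises the local checkHTML validation path.
doc = {
    "lang": "en",                        # must be "zh" or "en"
    "src": "https://example.org/post",   # placeholder source URL
    "cat": "news",                       # placeholder category
    "subcat": "tech",                    # placeholder sub-category
    "body": "<html><body><p>hello</p></body></html>",
    "meta": {},
}
print(submit(doc))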
Code example #8
    def __init__(self, fig):
        self.sysStats = {}
        self.fig = fig
        plt.subplots_adjust(wspace=.3)
        self.ax1 = fig.add_subplot(221,
                                   projection='3d',
                                   xlim=(-sysStatsLogLengthInSecs, 0),
                                   zlim=(0, 100),
                                   xlabel='time (s)',
                                   ylabel='CPU#',
                                   zlabel='CPU%')
        self.ax1.set_zlabel('CPU%', rotation='vertical')
        self.ax1props = {
            'xlim': (-sysStatsLogLengthInSecs, 0),
            'zlim': (0, 100),
            'xlabel': 'time (s)',
            'ylabel': 'CPU#',
            'zlabel': 'CPU%'
        }
        self.ax2 = fig.add_subplot(222,
                                   xlim=(-sysStatsLogLengthInSecs, 0),
                                   ylim=(0, 100),
                                   xlabel='time (s)',
                                   ylabel='T$_{CPU}$ ($^o$C)')
        self.ax2.yaxis.label.set_color('r')
        self.ax2.tick_params(axis='y', colors='r')
        self.ax3 = self.ax2.twinx()
        self.ax3.set(ylim=(0, 100), ylabel='mem%')
        self.ax3.yaxis.tick_left()
        self.ax3.yaxis.set_label_position('left')
        self.ax3.yaxis.label.set_color('c')
        self.ax3.tick_params(axis='y', colors='c')
        for axItr in [self.ax2]:
            axItr.grid(True, alpha=.5)
            axItr.yaxis.set_label_position('right')
            axItr.yaxis.tick_right()


#         self.ax4 = fig.add_subplot(224, projection=ccrs.NearsidePerspective(central_longitude=-74, central_latitude=40.5))
        self.ax4 = plt.subplot2grid((2, 2), (1, 0),
                                    colspan=2,
                                    projection=ccrs.Robinson())
        self.ax4TimeToUpdate = datetime.now()
        self.ax4TimeToGeoLoc = datetime.now()
        self.ax4GeoLoc = CitiesCoords['New York']
        # initialize
        self.sysStats = updateSysStats(self.sysStats)
        self.lines = [[], {}, [], []]
        sysStatsX = extractSysStats(self.sysStats, 'time')
        #CPU percentage
        cpuPercent = extractSysStats(self.sysStats, 'cpuPercent')
        #CPU temperature
        cpuTempCel = extractSysStats(self.sysStats, 'cpuTempCel')
        line, = self.ax2.plot(sysStatsX, cpuTempCel, '2:r', ms=3, lw=.5)
        self.lines[1]['cpuTempCel'] = line
        # memory percentage
        memPercent = extractSysStats(self.sysStats, 'memPercent')
        line, = self.ax3.plot(sysStatsX, memPercent, '.-c', ms=3, lw=.5)
        self.lines[1]['memPercent'] = line
        #globe
        #         self.ax4.stock_img()
        self.http = urllib3.PoolManager()
Code example #9
File: web_utilities.py  Project: cgd1/Artemis
def get_pool_manager():
    """Return a urllib3.PoolManager object."""
    return urllib3.PoolManager(ca_certs=get_cacert_file())
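
A usage sketch, assuming get_cacert_file() from the same module returns a CA bundle path:

http = get_pool_manager()
response = http.request('GET', 'https://example.org')
print(response.status)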
Code example #10
def get_request(url,
                no_json=False,
                is_api_request=False,
                get_response_code=False,
                is_octoprint_request=False):
    global http, last_request_response_code, op_down_check

    url = url.replace(" ", "%20")
    response_code = None

    hdr = {
        'User-Agent':
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 '
        + 'Safari/537.11',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset':
        'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding':
        'none',
        'Accept-Language':
        'en-UK,en;q=0.8',
        'Connection':
        'keep-alive'
    }

    try:
        if IS_PY3:
            if http is None:
                http = urllib3.PoolManager()

            the_request = http.request("GET", url, headers=hdr)

            response_code = the_request.status

            if not get_response_code:
                content = the_request.data
            else:
                content = response_code
        else:
            req = urllib2.Request(url, headers=hdr)
            the_request = urllib2.urlopen(req)

            response_code = the_request.getcode()

            if not get_response_code:
                content = the_request.read()
            else:
                content = the_request.getcode()

        the_request.close()
    except Exception as e:
        log("[ERROR] - Failed to request url; " + str(url) + ". Error is; " +
            str(e))

        if "403" in str(e) and is_api_request:
            log("API request is forbidden - API key probably no longer valid, fetching a new one")
            get_octoprint_api_key()
            website_ping_update("&octoprint_api_key=" + octoprint_apikey())

        down_to_set = ""

        if "503" in str(e) and is_api_request:
            if not config.getboolean("info", "octoprint_down"):
                op_down_check = 0
                down_to_set = "True"
                log("OctoPrint is down (503: Service Unavailable) - waiting to make sure it's really down, "
                    "then restarting")
                website_ping_update("&octoprint_status=Shutdown")
            else:
                op_down_check += 1
                if op_down_check > 6:
                    down_to_set = "False"
                    log("OctoPrint is STILL down - restarting the OctoPrint service"
                        )
                    os.system("sudo service octoprint restart")

            if down_to_set != "":
                set_config_key("info", "octoprint_down", down_to_set)
                set_config()

        return False

    last_request_response_code = response_code

    # Check if OctoPrint is OK
    if is_octoprint_request and response_code is not None and int(
            response_code) in [500, 503]:
        # OctoPrint is down
        if not config.getboolean("info", "octoprint_down"):
            down_to_set = "True"
            log("OctoPrint is down - waiting to make sure it's really down, "
                "then restarting")
        else:
            down_to_set = "False"
            log("OctoPrint is STILL down - restarting the OctoPrint service")
            os.system("sudo service octoprint restart")

        set_config_key("info", "octoprint_down", down_to_set)
        set_config()

        website_ping_update("&octoprint_status=Shutdown")
        return False

    if no_json:
        return content
    else:
        content = content.decode("utf-8")
        if str(content) != "Printer is not operational":
            try:
                json_content = json.loads(content)
                return json_content
            except ValueError:
                if response_code == 403:
                    log(
                        "Could not jsonify request to URL; " + str(url) +
                        "; 403 error", "info")

                log("[ERROR] - Failed to jsonify request; " + str(content),
                    "debug")
                return False
        else:
            # Just not connected, not an error
            return False
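
A hedged usage sketch; the URL is a placeholder for whatever endpoint the caller needs, and the globals the function touches (http, config, log, ...) come from the surrounding script:

# Fetch an endpoint as parsed JSON; get_request returns False on any failure.
state = get_request("http://localhost/api/printer", is_api_request=True,
                    is_octoprint_request=True)
if state:
    print(state)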
Code example #11
#----------------------

scrollbar_organism = tk.Scrollbar(group_organism, orient='vertical')

listbox_organism = tk.Listbox(group_organism,
                              selectmode='browse',
                              yscrollcommand=scrollbar_organism.set,
                              font=('Tahoma', 10), height=10)

scrollbar_organism.config(command=listbox_organism.yview)
listbox_organism.pack(side='left', fill='both', expand=True)
scrollbar_organism.pack(side='left', fill=tk.Y)

#---------------------------------------

query = urllib3.PoolManager()

lines = query.request('GET','http://rest.kegg.jp/list/genome')

lines = lines.data.decode('UTF-8')

lines = lines.strip().split('\n')

ORGANISMS = []

for line in lines:
        
    if '; ' in line:
        # split only on the first '; ' in case the description itself contains one
        data, name = line.split('; ', 1)
        
        name = name.strip()
Code example #12
def get_item(home_directory):
    """
    Return all item information from all maps
    :return: item information
    """

    # Log current status of program
    my_tools.log_status('Getting Item Grid')

    # Change directory to HTML pages
    os.chdir(''.join([home_directory, '/HTML Pages']))

    # Create urllib3 pool to download each web page
    http_pool = urllib3.PoolManager()
    main_url = my_tools.get_web_page(page_name='Item',
                                     path='/Items',
                                     http_pool=http_pool)

    # For formatting
    my_tools.log_status('\n')

    # Use the item page and set up parsing
    with my_globals.bs4_lock:
        item_grid_html = BeautifulSoup(markup=main_url, features='lxml')

    # Find the item grid and start to parse
    finished_items_html = item_grid_html.find(id='item-grid')

    # Loop through item grid for each item section
    for cnt, null in enumerate(finished_items_html.contents):

        # Add section to dictionary
        if cnt % 4 == 1:
            # Save current section being worked on
            category = finished_items_html.contents[cnt].text.strip()

            # Skip sections not used by calculator
            if category == 'Potions and Consumables' or \
               category == 'Distributed' or \
               category == 'Removed items' or \
               category == 'Trinkets':
                continue

            # Log status of program
            my_tools.log_status(''.join([
                'Starting Section: ',
                finished_items_html.contents[cnt].text.strip()
            ]))

            # Create entry for current section in global dictionary
            my_globals.item_info[
                finished_items_html.contents[cnt].text.strip()] = {}

        # Search though section for items
        if cnt % 4 == 3:
            # Save current section being worked on
            category = finished_items_html.contents[cnt - 2].text.strip()

            # Skip sections not used by calculator
            if category == 'Potions and Consumables' or \
               category == 'Distributed' or \
               category == 'Removed items' or \
               category == 'Trinkets':
                continue

            # Array to hold threads
            all_item_threads = []

            # Get the page for each item in the category and start to parse
            for item in finished_items_html.contents[cnt]:
                # Save item path and readable names
                item_name = item.contents[0].contents[0].contents[0].get(
                    'href')
                current_item_name = item_name[6:].replace('%27', '\'').replace(
                    '_', ' ')

                # Create thread for each item being parsed
                while True:
                    # Only create a thread if limit has not been exceeded
                    if my_globals.thread_count < my_globals.thread_max:
                        # Signal a new thread is being created
                        with my_globals.counter_lock:
                            my_globals.thread_count += 1

                        # Create thread and process each item
                        thread = threading.Thread(target=get_item_page,
                                                  args=(item, cnt,
                                                        finished_items_html,
                                                        category, http_pool),
                                                  name=current_item_name)

                        # Append current thread to list and start thread
                        all_item_threads.append(thread)
                        thread.start()

                        # Exit loop once processing is done
                        break

                    # Wait if a thread queue is full
                    time.sleep(2)
                # break

            # Wait for all threads to finish processing
            for thread in all_item_threads:
                thread.join()

                # For formatting
                my_tools.log_status('\n')

            #FOR DEBUGGING, STOP AFTER FIRST SECTION
            # break
    #FOR DEBUGGING, CREATE LOCAL COPY AS GLOBAL VARIABLE DOES NOT SHOW UP IN THE DEBUGGER
    temp = my_globals.item_info.copy()
    return
Code example #13
File: compat.py  Project: oppianmatt/scout_apm_python
def urllib3_cert_pool_manager(**kwargs):
    if sys.version_info >= (3, 0):
        CERT_REQUIRED = "CERT_REQUIRED"
    else:
        CERT_REQUIRED = b"CERT_REQUIRED"
    return urllib3.PoolManager(cert_reqs=CERT_REQUIRED, ca_certs=certifi.where())
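
A usage sketch for the certifi-backed pool returned above:

http = urllib3_cert_pool_manager()
response = http.request("GET", "https://example.org")
print(response.status, len(response.data))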
Code example #14
def worker(filepath):
    # initialize saucenao
    saucenao_core = SauceNao(
        directory='directory',
        databases=9,
        # 999 by default, 5 for pixiv, 9 for booru.
        minimum_similarity=65,
        combine_api_types=False,
        api_key=saucenao_api_key,
        exclude_categories='',
        move_to_categories=False,
        use_author_as_category=False,
        output_type=SauceNao.API_JSON_TYPE,
        start_file='',
        log_level=logging.ERROR,
        title_minimum_similarity=90)
    # search image on saucenao
    try:
        result = search(saucenao_core, filepath)
    except requests.exceptions.ConnectionError:
        print("Failed to connect saucenao!")
        return -1
    except saucenao.exceptions.DailyLimitReachedException:
        print("Saucenao daily limit reached! try 1 hour later!")
        return -2
    if len(result) == 0:
        print('Image not found on danbooru!')
        return 1
    else:
        danbooru_id = result[0]['data']['danbooru_id']
        print('Image Found, ID=' + str(danbooru_id))
        # GET danbooru tag json
        try:
            http = urllib3.PoolManager()
            # disable  https cert check warning
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            url = 'https://danbooru.donmai.us/posts/' + str(
                danbooru_id) + '.json'
            headers = urllib3.util.make_headers(basic_auth=danbooru_login +
                                                ':' + danbooru_api_key)
            r = http.request('GET', url, headers=headers)
            r_data = r.data
            if isinstance(r_data, bytes):
                r_data = str(r_data, 'utf-8')
            tags = json.loads(r_data)['tag_string']
            taglist = tags.split()
        except requests.exceptions.ConnectionError:
            print('failed to GET tag data from danbooru')
            return -1
        # Write XMP Metadata to image
        xmpfile = XMPFiles(file_path=filepath, open_forupdate=True)
        xmp = xmpfile.get_xmp()
        # if image has no xmp data, create one
        if (xmp is None):
            xmp = XMPMeta()
        # write the tags
        for each in taglist:
            # check whether XMP includes 'subject' property,
            # if not, create a new one
            if (not xmp.does_property_exist(consts.XMP_NS_DC, 'subject')):
                xmp.append_array_item(consts.XMP_NS_DC, 'subject', each, {
                    'prop_array_is_ordered': True,
                    'prop_value_is_array': True
                })
            # check whether tag has been written to file
            if (not xmp.does_array_item_exist(consts.XMP_NS_DC, 'subject',
                                              each)):
                xmp.append_array_item(consts.XMP_NS_DC, 'subject', each)
        if (xmpfile.can_put_xmp(xmp)):
            xmpfile.put_xmp(xmp)
            xmpfile.close_file()
            return 0
        else:
            print('Unable to write XMP data!')
            return -1
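
A call sketch; the return codes are read off the branches above (0 = tags written, 1 = not found on danbooru, -1 = connection/XMP failure, -2 = SauceNao daily limit), and the file path is a placeholder:

rc = worker("samples/artwork_001.jpg")
if rc == -2:
    print("daily limit reached, retry in an hour")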
Code example #15
import time
import urllib3
from Queue import Queue
from urllib import urlencode
from threading import Lock, current_thread
from django.conf import settings
from django.core.cache import cache
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
from graphite.readers import FetchInProgress
from graphite.logger import log
from graphite.util import unpickle, logtime, timebounds
from graphite.render.hashing import compactHash
from graphite.worker_pool.pool import get_pool

http = urllib3.PoolManager(num_pools=10, maxsize=5)


def prefetchRemoteData(remote_stores, requestContext, pathExpressions):
    # Guard against a missing context before indexing into it
    if requestContext is None:
        requestContext = {}

    if requestContext.get('localOnly'):
        return

    (startTime, endTime, now) = timebounds(requestContext)
    log.info(
        'thread %s prefetchRemoteData:: Starting fetch_list on all backends' %
        current_thread().name)

    # Go through all of the remote nodes, and launch a fetch for each one.
Code example #16
def jasonNewFileCheck(satName, ogdr_cycles_page_URL):
    '''Check for a new Jason-2/3 file.
    Keeps downloading and processing files, from the last downloaded file
    up to the latest available one.'''

    print("***************** CHECKING FOR NEW FILE OF", satName,
          "********************")
    print("URL of cycles web page :", ogdr_cycles_page_URL)
    print()

    #getting information about last downloaded file
    print("opening last loaded file :", satName + '_last.txt ....')
    with open(op.join(app_dir, 'data', 'last', satName + '_last.txt'),
              'r') as last:
        lcycle = last.readline()[:-1]  #last accessed cycle
        lfile = last.readline()[:-1]  #last downloaded file name
    print("last downloaded file loaded")
    print("last cycle =", lcycle)
    print("last file =", lfile)
    print()

    #creating PoolManager
    print("creating PoolManager (connection timeout = 15.0s)")
    http = ulib.PoolManager(timeout=ulib.Timeout(connect=15.0))

    #downloading the page with the list of cycles
    print("downloading webpage with list of cycles ....")
    ogdr_cycles_page = http.request('GET', ogdr_cycles_page_URL)
    # saving the html in a file
    with open(op.join(app_dir, 'data', 'temp', 'ogdr_cycles_page.html'),
              'wb') as f:
        f.write(ogdr_cycles_page.data)
    print("download complete.")
    print()

    #opening the cycles file for reading
    print("opening HTML file for reading")
    with open(op.join(app_dir, 'data', 'temp', 'ogdr_cycles_page.html'),
              'r') as html:
        cflag = False
        for line in html:
            flag = True
            if 'cycle' in line:
                cflag = True
                if lcycle not in line:
                    continue
                flag = False
                #last accessed cycle link found
                #downloading the corresponding cycle web page and saving the web page
                print("cycle found :", lcycle)
                print("downloading webpage...")
                lcycle_page = http.request('GET',
                                           ogdr_cycles_page_URL + lcycle)
                with open(op.join(app_dir, 'data', 'temp', 'lcycle.html'),
                          'wb') as f:
                    f.write(lcycle_page.data)
                print("download complete.")
                print("opening HTML file and passing to checkData")
                with open(op.join(app_dir, 'data', 'temp', 'lcycle.html'),
                          'r') as f:
                    lfile = checkData(satName, lcycle,
                                      ogdr_cycles_page_URL + lcycle, f, lfile)
                print("checking for new files in ", lcycle, "done.")
                print("latest downloaded file :", lfile)
                print()

                #updating lcycle variable
                num = int(lcycle[5:])
                num = num + 1
                t = num
                count = 0
                while t > 0:  #counting digits in num
                    t = t // 10
                    count = count + 1
                if count == 1:
                    lcycle = 'cycle00' + str(num)
                elif count == 2:
                    lcycle = 'cycle0' + str(num)
                elif count == 3:
                    lcycle = 'cycle' + str(num)
            if cflag and flag and 'cycle' not in line:
                #decrementing the lcycle variable if it is greater than the available cycles
                num = int(lcycle[5:])  #cycle number
                num = num - 1
                t = num
                count = 0
                while t > 0:  #counting digits in num
                    t = t // 10
                    count = count + 1
                if count == 1:
                    lcycle = 'cycle00' + str(num)
                if count == 2:
                    lcycle = 'cycle0' + str(num)
                elif count == 3:
                    lcycle = 'cycle' + str(num)
                print("no more new cycles available")
                print("last downloaded cycle :", lcycle)
                print()
                break
            if cflag:
                print("next cycle to check :", lcycle)
                c = input("want to download new cycle ? (y/n) > ")
                if c == 'y' or c == 'Y':
                    continue
                else:
                    break

    # writing updates to JAx_last.txt
    print("writing last downloads info to", satName + '_last.txt')
    print('lcycle={} lfile={}'.format(lcycle, lfile))
    with open(op.join(app_dir, 'data', 'last', satName + '_last.txt'),
              "w") as f:
        f.write(lcycle + '\n')
        f.write(lfile + '\n')
    print('write complete')
    print()
    print("*************** NEW FILE CHECK COMPLETED FOR", satName,
          "******************")
    print()
    return