Example #1
def task5():

    app = QtCore.QCoreApplication.instance()
    if app is None:
        app = QtWidgets.QApplication(sys.argv)
    window = FileDownloader()
    window.show()

    app.exec_()
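As context for the pattern above, here is a minimal self-contained sketch of the same reuse-or-create QApplication idiom, assuming PyQt5 and substituting a plain QWidget for the FileDownloader window used in the example:

import sys

from PyQt5 import QtCore, QtWidgets


def run_app():
    # Reuse an existing QApplication if one is already running (e.g. inside
    # an IDE or notebook); otherwise create a new one.
    app = QtCore.QCoreApplication.instance()
    if app is None:
        app = QtWidgets.QApplication(sys.argv)

    window = QtWidgets.QWidget()  # stand-in for FileDownloader()
    window.setWindowTitle("FileDownloader demo")
    window.show()

    app.exec_()


if __name__ == "__main__":
    run_app()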
Example #2
def catalystTextParse(path, overallTime):
    with open(path, "r") as catalystDescFile:
        lines = catalystDescFile.readlines()
    entryList = []

    nameList = []
    secondsList = []
    for line in lines:
        sections = line.split(' - ')
        timeString = sections[0]
        name = sections[1]
        name = name.replace("\n", "")
        seconds = timestring_to_seconds(timeString)
        nameList.append(name)
        secondsList.append(seconds)

    assert len(nameList) == len(secondsList)
    startTime = 0
    # Append the overall end time so the final entry also gets an end time.
    secondsList.append(overallTime)
    for index in range(1, len(secondsList)):
        # Each entry ends where the next one starts; the appended overall
        # time closes out the last entry.
        currentEnd = secondsList[index]
        name = nameList[index - 1]
        artist = "Solar Fields"
        entry = FileDownloader.Entry(name, artist, index, startTime,
                                     currentEnd)
        entryList.append(entry)
        startTime = currentEnd

    return entryList
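The helper timestring_to_seconds is referenced but not shown in this example; a plausible sketch, assuming timestamps like "3:07" or "1:44:58":

def timestring_to_seconds(time_string):
    # Accepts "SS", "MM:SS" or "HH:MM:SS" and returns the total in seconds.
    parts = [int(part) for part in time_string.strip().split(":")]
    seconds = 0
    for part in parts:
        seconds = seconds * 60 + part
    return seconds


assert timestring_to_seconds("1:44:58") == 6298
assert timestring_to_seconds("3:07") == 187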
Example #3
def tp_vs(mask_filepath, variable, mask=None, longname=""):
    """
    df = dp.download_data(mask_filepath)
    df_var = df[['time','tp', variable]]
    #df_mean = df_var.groupby('time').mean()
    """

    if mask is None:
        df = dd.download_data(mask_filepath)
    else:
        cds_filepath = fd.update_cds_monthly_data()
        da = dd.apply_mask(cds_filepath, mask)
        df = da.to_dataframe().reset_index()

    df = df[["time", "tp", variable]]
    # gilgit = ds.interp(coords={'longitude':74.4584, 'latitude':35.8884 }, method='nearest')
    df_var = df.dropna()

    # Plot
    df_var.plot.scatter(x=variable, y="tp", alpha=0.2, c="b")
    plt.title("Upper Indus Basin")
    plt.ylabel("Total precipitation [m/day]")
    plt.xlabel(longname)
    plt.grid(True)
    plt.show()
Example #4
def cds_downloader(basin, ensemble=False, all_var=False):
    """Return the CDS data as a DataFrame."""

    if not ensemble:
        cds_filepath = fd.update_cds_monthly_data(area=basin)
    else:
        cds_filepath = fd.update_cds_monthly_data(
            product_type="monthly_averaged_ensemble_members", area=basin)

    da = xr.open_dataset(cds_filepath)
    if "expver" in list(da.dims):
        da = da.sel(expver=1)

    multiindex_df = da.to_dataframe()
    cds_df = multiindex_df.reset_index()

    return cds_df
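The expver check above deals with ERA5 downloads that mix final (expver=1) and preliminary ERA5T data. A small synthetic sketch of the same selection step, using a made-up dataset instead of a real CDS file:

import numpy as np
import xarray as xr

# Build a toy dataset with an "expver" dimension, mimicking a mixed
# ERA5/ERA5T download.
da = xr.Dataset(
    {"tp": (("expver", "time"), np.random.rand(2, 3))},
    coords={"expver": [1, 5], "time": range(3)},
)

if "expver" in list(da.dims):
    da = da.sel(expver=1)  # keep only the final (expver=1) product

cds_df = da.to_dataframe().reset_index()
print(cds_df.head())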
Example #5
def indice_downloader(all_var=False):
    """Return climate index data (N34, and optionally NAO and N4) as a DataFrame."""

    nao_url = "https://www.psl.noaa.gov/data/correlation/nao.data"
    n34_url = "https://psl.noaa.gov/data/correlation/nina34.data"
    n4_url = "https://psl.noaa.gov/data/correlation/nina4.data"

    n34_df = fd.update_url_data(n34_url, "N34")

    if not all_var:
        ind_df = n34_df.astype("float64")
    else:
        nao_df = fd.update_url_data(nao_url, "NAO")
        n4_df = fd.update_url_data(n4_url, "N4")
        ind_df = n34_df.join([nao_df, n4_df])

    return ind_df
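The join() call above lines the index series up on their shared time index. A toy illustration with invented values, assuming fd.update_url_data returns one DataFrame per index keyed by date:

import pandas as pd

# Toy monthly index series sharing a datetime index, standing in for the
# N34 / NAO / N4 frames.
dates = pd.date_range("2000-01-01", periods=3, freq="MS")
n34_df = pd.DataFrame({"N34": [25.1, 25.4, 26.0]}, index=dates)
nao_df = pd.DataFrame({"NAO": [0.3, -0.1, 0.5]}, index=dates)
n4_df = pd.DataFrame({"N4": [28.2, 28.0, 27.9]}, index=dates)

# join() aligns on the shared index, giving one column per index series.
ind_df = n34_df.join([nao_df, n4_df])
print(ind_df)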
Example #6
 def __init__(self, host='https://ilearn2.fcu.edu.tw', lan='繁體中文'):
     super(iLearnManager, self).__init__()
     self.web = requests.Session()
     self.NID = ""
     self.Pass = ""
     self.courseList = []
     self.host = host
     self.string = language.string()
     self.string.setLanguage(lan)
     self.downloader = {
         "forum/discuss": FileDownloader.discuss(),
         "folder/resource": FileDownloader.folderResource(),
         "resource": FileDownloader.resource(),
         "url": FileDownloader.url(),
         "page": FileDownloader.page(),
         "assign": FileDownloader.assign(),
         "videos": FileDownloader.videos()
     }
     for ele in self.downloader:
         self.downloader[ele].setLanguage(lan)
         self.downloader[ele].signal_downloadNextFile.connect(
             self.finishDownload)
         self.downloader[ele].signal_errorMsg.connect(self.showErrorMsg)
         self.downloader[ele].signal_printMsg.connect(self.print)
         self.downloader[ele].signal_setStatusProcessBar.connect(
             self.setStatusProcessBar)
         self.downloader[ele].signal_setStatusBarText.connect(
             self.setStatusBarText)
Example #7
def download_current_warnings(shapefile_output_directory, logg):

    logg.write_info("calling FileDownloader")
    url = NOAA_URL
    file_downloader = FileDownloader.FileDownloader(
        url, shapefile_output_directory, "current_warnings.zip", True)
    if file_downloader.file_downloaded():
        logg.write_info(
            "downloaded successfully: %s" %
            os.path.join(shapefile_output_directory, "current_warnings.zip"))
    else:
        logg.write_error("current_warnings.zip not downloaded successfully")
        return None

    return file_downloader
Example #8
def eof_formatter(filepath, basin, name=None):
    """Return a DataFrame of the EOF over the UIB."""

    da = xr.open_dataset(filepath)
    if "expver" in list(da.dims):
        da = da.sel(expver=1)
    (latmax, lonmin, latmin, lonmax) = fd.basin_extent(basin)
    sliced_da = da.sel(latitude=slice(latmin, latmax),
                       longitude=slice(lonmin, lonmax))

    eof_ds = sliced_da.EOF
    eof2 = eof_ds.assign_coords(time=(eof_ds.time.astype("datetime64")))
    eof_multiindex_df = eof2.to_dataframe()
    eof_df = eof_multiindex_df.dropna()
    eof_df.rename(columns={"EOF": name}, inplace=True)
    return eof_df
Example #9
def dubstepParse(description, overallTime):
    entryList = []

    validLines = []
    lines = description.splitlines()
    # Matches lines that end in a track timestamp,
    # e.g. "Killigrew - Timeless As The Waves 1:44:58".
    pattern = re.compile(r"[a-zA-Z]\s([0-9]:)?[0-9]{1,2}:[0-9]{1,2}$")
    for line in lines:
        matchobj = pattern.search(line)
        if matchobj is not None:
            validLines.append(line)

    nameList = []
    artistList = []
    secondsList = []
    for line in validLines:
        index = line.rindex(' ')
        prefixBlob = line[:index].split(' - ')
        name = prefixBlob[1]
        artist = prefixBlob[0]
        timeString = line[index:]
        seconds = timestring_to_seconds(timeString)
        nameList.append(name)
        artistList.append(artist)
        secondsList.append(seconds)

    assert len(nameList) == len(secondsList)
    startTime = 0
    # Append the overall end time so the final track also gets an end time.
    secondsList.append(overallTime)
    for index in range(1, len(secondsList)):
        # Each entry ends where the next one starts; the appended overall
        # time closes out the last entry.
        currentEnd = secondsList[index]
        name = nameList[index - 1]
        artist = artistList[index - 1]
        entry = FileDownloader.Entry(name, artist, index, startTime,
                                     currentEnd)
        entryList.append(entry)
        startTime = currentEnd

    return entryList
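The regular expression only has to decide whether a line ends in a track timestamp; a quick, self-contained check against the sample from the comment:

import re

pattern = re.compile(r"[a-zA-Z]\s([0-9]:)?[0-9]{1,2}:[0-9]{1,2}$")

samples = [
    "Killigrew - Timeless As The Waves 1:44:58",  # ends in H:MM:SS, matches
    "01. Killigrew - Timeless As The Waves",      # no timestamp, rejected
]
for line in samples:
    print(bool(pattern.search(line)), line)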
Example #10
import cartopy.crs as ccrs
import matplotlib.cm as cm
import cartopy.feature as cf
import matplotlib.pyplot as plt
import matplotlib.ticker as tck

from shapely.geometry import Polygon, shape, LinearRing
from cartopy.io import shapereader
from cartopy import config
from scipy import signal

import FileDownloader as fd
import DataPreparation as dp
import DataDownloader as dd

data_filepath = fd.update_cds_monthly_data()
mask_filepath = "Data/ERA5_Upper_Indus_mask.nc"
"""
tp_filepath = 'Data/era5_tp_monthly_1979-2019.nc'
mpl_filepath = 'Data/era5_msl_monthly_1979-2019.nc'
"""


def sample_timeseries(data_filepath,
                      variable="tp",
                      longname="Total precipitation [m/day]"):
    """ Timeseries for Gilgit, Skardu and Leh"""

    da = xr.open_dataset(data_filepath)
    if "expver" in list(da.dims):
        print("expver found")
Example #11
import time
import json
from datetime import datetime
import CsvWriter
import FileDownloader

import logging

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')

if __name__ == "__main__":
    writer = CsvWriter.CsvWriter()
    error_writer = CsvWriter.CsvWriter()
    downloader = FileDownloader.FileDownloader()

    writer.header = ['name', 'time', 'bank_name',
                     'account_no', 'ip_address', 'country']
    writer.path = './data.csv'

    error_writer.header = ['name', 'time', 'bank_name',
                           'account_no', 'ip_address', 'country']
    error_writer.path = './data.err.csv'

    data = writer.read()

    writer.path = './data.geolocation.csv'

    failed = []

    for i in range(0, len(data)):
Example #12
import FileDownloader
import PageScraper

qconurl = 'https://qconsf.com/schedule/sf2018/tabular'
filepath = '/home/jyang/QCon2018/'

links = PageScraper.getLinks(qconurl)

print('Download started...')
for link in links:
    if link:
        FileDownloader.doDownload(link.decode("utf-8"), filepath)

print("Download of %d files completed." % len(links))
Example #13
    # save

    scraper.save_output(output_json_path, output_json, pretty_print=True)

# if no files are to be downloaded
if not download_files:
    scraper.close()

# [START] file download ----------------------------------------------------------------------------------------------------

else:

    data = scraper.load_output(output_json_path)

    file_downloader = FileDownloader()

    print(str(len(data)) + " product(s) scraped")

    for i in range(len(data)):

        # if i <= 906: continue

        product = data[i]

        if product["product_name_complement"] != "":
            formatted_name = product["product_name"] + " " + product[
                "product_name_complement"]
        else:
            formatted_name = product["product_name"]
Example #14
 fd = FileDownloader({
     'usenetrc':
     opts.usenetrc,
     'username':
     opts.username,
     'password':
     opts.password,
     'quiet':
     (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail
      or opts.getdescription or opts.getfilename or opts.getformat),
     'forceurl':
     opts.geturl,
     'forcetitle':
     opts.gettitle,
     'forcethumbnail':
     opts.getthumbnail,
     'forcedescription':
     opts.getdescription,
     'forcefilename':
     opts.getfilename,
     'forceformat':
     opts.getformat,
     'simulate':
     opts.simulate,
     'skip_download':
     (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle
      or opts.getthumbnail or opts.getdescription or opts.getfilename
      or opts.getformat),
     'format':
     opts.format,
     'format_limit':
     opts.format_limit,
     'listformats':
     opts.listformats,
     'outtmpl': ((opts.outtmpl is not None
                  and opts.outtmpl.decode(preferredencoding()))
                 or (opts.format == '-1' and opts.usetitle
                     and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
                 or (opts.format == '-1' and opts.useliteral
                     and u'%(title)s-%(id)s-%(format)s.%(ext)s')
                 or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
                 or (opts.usetitle and opts.autonumber
                     and u'%(autonumber)s-%(stitle)s-%(id)s.%(ext)s')
                 or (opts.useliteral and opts.autonumber
                     and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
                 or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
                 or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
                 or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
                 or u'%(id)s.%(ext)s'),
     'ignoreerrors':
     opts.ignoreerrors,
     'ratelimit':
     opts.ratelimit,
     'nooverwrites':
     opts.nooverwrites,
     'retries':
     opts.retries,
     'continuedl':
     opts.continue_dl,
     'noprogress':
     opts.noprogress,
     'playliststart':
     opts.playliststart,
     'playlistend':
     opts.playlistend,
     'logtostderr':
     opts.outtmpl == '-',
     'consoletitle':
     opts.consoletitle,
     'nopart':
     opts.nopart,
     'updatetime':
     opts.updatetime,
     'writedescription':
     opts.writedescription,
     'writeinfojson':
     opts.writeinfojson,
     'writesubtitles':
     opts.writesubtitles,
     'subtitleslang':
     opts.subtitleslang,
     'matchtitle':
     opts.matchtitle,
     'rejecttitle':
     opts.rejecttitle,
     'max_downloads':
     opts.max_downloads,
     'prefer_free_formats':
     opts.prefer_free_formats,
     'verbose':
     opts.verbose,
 })
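The 'outtmpl' value above is an or-chain: Python returns the first truthy operand, so the first template whose flags are set wins and the plain '%(id)s.%(ext)s' is the fallback. A stripped-down sketch of the same idea using only a few invented flags:

def pick_outtmpl(outtmpl=None, usetitle=False, autonumber=False):
    # Each "flag and template" term is falsy unless the flag is set, so the
    # chain falls through to the first template whose conditions hold.
    return (outtmpl
            or (usetitle and autonumber and "%(autonumber)s-%(stitle)s-%(id)s.%(ext)s")
            or (usetitle and "%(stitle)s-%(id)s.%(ext)s")
            or "%(id)s.%(ext)s")


print(pick_outtmpl())                          # %(id)s.%(ext)s
print(pick_outtmpl(usetitle=True))             # %(stitle)s-%(id)s.%(ext)s
print(pick_outtmpl(outtmpl="custom.%(ext)s"))  # custom.%(ext)s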
Example #15
def mean_downloader(basin):
    def mean_formatter(filepath, coords=None, name=None):
        """Return data averaged over an optionally given area as a DataFrame."""

        da = xr.open_dataset(filepath)

        if "expver" in list(da.dims):
            da = da.sel(expver=1)
            da = da.drop(["expver"])

        if coords is not None:
            da = da.sel(
                latitude=slice(coords[0], coords[2]),
                longitude=slice(coords[1], coords[3]),
            )

        mean_da = da.mean(dim=["longitude", "latitude"], skipna=True)
        clean_da = mean_da.assign_coords(
            time=(mean_da.time.astype("datetime64")))
        multiindex_df = clean_da.to_dataframe()
        df = multiindex_df  # .reset_index()
        if name is not None:
            df.rename(columns={"EOF": name}, inplace=True)

        return df

    # Temperature
    temp_filepath = fd.update_cds_monthly_data(variables=["2m_temperature"],
                                               area=basin,
                                               qualifier="temp")
    temp_df = mean_formatter(temp_filepath)

    # EOFs for 200hPa
    eof1_z200_c = mean_formatter("Data/regional_z200_EOF1.nc",
                                 coords=[40, 60, 35, 70],
                                 name="EOF200C1")
    eof1_z200_b = mean_formatter("Data/regional_z200_EOF1.nc",
                                 coords=[19, 83, 16, 93],
                                 name="EOF200B1")
    eof2_z200_c = mean_formatter("Data/regional_z200_EOF2.nc",
                                 coords=[40, 60, 35, 70],
                                 name="EOF200C2")
    eof2_z200_b = mean_formatter("Data/regional_z200_EOF2.nc",
                                 coords=[19, 83, 16, 93],
                                 name="EOF200B2")

    # EOFs for 500hPa
    eof1_z500_c = mean_formatter("Data/regional_z500_EOF1.nc",
                                 coords=[40, 60, 35, 70],
                                 name="EOF500C1")
    eof1_z500_b = mean_formatter("Data/regional_z500_EOF1.nc",
                                 coords=[19, 83, 16, 93],
                                 name="EOF500B1")
    eof2_z500_c = mean_formatter("Data/regional_z500_EOF2.nc",
                                 coords=[40, 60, 35, 70],
                                 name="EOF500C2")
    eof2_z500_b = mean_formatter("Data/regional_z500_EOF2.nc",
                                 coords=[19, 83, 16, 93],
                                 name="EOF500B2")

    # EOFs for 850hPa
    eof1_z850_c = mean_formatter("Data/regional_z850_EOF1.nc",
                                 coords=[40, 60, 35, 70],
                                 name="EOF850C1")
    eof1_z850_b = mean_formatter("Data/regional_z850_EOF1.nc",
                                 coords=[19, 83, 16, 93],
                                 name="EOF850B1")
    eof2_z850_c = mean_formatter("Data/regional_z850_EOF2.nc",
                                 coords=[40, 60, 35, 70],
                                 name="EOF850C2")
    eof2_z850_b = mean_formatter("Data/regional_z850_EOF2.nc",
                                 coords=[19, 83, 16, 93],
                                 name="EOF850B2")

    eof_df = pd.concat(
        [
            eof1_z200_b,
            eof1_z200_c,
            eof2_z200_b,
            eof2_z200_c,
            eof1_z500_b,
            eof1_z500_c,
            eof2_z500_b,
            eof2_z500_c,
            eof1_z850_b,
            eof1_z850_c,
            eof2_z850_b,
            eof2_z850_c,
        ],
        axis=1,
    )

    mean_df = pd.merge_ordered(temp_df, eof_df, on="time")

    return mean_df
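pd.merge_ordered performs an ordered merge on the "time" key, which is what lines the temperature series up with the EOF columns. A small synthetic example, not tied to the actual CDS or EOF files (the "t2m" column name is a stand-in):

import pandas as pd

temp_df = pd.DataFrame({
    "time": pd.to_datetime(["2000-01-01", "2000-02-01", "2000-03-01"]),
    "t2m": [265.1, 266.4, 270.2],  # stand-in 2 m temperature values
})
eof_df = pd.DataFrame({
    "time": pd.to_datetime(["2000-01-01", "2000-03-01"]),
    "EOF200C1": [0.12, -0.05],
})

# Rows come out ordered by "time"; months missing from eof_df become NaN.
mean_df = pd.merge_ordered(temp_df, eof_df, on="time")
print(mean_df)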
Example #16
	fd = FileDownloader({
		'usenetrc': opts.usenetrc,
		'username': opts.username,
		'password': opts.password,
		'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
		'forceurl': opts.geturl,
		'forcetitle': opts.gettitle,
		'forcethumbnail': opts.getthumbnail,
		'forcedescription': opts.getdescription,
		'forcefilename': opts.getfilename,
		'forceformat': opts.getformat,
		'simulate': opts.simulate,
		'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
		'format': opts.format,
		'format_limit': opts.format_limit,
		'listformats': opts.listformats,
		'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
			or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
			or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
			or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
			or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(stitle)s-%(id)s.%(ext)s')
			or (opts.useliteral and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
			or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
			or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
			or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
			or u'%(id)s.%(ext)s'),
		'ignoreerrors': opts.ignoreerrors,
		'ratelimit': opts.ratelimit,
		'nooverwrites': opts.nooverwrites,
		'retries': opts.retries,
		'continuedl': opts.continue_dl,
		'noprogress': opts.noprogress,
		'playliststart': opts.playliststart,
		'playlistend': opts.playlistend,
		'logtostderr': opts.outtmpl == '-',
		'consoletitle': opts.consoletitle,
		'nopart': opts.nopart,
		'updatetime': opts.updatetime,
		'writedescription': opts.writedescription,
		'writeinfojson': opts.writeinfojson,
		'writesubtitles': opts.writesubtitles,
		'subtitleslang': opts.subtitleslang,
		'matchtitle': opts.matchtitle,
		'rejecttitle': opts.rejecttitle,
		'max_downloads': opts.max_downloads,
		'prefer_free_formats': opts.prefer_free_formats,
		'verbose': opts.verbose,
		})
Example #17
'''
Created on Jul 26, 2016

@author: jack
'''
import FileDownloader

if __name__ == '__main__':
    dubsteppath = "https://www.youtube.com/watch?v=fWRISvgAygU"
    dubsteppattern = "ARTIST - TITLE HH:MM:SS"
    catalystpath = "https://www.youtube.com/watch?v=PNjqP9JLN-c"
    catalystpattern = ""
    FileDownloader.download(catalystpath)