Code Example #1
def init_api(pelican):
	global cache, smugmug
	logging.debug('Initializing SmugMug API')
	settings = pelican.settings
	settings.setdefault('SMUGMUG_CACHE', 
						os.path.join(settings['CACHE_PATH'], 'smugmug'))
	cache = percache.Cache(settings['SMUGMUG_CACHE'])

	smugmug = SmugMugCache(api_key=settings['SMUGMUG_API_KEY'])
Code Example #2
File: DishinUtils.py  Project: papoon/aw007
#-*- coding: utf-8 -*-
# Python 3
import subprocess
from constants import *
import percache

cache = percache.Cache("../cache/DishinUtils")
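# the @cache decorator below memoizes results on disk (../cache/DishinUtils),
# so repeated calls with the same term pair reuse the stored result instead of
# re-running the DiShIn subprocess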


@cache
def callDishin(term1, term2):
    """
    Calls DiShIn python script and retrieves the console result.
    Requires: term1 and term2, the terms to be analyzed by DiShIn with the HDO.
    Ensures: calls the command and retrieves the console result as
    string (semantic similarity between term1 and term2 according to HDO).
    """

    result = subprocess.run(["python3", DISHIN_py_path, DISHIN_DB_path, term1, term2],
                            cwd=DISHIN_path, stdout=subprocess.PIPE)
    return result.stdout.decode('utf-8')


def processDishinOutput(resultText):
    """
    Processes the output of callDishin.
    Requires: resultText, the output text from callDishin.
    Ensures: returns the Resnik DiShIn semantic similarity result.
    """
    lines = resultText.split('\n')
    for line in lines:
Code Example #3
# Find the most recently released series.
RELEASED_SERIES = MAINTAINED_SERIES[-1]

# Find the series being developed.
SERIES_IN_DEVELOPMENT = [
    name for name, info in sorted(SERIES_INFO.items())
    if info.status == 'development'
][0]

# Do not modify this variable.
ALL_SERIES = list(sorted(SERIES_INFO.keys()))

SERIES_PAT = re.compile('^(' + '|'.join(ALL_SERIES) + ')/')

cache = percache.Cache("./OS_GOVERNANCE_DATA_CACHE")


def initialize_logging(debug, verbose):
    """Initialize the Logger."""

    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if verbose:
        logger.setLevel(logging.INFO)

    if debug:
Code Example #4
import operator
import percache
cache = percache.Cache('/tmp/collatz-cache')
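# chain lengths are memoized in /tmp/collatz-cache, so later runs reuse results computed earlier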


@cache
def collatz(n):
    out = []
    while n != 1:
        if n % 2 == 0:
            n //= 2  # integer division keeps n an int
        else:
            n = 3 * n + 1
        out.append(n)
    return len(out)


# build a dict of chain lengths; start at 1 because the chain for 0 never reaches 1
collatz_len = {}
for i in range(1, 1000):
    collatz_len[i] = collatz(i)  # collatz() already returns the chain length

by_length = sorted(collatz_len.items(), key=operator.itemgetter(1))
print(by_length[0])  # the (n, length) pair with the shortest chain
Code Example #5
import csv
from datetime import datetime, timedelta
import pandas as pd

pd.options.mode.chained_assignment = None
import GPy
import matplotlib.pyplot as plt
from dask import compute, delayed
from dask.distributed import Client

import numpy as np  # numpy must be imported before np.set_printoptions below

np.set_printoptions(precision=2, suppress=True)
import dask_dp4gp
import percache
from dialysis_analysis import *

cache = percache.Cache(
    "cache")  #some of the methods to load patient data are cached
from scipy.stats import pearsonr
from dialysis_analysis.prophet import ProphetException

verbose = True
veryverbose = False


def hinton(matrix, max_weight=None, ax=None):
    """
    Draw Hinton diagram for visualizing a weight matrix.
    
    From https://matplotlib.org/examples/specialty_plots/hinton_demo.html
Code Example #6
####################################################################################################
############### Don't edit anything after this point unless you know what you
############### are doing. You may know what you are doing, I don't know, but be aware that
############### everything past this point is breakable. You know, the "You break it
############### you buy it" kind of thing.
####################################################################################################
####################################################################################################

import requests, json
import argparse
import sys, os
import csv
import pprint
import percache

cache = percache.Cache('./tmp_my_cache', livesync=True)
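# livesync=True flushes every newly cached result to disk immediately, so an
# interrupted run still keeps whatever was already fetched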

import re

p = re.compile(r'([\w\d]+-){4}[\w\d]+')

DOMAIN = None
TOKEN = None


def get_headers():
    return {'Authorization': 'Bearer %s' % TOKEN}


vendor_guid_cache = {'outcome_groups': {}, 'outcomes': {}}
Code Example #7
    args = parser.parse_args()

    root = args.root
    output_root = args.out_root

    evaluative_adj_path = "{}{}_{}_filtered_{}.csv".format(
        root, args.lexicon, args.kind, args.min_freq)

    if args.distrib:
        rest_adj_path = "{}rest/{}/with_distribution/{}_filtered_{}.csv".format(
            root, args.lexicon, args.kind, args.min_freq)
    else:
        rest_adj_path = "{}rest/{}/{}_filtered_{}.csv".format(
            root, args.lexicon, args.kind, args.min_freq)

    cache = percache.Cache(rest_adj_path.replace('.csv', '') + '_tmp_cache')
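    # the cache file name is derived from rest_adj_path, so each
    # lexicon/kind/min_freq combination gets its own on-disk cache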

    @cache
    def get_anchor(word, model):
        model_anchor = GlobalAnchors(
            w2v1=model, w2v2=model,
            assume_vocabs_are_identical=True).get_global_anchors(word,
                                                                 w2v=model)
        return model_anchor

    models = []
    corpus_lens = []
    for decade in range(1960, 2010, 10):
        model = get_models_by_decade(decade, args.kind, lang=args.lexicon)
        corpus_len = get_len(str(decade), args.lengths, lang=args.lexicon)
Code Example #8
# Author:      Henrik Skov Midtiby, [email protected]
#
# Created:     2014-03-26
# Copyright:   (c) Henrik Skov Midtiby, 2014
# Licence:     LGPL
#-------------------------------------------------------------------------------

import http.client
import json
import percache
import tempfile
import os
cache_file = os.path.join(tempfile.gettempdir(), "doilookup.cache.db")
print(cache_file)
try:
    cache = percache.Cache(cache_file, livesync=True)
except Exception as e:
    print("An exception occurred while creating an instance of percache")
    print("A solution could be to delete the cache file")
    print("Path to cache file: '%s'" % cache_file)
    print("Details about the exception")
    print(e)
    raise  # without a usable cache the @cache decorator below cannot work

@cache
def get_doi_information(doi):
    conn = http.client.HTTPConnection("api.crossref.org")
    conn.request("GET", "/v1/works/" + doi)
    res = conn.getresponse()
    data = res.read()
    res.close()
    parsed_data = json.JSONDecoder().decode(data.decode('utf-8'))
Code Example #9
import numpy as np
import librosa
import percache

cache = percache.Cache('cache.cch')
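# spectrogram extraction is slow, so results are cached per filename in cache.cch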


@cache
def get_mel_spec(filename):
    sound, sr = librosa.load(filename)
    # sound, _ = librosa.effects.trim(sound)

    n_fft = 2048
    hop_length = 512
    n_mels = 128
    S = librosa.feature.melspectrogram(y=sound,  # keyword argument required by librosa >= 0.10
                                       sr=sr,
                                       n_fft=n_fft,
                                       hop_length=hop_length,
                                       n_mels=n_mels)
    S_DB = librosa.power_to_db(S, ref=np.max)
    result = librosa.util.normalize(S_DB) + 1
    result = result.transpose()
    result = result.reshape((1, ) + result.shape)
    return result
Code Example #10
from os.path import basename, splitext
import re
from itertools import pairwise  # needed by ringlength(); Python 3.10+ (or use more_itertools.pairwise)

import argparse

import geojson
from pycountry import countries
from great_circle_calculator.great_circle_calculator import distance_between_points as gcdistance

argparser = argparse.ArgumentParser(description='generate some information about (multi)polygons from geojson files')
argparser.add_argument('file', nargs='*', help='geojson file(s)')
argparser.add_argument('-parts', action='store_true', help='write one line per part, not just one line per administrative body')
argparser.add_argument('-summary', action='store_true', help='display aggregate information about each borders largest polygon')

import percache
cache = percache.Cache('borderstatscache')

# in km
def ringlength(coords):
  return sum([ gcdistance(pt1, pt2, 'kilometers') for pt1, pt2 in pairwise(coords + coords[:1]) ])

# returns an array of tuples, where the first element is the length of a part and the second an array of holes in it (again respectively their length)
def getpartsandholes(coords):
  if isinstance(coords[0][0], list):
    # multipolygon
    outer, *holes = coords
    return (ringlength(outer), [ ringlength(hole) for hole in holes ])
  else:
    return (ringlength(coords), [])

@cache
Code Example #11
File: reportificate.py  Project: graingert/malucrawl
    url = full_repo_url
    param = {
        'path': path,
        'per_page': '100'
    }
    while url:
        r = session.get(
            url,
            params=param
        )
        yield r.json()
        url = r.links.get("next", {"url": False})["url"]
        param = {}

with closing(percache.Cache(
    os.path.join(BaseDirectory.save_cache_path("malucrawl_reportificate"), "cache")
)) as cache:
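    # contextlib.closing() guarantees cache.close() is called when the block
    # exits, so cached commit/tree lookups are flushed to disk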

    @cache
    def get_commit_details(commit_url):
        return session.get(commit_url).json()

    @cache
    def count_words_in_tree(tree_url):
        return sum(
            map(
                lambda tree: blob_lacount(tree["url"]),
                itertools.ifilter(
                    lambda tree: tree["type"] == "blob" and fnmatchcase(tree["path"], valid_files),
                    session.get(tree_url, params={"recursive": 1}).json()["tree"]
                )
Code Example #12
import logging

import lxml.html
import percache
import requests
import simplekml
from googleapiclient.discovery import build  # provides build() used in get_google_client()


FIELDS = [
    'Location', 'Name', 'Frequency', 'Duplex', 'Offset', 'Tone',
    'rToneFreq', 'cToneFreq', 'DtcsCode', 'DtcsPolarity', 'Mode',
    'TStep', 'Comment'
]

GOOGLE_CLIENT = None


cache = percache.Cache('/tmp/repeaterbook-to-kml.cache')

logger = logging.getLogger(__name__)


def get_google_client(key):
    global GOOGLE_CLIENT

    if not GOOGLE_CLIENT:
        GOOGLE_CLIENT = build('customsearch', 'v1', developerKey=key)

    return GOOGLE_CLIENT


def validate_document(reader):
    if set(reader.fieldnames) != set(FIELDS):
Code Example #13
from scipy import sparse
import scipy.sparse.linalg
from scipy.sparse.linalg import spsolve

from tqdm import tqdm
import nvector as nv
import numpy as np
import pyproj
import percache
from bisect import bisect_left
import logging
import os

log = logging.getLogger(name=__name__)
data_root = os.path.expanduser('~/Data/GA-cover/')
cache = percache.Cache(data_root + 'cache')
mass_attenuation_air = 0.09  # assume some sort of bulk property
density_air = 1.22
mu_air = mass_attenuation_air * density_air


def main():
    """Main demo."""

    # Load survey data
    llh, data = get_flightlines()
    # llh in n*3 lon-lat-hei format
    # data in n*3 k th u format

    # Crop to ROI
    ROI = (120.4, 120.5, -27.4, -27.3)
Code Example #14
File: go.py  Project: bddap/phonet
import tensorflow as tf
from tensorflow.keras import Sequential
from scipy.io import wavfile
import numpy as np
import sys
import percache
import os

keras = tf.keras

# loading and fft is expensive and single-threaded so we memoize results
cache = percache.Cache("fftcache", livesync=True)

INPUT_SIZE = 2400  # length of time domain inputs
INPUT_STEP = 99  # step between successive time domain inputs
WEIGHT_FILE = 'weights'


def create_model():
    FFT_BINS = INPUT_SIZE // 2 + 1
    HIDDEN_HEIGHT = INPUT_SIZE * 2
    HIDDEN_WIDTH = 16
    classes = 5
    model = Sequential([
        keras.layers.Dense(
            HIDDEN_HEIGHT, activation='relu', input_shape=(FFT_BINS, ))
    ] + [
        keras.layers.Dense(HIDDEN_HEIGHT, activation='relu')
        for _ in range(HIDDEN_WIDTH)
    ] + [keras.layers.Dense(classes, activation='softmax')])
    model.compile(optimizer="adadelta",
Code Example #15
File: MERUtils.py  Project: papoon/aw007
#-*- coding: utf-8 -*-
# Python 3
import subprocess
from constants import *
from dbUtils import *

import percache

cache = percache.Cache("../cache/MERUtils")

#TODO FIX ERROR ON TWEETS - ARTICLES ARE ALREADY SAVING CORRECTLY IN DB WITH NEW FIELDS


def entityAnnotation():
    """
    Get entities from Articles and Tweets.
    Requires: no args.
    Ensures: saves MER terms on database and returns a list with 2 dictionaries
    (one with the terms for the articles and another with the terms for the tweets).
    """
    #dictionary with list of terms per article (title + abstract)
    termsPerArticle = {}

    #get article info
    articleInfo = getAllArticleInformation()

    # raw disease info
    diseaseInfo = getAllDiseaseInformation()

    print("DEBUG: diseaseinfo", diseaseInfo)
Code Example #16

def checkFileReturnCSVReader(file_name):
  if file_name and os.path.exists(file_name):
    return csv.reader(open(file_name, 'r', newline=''))
  else:
    return None

def getRootOutcomeGroup():
  url = "https://%s/api/v1/accounts/self/root_outcome_group" % domain
  #print 'url',url
  return requests.get(url,headers=get_headers(),verify=False).json()


import percache
cache = percache.Cache('./tmp_my_cache')
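# caching raw GET responses on disk means re-running the script does not
# re-request every page of outcomes from the API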

@cache
def c_request_get(*args, **kwargs):
  return requests.get(*args, **kwargs)

def paginated_outcomes(outcome_group_vendor_id=None):
  # Get outcomes
  all_done = False
  url = 'https://{0}/api/v1/accounts/self/outcome_groups/{1}/outcomes'.format(domain,outcome_group_vendor_id)
  while not all_done:
    response = c_request_get(url,headers=get_headers())
    for s in response.json():
      outcome = s['outcome']
      vendor_guid_cache['outcomes'].setdefault(outcome['vendor_guid'],outcome)
      yield outcome 
Code Example #17
File: lut_verify_manual.py  Project: GuckLab/ggf
"""Used for quantifying LUT error by randomly sampling kwargs"""
import pathlib

import h5py
import numpy as np
import percache

import ggf

NUM_ERRS = 1000  # number of random error values to compute

mycache = percache.Cache("lut_test.cache", livesync=True)


@mycache
def compute_ggf(**kwargs):
    return ggf.get_ggf(use_lut=False, **kwargs)


def get_kwarg_ranges(lut_path):
    with h5py.File(lut_path, mode="r") as h5:
        attrs = dict(h5["lut"].attrs)

    fixed = {}
    ranges = {}

    for kw in ["model", "stretch_ratio", "semi_minor", "relative_object_index",
               "medium_index", "effective_fiber_distance",
               "mode_field_diameter", "power_per_fiber",
               "wavelength", "poisson_ratio", "n_poly"]:
        if kw in attrs:
Code Example #18
import os
import urllib

WGET_TIMEOUT = 20  # In seconds

from enum import Enum
from PIL import Image
from math import pi, log, tan, exp, atan, log2, floor

from urllib.error import URLError
from socket import timeout as TimeoutError

from retry import retry

import percache

cache = percache.Cache("/tmp/percache_mapbox_maps", livesync=True)

# Load the MapBox token, if present
import dotenv

dotenv.load_dotenv(os.path.join(os.path.dirname(__file__), ".env"))

import matplotlib as mpl

PARAM = {
    'do_retina': True,
    'do_snap_to_dyadic': True,
}

# Convert geographical coordinates to pixels
# https://en.wikipedia.org/wiki/Web_Mercator_projection
Code Example #19
	settings.setdefault('SMUGMUG_CACHE', 
						os.path.join(settings['CACHE_PATH'], 'smugmug'))
	cache = percache.Cache(settings['SMUGMUG_CACHE'])

	smugmug = SmugMugCache(api_key=settings['SMUGMUG_API_KEY'])

def register():
	signals.initialized.connect(init_api)
	signals.article_generator_context.connect(add_smugmug_album)
	signals.page_generator_context.connect(add_smugmug_album)
	signals.finalized.connect(persist_api_cache)

if __name__ == '__main__':
	import sys

	cache = percache.Cache('cache')
	smugmug = SmugMugCache(api_key=SMUGMUG_API_KEY)

	username = sys.argv[1]
	album_title = ' '.join(sys.argv[2:])

	# Print list of albums for user
	albums = smugmug.albums_get(NickName=username, Heavy=True)
	for album in albums["Albums"]:
		print("%s, %s" % (album["id"], album["URL"]))

	# Return image metadata for specified album
	if len(album_title):
		for image in get_images(username, album_title, 'gallery'):
			print(image)
Code Example #20
# -*- coding: utf-8 -*-
"""
This module contains the primary :class:`DelegateLoad` class for handling a
single load of AP delegate counts and methods necessary to obtain them.
"""
import json
import percache

from elex import DELEGATE_REPORT_ID_CACHE_FILE
from elex.api import utils
from collections import OrderedDict

cache = percache.Cache(DELEGATE_REPORT_ID_CACHE_FILE, livesync=True)
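# livesync=True writes each cached report response to the cache file as soon
# as it is retrieved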


@cache
def _get_reports(params={}):
    """
    Use percache to dump a report response to disk
    """
    resp = utils.api_request('/reports', **params)
    if resp.ok:
        return resp.json().get('reports')
    else:
        cache.clear()
        return []


def clear_delegate_cache():
    """
    Delete the delegate cache file
Code Example #21
import populartimes
import requests
from io import StringIO
import csv
import more_itertools
import sys

import time

from credentials import API_KEY
from googleplaces import GooglePlaces, types, lang
import percache

google_places = GooglePlaces(API_KEY)
cache = percache.Cache("./times_cache")


def load_airports():
    data_source = "https://raw.githubusercontent.com/jpatokal/openflights/master/data/airports.dat"
    fieldnames = [
        "Airline ID", "Name", "City", "Country", "IATA", "ICAO", "Latitude",
        "Longitude", "Altitude", "Timezone", "DST", "Tz database time zone",
        "Type", "Source"
    ]
    resp = requests.get(data_source)
    csv_file = StringIO(resp.text)
    reader = csv.DictReader(csv_file, fieldnames=fieldnames)
    yield from (dict(airport) for airport in reader
                if airport["Country"] == "United States")
    # yield from reader
Code Example #22
# vim: syntax=python ts=4 sts=4 sw=4 expandtab

# Reads the Sky & Telescope official star names document (.docx)
# given as the first argument, and returns star_names.fab output

import docx
import sys
import re
import percache
from astroquery.vizier import Vizier
from collections import defaultdict

# livesync=True so that even if we ctrl-c out of
# the program, any previously cached values will
# be present for future invocations
cache = percache.Cache('.hip_cache_stars', livesync=True)


@cache
def get_hip(name):
    """
    Given a star's Bayer designation, queries
    VizieR and attempts to locate a Hipparcos
    star ID at the location.

    Returns an integer HIP ID if found, or None otherwise

    Maintains a .hip_cache_stars file to speed up lookups;
    you can delete the .hip_cache_stars file to perform
    fresh lookups.
    """
Code Example #23
File: linkage_solver.py  Project: jaywreddy/spiderpig
import sympy
import math
from sympy.geometry import *
from sympy.plotting import plot_implicit, plot_parametric, plot
from sympy.simplify import simplify
from sympy.utilities.autowrap import ufuncify

import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches

from digifab import *
import solid

import percache

#rationalization
cache = percache.Cache("/tmp/my-cache15")


def create_servo_mount():
    """
    right now designed to fit Jacobs institute model
    """
    width = 6.5
    length = 20.0

    depth = 2.3
    voffset = -18.5 - 9

    left_bar = solid.cube([width, length, depth], center=True)
    hole = solid.cylinder(r=2, h=10, center=True, segments=100)
    hole1 = solid.translate([0, 4, 0])(hole)
Code Example #24
File: util.py  Project: hamayanhamayan/ctf-solutions
import requests
from typing import Dict
from typing import Union, cast
import base64
import time
import binascii
import urllib.parse
import hashlib

import percache
cache = percache.Cache('util')

lasttime = None


def get_file_via_internet_without_cache(url: str,
                                        params: Dict[str, str],
                                        cookie: Dict[str, str] = {}) -> str:
    global lasttime

    if lasttime is not None:
        d = 1 - (time.time() - lasttime)
        if 0 < d:
            time.sleep(d)

    req = requests.Request('GET', url, params=params, cookies=cookie)
    prepared = req.prepare()
    session = requests.Session()
    res = session.send(prepared, allow_redirects=True)
    lasttime = time.time()
    return res.content.decode("utf-8")
Code Example #25
# Load dependencies
import sys
import os
import re
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdftypes import PDFObjectNotFound
import percache
import tempfile
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
TESTFILE = os.path.join(tempfile.gettempdir(), "gradedexams.cache")
cache = percache.Cache(TESTFILE, livesync=True)

from collections import namedtuple
FilenameAndTimestamp = namedtuple("FilenameAndTimestamp", ["filename", "timestamp"])


# dump_comments
# Analyzes an open pdf file and extracting comments from it.
# It is tested with comments inserted with PDF X-Change Viewer.
def dump_comments(doc):
    comments = []
    visited = set()
    for xref in doc.xrefs:
        # Iterate over all objects in the pdf file.
        for objid in xref.get_objids():
            if objid in visited:
Code Example #26
                "ra": round(float(m.group(2).strip()), 5),
                "npd": round(float(m.group(3).strip()), 4),
                "dec": round(90 - float(m.group(3).strip()), 4),
                "bayer": m.group(4).strip(),
                "superscript": None if m.group(5) == " " else m.group(5),
                "weight": int(m.group(6)),
                "constellation": constellation,
            }
        else:
            if not line.startswith('#'):
                print("WARNING: No match: {}".format(line), file=sys.stderr)

# livesync=True so that even if we ctrl-c out of
# the program, any previously cached values will
# be present for future invocations
cache = percache.Cache('.hip_cache', livesync=True)
@cache
def get_hip(ra, dec, mag):
    """
    Given an RA (in hours and decimals), and Dec (in
    degrees and decimals), and a magnitude (in
    visual magnitudes), queries VizieR and attempts
    to locate a Hipparcos star ID at the location.

    Returns an integer HIP ID if found, or None otherwise

    Maintains a .hip_cache file to speed up lookups;
    you can delete the .hip_cache file to perform
    fresh lookups.
    """
    coord = SkyCoord(ra=Angle("{} hours".format(ra)),
Code Example #27
This program is slow. Try the other one! :)
"""

# non-standard imports
from joblib import Memory
from wikidata.client import Client
import percache

# standard imports
import os.path
import sys
import tempfile

# set up cache
tmpdir = os.path.join(tempfile.gettempdir(), 'wikidata.org.percache')
cache = percache.Cache(tmpdir)

# globals
fieldnames = ['wikidata_id', 'name', 'instance_of', 'country']

# set up Wikidata client
client = Client()
p_instance_of = client.get('P31')
p_country = client.get('P17')


def get_label(e, p):
    """Get label from Wikidata"""
    r = None
    try:
        r = e[p].label
Code Example #28
from abc import ABCMeta, abstractmethod
import requests
import time
import percache

from .requests import Requests

cache = percache.Cache('.duedeligence_request_cache')


class CacheRequests(Requests):
    __metaclass__ = ABCMeta

    def __init__(self, wait_time=1):
        self._wait_time = wait_time

    @abstractmethod
    def get(self, url: str, stream=False):
        return _cache_get(url, stream, self._wait_time)


@cache
def _cache_get(url, stream, wait_time):
    print('cache')
    time.sleep(wait_time)
    return requests.get(url, stream=stream)
Code Example #29
from itertools import groupby
from collections import Counter

import matplotlib as mpl
import matplotlib.pyplot as plt

import logging as logger
logger.basicConfig(level=logger.DEBUG,
                   format="%(levelname)-8s [%(asctime)s] : %(message)s",
                   datefmt="%Y%m%d %H:%M:%S %Z")
logger.getLogger('matplotlib').setLevel(logger.WARNING)
logger.getLogger('PIL').setLevel(logger.WARNING)

import os
import percache
cache = percache.Cache("/tmp/percache_" + os.path.basename(__file__),
                       livesync=True)

PARAM = {
    'taxidata': "data/taxidata/sqlite/UV/db.db",
    'out_images_path': "exploration/",
    'mpl_style': {
        'font.size': 3,
        'xtick.major.size': 2,
        'ytick.major.size': 0,
        'xtick.major.pad': 1,
        'ytick.major.pad': 1,
        'savefig.bbox': "tight",
        'savefig.pad_inches': 0,
        'savefig.dpi': 300,
    },
}
Code Example #30
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

import time as _time
import multitasking as _multitasking
import pandas as _pd
import percache

from . import Ticker, utils
from . import shared

cache = percache.Cache("/tmp/yfinance_cache")


def download(tickers,
             start=None,
             end=None,
             actions=False,
             threads=True,
             group_by='column',
             auto_adjust=False,
             back_adjust=False,
             progress=True,
             period="max",
             show_errors=True,
             interval="1d",
             prepost=False,