Example #1
 def __init__(self):
     self._cache = Cache(settings.CACHE_ROOT)
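For orientation, a minimal sketch of the Cache API these examples rely on (the directory path is illustrative):

from diskcache import Cache

cache = Cache('/tmp/example-cache')                      # directory is created if missing
cache.set('key', {'any': 'picklable value'}, expire=60)  # optional TTL in seconds
print(cache.get('key'))                                  # None once missing or expired
cache.close()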
Example #2
 def __init__(self, loop, messenger, database):
     self._loop = loop
     self._messenger = messenger
     self._database = database
     self.cache = Cache()
Example #3
application = Flask(__name__)
mongo = PyMongo(application)
mongo_server = "localhost"
mongo_port = "27017"
mongo_uri = "mongodb://" + mongo_server + ":" + mongo_port
connection = MongoClient(mongo_uri)
db = connection.project
servers = db.servers
server_transactions = ServerTransactions()
AUTH_KEY = "17771fab5708b94b42cfd00c444b6eaa"
SERVER_HOST = None
SERVER_PORT = None

# Set the cache location
cache = Cache('/tmp/mycachedir')


def asynchronous_upload(file, directory, headers):
    print "\nBEGINNING ASYNCHRONOUS UPLOAD ...\n"
    server_transactions.asynchronous_upload_transaction(
        file, directory, headers)


def asynchronous_delete(file, directory, headers):
    print "\nBEGINNING ASYNCHRONOUS DOWNLOAD ...\n"
    server_transactions.asynchronous_delete_transaction(
        file, directory, headers)


def server_instance():
Example #4
import configparser

from diskcache import Cache
from peewee import (
    Model,
    DateTimeField,
    MySQLDatabase,
    ForeignKeyField,
    DateField,
    FloatField,
    TextField,
)

env_config = configparser.ConfigParser()
env_config.read('env.ini')
dbname = env_config.get('database', 'database')
host = env_config.get('database', 'host')
username = env_config.get('database', 'user')
password = env_config.get('database', 'password')

cache = Cache()

db = MySQLDatabase(
    dbname,
    user=username,
    password=password,
    host=host,
)


class BaseModel(Model):
    class Meta:
        database = db


class User(BaseModel):
    # The original example is truncated here; the fields below are a
    # hypothetical sketch using the peewee field types imported above.
    name = TextField()
    birthday = DateField()
    rating = FloatField()
    created = DateTimeField()
Example #5
from diskcache import Cache

with Cache('/tmp/mycachedir') as cache:
    print('Heartrate: ', cache[b'heartrate'])
    print('Speed: ', cache[b'speed'])
    print('Cadence: ', cache[b'cadence'])
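Indexing with cache[b'...'] raises KeyError when a key was never set (Example #27 below seeds these keys); a defensive variant of the same read, using Cache.get with a default:

from diskcache import Cache

with Cache('/tmp/mycachedir') as cache:
    # get() returns the default instead of raising KeyError
    print('Heartrate:', cache.get(b'heartrate', '--'))
    print('Speed:', cache.get(b'speed', '--'))
    print('Cadence:', cache.get(b'cadence', '--'))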
Example #6
 def initialize(self):
     from diskcache import Cache
     self.cache = Cache(self.cache_location or self.uid + "_cache")
Example #7
import os
import tempfile

from diskcache import Cache
from tkinter import (
    StringVar,
    IntVar,
    filedialog,
    messagebox,
    Menu,
    TclError,
)
from tkinter.ttk import Frame, Label, Entry, Button, Checkbutton, Treeview, Notebook

from getmyancestors.classes.tree import Indi, Fam, Tree
from getmyancestors.classes.gedcom import Gedcom
from getmyancestors.classes.session import Session
from getmyancestors.classes.translation import translations

tmp_dir = os.path.join(tempfile.gettempdir(), "fstogedcom")
cache = Cache(tmp_dir)
lang = cache.get("lang")


def _(string):
    if string in translations and lang in translations[string]:
        return translations[string][lang]
    return string


class EntryWithMenu(Entry):
    """Entry widget with right-clic menu to copy/cut/paste"""
    def __init__(self, master, **kw):
        super().__init__(master, **kw)
        self.bind("<Button-3>", self.click_right)
Example #8
    def set_threshold(self, fpr=None, threshold=None, gc=False):
        """Set motif scanning threshold based on background sequences.

        Parameters
        ----------
        fpr : float, optional
            Desired FPR, between 0.0 and 1.0.

        threshold : float or str, optional
            Desired motif threshold, expressed as the fraction of the
            difference between minimum and maximum score of the PWM.
            Should either be a float between 0.0 and 1.0 or a filename
            with thresholds as created by 'gimme threshold'.

        """
        if threshold and fpr:
            raise ValueError("Supply either fpr or threshold, not both.")

        if threshold is None and fpr is None:
            if self.genome:
                fpr = 0.01
                logger.info(f"Using default FPR of {fpr}")
            else:
                threshold = 0.95
                logger.info(
                    f"Genome not specified, using default threshold of {threshold}."
                )
                logger.info("This is likely not ideal.")

        if fpr:
            fpr = float(fpr)
            if not (0.0 < fpr < 1.0):
                raise ValueError("Parameter fpr should be between 0 and 1")

        if not self.motifs:
            raise ValueError("please run set_motifs() first")

        motifs = read_motifs(self.motifs)
        gc_bins = ["{:.2f}-{:.2f}".format(*gc_bin) for gc_bin in self.gc_bins]

        if threshold is not None:
            d = parse_threshold_values(self.motifs, threshold)
            self._threshold = pd.DataFrame(d, index=[0])
            self._threshold = self._threshold.join(
                pd.DataFrame(gc_bins,
                             index=[0] * len(gc_bins),
                             columns=["gc_bin"]))
            self._threshold = self._threshold.set_index("gc_bin")
            return

        if not self.background:
            try:
                self.set_background(gc=gc)
            except Exception:
                raise ValueError("please run set_background() first")

        seqs = self.background.seqs

        lock.acquire()
        with Cache(CACHE_DIR) as cache:
            scan_motifs = []
            self._threshold = None
            for motif in motifs:
                k = "{}|{}|{:.4f}|{}".format(motif.hash(),
                                             self.background_hash, fpr,
                                             ",".join(sorted(gc_bins)))
                vals = cache.get(k)
                if vals is None:
                    scan_motifs.append(motif)
                else:
                    if self._threshold is None:
                        self._threshold = vals.to_frame()
                    else:
                        self._threshold[motif.id] = vals

            if len(scan_motifs) > 0:
                logger.info("determining FPR-based threshold")
                df = self._threshold_from_seqs(scan_motifs, seqs,
                                               fpr).set_index("gc_bin")
                if self._threshold is None:
                    self._threshold = df
                else:
                    self._threshold = pd.concat((self._threshold, df), axis=1)
                for motif in scan_motifs:
                    k = "{}|{}|{:.4f}|{}".format(
                        motif.hash(),
                        self.background_hash,
                        fpr,
                        ",".join(sorted(gc_bins)),
                    )
                    cache.set(k, df[motif.id])
        lock.release()
        self.threshold_str = "{}_{}_{}_{}".format(fpr, threshold,
                                                  self.background_hash,
                                                  ",".join(sorted(gc_bins)))
Example #9
print(lastString, file=tf)
print('</div>', file=tf)
print(printFooter(), file=tf)

tf.close()
os.rename(tmpname, filename)

if country is None:
  msgpackFile = '%s/players.msgpack' % webDir
  msgpackTmpFile = '%s.tmp' % msgpackFile
  with open(msgpackTmpFile, 'wb') as out:
    out.write(msgpack.packb(types))
    out.write(msgpack.packb(maps))
    out.write(msgpack.packb(totalPoints))
    out.write(msgpack.packb(pointsRanks))
    out.write(msgpack.packb(weeklyPointsRanks))
    out.write(msgpack.packb(monthlyPointsRanks))
    out.write(msgpack.packb(yearlyPointsRanks))
    out.write(msgpack.packb(teamrankRanks))
    out.write(msgpack.packb(rankRanks))
    out.write(msgpack.packb(serverRanks))
  os.rename(msgpackTmpFile, msgpackFile)

  with Cache('/home/teeworlds/servers/players-cache',
             eviction_policy='none',
             sqlite_auto_vacuum=0,
             sqlite_journal_mode='off') as cache:
    for player, value in players.items():
        cache[player] = value
    cachedPlayers = list(cache.iterkeys())
    for player in cachedPlayers:
        if player not in players:
            del cache[player]
Example #10
    def set_meanstd(self, gc=False):
        if not self.background:
            self.set_background(gc=gc)

        self.meanstd = {}
        seqs = self.background.seqs
        if gc:
            seq_bins = [s.split(" ")[-1] for s in self.background.ids]
            bins = list(set(seq_bins))
        else:
            seq_bins = ["0.00-1.00"] * len(seqs)
            bins = ["0.00-1.00"]

        motifs = read_motifs(self.motifs)
        lock.acquire()
        with Cache(CACHE_DIR) as cache:
            scan_motifs = []
            for bin in bins:
                if bin not in self.meanstd:
                    self.meanstd[bin] = {}
                bin_seqs = [s for s, b in zip(seqs, seq_bins) if b == bin]

                for motif in motifs:
                    k = "e{}|{}|{}".format(motif.hash(), self.background_hash,
                                           bin)

                    results = cache.get(k)
                    if results is None:
                        scan_motifs.append(motif)
                    else:
                        self.meanstd[bin][motif.id] = results

                if len(scan_motifs) > 0:
                    logger.debug("Determining mean and stddev for motifs.")
                    for motif, mean, std in self._meanstd_from_seqs(
                            scan_motifs, bin_seqs):
                        k = "e{}|{}|{}".format(motif.hash(),
                                               self.background_hash, bin)
                        cache.set(k, [mean, std])
                        self.meanstd[bin][motif.id] = mean, std

            # Prevent std of 0
            # This should only happen in testing
            for motif in motifs:
                stds = np.array(
                    [self.meanstd[gcbin][motif.id][1] for gcbin in bins])
                idx = stds == 0
                if idx.any():
                    std = np.mean(stds[~idx])
                    for gcbin in np.array(bins)[idx]:
                        k = "e{}|{}|{}".format(motif.hash(),
                                               self.background_hash, gcbin)
                        mean = self.meanstd[gcbin][motif.id][0]
                        cache.set(k, [mean, std])
                        self.meanstd[gcbin][motif.id] = mean, std

        lock.release()

        for gc_bin in self.gc_bins:
            gc_bin = "{:.2f}-{:.2f}".format(*gc_bin)
            if gc_bin not in self.meanstd:
                valid_bins = []
                for b in self.gc_bins:
                    bstr = "{:.2f}-{:.2f}".format(b[0], b[1])
                    if bstr in self.meanstd:
                        valid_bins.append(((b[0] + b[1]) / 2, bstr))

                v = float(gc_bin.split("-")[1])
                _, bstr = sorted(valid_bins, key=lambda x: abs(x[0] - v))[0]
                logger.warning(f"No mean/std for GC bin {gc_bin}; using closest bin {bstr}")
                self.meanstd[gc_bin] = self.meanstd[bstr]
Example #11
    def set_background(self,
                       fname=None,
                       genome=None,
                       size=200,
                       nseq=None,
                       gc=False,
                       gc_bins=None):
        """Set the background to use for FPR and z-score calculations.

        Background can be specified either as a genome name or as the
        name of a FASTA file.

        Parameters
        ----------
        fname : str, optional
            Name of FASTA file to use as background.

        genome : str, optional
            Name of genome to use to retrieve random sequences.

        size : int, optional
            Size of genomic sequences to retrieve. The default
            is 200.

        nseq : int, optional
            Number of genomic sequences to retrieve.

        gc : bool, optional
            If True, use a background binned by GC fraction.

        gc_bins : list, optional
            List of (min, max) tuples defining the GC fraction bins.
        """
        if self.background:
            return

        size = int(size)

        if gc_bins is None:
            if gc:
                gc_bins = [(0.0, 0.2), (0.8, 1)]
                for b in np.arange(0.2, 0.799, 0.05):
                    gc_bins.append((b, b + 0.05))
            else:
                gc_bins = [(0, 1)]
        if nseq is None:
            nseq = max(10000, len(gc_bins) * 1000)

        if genome and fname:
            raise ValueError(
                "Supply either genome or filename for background, not both.")

        if fname:
            if not os.path.exists(fname):
                raise IOError(
                    "Background file {} does not exist!".format(fname))

            self.background = Fasta(fname)
            self.background_hash = file_checksum(fname)
            return

        if not genome:
            if self.genome:
                genome = self.genome
            else:
                raise ValueError(
                    "Need either genome or filename for background.")

        logger.debug("using background: genome {} with size {}".format(
            genome, size))
        lock.acquire()
        with Cache(CACHE_DIR) as cache:
            self.background_hash = "d{}:{}:{}:{}".format(
                genome, int(size), gc, str(gc_bins))
            c = cache.get(self.background_hash)
            if c:
                fa, gc_bins = c
            else:
                fa = None

            if not fa:
                if gc:
                    with NamedTemporaryFile() as tmp:
                        logger.info("using {} sequences".format(nseq))
                        gc_bin_bedfile(tmp.name,
                                       genome,
                                       number=nseq,
                                       length=size,
                                       bins=gc_bins)
                        fa = as_fasta(tmp.name, genome=genome)
                else:
                    fa = RandomGenomicFasta(genome, size, nseq)
                cache.set(self.background_hash, (fa, gc_bins))
        lock.release()

        self.background = fa
        if gc_bins:
            self.gc_bins = gc_bins
Example #12
    parser.add_argument(
        '--token',
        default=os.environ.get('UTT_TOKEN'),
        help='token for submitting data to tracker '
             '(default: read from environment variable UTT_TOKEN)',
    )
    parser.add_argument('--base-url',
                        type=str,
                        default='https://tracker.tauguide.de')
    options = parser.parse_args()

    if options.verbose:
        verbose = True
    if options.debug:
        verbose = True
        DEBUG = True

    with Cache(directory='item-price-cache') as cache:
        fuel_prices = get_fuel_prices(cache)

    # print result
    if verbose:
        stations_ascending = sorted(fuel_prices.keys(),
                                    key=lambda k: fuel_prices[k])
        for station in stations_ascending:
            print("%8.2f  %s" % (fuel_prices[station], station))

    # if token, send result to tracker
    if options.token:
        url = options.base_url + '/v1/fuel_estimation/add'
        payload = {
            'token': options.token,
            'stations': fuel_prices,
Example #13
parser.add_argument('--stop-id',
                    help='Stop id: the numeric "eva" value (find it in the URL at '
                    'https://developer.deutschebahn.com/); default is Karlsruhe Hbf (8000191)',
                    default='8000191')
parser.add_argument('--api-key',
                    help='API key; registration at https://developer.deutschebahn.com/ '
                    'is free',
                    )
parser.add_argument('--size',
                    help='Minimum number of trains to find (capped at 20 API requests)',
                    default=4
                    )

args = parser.parse_args()

cache = Cache(directory='/tmp/db-timetable')

numeric_level = getattr(logging, args.log.upper(), None)
if not isinstance(numeric_level, int):
    raise ValueError('Invalid log level: %s' % args.log)
logging.basicConfig(level=numeric_level)

logging.debug(args)

mqtt_auth = {'username': args.mqtt_user, 'password': args.mqtt_pass}

# Find amount
trains = []
request_time = datetime.datetime.now()

for x in range(20):
Example #14
from datetime import datetime, timedelta, timezone
import io
from binance.client import Client
from binance.exceptions import BinanceAPIException
import requests
import xmltodict
import zipfile

from pebble import ProcessPool
from concurrent.futures import TimeoutError
from diskcache import Cache

from .logger import Logger

cache = Cache("data", size_limit=int(1e12))


def download(link):
    r = requests.get(
        link,
        headers={
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',
            'Accept-Language': 'en-US,en;q=0.5',
            'Origin': 'https://data.binance.vision',
            'Referer': 'https://data.binance.vision/'
        })
    with zipfile.ZipFile(io.BytesIO(r.content)) as z:
        f = z.infolist()[0]
        return z.open(f).read()
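Since a given archive link always yields the same payload, the module-level cache could memoize download; a sketch under that assumption:

@cache.memoize()
def cached_download(link):
    # Hypothetical wrapper: repeated links are served from the "data" cache
    return download(link)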
Example #15
import logging
import time
import uuid

from diskcache import Cache
from diskcache import RLock

from kolibri.core.tasks import compat
from kolibri.deployment.default.cache import diskcache_location

# An object on which to store data about the current job
# So far the only use is to track the job, but other metadata
# could be added.
current_state_tracker = compat.local()

db_task_write_lock = RLock(Cache(diskcache_location), "db_task_write_lock")


def get_current_job():
    return getattr(current_state_tracker, "job", None)


def stringify_func(func):
    if not callable(func):
        raise TypeError(
            "function {} passed to stringify_func isn't a function!".format(
                func))

    fqn = "{module}.{funcname}".format(module=func.__module__,
                                       funcname=func.__name__)
    return fqn
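db_task_write_lock is a diskcache RLock, which works as a context manager across processes sharing the cache; a minimal sketch of guarding a write (the function and its body are illustrative):

def update_job_record(job):
    with db_task_write_lock:
        ...  # perform the serialized database write here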
Example #16
 def initialize():
     LocalTaskCache._cache = Cache(CACHE_LOCATION)
     LocalTaskCache._initialized = True
Example #17
import os
from os.path import abspath, dirname

from googleapiclient.discovery import build
from diskcache import Cache

from mentorship.gmail.data import Message, MessageId
from mentorship.gmail.factory import MessageFactory, MessageIdFactory

# If modifying these scopes, delete the file token.pickle.
SCOPES = ["https://www.googleapis.com/auth/gmail.readonly"]

CREDENTIALS_FILENAME = os.path.join(
    dirname(dirname(abspath(__file__))), "credentials.pickle"
)
SECRET_FILENAME = os.path.join(dirname(dirname(abspath(__file__))), "secret.json")
CACHE_LOCATION = os.getenv("CACHE_LOCATION", "/tmp/ml-mentorship-cache")

cache = Cache(CACHE_LOCATION)


class GmailApi:
    def initialize(self):
        """Calling this authenticates the user with Gmail and builds the internal client
        """
        if not hasattr(self, "_service"):
            self.authenticate()
            self._service = build("gmail", "v1", credentials=self.credentials)

    def get_labels(self):
        results = self.get_service().users().labels().list(userId="me").execute()
        return results

    def get_messages_for_page(
Example #18
 def __init__(self, database_dir: Path):
     if not database_dir.exists():
         database_dir.mkdir(mode=0o750, parents=True)
     self._cache = Cache(str(database_dir))
Example #19
 def __init__(self, dimension=DimensionType.DIM_2D):
     self._cache = Cache(settings.CACHE_ROOT)
     self._dimension = dimension
Example #20
from appdirs import user_cache_dir
from diskcache import Cache

cache = Cache(user_cache_dir('pypi-client', 'PyPI'))
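A hedged sketch of how such a user-level cache is typically paired with network calls; the helper and endpoint are illustrative, not part of pypi-client:

import requests

@cache.memoize(expire=24 * 60 * 60)  # keep responses for a day
def fetch_package_info(name):
    # Hypothetical helper: results persist in the user cache directory
    return requests.get('https://pypi.org/pypi/{}/json'.format(name)).json()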
Example #21
import argparse
import os
import requests
import tempfile

from diskcache import Cache

ZUUL_API_BUILD = 'https://review.rdoproject.org/zuul/api/builds?job_name='

cache = Cache('/tmp/skip_cache')
cache.expire()


def main():
    parser = argparse.ArgumentParser(
        description='This will get the tempest file for fs021.')
    parser.add_argument(
        '--job_name',
        default='periodic-tripleo-ci-centos-7-ovb-1ctlr_2comp-'
        'featureset021-master',
        help="(default: %(default)s)")
    parser.add_argument(
        '--log_file',
        default='',
        help='specify the file name to be downloaded')
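cache.expire() above evicts entries whose time-to-live has lapsed; a sketch of storing a value with such a TTL (key and URL are illustrative):

# Illustrative only: the entry disappears roughly an hour after being set
cache.set('last_log_url', 'https://example.org/logs/tempest.html', expire=3600)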
Example #22
import logging
from pathlib import Path

import requests
from appdirs import AppDirs
from bs4 import BeautifulSoup
from diskcache import Cache
from fuzzywuzzy import process
from ratelimit import limits, sleep_and_retry

from fyler import settings
from fyler.models import Media, Series, Episode, Special, Movie
from .provider import Provider

_cache_dir = Path(settings.appdirs.user_cache_dir) / 'anidb'
_titles_dat = Path(
    settings.appdirs.user_cache_dir) / 'anidb/data/anime-titles.dat'
_cache_dir.mkdir(parents=True, exist_ok=True)
cache = Cache(directory=str(_cache_dir))

logger = logging.getLogger(__name__)


@sleep_and_retry
@limits(calls=2, period=5)
def _rl_get(*args, **kwargs):
    return requests.get(*args, **kwargs)


@cache.memoize(expire=60 * 60 * 24 * 30)  # Cache for 1 month
def _raw_get_info(id: int) -> str:
    args = {
        'request': 'anime',
        'client': 'fyler',
Example #23
def thread_status_api():
    cache = Cache(default_cache_dir)
    return "current: {}, total: {}".format(cache['current'], cache['total'])
Example #24
import copy
import os
import sys

from diskcache import Cache

from kolibri.utils.conf import KOLIBRI_HOME
from kolibri.utils.conf import OPTIONS

cache_options = OPTIONS["Cache"]

pickle_protocol = OPTIONS["Python"]["PICKLE_PROTOCOL"]

diskcache_location = os.path.join(KOLIBRI_HOME, "process_cache")

diskcache_cache = Cache(diskcache_location,
                        disk_pickle_protocol=pickle_protocol)

# Default to LocMemCache, as it has the simplest configuration
default_cache = {
    "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
    # Default time out of each cache key
    "TIMEOUT": cache_options["CACHE_TIMEOUT"],
    "OPTIONS": {
        "MAX_ENTRIES": cache_options["CACHE_MAX_ENTRIES"]
    },
}

built_files_prefix = "built_files"

built_files_cache = {
    "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
Example #25
    'config': 'CONFIG',
    'figure_idx': 'FIGURE_IDX',
    'figure': 'FIGURE',
    'figure_ref': 'FIGURE_REF',
    'figure_layout': 'FIGURE_LAYOUT',
    'task_id': 'TASK_ID',
    'filter_kwargs': 'FILTER_KWARGS',
    'selected_data': 'SELECTED_DATA'
}
KEY_TYPES = {'CAT': 'categorical', 'NUM': 'numerical'}

redis_ip = os.environ.get('REDIS_SERVER_SERVICE_HOST', '127.0.0.1')
redis_url = 'redis://' + redis_ip + ':6379'
redis_instance = redis.StrictRedis.from_url(redis_url)

cache = Cache('./cache', eviction_policy='none')


def load_config(json_file):
    """
    Load config json file

    :param str json_file
        json file path

    :return: configuration struct
    :rtype: dict
    """
    with open(json_file, 'r') as read_file:
        return json.load(read_file)
Example #26
    def search(
        self,
        name=None,
        ip=None,
        hexadecimal=None,
        type="ANY",
        bailiwick=None,
        wildcard_left=None,
        wildcard_right=None,
        inverse=False,
        sort=True,
        return_limit=10000,
        remote_limit=50000,
        epoch=False,
        time_first_before=None,
        time_first_after=None,
        time_last_before=None,
        time_last_after=None,
    ):
        """
        A method of the DNSDB Class to search the DNSDB API.

        :param name: string (required)
            fully qualified domain name
        :param ip: string
            IPv4 or IPv6 address, CIDR notation is valid
        :param hexadecimal: string
            hexadecimal digits specifying a raw octet string
        :param type: string (optional: default="ANY")
            dns resource record types (ANY, A, MX, SIG, etc)
        :param bailiwick: string (optional: default=None)
            a label in a fqdn, not valid for inverse queries
        :param wildcard_left: Boolean (optional: default=None)
            wildcard search to the left of a dot in a domain name
        :param wildcard_right: Boolean (optional: default=None)
            wildcard search to the right of a dot in a domain name
        :param inverse: boolean (optional: default=False)
            search for names resolving to names (e.g. MX, NS, CNAME, etc)
            only valid when used with name
        :param sort: boolean (optional: default=True)
        :param return_limit: integer (optional: default=10000)
        :param remote_limit: integer (optional: default=50000)
        :param epoch: boolean (optional: default=False)
        :param time_first_before:
        :param time_first_after:
        :param time_last_before:
        :param time_last_after:

        :return: Object
        """

        options = dict()

        options["name"] = name
        options["ip"] = ip
        options["hex"] = hexadecimal
        options["type"] = type
        options["bailiwick"] = bailiwick
        options["wildcard_left"] = wildcard_left
        options["wildcard_right"] = wildcard_right
        options["inverse"] = inverse
        options["sort"] = sort
        options["return_limit"] = return_limit
        options["remote_limit"] = remote_limit
        options["epoch"] = epoch
        options["time_first_before"] = time_first_before
        options["time_first_after"] = time_first_after
        options["time_last_before"] = time_last_before
        options["time_last_after"] = time_last_after
        options["api_key"] = self.api_key
        options["server"] = self.server
        options["cache"] = self.cache
        options["cache_location"] = self.cache_location
        options["cache_timeout"] = self.cache_timeout

        options = utils.pre_process(options)

        uri = utils.build_uri(options)

        if options["cache"] is True:
            cache = Cache(options["cache_location"])

            cached_result = cache.get(uri)

            if cached_result:
                data = json.loads(
                    gzip.decompress(cached_result).decode("utf-8"))
                results = Result(
                    records=data["records"],
                    status_code=data["status_code"],
                    error=data["error"],
                    quota=data["quota"],
                    cached=True,
                )
            else:
                results = _query(options, uri)
                if results.status_code == 200 or results.status_code == 404:
                    compressed = Result.to_compressed(results)
                    cache.set(uri, compressed, expire=options["cache_timeout"])
        else:
            results = _query(options, uri)

        if results.status_code == 200:
            results = utils.post_process(options, results)

        return results
Example #27
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

from diskcache import Cache

cache_location = "../db"
with Cache(cache_location) as cache:
    cache[b'speed'] = 0
    cache[b'prevspdrevcount'] = 0
    cache[b'prevspdevttime'] = 0
    cache[b'prevcadrevcount'] = 0
    cache[b'prevcadevttime'] = 0
    cache[b'cadence'] = 0
    cache[b'heartrate'] = 0
    cache[b'temperature'] = 0

    print('Speed cached value: ' + str(cache[b'speed']))
    print('Prev Speed Rev Count cached value: ' +
          str(cache[b'prevspdrevcount']))
    print('Prev Speed Event Time cached value: ' +
          str(cache[b'prevspdevttime']))
    print('Previous Cadence Rev Count cached value: ' +
          str(cache[b'prevcadrevcount']))
    print('Previous Cadence Event Time cached value: ' +
          str(cache[b'prevcadevttime']))
    print('Cadence cached value: ' + str(cache[b'cadence']))
    print('Heartrate cached value: ' + str(cache[b'heartrate']))
    print('Temperature cached value: ' + str(cache[b'temperature']))
Example #28
import os
import re
import subprocess
import sys
import threading
import time
import webbrowser  # to open link on browser
from collections import namedtuple
from typing import Tuple
from urllib import request

import requests
from diskcache import Cache

import services as s

cache = Cache(os.path.join(s.SETTINGS_DIR, 'cache'))

if sys.platform == "win32":
    import win32process
    import psutil
    import win32gui
elif sys.platform == "linux":
    import dbus
elif sys.platform == "darwin":
    import applescript


class Song:
    name = ""
    artist = ""
    album = "UNKNOWN"
Example #29
 def __init__(self, geocoder_from_geopy, identifier):
     self.geocoder_from_geopy = geocoder_from_geopy
     self.identifier = identifier
     self.cache = Cache('tmp/' + identifier)
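A hedged sketch of how such a per-geocoder cache might be used; the lookup method below is hypothetical:

 def geocode(self, query):
     # Hypothetical cached lookup: misses fall through to geopy
     result = self.cache.get(query)
     if result is None:
         result = self.geocoder_from_geopy.geocode(query)
         self.cache.set(query, result)
     return result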
Example #30
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This object detection model is implemented as a single shot detector
        # (ssd). Since the number of labels is small, we create a dictionary
        # that maps the machine labels to human readable labels.
        model_name = "deploy_model_algo_1"
        model_type = 'ssd'
        input_width = 300
        input_height = 300
        max_threshold = 0.1
        # output_map = {0:'construction', 1:'crowd',2:'pothole',3:'person-bike',4:'person-pet', 5:'baby-strolls',6:'traffic-lights',7:'car', 8:'pedestrians'}
        output_map = {
            1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorbike',
            5: 'aeroplane', 6: 'bus', 7: 'train', 8: 'truck', 9: 'boat',
            10: 'traffic light', 11: 'fire hydrant', 12: 'stop sign',
            13: 'parking meter', 14: 'bench', 15: 'bird', 16: 'cat',
            17: 'dog', 18: 'horse', 19: 'sheep', 20: 'cow', 21: 'elephant',
            22: 'bear', 23: 'zebra', 24: 'giraffe', 25: 'backpack',
            26: 'umbrella', 27: 'handbag', 28: 'tie', 29: 'suitcase',
            30: 'frisbee', 31: 'skis', 32: 'snowboard', 33: 'sports ball',
            34: 'kite', 35: 'baseball bat', 36: 'baseball glove',
            37: 'skateboard', 38: 'surfboard', 39: 'tennis racket',
            40: 'bottle', 41: 'wine glass', 42: 'cup', 43: 'fork',
            44: 'knife', 45: 'spoon', 46: 'bowl', 47: 'banana', 48: 'apple',
            49: 'sandwich', 50: 'orange', 51: 'broccoli', 52: 'carrot',
            53: 'hot dog', 54: 'pizza', 55: 'donut', 56: 'cake', 57: 'chair',
            58: 'sofa', 59: 'pottedplant', 60: 'bed', 61: 'diningtable',
            62: 'toilet', 63: 'tvmonitor', 64: 'laptop', 65: 'mouse',
            66: 'remote', 67: 'keyboard', 68: 'cell phone', 69: 'microwave',
            70: 'oven', 71: 'toaster', 72: 'sink', 73: 'refrigerator',
            74: 'book', 75: 'clock', 76: 'vase', 77: 'scissors',
            78: 'teddy bear', 79: 'hair drier', 80: 'toothbrush'}
        #output_map = {0:'construction', 1:'crowd',2:'pothole',3:'person-bike',4:'person-pet', 5:'baby-strolls',6:'car',7:'pedestrians', 8:'person_bike', 9:'home',4:'person_pet',5:'baby_strolls'}

        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        #iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
        iot_topic = 'smartcycle/object-detection'
        # Create a local display instance that will dump the image bytes to a
        # FIFO file so that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = '/home/aws_cam/aws-smartcycle/object-detection/models/mxnet_deploy_model_algo_1_FP32_FUSED.xml'
        #error, model_path = mo.optimize(model_name, input_width, input_height , aux_inputs={'--epoch':0})
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload='Loading object detection model: {0}'.format(model_path))
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iot_topic, payload='Object detection model loaded')
        # Set the threshold for detection
        #detection_threshold = 0.12
        detection_threshold = 0.30
        # The height and width of the training set images
        # input_height = 300
        # input_width = 300
        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the image through the inference engine and parse the results
            # using the parser API. Note that it is possible to take the output
            # of doInference and do the parsing manually, but since this is an
            # ssd model, a simple API is provided.
            parsed_inference_results = model.parseResult(model_type,
                                                         model.doInference(frame_resize))
            #client.publish(topic=iot_topic, payload = str(parsed_inference_results))
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]/input_height)
            xscale = float(frame.shape[1]/input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}
            topk = 30
            #client.publish(topic=iot_topic, payload = str(parsed_inference_results[model_type][0:topk]))
            # Sample objects - demo purpose only
            req_list =  [2,3,4,6,8]
            # Get the detected objects and probabilities
            for obj in parsed_inference_results[model_type]:
                if obj['label'] not in req_list:
                    continue
                if obj['prob'] > detection_threshold:
                    #client.publish(topic=iot_topic, payload = str(obj['prob']))
                    #client.publish(topic=iot_topic, payload = str(cloud_output))
                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj['xmin']) \
                           + int((obj['xmin'] - input_width/2) + input_width/2)
                    ymin = int(yscale * obj['ymin'])
                    xmax = int(xscale * obj['xmax']) \
                           + int((obj['xmax'] - input_width/2) + input_width/2)
                    ymax = int(yscale * obj['ymax'])
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 10)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 15
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.putText method.
                    # Method signature: image, text, origin, font face, font scale, color,
                    # and thickness.
                    cv2.putText(frame, "{}: {:.2f}%".format(output_map[obj['label']],
                                                            obj['prob'] * 100),
                                (xmin, ymin-text_offset),
                                cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 165, 20), 6)
                    # Store label and probability to send to cloud
                    cloud_output[output_map[obj['label']]] = obj['prob']

            #START SENSOR METRICS DISPLAY
            #Transparent rectangle overlays
            overlay = frame.copy()
            info_rect_color = (13,13,13)

            #top left rectangle
            cv2.rectangle(overlay, (0,0), (800,200), info_rect_color, -1)

            #bottom left rectangle
            cv2.rectangle(overlay, (0,frame.shape[0]-200), (900,frame.shape[0]), info_rect_color, -1)

            #top right rectangle
            cv2.rectangle(overlay, (frame.shape[1]-800,0), (frame.shape[1],200), info_rect_color, -1)

            #bottom right rectangle
            cv2.rectangle(overlay, (frame.shape[1]-900,frame.shape[0]-200), (frame.shape[1],frame.shape[0]), info_rect_color, -1)

            alpha = 0.60
            beta = 0.40

            cv2.addWeighted(overlay, alpha, frame, beta, 0, frame)

            cache = Cache('/home/aws_cam/aws-smartcycle/db')
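            # Note: the Cache above is reopened on every frame; opening it
            # once before the while loop would avoid repeated SQLite opens.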
            heartrate = cache[b'heartrate'] or '--'
            speed = cache[b'speed'] or '--'
            cadence = cache[b'cadence'] or '--'
            temperature = cache[b'temperature'] or '--'

            normal_font_color = (0,255,0)
            normal_font = cv2.FONT_HERSHEY_COMPLEX
            normal_font_scale = 3

            topleftcoord = (50, 125)
            bottomleftcoord = (50, frame.shape[0]-95)
            toprightcoord = (frame.shape[1]-750, 125)
            bottomrightcoord = (frame.shape[1]-750, frame.shape[0]-95)

            cv2.putText(frame, "{}: {}".format('HEARTRATE', heartrate), bottomleftcoord, normal_font, normal_font_scale, normal_font_color,6)
            cv2.putText(frame, "{}: {}".format('SPEED', speed), topleftcoord, normal_font, normal_font_scale, normal_font_color,6)
            cv2.putText(frame, "{}: {}".format('CADENCE', cadence), toprightcoord, normal_font, normal_font_scale, normal_font_color,6)
            cv2.putText(frame, "{}: {}F".format('TEMP', int(temperature)), bottomrightcoord, normal_font, normal_font_scale, normal_font_color,6)
            #cv2.putText(frame, "{}: {}".format(frame.shape[1], frame.shape[0]), (1250, 700), cv2.FONT_HERSHEY_SIMPLEX, 4.5, (66,144,161),6)

            # Set the next frame in the local display stream.
            # getall = ast.literal_eval(cloud_output)
            local_display.set_frame_data(frame)
            # Send results to the cloud
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        client.publish(topic=iot_topic, payload='Error in object detection lambda: {}'.format(ex))