def test_install_aliases(self):
        """
        Does the install_aliases() interface monkey-patch urllib etc. successfully?
        """
        from future.standard_library import remove_hooks, install_aliases
        remove_hooks()
        install_aliases()

        from collections import Counter, OrderedDict   # backported to Py2.6
        from collections import UserDict, UserList, UserString

        # Requires Python dbm support:
        # import dbm
        # import dbm.dumb
        # import dbm.gnu
        # import dbm.ndbm

        from itertools import filterfalse, zip_longest

        from subprocess import check_output    # backported to Py2.6
        from subprocess import getoutput, getstatusoutput

        from sys import intern

        # test_support may not be available (e.g. on Anaconda Py2.6):
        # import test.support

        import urllib.error
        import urllib.parse
        import urllib.request
        import urllib.response
        import urllib.robotparser

        self.assertTrue('urlopen' in dir(urllib.request))
Example #2
def _colormap_plot_similarity(cmaps):
    """
    Plot for illustrating colormaps: similarity matrix.

    :param cmaps: list of :class:`~matplotlib.colors.Colormap`
    :rtype: None
    """
    import matplotlib.pyplot as plt
    import numpy as np   # np is used below but was missing from the original imports
    from future import standard_library
    standard_library.install_aliases()
    import io
    from urllib.request import urlopen

    url = "https://examples.obspy.org/dissimilarities.npz"
    with io.BytesIO(urlopen(url).read()) as fh, np.load(fh) as data:
        dissimilarity = data['dissimilarity']

    for cmap in cmaps:
        plt.figure(figsize=(6, 5))
        plt.subplot(1, 1, 1)
        plt.imshow(1 - dissimilarity, interpolation='nearest', cmap=cmap)
        plt.xlabel("Event number")
        plt.ylabel("Event number")
        cb = plt.colorbar()
        cb.set_label("Similarity")
    plt.show()
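
A hedged usage sketch (the colormap choices here are illustrative, not from the original source):

# example call; any list of matplotlib Colormap instances works
import matplotlib.pyplot as plt
_colormap_plot_similarity([plt.cm.viridis, plt.cm.RdBu])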
Example #3
def test_urllib_imports_install_aliases(self):
    with standard_library.suspend_hooks():
        standard_library.install_aliases()
        import urllib
        import urllib.parse
        import urllib.request
        import urllib.robotparser
        import urllib.error
        import urllib.response
        self.assertTrue(True)
Example #4
    def test_issue_158(self):
        """
        CherryPy conditional import in _cpcompat.py: issue 158
        """
        install_aliases()
        try:
            from urllib.parse import unquote as parse_unquote

            def unquote_qs(atom, encoding, errors='strict'):
                return parse_unquote(
                    atom.replace('+', ' '),
                    encoding=encoding,
                    errors=errors)
        except ImportError:
            from urllib import unquote as parse_unquote

            def unquote_qs(atom, encoding, errors='strict'):
                return parse_unquote(atom.replace('+', ' ')).decode(encoding, errors)
        self.assertEqual(unquote_qs('/%7Econnolly/', 'utf-8'),
                         '/~connolly/')
Example #5
def download_file(url, save_dir='./'):
    """Download a file from `url` saving it to disk.

    The file name is taken from `url` and left unchanged.
    The destination dir can be set using `save_dir`
    (Default: the current dir).
    """
    ## Check if the local path already exists
    fname = url.split('/')[-1]
    print('URL:  %s' % url)
    print('File: %s\n ' % fname)

    path = '/'.join([os.path.abspath(save_dir), fname])
    if os.path.exists(path):
        print('File already on disk: %s \nDelete it to re-download.' % path)
        return

    from future.standard_library import install_aliases
    install_aliases()
    from urllib.request import urlopen, urlretrieve
    from urllib.error import HTTPError, URLError

    ## Check if the URL is valid.
    ## HTTPError is a subclass of URLError, so it must be caught first;
    ## in either case there is nothing to download, so return.
    try:
        urlopen(url)
    except HTTPError:
        print('URL not found: ' + url)
        return
    except URLError as e:
        print('Wrong URL or no connection.\n\nError:\n%s\n' % e)
        return

    ## Download the file
    def _report(blocknr, blocksize, size):
        current = blocknr*blocksize/2**20
        sys.stdout.write(
            "\rDownloaded {0:4.1f} / {1:4.1f} MB".format(current, size/2**20))
    mkdir_p(save_dir)
    urlretrieve(url, path, _report)
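
A hedged usage sketch (the URL is borrowed from Example #2 above; `mkdir_p`, `os`, and `sys` are assumed to be imported or defined at module level in the original source):

# example call; skips the download if the file is already in ./data
download_file('https://examples.obspy.org/dissimilarities.npz', save_dir='./data')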
Example #6
from future import standard_library
standard_library.install_aliases()  # NOQA

from builtins import str
from builtins import object
import base64
import hashlib
import hmac
try:
    from http.client import HTTP_PORT
    from http.client import HTTPS_PORT
except ImportError:
    from httplib import HTTP_PORT
    from httplib import HTTPS_PORT
import logging
import os
import re
import time
import urllib.parse


logger = logging.getLogger(__name__)


def generate_ext(content_type, body):
    """Returns an `ext` value as described in
    `<http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-02#section-3.1>`_.

    :param content_type: The content type of the request, e.g. application/json.
    :param body: The request body as a byte or Unicode string.
    """
Example #7
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Example that does inference with an LSTM network for Amazon review analysis

$ python examples/imdb/inference.py --model_weights imdb.p --vocab_file imdb.vocab

"""

from __future__ import print_function
from future import standard_library
standard_library.install_aliases()  # triggers E402, hence noqa below
from builtins import input  # noqa
import numpy as np  # noqa
from neon.backends import gen_backend  # noqa
from neon.initializers import Uniform, GlorotUniform  # noqa
from neon.layers import LSTM, Affine, Dropout, LookupTable, RecurrentSum  # noqa
from neon.models import Model  # noqa
from neon.transforms import Logistic, Tanh, Softmax  # noqa
from neon.util.argparser import NeonArgparser, extract_valid_args  # noqa
from neon.util.compat import pickle  # noqa
from neon.data.text_preprocessing import clean_string  # noqa

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument(
    '--model_weights', required=True, help='pickle file of trained weights')
Example #8
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program.  If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
from future import standard_library
standard_library.install_aliases()  # noqa: E402
import unittest
import pickle

import numpy as np

import lsst.utils.tests
import lsst.afw.geom as afwGeom
import lsst.afw.image as afwImage
import lsst.afw.table as afwTable
import lsst.afw.cameraGeom as cameraGeom
from lsst.afw.geom.testUtils import BoxGrid
from lsst.afw.image.testUtils import makeRampImage
from lsst.ip.isr import applyLookupTable, LinearizeLookupTable
from lsst.log import Log
Example #9
from __future__ import print_function, unicode_literals, division, absolute_import
from future import standard_library
standard_library.install_aliases()  # noqa: E402
from builtins import *  # noqa
from past.builtins import basestring  # noqa

from seaborn import plt
from mpl_toolkits.mplot3d import Axes3D  # noqa

import pandas as pd
np = pd.np

h = pd.read_csv('pointcloud.csv.gz', header=None).values[:, :3]
h = pd.DataFrame(h, columns='x y z'.split())
h = h.sample(1000)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(h.x, h.y, h.z, zdir='z', s=20, c=None, depthshade=True)
plt.show()
Example #10
# coding=utf-8

from __future__ import absolute_import, division, print_function
from future import standard_library; standard_library.install_aliases()

from os.path import join, dirname, realpath
from itertools import groupby
from operator import itemgetter
from urllib.parse import urlparse
from csv import DictReader
from time import time
import json, sys, os

from requests import get
from psycopg2 import connect
from psycopg2.extras import DictCursor
from flask import Flask, request, render_template, jsonify

with open(join(dirname(__file__), 'VERSION')) as file:
    __version__ = file.read().strip()

PROJECTS_FILE = realpath(join(dirname(__file__), '..', 'projects.json'))
STATUSES_FILE = realpath(join(dirname(__file__), '..', 'statuses.csv'))

app = Flask(__name__)

@app.route('/')
def index():
    with open(PROJECTS_FILE) as file:
        projects = json.load(file)
Example #11
def allActions(userFile):
    # main processing actions
    unique = []
    tagged = []
    vectors = []
    if not TEST:
        standard_library.install_aliases()
        udpipe_model_url = 'http://rusvectores.org/static/models/udpipe_syntagrus.model'
        udpipe_filename = udpipe_model_url.split('/')[-1]
        if not os.path.isfile(udpipe_filename):
            print('UDPipe model not found. Downloading...', file=sys.stderr)
            wget.download(udpipe_model_url)
        print('Loading the model...', file=sys.stderr)
        udpipe_model = Model.load(udpipe_filename)
        process_pipeline = Pipeline(udpipe_model, 'tokenize', Pipeline.DEFAULT,
                                    Pipeline.DEFAULT, 'conllu')
        nltk.download('stopwords')
        model = gensim.models.KeyedVectors.load_word2vec_format(
            'ruwikiruscorpora-superbigrams_skipgram_300_2_2018.vec.gz',
            binary=False)
        model.init_sims(replace=True)
        file = open('data_full.csv', "r")
        content = file.readlines()
        file.close()
        temp = ""
        for item in content:
            temp += item.replace('""', "'").replace("\n", "")
        items = temp.split('""')
        items[0] = items[0].replace('"', '')
        items[-1] = items[-1].replace('"', '')
        unique = []
        unique.append(items[0])
        for item in items:
            if item != unique[-1]:
                unique.append(item)
        asd = 0
        print(len(unique))
        tagged = []
        vectors = []
        pos = True
        for item in unique:
            asd += 1
            print(asd)
            clean = ''
            for word in item.split(' '):
                if word not in stopwords.words('russian'):
                    clean += word + ' '
            processed = process_pipeline.process(clean)
            content = [
                l for l in processed.split('\n') if not l.startswith('#')
            ]
            tagged_ = [
                w.split('\t')[2].lower() + '_' + w.split('\t')[3]
                for w in content if w
            ]
            tagged_propn = []
            propn = []
            for t in tagged_:
                if t.endswith('PROPN'):
                    if propn:
                        propn.append(t)
                    else:
                        propn = [t]
                elif t.endswith('PUNCT'):
                    propn = []
                    continue
                else:
                    if len(propn) > 1:
                        name = '::'.join([x.split('_')[0]
                                          for x in propn]) + '_PROPN'
                        tagged_propn.append(name)
                    elif len(propn) == 1:
                        tagged_propn.append(propn[0])
                    tagged_propn.append(t)
                    propn = []
            if not pos:
                tagged_propn = [t.split('_')[0] for t in tagged_propn]
            tagged.append(tagged_propn)

            item_vec = [0] * 300
            for word in tagged_propn:
                if word in model:
                    for i in range(len(model.wv[word])):
                        item_vec[i] += model.wv[word][i]
            vectors.append(item_vec)
        file = open("unique.json", "w")
        file.write(
            json.dumps(unique,
                       sort_keys=False,
                       indent=4,
                       separators=(',', ': '),
                       ensure_ascii=False))
        file.close()
        file = open("tagged.json", "w")
        file.write(
            json.dumps(tagged,
                       sort_keys=False,
                       indent=4,
                       separators=(',', ': '),
                       ensure_ascii=False))
        file.close()
        file = open("vectors.json", "w")
        file.write(
            json.dumps(vectors,
                       sort_keys=False,
                       indent=4,
                       separators=(',', ': '),
                       ensure_ascii=False))
        file.close()
    else:
        with open('unique.json') as file:
            unique = json.load(file)
        with open('tagged.json') as file:
            tagged = json.load(file)
        with open('vectors.json') as file:
            vectors = json.load(file)

    # TSNE_PLOT FUNCTION
    # Creates a TSNE model and plots it
    labels = []
    tokens = []
    for i in range(len(unique)):
        labels.append(i)
    for vec in vectors:
        tokens.append(vec)
    tsne_model = TSNE(n_components=2)
    from sklearn.cluster import KMeans
    X = tsne_model.fit_transform(tokens)
    kmeans = KMeans(n_clusters=20)
    kmeans.fit(X)
    ###########################
    #print(X)

    clusters = kmeans.labels_.tolist()
    prepared = []
    for i in range(len(unique)):
        item = {'item': unique[i], 'cluster': clusters[i]}
        prepared.append(item)
    file = open("prepared.json", "w")
    file.write(
        json.dumps(prepared,
                   sort_keys=False,
                   indent=4,
                   separators=(',', ': '),
                   ensure_ascii=False))
    file.close()

    return json.dumps([X.tolist(), kmeans.labels_.tolist(), 20, unique])

    ###########################
    """plt.figure(figsize=(7, 7)) 
Example #12
from future.standard_library import install_aliases  # isort:skip

install_aliases()  # isort:skip

import click
import requests
from graphql import graphql
from graphql.execution.executors.gevent import GeventExecutor
from graphql.execution.executors.sync import SyncExecutor

from dagster import check, seven
from dagster.cli.load_handle import recon_repo_for_cli_args
from dagster.cli.pipeline import repository_target_argument
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.instance import DagsterInstance
from dagster.seven import urljoin, urlparse
from dagster.utils import DEFAULT_REPOSITORY_YAML_FILENAME
from dagster.utils.log import get_stack_trace_array

from .client.query import (
    EXECUTE_PLAN_MUTATION,
    LAUNCH_PIPELINE_EXECUTION_MUTATION,
    START_PIPELINE_EXECUTION_FOR_CREATED_RUN_MUTATION,
    START_PIPELINE_EXECUTION_MUTATION,
    START_SCHEDULED_EXECUTION_MUTATION,
)
from .implementation.context import DagsterGraphQLInProcessRepositoryContext
from .implementation.pipeline_execution_manager import SubprocessExecutionManager
from .schema import create_schema
from .version import __version__
Example #13
# Python 2 and 3: easiest option
# see http://python-future.org/compatible_idioms.html
from future.standard_library import install_aliases  # isort:skip to keep 'install_aliases()'
install_aliases()

import codecs
import json
import logging
import os.path
import sys
import errno
from urllib.parse import quote, unquote, urljoin, urlparse
import ssl

import requests
from bs4 import BeautifulSoup
from sortedcontainers import SortedDict

logging.basicConfig()
log = logging.getLogger(sys.argv[0] if __name__ == '__main__' else __name__)
logging.captureWarnings(
    True
)  # see https://urllib3.readthedocs.org/en/latest/security.html#disabling-warnings


class DownloadError(Exception):
    """content could not be downloaded as requested."""
    pass


class CacheNotFoundError(DownloadError):
Example #14
from future.standard_library import install_aliases
install_aliases()   # noqa

from urllib.parse import urljoin
import yaml

from .exceptions import InvalidObject
from .model import Model
from .util import slugify


class Page(Model):
    """
    Representation of a page
    """
    @classmethod
    def from_yaml(cls, file_contents, config=None):
        """
        Return a Page from the given file_contents
        """
        parts = file_contents.split("---\n")
        if not parts[0]:
            # nothing before the first ---
            parts.pop(0)

        meta_yaml, body = parts
        payload = yaml.safe_load(meta_yaml)
        payload['body'] = body.strip()

        return cls(payload, config=config)
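
A hedged usage sketch (the front-matter text is invented; Model's constructor behavior is assumed):

# hypothetical page file: YAML front matter between "---" lines, body after
contents = "---\ntitle: Home\nslug: home\n---\nWelcome to the site.\n"
page = Page.from_yaml(contents)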
Example #15
import os
import sys
import csv
import datetime
import time
from threading import Thread

from future.standard_library import install_aliases; install_aliases() # for py 2 and 3 compat w/urllib
import urllib

import logging
log = logging.getLogger(__file__)


class Csv(object):
    csv_uri = None
    file = None
    reader = None
    raw_data = []
    last_update = None
    last_check = None
    delimiter = ','

    def __init__(self, uri, path=None, delimiter=','):
        log.info('Reading CSV {0}'.format(uri))
        self.delimiter = delimiter
        self.assign_uri(uri, path)

    def assign_uri(self, uri, path=None):
        """
        """
Example #16
#!/usr/bin/python
# -*- coding: utf-8 -*-

from future.standard_library import install_aliases
install_aliases()

import datetime
import sys
import time
import urllib.error
import urllib.parse
import urllib.request

import CommonFunctions as common
import simplejson as json
try:
   import StorageServer
except ImportError:
   import storageserverdummy as StorageServer
import xbmcvfs

from resources.lib.helpers import *
from .base import *
from .Scraper import *

class htmlScraper(Scraper):

    __urlBase       = 'http://tvthek.orf.at'
    __urlLive       = __urlBase + '/live'
    __urlMostViewed = __urlBase + '/most-viewed'
    __urlNewest     = __urlBase + '/newest'
Example #17
The flow is described in detail in the google developer docs:
https://developers.google.com/identity/protocols/OAuth2InstalledApp

We use OAuth to fetch a token which identifies the user by google account.
This token is then passed to the manager, and the manager in turn calls google
APIs to validate the token and extract the user's email address. It then checks
to see that this email address has been registered as a labrad user.
"""

from __future__ import absolute_import
from __future__ import print_function

from builtins import input
from future.standard_library import install_aliases
install_aliases()  # for http.server and urllib.parse

import http.server
import json
import logging
import os
import threading
import time
import urllib.parse
import webbrowser
from concurrent import futures

import requests


log = logging.getLogger(__name__)
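
The manager-side validation described in the docstring above can be sketched as follows (a hedged sketch: the tokeninfo endpoint is Google's documented OAuth2 API, but the function name and wiring are invented):

def _email_for_token(token):
    # validate the token with Google and extract the account email
    resp = requests.get('https://www.googleapis.com/oauth2/v3/tokeninfo',
                        params={'id_token': token})
    resp.raise_for_status()
    return resp.json()['email']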
Example #18
def setUp(self):
    self.interpreter = sys.executable
    standard_library.install_aliases()
    super(TestChainMap, self).setUp()
Example #19
from __future__ import division
from __future__ import print_function

from future import standard_library

standard_library.install_aliases()
from builtins import object
from builtins import range
from builtins import input
from builtins import next
from builtins import str
import sys
import os
import getpass
import csv
import re
import time
from urllib.request import urlretrieve
from weaver.lib.tools import open_fr, open_fw, open_csvw
from weaver.lib.warning import Warning


class Engine(object):
    """A generic database system. Specific database platforms will inherit
    from this class."""

    name = ""
    instructions = "Enter your database connection information:"
    db = None
    table = None
    _connection = None
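
A hedged sketch of how a concrete engine would inherit from Engine (the subclass and its attributes are invented for illustration):

class SQLiteEngine(Engine):
    # platform-specific engines override the class-level metadata
    name = "SQLite"
    instructions = "Enter the path to your SQLite database file:"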
Example #20
"""Test the Product build process using a mock documentation.

See mock_manifest.yaml; the doc repo is github.com/lsst-sqre/mock-doc and
the packages are embedded in this repo's test_data/ directory.
"""

from __future__ import (division, absolute_import, print_function,
                        unicode_literals)
from builtins import *  # NOQA
from future.standard_library import install_aliases
install_aliases()  # NOQA

import os
import tempfile
import shutil

import pkg_resources
import pytest
import sh
import ruamel.yaml

from ltdmason.product import Product
from ltdmason.manifest import Manifest


@pytest.fixture(scope='session')
def mock_manifest():
    """Provide the mock_manifest.yaml test file as a string."""
    resource_args = (__name__, 'mock_manifest.yaml')
    assert pkg_resources.resource_exists(*resource_args)
    yaml_str = pkg_resources.resource_string(*resource_args)
Example #21
def setUp(self):
    self.interpreter = sys.executable
    standard_library.install_aliases()
    super(TestStandardLibraryReorganization, self).setUp()
Example #22
# ActivitySim
# See full license in LICENSE.txt.

from __future__ import (absolute_import, division, print_function, )
from future.standard_library import install_aliases
install_aliases()  # noqa: E402

import os
import pytest

from activitysim.core import inject


# The following import statement has the side-effect of registering injectables:
from .. import __init__


def test_misc():

    inject.clear_cache()

    with pytest.raises(RuntimeError) as excinfo:
        inject.get_injectable("configs_dir")
    assert "directory does not exist" in str(excinfo.value)

    with pytest.raises(RuntimeError) as excinfo:
        inject.get_injectable("data_dir")
    assert "directory does not exist" in str(excinfo.value)

    with pytest.raises(RuntimeError) as excinfo:
        inject.get_injectable("output_dir")
"""This script is changed from chainerrl examples. 
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import *  # NOQA
from future import standard_library
standard_library.install_aliases()  # NOQA
import argparse
import os
import sys

# This prevents numpy from using multiple threads
os.environ['OMP_NUM_THREADS'] = '1'  # NOQA

import chainer
from chainer import functions as F
from chainer.initializers import LeCunNormal
from chainer import links as L
import gym
from gym import spaces
import numpy as np

import chainerrl
from chainerrl.action_value import DiscreteActionValue
from chainerrl.agents import acer
from chainerrl.distribution import SoftmaxDistribution
from chokozainerrl import experiments
from chainerrl import links
from chainerrl import misc
Example #24
from future import standard_library
standard_library.install_aliases()
from builtins import object
from django.conf import settings
from .cache_retry import json_loads_url
import urllib.parse


class RegistryManager(object):
    def init_registry_data(self, path):
        base = settings.UCLDC_REGISTRY_URL
        page_one = json_loads_url(urllib.parse.urljoin(base, path))
        out = dict((x['id'], x) for x in page_one['objects'])
        next_path = page_one['meta']['next']
        while next_path:
            next_page = json_loads_url(urllib.parse.urljoin(base, next_path))
            out.update(dict((x['id'], x) for x in next_page['objects']))
            next_path = next_page['meta']['next']

        return out

    def __init__(self):
        repository_path = '/api/v1/repository/?format=json'
        # collection_path = '/api/v1/collection/?format=json'
        self.repository_data = self.init_registry_data(repository_path)
        # self.collection_data = self.init_registry_data(collection_path)
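
A hedged usage sketch (requires settings.UCLDC_REGISTRY_URL to point at a live registry; names are taken from the class above):

manager = RegistryManager()
repo_ids = list(manager.repository_data)  # dict keyed by repository id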


"""
Copyright (c) 2015, Regents of the University of California
All rights reserved.
Example #25
# ActivitySim
# See full license in LICENSE.txt.
from __future__ import (absolute_import, division, print_function)
from future.standard_library import install_aliases
install_aliases()  # noqa: E402

import logging
import itertools
import os

import numpy as np
import pandas as pd
from zbox import toolz as tz, gen

from activitysim.core import simulate
from activitysim.core import pipeline

from activitysim.core import chunk
from activitysim.core import logit
from activitysim.core import tracing
from activitysim.core import inject
from activitysim.core import config

logger = logging.getLogger(__name__)

# FIXME - this allows us to turn some dev debug table dump code on and off - eventually remove?