Example #1
 def test_run_tests_with_coverage_errors(self, exiter, check_call):
     mock_args = MagicMock(name="args")
     mock_args.userland_here = ffs.Path('.')
     mock_args.coverage = True
     mock_args.test = None
     mock_args.failfast = False
     check_call.side_effect = [
         None, subprocess.CalledProcessError(None, None)
     ]
     test_runner._run_py_tests(mock_args)
     self.assertEqual(2, check_call.call_count)
     exiter.assert_called_once_with(1)
Example #2
    def test_run_tests_for_app_with_test(self, has_file, check_call):
        mock_args = MagicMock(name="args")
        mock_args.userland_here = ffs.Path('.')
        mock_args.coverage = False
        mock_args.failfast = False
        mock_args.test = 'foo.tests.bar'

        has_file.side_effect = lambda a, b: b == 'manage.py'

        test_runner._run_py_tests(mock_args)
        check_call.assert_called_with(
            ['python', 'manage.py', 'test', 'foo.tests.bar'])
Example #3
    def test_run_tests_for_unknown_config(self, sysexit, writer, has_file):
        mock_args = MagicMock(name="args")
        mock_args.userland_here = ffs.Path('.')
        mock_args.coverage = False
        mock_args.test = None
        mock_args.failfast = False

        has_file.return_value = False

        test_runner._run_py_tests(mock_args)
        writer.assert_any_call("\n\nCripes!\n")
        sysexit.assert_called_with(1)
Example #4
 def test_run_tests_failfast(self, check_call):
     mock_args = MagicMock(name="args")
     mock_args.userland_here = ffs.Path('.')
     mock_args.coverage = False
     mock_args.test = None
     mock_args.failfast = True
     test_runner.TRAVIS = False
     test_runner._run_js_tests(mock_args)
     self.assertEqual([
         'karma', 'start', 'config/karma.conf.js', '--single-run',
         '--failfast'
     ], check_call.call_args[0][0])
Example #5
    def handle(self, **options):
        "IDjangoManagementCommand Entrypoint!"
        assert options['filename']
        csvfile = ffs.Path(options['filename'])
        with csvfile.csv(header=True) as csv:
            for expiry in csv:
                drugs = Product.objects.filter(name=expiry.name)

                for drug in drugs:
                    expiry_date = datetime.datetime.strptime(
                        expiry.expiry, '%d/%m/%Y')
                    patent = Patent.objects.get_or_create(
                        drug=drug, expiry_date=expiry_date)[0]
                    patent.save()
Example #6
    def test_run_tests_with_coverage(self, check_call):
        mock_args = MagicMock(name="args")
        mock_args.userland_here = ffs.Path('.')
        mock_args.coverage = True
        mock_args.test = None
        test_runner._run_py_tests(mock_args)
        calls = [
            call(['coverage', 'run', 'runtests.py']),
            call(['coverage', 'html'])
        ]

        check_call.assert_has_calls(calls)
Example #7
 def test_boolean_render(self, build_field_schema, lshift):
     build_field_schema.return_value = {
         'lookup_list': None,
         'model': 'Colour',
         'name': 'name',
         'title': 'Name',
         'type': 'boolean'
     },
     scaffold_path = ffs.Path(settings.PROJECT_PATH) / 'scaffolding'
     create_form_template_for(Colour, scaffold_path)
     lshift.assert_called_once_with(
         '{% load forms %}\n{% checkbox  field="Colour.name"  %}')
Example #8
 def test_null_boolean_render(self, build_field_schema, lshift):
     build_field_schema.return_value = {
         'lookup_list': None,
         'model': 'Colour',
         'name': 'name',
         'title': 'Name',
         'type': 'null_boolean'
     },
     scaffold_path = ffs.Path(settings.PROJECT_PATH) / 'scaffolding'
     create_display_template_for(Colour, scaffold_path)
     lshift.assert_called_once_with(
         '<span ng-show="item.name">\n     Name\n   <br />\n</span>')
Example #9
def scrape(workspace):
    result = []
    print "scrape"
    directory = ffs.Path(workspace) / 'indicators_raw'
    directory.mkdir()
    filename = directory / 'indicators.json'
    print "Fetching indicators"
    for i in range(1, 1699):
        indicator = get_indicator(i, directory)
        if indicator:
            result.append(indicator)
    json.dump(result, open(filename, 'wb'), indent=2)
    logging.info('Written results to {}'.format(filename))
Example #10
def main(workspace):
    DATA_DIR = ffs.Path(workspace) / 'data'
    DATA_DIR.mkdir()

    dc.ensure_publisher('gp-survey')
    dc.ensure_group('surveys')

    def year_as_key(x):
        return x['title'][-4:]

    datasets = json.load(open(os.path.join(DATA_DIR, "metadata.json"), "r"))
    datasets = sorted(datasets, key=year_as_key)
    for dataset in datasets:
        load_statistic(dataset, DATA_DIR)
        groups(dataset)
Example #11
    def test_run_tests_travis(self, check_call):
        mock_args = MagicMock(name="args")
        mock_args.userland_here = ffs.Path('.')
        mock_args.coverage = False
        mock_args.test = None
        test_runner.TRAVIS = True
        test_runner._run_js_tests(mock_args)
        self.assertEqual([
            './node_modules/karma/bin/karma', 'start', 'config/karma.conf.js',
            '--single-run'
        ], check_call.call_args[0][0])
        self.assertIn("OPAL_LOCATION", check_call.call_args[1]["env"])

        self.assertTrue(
            isinstance(check_call.call_args[1]["env"]["OPAL_LOCATION"], str))
Example #12
def main(workspace):
    data_dir = ffs.Path(workspace) / 'data'
    data_dir.mkdir()

    page = get_dom(ROOT)

    datasets = []
    datasets.append(latest(page))
    datasets.extend(history(page))

    datasets = filter(lambda x: x is not None, datasets)
    datasets.sort()

    print "Processed {} datasets".format(len(datasets))
    json.dump(datasets, open(data_dir / 'metadata.json', 'w'))
Example #13
    def test_run_tests_for_app_with_coverage(self, has_file, check_call):
        mock_args = MagicMock(name="args")
        mock_args.userland_here = ffs.Path('.')
        mock_args.coverage = True
        mock_args.test = None

        has_file.side_effect = lambda a, b: b == 'manage.py'

        test_runner._run_py_tests(mock_args)
        calls = [
            call(['coverage', 'run', 'manage.py', 'test']),
            call(['coverage', 'html'])
        ]

        check_call.assert_has_calls(calls)
Example #14
def main(workspace):
    datasets = []
    folder = ffs.Path(workspace) / 'data'
    folder.mkdir()

    html = requests.get("https://gp-patient.co.uk/surveys-and-reports").content
    page = fromstring(html)

    divs = page.cssselect('div h2')[1:]
    for h2 in divs:
        title = h2.text_content().strip()
        if title.startswith('Latest'):
            title = title.replace('Latest survey and reports', '').strip()
        datasets.append(process_div(title, h2))

    json.dump(datasets, open(folder / 'metadata.json', 'w'))
Example #15
def main(workspace):
    global DATA_DIR
    DATA_DIR = ffs.Path(workspace) / 'data'
    DATA_DIR.mkdir()

    datasets = json.load(
        get_resource_file(DATA_DIR / 'nhsof_metadata_indicators.json'))
    print "Ensuring publisher"
    dc.ensure_publisher('hscic')
    print "Ensuring group"
    dc.ensure_group('nhsof')
    wrote = load_nhsof(datasets)
    if wrote:
        group_nhsof(datasets)
    else:
        print "Created/processed no datasets ..."
Example #16
    def test_makes_forms_dir_if_does_not_exist(self, build_field_schema, mkdir,
                                               nonzero, booler, lshift):
        build_field_schema.return_value = {
            'lookup_list': None,
            'model': 'Colour',
            'name': 'name',
            'title': 'Name',
            'type': 'null_boolean'
        },

        # We need both of these to make sure this works on both Python3 and Python2
        nonzero.return_value = False
        booler.return_value = False

        scaffold_path = ffs.Path(settings.PROJECT_PATH) / 'scaffolding'
        create_form_template_for(Colour, scaffold_path)
        mkdir.assert_called_once_with()
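The Python3/Python2 comment in Example #16 is worth unpacking: Python 2 decides truthiness via __nonzero__ while Python 3 uses __bool__, so the test patches both hooks (presumably on ffs.Path) to make the "directory does not exist" branch fire on either interpreter. A minimal illustrative sketch, not taken from opal:

class AlwaysFalse(object):
    def __nonzero__(self):    # truthiness hook consulted by Python 2
        return False
    __bool__ = __nonzero__    # truthiness hook consulted by Python 3

assert not AlwaysFalse()      # holds on both interpreters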
Example #17
File: test_scaffold.py Project: ow6n/opal
 def test_integration(self, get_template_dir):
     """
          A quick cover-all test that, um, doesn't cover everything.
          Apart from django migrations/makemigrations, can we confirm
          with a superficial test that no other APIs, internal or
          external, that we are using have changed?
     """
     tmp_dir = tempfile.mkdtemp()
     get_template_dir.return_value = ffs.Path(tmp_dir)
     scaffold.scaffold_subrecords('tests', migrations=False)
     self.assertTrue(
         os.path.isfile(os.path.join(tmp_dir, "records",
                                     "hat_wearer.html")))
     self.assertTrue(
         os.path.isfile(
             os.path.join(tmp_dir, "forms", "hat_wearer_form.html")))
Example #18
def changes(args):
    """
    Herein we look for changes in the proportion of prescribing habits for a subset of
    the UK Prescribing data.

    It is assumed that you have already extracted the subset of drugs you are interested
    in to a CSV file that follows the same format as the main data.

    Done that? Then we'll begin by grouping the prescriptions by practice, by period.
    Next, figure out what percentage of all statin prescriptions each item has for the period.
    Then, detect changes to the percentage.

    Then print a CSV of all swings of > 10%.
    """
    scrips = ffs.Path(args.filename)
    grouped = collections.defaultdict(
        lambda: collections.defaultdict(lambda: collections.defaultdict(dict)))
    with scrips.csv(header=True) as csv:
        for row in csv:
            if row.items == 'items':
                continue  # This is something pointless and stupid. Will find out what later
            grouped[row.practice.strip()][row.period.strip()][
                row.bnf_name.lower().strip()]['num'] = int(row.items)

    for practice in grouped:
        periods = list(sorted(grouped[practice].keys()))
        for i, period in enumerate(periods):
            scrips = grouped[practice][period]
            total = sum([s['num'] for s in scrips.values()])
            for name, value in scrips.items():
                per = int(float(value['num']) / total * 100)
                grouped[practice][period][name]['per'] = per

                if i > 0:
                    prev = grouped[practice][periods[i - 1]][name]
                    try:
                        change = per - prev['per']
                        grouped[practice][period][name]['change'] = change
                    except KeyError:
                        # The drug didn't appear in the previous period, so
                        # there is no baseline to diff against yet.
                        pass  # For now

    for practice, history in grouped.items():
        for month, scrips in history.items():
            for drug, stats in scrips.items():
                if 'change' in stats and abs(stats['change']) > 10:
                    print 'YAY', practice, month, drug, stats['change']
Example #19
def main(workspace):
    global DATA_DIR
    DATA_DIR = ffs.Path(workspace)

    page_count = get_page_count()
    print "There are {} pages".format(page_count)

    urls = []
    for p in xrange(1, page_count + 1):
        urls.extend(collect_urls(p))
    print "Found {} urls".format(len(urls))

    datasets = []
    for url in urls:
        datasets.append(scrape_page(url))

    with open(DATA_DIR / "metadata.json", 'w') as f:
        json.dump(datasets, f)
Example #20
def main(workspace):
    global DATA_DIR
    DATA_DIR = ffs.Path(workspace) / 'data'

    output_datasets = []
    for d in DATASETS:
        output_datasets.extend(process_dataset(d))

    metafile = DATA_DIR / "dataset.metadata.json"
    if metafile:
        metafile.truncate()
    metafile << json.dumps(output_datasets, indent=2)

    #for d in output_datasets:
    #    for r in d['resources']:
    #        hash = hashlib.sha224(r['url']).hexdigest()
    #        download_file(r['url'], os.path.join(DATA_DIR, hash))

    return 0
Example #21
    def handle(self, **options):
        "IDjangoManagementCommand Entrypoint!"
        assert options['filename']
        csvfile = ffs.Path(options['filename'])

        names = [d.name for d in Product.objects.all()]

        with csvfile.csv(header=True) as csv:
            for recc in csv:
                print recc
                drugs = [n for n in names if n in recc.title]
                if len(drugs) > 0:
                    print recc
                    print drugs
                    for d in drugs:
                        date = datetime.datetime.strptime(recc.date, '%Y-%m')
                        modeldrug = Product.objects.get(name=d)
                        # Use a different name so we don't clobber the csv row
                        # (recc) that later drugs in this list still need.
                        recommendation = Recommendation.objects.get_or_create(
                            drug=modeldrug,
                            date=date,
                            guideline=recc.title,
                            link=recc.link)[0]
                        recommendation.save()
Example #22
def main(workspace):
    DATA_DIR = ffs.Path(workspace)
    datasets = json.load(open(DATA_DIR / 'ccgois_indicators.json'))

    u = Uploader("ccgois")
    for dataset in datasets:
        resources = []
        for resource in dataset['sources']:
            resource['format'] = resource['filetype']
            resource['name'] = resource['url'].split('/')[-1]

            filename = filename_for_resource(resource)
            path = DATA_DIR / filename

            download_file(resource['url'], path)
            print "Uploading to S3"
            url = u.upload(path)
            resource['url'] = url
            resources.append(resource)
        dataset['resources'] = resources
    u.close()

    json.dump(datasets, open(DATA_DIR / 'ccgois_indicators.json', 'w'))
Example #23
def main(workspace):
    DATA_DIR = ffs.Path(workspace) / 'data'
    DATA_DIR.mkdir()

    datasets = json.load(open(os.path.join(DATA_DIR, "metadata.json"), 'r'))

    tag_list = ["Statistics"]
    u = Uploader("stats")

    for dataset in datasets:
        print "Processing", dataset['name']

        print "..adding tags"
        tags = dataset.get('tags', [])
        for t in tag_list:
            if t not in tags:
                tags.append(t)
        dataset['tags'] = tags

        print "..fetching resources"
        for resource in dataset["resources"]:
            filename = filename_for_resource(resource)
            path = DATA_DIR / filename

            try:
                download_file(resource['url'], path)
            except Exception:
                # Skip resources that fail to download rather than aborting the run
                continue
            print "Uploading to S3"
            url = u.upload(path)
            resource['url'] = url
            resource['url_type'] = ''  # make sure we zap historical uploads

    u.close()
    json.dump(datasets, open(os.path.join(DATA_DIR, "metadata.json"), 'wb'))

    return 0
Example #24
File: scaffold.py Project: tjguk/opal
"""
OPAL scaffolding and code generation
"""
import inspect
import os
import subprocess
import sys

from django.utils.crypto import get_random_string
import ffs
from ffs import nix
from ffs.contrib import mold

import opal

OPAL = ffs.Path(opal.__file__).parent
SCAFFOLDING_BASE = OPAL / 'scaffolding'
SCAFFOLD = SCAFFOLDING_BASE / 'scaffold'
PLUGIN_SCAFFOLD = SCAFFOLDING_BASE / 'plugin_scaffold'


def write(what):
    if 'runtests.py' in sys.argv:
        return
    sys.stdout.write("{0}\n".format(what))


# TODO: This is backported from Django 1.9.x - after we upgrade to target
# Django 1.9.x can we kill this and import it from there please.
def get_random_secret_key():
    """
Example #25
In which we expose useful commandline functionality to our users.
"""
import argparse
import os
import subprocess
import sys

import ffs

import opal
from opal.core import scaffold as scaffold_utils
from opal.core import test_runner
from opal.utils import write

USERLAND_HERE    = ffs.Path('.').abspath
OPAL             = ffs.Path(opal.__file__).parent


def find_application_name():
    """
    Return the name of the current Opal application
    """
    for d in USERLAND_HERE.ls():
        if d.is_dir:
            if d/'settings.py':
                return d[-1]

    write("\n\nCripes!\n")
    write("We can't figure out what the name of your application is :(\n")
    write("Are you in the application root directory? \n\n")
Example #26
"""
Locate interesting things related to Statin usage in the
NHS Prescribing data.
"""
import collections
import json
import sys

import argparse
import ffs

blacklist = [  # Things that look interesting but aren't.
    'nystatin'
]

data = ffs.Path('../data/prescriptions')
scrips = data.ls('*IEXT.CSV')


def changes(args):
    """
    Herein we look for changes in the proportion of prescribing habits for a subset of
    the UK Prescribing data.

    It is assumed that you have already extracted the subset of drugs you are interested
    in to a CSV file that follows the same format as the main data.

    Done that? Then we'll begin by grouping the prescriptions by practice, by period.
    Next, figure out what percentage of all statin prescriptions each item has for the period.
    Then, detect changes to the percentage.
Example #27
File: __init__.py Project: Edoi1/MASTER1
 def __init__(self, path):
     self.path = ffs.Path(path)
Example #28
def _has_file(where, filename):
    """
    Predicate function to determine whether FILENAME
    is to be found in WHERE
    """
    return bool(ffs.Path(where / filename))
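_has_file leans on ffs.Path truthiness to signal that the file exists, which is what the test-runner tests above stub out via has_file.side_effect. A hypothetical call site (the '.' path and the message are illustrative only):

if _has_file(ffs.Path('.'), 'manage.py'):
    print('Looks like a Django-style application')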
Example #29
def main(workspace):
    global DATA_DIR
    DATA_DIR = ffs.Path(workspace) / 'data'

    add_metadata_to_qof_datasets()
    return 0
Example #30
import ffs
import letter

here = ffs.Path(__file__).parent

class Message(letter.Letter):
    Postie = letter.Postman(templatedir=here)

    From     = '*****@*****.**'
    To       = '*****@*****.**'
    Subject  = 'Easy Templated Emails'
    Template = 'cool_email'
    Context  = {
        'href': 'http://www.example.com',
        'link': 'Exemplary'
        }

Message.send()