Ejemplo n.º 1
0
    def setUp(self):
        """Spin up a functional-test app server and record its base URLs."""
        super(TestSearch, self).setUp()

        # Configuration overrides that will be written into the spawned
        # server's cfg file.
        self.config = dict(
            PORT=get_first_free_port(),
            ELASTIC_SEARCH_INDEX=app.config['ELASTIC_SEARCH_INDEX'],
            THREADED=True,
            FUNCTIONAL_TEST_MODE=True
        )
        self.cfg_file = paths.rel2abs(__file__, "..", "resources", "test-server.cfg")
        make_config(self.config, self.cfg_file)

        # Launch the web app in a separate process, driven by cfg_file.
        self.test_server = TestServer(
            port=None,
            index=None,
            python_app_module_path=os.path.abspath(web.__file__),
            cfg_file=self.cfg_file
        )
        self.test_server.spawn_with_config()

        # Base URLs the individual tests will hit.
        self.appurl = "http://localhost:{x}".format(x=self.config["PORT"])
        self.api_base = self.appurl + "/search/v1/"
Ejemplo n.º 2
0
# overrides for the webapp deployment

# Webapp server settings: debug on, fixed port, no SSL, threaded request handling.
DEBUG = True
PORT = 5000
SSL = False
THREADED = True

############################################
# override octopus initialisation to skip Elasticsearch init and use Postgres
INITIALISE_MODULES = ["service.initialise"]

############################################
# presumably triggers database initialisation at startup — confirm in service.initialise
INITIALISE_DATABASE = True

############################################
# important overrides for account module

# Accounts are switched off for this deployment.
ACCOUNT_ENABLE = False
# NOTE(review): hard-coded secret key — acceptable for a test deployment only;
# a real deployment should take this from the environment.
SECRET_KEY = "super-secret-key"

#############################################
# important overrides for storage module

#STORE_IMPL = "octopus.modules.store.store.StoreLocal"
#STORE_TMP_IMPL = "octopus.modules.store.store.TempStore"

from octopus.lib import paths
# Local filesystem store directories, resolved relative to this config file.
STORE_LOCAL_DIR = paths.rel2abs(__file__, "..", "service", "tests",
                                "local_store", "live")
STORE_TMP_DIR = paths.rel2abs(__file__, "..", "service", "tests",
                              "local_store", "tmp")
Ejemplo n.º 3
0
    # we can turn off deposit receipts, which is allowed by the specification
    "return_deposit_receipt":
    True,
    "generator": ("http://www.swordapp.org/sss", "2.0"),

    ##############################################
    # Default configuration for SSS repository impl

    # The number of collections that SSS will create and give to users to deposit content into
    "num_collections":
    10,

    # The directory where the deposited content should be stored
    "store_dir":
    paths.rel2abs(__file__, "..", "..", "..", "..", "service", "tests",
                  "sss_store"),
    # If you are using Apache you should set the store directory in full

    # The directory where incoming content will be temporarily stored
    "tmp_dir":
    paths.rel2abs(__file__, "..", "..", "..", "..", "service", "tests",
                  "sss_tmp"),

    # The chunk size used to copy file streams into and out of the temp directory
    "copy_chunk_size":
    8096,

    # What media ranges should the app:accept element in the Service Document support
    "app_accept": ["*/*"],
    "multipart_accept": ["*/*"],
    "accept_nothing":
Ejemplo n.º 4
0
]

############################################
# important overrides for account module

# Accounts are switched off for this deployment.
ACCOUNT_ENABLE = False
# NOTE(review): hard-coded secret key — fine for tests; use an
# environment-provided value in any real deployment.
SECRET_KEY = "super-secret-key"

#############################################
# important overrides for storage module

#STORE_IMPL = "octopus.modules.store.store.StoreLocal"
#STORE_TMP_IMPL = "octopus.modules.store.store.TempStore"

from octopus.lib import paths
# Local store directories used by the test suite, resolved relative to this file.
STORE_LOCAL_DIR = paths.rel2abs(__file__, "..", "service", "tests", "local_store", "live")
STORE_TMP_DIR = paths.rel2abs(__file__, "..", "service", "tests", "local_store", "tmp")

##############################################
IUCN_REDLIST_CATEGORIES = {
    u'EX':    u'Extinct',
    u'EW':    u'Extinct In The Wild',
    u'RE':    u'Regionally Extinct',
    u'CR':    u'Critically Endangered',
    u'EN':    u'Endangered',
    u'VU':    u'Vulnerable',
    u'LR/cd': u'Lower Risk: Conservation Dependent',
    u'NT':    u'Near Threatened',
    u'LR/nt': u'Near Threatened',
    u'DD':    u'Data Deficient',
    u'LC':    u'Least Concern',
Ejemplo n.º 5
0
# The default time period to use for dynamically set index types
# allowed: second, minute, hour, day, month, year
ESDAO_DEFAULT_TIME_BOX = "month"
# You can also set the time box on a per-type basis with
# ESDAO_TIME_BOX_<UPPER CASE TYPE NAME> = "<period>"

# How many time boxes to look back on during READ operations
# (presumably 0 means only the current box is consulted — confirm in the esdao impl)
ESDAO_DEFAULT_TIME_BOX_LOOKBACK = 0
# You can also set the look back on a per-type basis with
# ESDAO_TIME_BOX_LOOKBACK_<UPPER CASE TYPE NAME> = <number of boxes>

# path to directory where the "next", "prev" and "curr" files for routing
# requests to the correct type are placed
from octopus.lib import paths
ESDAO_ROLLING_DIR = paths.rel2abs(__file__, "..", "..", "..", "..", "indexdir")

# map of type names to DAOs which will have the publish() or rollback()
# methods called on them
# {"mytype" : "service.dao.MyDAO"}
ESDAO_ROLLING_PLUGINS = {}

##############################################################
# Query Endpoint Configuration
##############################################################

# The query url routes and the types that are available via the query endpoint (see below for an example)
QUERY_ROUTE = {}

# query filters that are used in the above QUERY_ROUTE (see below for an example)
QUERY_FILTERS = {}
Ejemplo n.º 6
0
from octopus.lib import paths, dataobj, clcsv
import os, codecs, json, uuid, csv
import esprit

# Input and output locations, all under the election data directory.
d = paths.rel2abs(__file__, "..", "..", "data", "election")
rfa = os.path.join(d, "RESULTS FOR ANALYSIS.csv")
names = os.path.join(d, "name_map.txt")
out = os.path.join(d, "constituencies.json")
batch = os.path.join(d, "constituencies.es")

# Read in the name map: first CSV column maps to the second.
with open(names) as f:
    nm = {row[0].strip(): row[1].strip() for row in csv.reader(f)}


class Constituency(dataobj.DataObj):
    def __init__(self, raw=None):
        struct = {
            "fields": {
                "id": {
                    "coerce": "unicode"
                },
                "constituency": {
                    "coerce": "unicode"
                }
            },
            "lists": {
                "result": {
Ejemplo n.º 7
0
    "allow_update" : True,
    "allow_delete" : True,

    # we can turn off deposit receipts, which is allowed by the specification
    "return_deposit_receipt" : True,

    "generator" : ("http://www.swordapp.org/sss", "2.0"),

    ##############################################
    # Default configuration for SSS repository impl

    # The number of collections that SSS will create and give to users to deposit content into
    "num_collections" : 10,

    # The directory where the deposited content should be stored
    "store_dir" : paths.rel2abs(__file__, "..", "..", "..", "..", "service", "tests", "sss_store"),
    # If you are using Apache you should set the store directory in full

    # The directory where incoming content will be temporarily stored
    "tmp_dir" : paths.rel2abs(__file__, "..", "..", "..", "..", "service", "tests", "sss_tmp"),

    # The chunk size used to copy file streams into and out of the temp directory
    "copy_chunk_size" : 8096,

    # What media ranges should the app:accept element in the Service Document support
    "app_accept" : [ "*/*" ],
    "multipart_accept" : [ "*/*" ],
    "accept_nothing" : False,

    # use these app_accept and multipart_accept values to create an invalid Service Document
    # "app_accept" : None,
Ejemplo n.º 8
0
"""
Use this script to strip out unwanted rows from the WDI sheet from the WB website
"""
from octopus.lib import paths
import csv, codecs

# Input: the full WDI data sheet; output: the reduced selection written below.
d = paths.rel2abs(__file__, "..", "..", "data", "wbwdi", "WDI_csv",
                  "WDI_Data.csv")
o = paths.rel2abs(__file__, "..", "..", "data", "wbwdi", "wbwdi_selected.csv")

# Indicator names to retain; every other row in the sheet is dropped.
KEEP = [
    "Access to electricity (% of population)",
    "Adjusted net national income per capita (current US$)",
    "Adolescent fertility rate (births per 1,000 women ages 15-19)",
    "Agricultural land (sq. km)",
    "Alternative and nuclear energy (% of total energy use)",
    "Armed forces personnel, total",
    "Average precipitation in depth (mm per year)",
    "Birth rate, crude (per 1,000 people)",
    "CO2 emissions (metric tons per capita)",
    "Death rate, crude (per 1,000 people)",
    "Electric power consumption (kWh per capita)", "GDP (current US$)",
    "Land area (sq. km)", "Life expectancy at birth, total (years)",
    "Population growth (annual %)"
]

with codecs.open(o, "wb") as out:
    writer = csv.writer(out)
    with codecs.open(d) as f:
        reader = csv.reader(f)
        first = True
Ejemplo n.º 9
0
"""
Use this to generate the json and es bulk from the wb wdi spreadsheet
"""
from octopus.lib import paths, dataobj, clcsv
import codecs, json, uuid
import esprit

# CSV input plus the two outputs (plain JSON and ES bulk file), all under data/wbwdi.
wdi = paths.rel2abs(__file__, "..", "..", "data", "wbwdi", "wbwdi_selected.csv")
out = paths.rel2abs(__file__, "..", "..", "data", "wbwdi", "wbwdi_selected.json")
batch = paths.rel2abs(__file__, "..", "..", "data", "wbwdi", "wbwdi_selected.es")

class Indicator(dataobj.DataObj):
    """A single WDI indicator measurement, modelled as a DataObj."""

    def __init__(self, raw=None):
        """Register the field schema, then initialise from raw (if given)."""
        struct = {
            "fields": {
                "id": {"coerce": "unicode"},
                "country": {"coerce": "unicode"},
                "indicator": {"coerce": "unicode"},
                "year": {"coerce": "integer"},
                "value": {"coerce": "float"}
            }
        }
        self._add_struct(struct)
        super(Indicator, self).__init__(raw, expose_data=True)

    def add_measure(self, year, value):
        """Set the year/value pair on this object; a missing value is stored as 0."""
        value = 0 if value is None else value
        self._set_single("year", year, coerce=dataobj.to_int())
        self._set_single("value", value, coerce=dataobj.to_float())
Ejemplo n.º 10
0
from octopus.lib import paths, dataobj, clcsv
import os, codecs, json, uuid, csv
import esprit

# Input and output paths within the election data directory.
d = paths.rel2abs(__file__, "..", "..", "data", "election")
rfa = os.path.join(d, "RESULTS FOR ANALYSIS.csv")
names = os.path.join(d, "name_map.txt")
out = os.path.join(d, "constituencies.json")
batch = os.path.join(d, "constituencies.es")

# Read in the name map: first CSV column -> second column.
nm = {}
with open(names) as handle:
    for record in csv.reader(handle):
        nm[record[0].strip()] = record[1].strip()

class Constituency(dataobj.DataObj):
    def __init__(self, raw=None):
        struct = {
            "fields" : {
                "id" : {"coerce" : "unicode"},
                "constituency" : {"coerce" : "unicode"}
            },
            "lists" : {
                "result" : {"contains" : "object"}
            },
            "structs" : {
                "result" : {
                    "fields" : {
                        "party" : {"coerce" : "unicode"},
Ejemplo n.º 11
0
"""
Use this to generate the json and es bulk from the wb wdi spreadsheet
"""
from octopus.lib import paths, dataobj, clcsv
import codecs, json, uuid
import esprit

# CSV input plus the two outputs (plain JSON and ES bulk file), all under data/wbwdi.
wdi = paths.rel2abs(__file__, "..", "..", "data", "wbwdi",
                    "wbwdi_selected.csv")
out = paths.rel2abs(__file__, "..", "..", "data", "wbwdi",
                    "wbwdi_selected.json")
batch = paths.rel2abs(__file__, "..", "..", "data", "wbwdi",
                      "wbwdi_selected.es")


class Indicator(dataobj.DataObj):
    def __init__(self, raw=None):
        struct = {
            "fields": {
                "id": {
                    "coerce": "unicode"
                },
                "country": {
                    "coerce": "unicode"
                },
                "indicator": {
                    "coerce": "unicode"
                },
                "year": {
                    "coerce": "integer"
                },
Ejemplo n.º 12
0
# The default time period to use for dynamically set index types
# allowed: second, minute, hour, day, month, year
ESDAO_DEFAULT_TIME_BOX = "month"
# You can also set the time box on a per-type basis with
# ESDAO_TIME_BOX_<UPPER CASE TYPE NAME> = "<period>"

# How many time boxes to look back on during READ operations
# (presumably 0 means only the current box is consulted — confirm in the esdao impl)
ESDAO_DEFAULT_TIME_BOX_LOOKBACK = 0
# You can also set the look back on a per-type basis with
# ESDAO_TIME_BOX_LOOKBACK_<UPPER CASE TYPE NAME> = <number of boxes>

# path to directory where the "next", "prev" and "curr" files for routing
# requests to the correct type are placed
from octopus.lib import paths
ESDAO_ROLLING_DIR = paths.rel2abs(__file__, "..", "..", "..", "..", "indexdir")

# map of type names to DAOs which will have the publish() or rollback()
# methods called on them
# {"mytype" : "service.dao.MyDAO"}
ESDAO_ROLLING_PLUGINS = {}

##############################################################
# Query Endpoint Configuration
##############################################################

# The query url routes and the types that are available via the query endpoint (see below for an example)
QUERY_ROUTE = {}

# query filters that are used in the above QUERY_ROUTE (see below for an example)
QUERY_FILTERS = {}
Ejemplo n.º 13
0
"""
Use this script to strip out unwanted rows from the WDI sheet from the WB website
"""
from octopus.lib import paths
import csv, codecs

# Input: the full WDI data sheet; output: the reduced selection written below.
d = paths.rel2abs(__file__, "..", "..", "data", "wbwdi", "WDI_csv", "WDI_Data.csv")
o = paths.rel2abs(__file__, "..", "..", "data", "wbwdi", "wbwdi_selected.csv")

# Indicator names to retain; every other row in the sheet is dropped.
KEEP = [
    "Access to electricity (% of population)",
    "Adjusted net national income per capita (current US$)",
    "Adolescent fertility rate (births per 1,000 women ages 15-19)",
    "Agricultural land (sq. km)",
    "Alternative and nuclear energy (% of total energy use)",
    "Armed forces personnel, total",
    "Average precipitation in depth (mm per year)",
    "Birth rate, crude (per 1,000 people)",
    "CO2 emissions (metric tons per capita)",
    "Death rate, crude (per 1,000 people)",
    "Electric power consumption (kWh per capita)",
    "GDP (current US$)",
    "Land area (sq. km)",
    "Life expectancy at birth, total (years)",
    "Population growth (annual %)"
]

with codecs.open(o, "wb") as out:
    writer = csv.writer(out)
    with codecs.open(d) as f:
        reader = csv.reader(f)