Example #1
    def read(self):
        data = []
        for indicator in self.symbols:
            # Build URL for api call
            try:
                df = self._read_one_data(self.url + indicator, self.params)
                df.columns = ['country', 'iso_code', 'year', indicator]
                data.append(df)

            except ValueError as e:
                msg = str(e) + ' Indicator: ' + indicator
                if self.errors == 'raise':
                    raise ValueError(msg)
                elif self.errors == 'warn':
                    warnings.warn(msg)

        # Confirm we actually got some data, and build Dataframe
        if len(data) > 0:
            out = reduce(lambda x, y: x.merge(y, how='outer'), data)
            out = out.drop('iso_code', axis=1)
            out = out.set_index(['country', 'year'])
            if PANDAS_0170:
                out = out.apply(pd.to_numeric, errors='ignore')
            else:
                # deprecated in 0.17.0
                out = out.convert_objects(convert_numeric=True)
            return out
        else:
            msg = "No indicators returned data."
            raise ValueError(msg)
Example #2
    def read(self):
        data = []
        for indicator in self.symbols:
            # Build URL for api call
            try:
                df = self._read_one_data(self.url + indicator, self.params)
                df.columns = ['country', 'iso_code', 'year', indicator]
                data.append(df)

            except ValueError as e:
                msg = str(e) + ' Indicator: ' + indicator
                if self.errors == 'raise':
                    raise ValueError(msg)
                elif self.errors == 'warn':
                    warnings.warn(msg)

        # Confirm we actually got some data, and build Dataframe
        if len(data) > 0:
            out = reduce(lambda x, y: x.merge(y, how='outer'), data)
            out = out.drop('iso_code', axis=1)
            out = out.set_index(['country', 'year'])
            if PANDAS_0170:
                out = out.apply(pd.to_numeric, errors='ignore')
            else:
                # deprecated in 0.17.0
                out = out.convert_objects(convert_numeric=True)
            return out
        else:
            msg = "No indicators returned data."
            raise ValueError(msg)
Example #3
def _result_type_many(*arrays_and_dtypes):
    """ wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32)
    argument limit """
    try:
        return np.result_type(*arrays_and_dtypes)
    except ValueError:
        # we have > NPY_MAXARGS terms in our expression
        return reduce(np.result_type, arrays_and_dtypes)
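A minimal sketch of the fallback path above (the dtype list is made up for illustration): folding pairwise with reduce yields the same promoted dtype even when there are more terms than NPY_MAXARGS.

import numpy as np
from functools import reduce

dtypes = [np.dtype('int8')] * 35 + [np.dtype('float64')]  # more than 32 terms
print(reduce(np.result_type, dtypes))  # float64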
Example #4
def table_view(request):
    return_list = []  # populated only on POST; an empty list renders for other methods
    if request.method == 'POST':

        algo_name = request.POST['algo_name']
        if not algo_name:
            return HttpResponse(
'<h1>Please go back and enter an algo_name</h1>')

        signal = request.POST['signal']
        if not signal:
            return HttpResponse(
                '<h1>Please go back and enter a stock signal</h1>')

        trade = request.POST['trade']
        if not trade:
            return HttpResponse(
                '<h1>Please go back and enter a stock trade</h1>')

        ticker = request.POST['ticker']
        if not ticker:
            return HttpResponse(
                '<h1>Please go back and enter a stock ticker</h1>')

        api_url = "https://api.iextrading.com/1.0/stock/{ticker}/chart/1y".format(
            ticker=ticker)

        headers = {
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
        }

        response = requests.request('GET', api_url, headers=headers)

        if response.status_code == 200:
            json_response = json.loads(response.content)
        else:
            return render(
                request,
                'stocks/errors.html',
                context={'error_string_exp': 'Some Errors Occurred!'})

        prices = [d['close'] for d in json_response if 'close' in d]

        positions, PnL = algo_result(signal, trade, prices)
        # Save the Algo name, the daily PnL and the positions.
        new_algo = AlgoProp(name=algo_name,
                            daily_pnl=' '.join(map(str, PnL)),
                            position=' '.join(map(str, positions)))
        new_algo.save()

        val = AlgoProp.objects.all()
        for v in val:
            l = list(map(float, v.daily_pnl.split(' ')))
            avg = reduce(lambda x, y: x + y, l) / len(l)
            return_list.append(dict({'daily_pnl': avg, 'name': v.name}))

    return render(request, 'stocks/table_view.html', {'d_val': return_list})
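A standalone sketch of the PnL averaging above (the numbers are made up): the reduce sums the per-day PnL values, and dividing by the count gives the per-algorithm average that the view reports.

from functools import reduce

daily_pnl = '1.5 -0.5 2.0'
values = list(map(float, daily_pnl.split(' ')))
average = reduce(lambda x, y: x + y, values) / len(values)
print(average)  # 1.0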
Example #5
    def same_as(self, na):
        # Take a screenshot
        fw = open('screenshot_path', 'w', encoding='utf8')  # open the file for writing
        fr = open('screenshot_path', 'r', encoding='utf8')  # open the file for reading
        t1 = time.strftime('%Y%m%d%H%M',
                           time.localtime(time.time()))  # get the current time
        print('Current time: ' + t1)
        open('result', 'a', encoding='utf8').writelines('Current time: ' + t1 + '\n')
        # #day = t1[0:8]
        day = t1
        img_folder = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         "..")) + '\\baoxian\\za_40手机资金安全险'
        # Directory under the current path where the screenshots are stored
        # print('Current path: ' + img_folder)
        #open('result', 'a', encoding='utf8').writelines('Current path: ' + img_folder + '\n')
        cre_file = ClassMethon()
        cre_file.creat_file(img_folder)
        screen_save_path = img_folder + '/' + na + t1 + '.png'  # naming scheme for the screenshot
        #print('Current screenshot path and name: ' + screen_save_path)
        open('result', 'a',
             encoding='utf8').writelines('Current screenshot path and name: ' +
                                         screen_save_path + '\n')
        time.sleep(10)  # sleep ten seconds so the page finishes loading
        self.driver.get_screenshot_as_file(screen_save_path)  # take the screenshot and save it under the given path
        #print('Screenshot saved')
        open('result', 'a', encoding='utf8').writelines('Screenshot saved \n')
        #self.pic_save()
        imglist = self.imglist
        if len(imglist) > 1:
            # screen_save_path and imglist[i] here refer to the same image
            image1 = Image.open(imglist[-2])
            image2 = Image.open(imglist[-1])
            # Convert the images into histogram data, stored in lists h1 and h2
            h1 = image1.histogram()
            h2 = image2.histogram()
            result = math.sqrt(
                reduce(operator.add, list(map(lambda a, b:
                                              (a - b)**2, h1, h2))) /
                len(h1))  # compare via the size of the squared differences
            print(result)
            if result > 5:
                # Images differ: continue with the next scroll-and-capture
                print(imglist[-1] + ' and ' + imglist[-2] + ' differ, continuing.')
                open('result', 'a',
                     encoding='utf8').writelines(imglist[-1] + ' and ' +
                                                 imglist[-2] + ' differ, continuing.' +
                                                 '\n')
            else:
                # Images match: work out why
                print(imglist[-1] + ' and ' + imglist[-2] + ' are identical')
                open('result', 'a',
                     encoding='utf8').writelines(imglist[-1] + ' and ' +
                                                 imglist[-2] + ' are identical' + '\n')
                if self.driver.title != '同花顺保险':
                    print('The page address is wrong; go back to the previous screenshot position and retake it')
                    open('result', 'a',
                         encoding='utf8').writelines('The page address is wrong; go back to the previous screenshot position and retake it \n')
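A self-contained sketch of the histogram comparison used above (the solid-colour test images are made up and Pillow is assumed to be installed): the root-mean-square difference between the two histograms is 0 for identical images and grows as they diverge.

import math
import operator
from functools import reduce
from PIL import Image

# Two small solid-colour images stand in for consecutive screenshots.
image1 = Image.new('RGB', (10, 10), (255, 0, 0))
image2 = Image.new('RGB', (10, 10), (250, 5, 0))

h1 = image1.histogram()
h2 = image2.histogram()
result = math.sqrt(
    reduce(operator.add, map(lambda a, b: (a - b) ** 2, h1, h2)) / len(h1))
print(result)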
Example #6
def chain_dot(*matrices):
    """
    Returns the dot product of the given matrices.

    Parameters
    ----------
    matrices: argument list of ndarray
    """
    return reduce(lambda x, y: np.dot(y, x), matrices[::-1])
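A quick sanity check of the ordering (the matrices are arbitrary test data): reversing the argument list and folding with np.dot(y, x) multiplies left to right, so chain_dot(A, B, C) equals A·B·C.

import numpy as np
from functools import reduce

def chain_dot(*matrices):
    return reduce(lambda x, y: np.dot(y, x), matrices[::-1])

rng = np.random.default_rng(0)
A, B, C = rng.random((3, 2, 2))
assert np.allclose(chain_dot(A, B, C), A.dot(B).dot(C))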
Example #7
    def filter_property_type(self, queryset, field, value):
        data = dict(self.data)

        if data[field]:
            q_list = map(lambda n: Q(property_type__icontains=n), data[field])
            q_list = reduce(lambda a, b: a | b, q_list)
            return queryset.filter(q_list)
        else:
            return queryset
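The same fold pattern, shown without Django for illustration (the property type and search terms are made up): OR-ing one condition per search value produces a single combined predicate, exactly as the Q objects are combined above.

from functools import reduce

property_type = 'Detached Villa'
search_terms = ['loft', 'villa', 'studio']
conditions = map(lambda n: n.lower() in property_type.lower(), search_terms)
print(reduce(lambda a, b: a | b, conditions))  # True: at least one term matches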
Example #8
def chain_dot(*matrices):
    """
    Returns the dot product of the given matrices.

    Parameters
    ----------
    matrices: argument list of ndarray
    """
    return reduce(lambda x, y: np.dot(y, x), matrices[::-1])
Example #9
def fitness(individual, target):
    """
     Determine the fitness of an individual. Lower is better.

    :param individual:  the individual to evaluate
    :param target: the sum of numbers that individuals are aiming for
    :return:
    """
    sum = reduce(add, individual, 0)
    return abs(target - sum)
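For illustration, a worked call with made-up numbers: the genes sum to 6, which is 4 away from a target of 10.

from functools import reduce
from operator import add

def fitness(individual, target):
    return abs(target - reduce(add, individual, 0))

print(fitness([1, 2, 3], 10))  # 4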
Example #10
def grade(pop, target):
    """
    'Find average fitness for a population.'

    :param pop:
    :param target:
    :return:
    """
    summed = reduce(add, (fitness(x, target) for x in pop), 0)
    return summed / (len(pop) * 1.0)
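And the corresponding population-level average, again with made-up data: individual fitnesses of 4 and 5 give a grade of 4.5.

from functools import reduce
from operator import add

def fitness(individual, target):
    return abs(target - reduce(add, individual, 0))

def grade(pop, target):
    summed = reduce(add, (fitness(x, target) for x in pop), 0)
    return summed / (len(pop) * 1.0)

print(grade([[1, 2, 3], [4, 5, 6]], 10))  # 4.5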
Example #11
    def visit_BoolOp(self, node, **kwargs):
        def visitor(x, y):
            lhs = self._try_visit_binop(x)
            rhs = self._try_visit_binop(y)

            op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs,
                                                                    rhs)
            return self._possibly_evaluate_binop(op, node.op, lhs, rhs)

        operands = node.values
        return reduce(visitor, operands)
Example #12
def _result_type_many(*arrays_and_dtypes):
    """ wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32)
    argument limit """
    try:
        return np.result_type(*arrays_and_dtypes)
    except ValueError:
        # length 0 or length > NPY_MAXARGS both throw a ValueError, so check
        # which one we're dealing with
        if len(arrays_and_dtypes) == 0:
            raise ValueError('at least one array or dtype is required')
        return reduce(np.result_type, arrays_and_dtypes)
Example #13
    def visit_BoolOp(self, node, **kwargs):
        def visitor(x, y):
            lhs = self._try_visit_binop(x)
            rhs = self._try_visit_binop(y)

            op, op_class, lhs, rhs = self._possibly_transform_eq_ne(
                node, lhs, rhs)
            return self._possibly_evaluate_binop(op, node.op, lhs, rhs)

        operands = node.values
        return reduce(visitor, operands)
Example #14
    def __init__(self,
                 gbls=None,
                 lcls=None,
                 level=1,
                 resolvers=None,
                 target=None):
        self.level = level
        self.resolvers = tuple(resolvers or [])
        self.globals = dict()
        self.locals = dict()
        self.target = target
        self.ntemps = 1  # number of temporary variables in this scope

        if isinstance(lcls, Scope):
            ld, lcls = lcls, dict()
            self.locals.update(ld.locals.copy())
            self.globals.update(ld.globals.copy())
            self.resolvers += ld.resolvers
            if ld.target is not None:
                self.target = ld.target
            self.update(ld.level)

        frame = sys._getframe(level)
        try:
            self.globals.update(gbls or frame.f_globals)
            self.locals.update(lcls or frame.f_locals)
        finally:
            del frame

        # add some useful defaults
        self.globals['Timestamp'] = pd.lib.Timestamp
        self.globals['datetime'] = datetime

        # SUCH a hack
        self.globals['True'] = True
        self.globals['False'] = False

        # function defs
        self.globals['list'] = list
        self.globals['tuple'] = tuple

        res_keys = (list(o.keys()) for o in self.resolvers)
        self.resolver_keys = frozenset(reduce(operator.add, res_keys, []))
        self._global_resolvers = self.resolvers + (self.locals, self.globals)
        self._resolver = None

        self.resolver_dict = {}
        for o in self.resolvers:
            self.resolver_dict.update(dict(o))
Example #15
    def _format_header(self):
        if isinstance(self.columns, MultiIndex):
            gen = self._format_header_mi()
        else:
            gen = self._format_header_regular()

        gen2 = ()
        if self.df.index.names:
            row = [x if x is not None else ''
                   for x in self.df.index.names] + [''] * len(self.columns)
            if reduce(lambda x, y: x and y, map(lambda x: x != '', row)):
                gen2 = (ExcelCell(self.rowcounter, colindex, val, header_style)
                        for colindex, val in enumerate(row))
                self.rowcounter += 1
        return itertools.chain(gen, gen2)
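The reduce over a boolean "and" here is simply an all-of check; a tiny standalone sketch of the equivalence (the sample row is made up):

from functools import reduce

row = ['country', 'year', '']
via_reduce = reduce(lambda x, y: x and y, map(lambda x: x != '', row))
print(via_reduce, all(x != '' for x in row))  # False False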
Example #16
    def _format_header(self):
        if isinstance(self.columns, MultiIndex):
            gen = self._format_header_mi()
        else:
            gen = self._format_header_regular()

        gen2 = ()
        if self.df.index.names:
            row = [x if x is not None else ''
                   for x in self.df.index.names] + [''] * len(self.columns)
            if reduce(lambda x, y: x and y, map(lambda x: x != '', row)):
                gen2 = (ExcelCell(self.rowcounter, colindex, val, header_style)
                        for colindex, val in enumerate(row))
                self.rowcounter += 1
        return itertools.chain(gen, gen2)
Example #17
    def __init__(self, gbls=None, lcls=None, level=1, resolvers=None,
                 target=None):
        self.level = level
        self.resolvers = tuple(resolvers or [])
        self.globals = dict()
        self.locals = dict()
        self.target = target
        self.ntemps = 1  # number of temporary variables in this scope

        if isinstance(lcls, Scope):
            ld, lcls = lcls, dict()
            self.locals.update(ld.locals.copy())
            self.globals.update(ld.globals.copy())
            self.resolvers += ld.resolvers
            if ld.target is not None:
                self.target = ld.target
            self.update(ld.level)

        frame = sys._getframe(level)
        try:
            self.globals.update(gbls or frame.f_globals)
            self.locals.update(lcls or frame.f_locals)
        finally:
            del frame

        # add some useful defaults
        self.globals['Timestamp'] = pd.lib.Timestamp
        self.globals['datetime'] = datetime

        # SUCH a hack
        self.globals['True'] = True
        self.globals['False'] = False

        # function defs
        self.globals['list'] = list
        self.globals['tuple'] = tuple

        res_keys = (list(o.keys()) for o in self.resolvers)
        self.resolver_keys = frozenset(reduce(operator.add, res_keys, []))
        self._global_resolvers = self.resolvers + (self.locals, self.globals)
        self._resolver = None

        self.resolver_dict = {}
        for o in self.resolvers:
            self.resolver_dict.update(dict(o))
Example #18
def download(
    country=["MX", "CA", "US"], indicator=["NY.GDP.MKTP.CD", "NY.GNS.ICTR.ZS"], start=2003, end=2005, errors="warn"
):
    """
    Download data series from the World Bank's World Development Indicators

    Parameters
    ----------

    indicator: string or list of strings
        taken from the ``id`` field in ``WDIsearch()``
        
    country: string or list of strings.
        ``all`` downloads data for all countries
        2 or 3 character ISO country codes select individual
        countries (e.g.``US``,``CA``) or (e.g.``USA``,``CAN``).  The codes
        can be mixed.
            
        The two ISO lists of countries, provided by Wikipedia, are hardcoded
        into pandas as of 11/10/2014.
        
    start: int
        First year of the data series
        
    end: int
        Last year of the data series (inclusive)
    
    errors: str {'ignore', 'warn', 'raise'}, default 'warn'
        Country codes are validated against a hardcoded list.  This controls
        the outcome of that validation, and also applies, where possible, to
        the results returned by the World Bank.

        errors='raise' will raise a ValueError on a bad country code.
    
    Returns
    -------

    ``pandas`` DataFrame with columns: country, iso_code, year, 
    indicator value.
    
    """

    if type(country) == str:
        country = [country]

    bad_countries = np.setdiff1d(country, country_codes)

    # Validate the input
    if len(bad_countries) > 0:
        tmp = ", ".join(bad_countries)
        if errors == "raise":
            raise ValueError("Invalid Country Code(s): %s" % tmp)
        if errors == "warn":
            warnings.warn("Non-standard ISO country codes: %s" % tmp)

    # Work with a list of indicators
    if type(indicator) == str:
        indicator = [indicator]

    # Download
    data = []
    bad_indicators = {}
    for ind in indicator:
        one_indicator_data, msg = _get_data(ind, country, start, end)
        if msg == "Success":
            data.append(one_indicator_data)
        else:
            bad_indicators[ind] = msg

    if len(bad_indicators.keys()) > 0:
        bad_ind_msgs = [i + " : " + m for i, m in bad_indicators.items()]
        bad_ind_msgs = "\n\n".join(bad_ind_msgs)
        bad_ind_msgs = "\n\nInvalid Indicators:\n\n%s" % bad_ind_msgs
        if errors == "raise":
            raise ValueError(bad_ind_msgs)
        if errors == "warn":
            warnings.warn(bad_ind_msgs)

    # Confirm we actually got some data, and build Dataframe
    if len(data) > 0:
        out = reduce(lambda x, y: x.merge(y, how="outer"), data)
        out = out.drop("iso_code", axis=1)
        out = out.set_index(["country", "year"])
        if PD017:
            kwargs = dict((kw, True) for kw in ("datetime", "numeric", "timedelta"))
        else:
            kwargs = {"convert_numeric": True}
        out = out.convert_objects(**kwargs)
        return out
    else:
        msg = "No indicators returned data."
        if errors == "ignore":
            msg += "  Set errors='warn' for more information."
        raise ValueError(msg)
Example #19
    def _excel2num(x):
        "Convert Excel column name like 'AB' to 0-based column index"
        return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1,
                      x.upper().strip(), 0) - 1
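A couple of worked conversions (standalone for illustration): each letter is treated as a base-26 digit, so 'A' maps to 0 and 'AB' to 27.

from functools import reduce

def _excel2num(x):
    "Convert Excel column name like 'AB' to 0-based column index"
    return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1,
                  x.upper().strip(), 0) - 1

print(_excel2num('A'), _excel2num('AB'))  # 0 27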
Example #20
def colrename(x):
    reps = ('.', '_'), (' ', '_'), ('(', ''), (')', ''), ('{', ''), (
        '}', ''), ('\\n', ''), ('\n', ''), ('\\t', ''), ('\t', ''), ('=', '')
    return reduce(lambda a, kv: a.replace(*kv), reps, x)
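A quick usage sketch with a made-up column name: the fold applies each (old, new) pair in turn, so spaces become underscores and the parentheses are dropped.

from functools import reduce

def colrename(x):
    reps = ('.', '_'), (' ', '_'), ('(', ''), (')', ''), ('{', ''), (
        '}', ''), ('\\n', ''), ('\n', ''), ('\\t', ''), ('\t', ''), ('=', '')
    return reduce(lambda a, kv: a.replace(*kv), reps, x)

print(colrename('Price (USD)'))  # Price_USD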
Example #21
def compose(*funcs):
    """Compose 2 or more callables"""
    assert len(funcs) > 1, "At least 2 callables must be passed to compose"
    return reduce(_compose2, funcs)
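A minimal usage sketch: _compose2 is not shown in the snippet above, so the two-function composer defined here is an assumption for illustration; the reduce then chains any number of callables.

from functools import reduce

def _compose2(f, g):
    # hypothetical pairwise composer: apply g first, then f
    return lambda *args, **kwargs: f(g(*args, **kwargs))

def compose(*funcs):
    """Compose 2 or more callables"""
    assert len(funcs) > 1, "At least 2 callables must be passed to compose"
    return reduce(_compose2, funcs)

add_one = lambda x: x + 1
double = lambda x: 2 * x
print(compose(double, add_one)(3))  # double(add_one(3)) == 8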
Example #22
def download(country=['MX', 'CA', 'US'],
             indicator=['GDPPCKD', 'GDPPCKN'],
             start=2003,
             end=2005):
    """
    Download data series from the World Bank's World Development Indicators

    Parameters
    ----------

    indicator: string or list of strings
        taken from the ``id`` field in ``WDIsearch()``
    country: string or list of strings.
        ``all`` downloads data for all countries
        ISO-2 character codes select individual countries (e.g.``US``,``CA``)
    start: int
        First year of the data series
    end: int
        Last year of the data series (inclusive)

    Returns
    -------

    ``pandas`` DataFrame with columns: country, iso2c, year, indicator value.
    """

    # Are ISO-2 country codes valid?
    valid_countries = [
        "AG", "AL", "AM", "AO", "AR", "AT", "AU", "AZ", "BB", "BD", "BE", "BF",
        "BG", "BH", "BI", "BJ", "BO", "BR", "BS", "BW", "BY", "BZ", "CA", "CD",
        "CF", "CG", "CH", "CI", "CL", "CM", "CN", "CO", "CR", "CV", "CY", "CZ",
        "DE", "DK", "DM", "DO", "DZ", "EC", "EE", "EG", "ER", "ES", "ET", "FI",
        "FJ", "FR", "GA", "GB", "GE", "GH", "GM", "GN", "GQ", "GR", "GT", "GW",
        "GY", "HK", "HN", "HR", "HT", "HU", "ID", "IE", "IL", "IN", "IR", "IS",
        "IT", "JM", "JO", "JP", "KE", "KG", "KH", "KM", "KR", "KW", "KZ", "LA",
        "LB", "LC", "LK", "LS", "LT", "LU", "LV", "MA", "MD", "MG", "MK", "ML",
        "MN", "MR", "MU", "MW", "MX", "MY", "MZ", "NA", "NE", "NG", "NI", "NL",
        "NO", "NP", "NZ", "OM", "PA", "PE", "PG", "PH", "PK", "PL", "PT", "PY",
        "RO", "RU", "RW", "SA", "SB", "SC", "SD", "SE", "SG", "SI", "SK", "SL",
        "SN", "SR", "SV", "SY", "SZ", "TD", "TG", "TH", "TN", "TR", "TT", "TW",
        "TZ", "UA", "UG", "US", "UY", "UZ", "VC", "VE", "VN", "VU", "YE", "ZA",
        "ZM", "ZW", "all"
    ]
    if type(country) == str:
        country = [country]
    bad_countries = np.setdiff1d(country, valid_countries)
    country = np.intersect1d(country, valid_countries)
    country = ';'.join(country)
    # Work with a list of indicators
    if type(indicator) == str:
        indicator = [indicator]
    # Download
    data = []
    bad_indicators = []
    for ind in indicator:
        try:
            tmp = _get_data(ind, country, start, end)
            tmp.columns = ['country', 'iso2c', 'year', ind]
            data.append(tmp)
        except Exception:
            bad_indicators.append(ind)
    # Warn
    if len(bad_indicators) > 0:
        print('Failed to obtain indicator(s): %s' % '; '.join(bad_indicators))
        print('The data may still be available for download at '
              'http://data.worldbank.org')
    if len(bad_countries) > 0:
        print('Invalid ISO-2 codes: %s' % ' '.join(bad_countries))
    # Merge WDI series
    if len(data) > 0:
        out = reduce(lambda x, y: x.merge(y, how='outer'), data)
        # Clean
        out = out.drop('iso2c', axis=1)
        out = out.set_index(['country', 'year'])
        out = out.convert_objects(convert_numeric=True)
        return out
Example #23
    def _excel2num(x):
        "Convert Excel column name like 'AB' to 0-based column index"
        return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1,
                      x.upper().strip(), 0) - 1
Example #24
    def rfd_selected_to_max_clique(self, current: QTreeWidgetItem, previous: QTreeWidgetItem):
        if current:
            t0 = time.time()

            rfd: RFD = current.data(self.RFD, Qt.UserRole)
            self.rfd_subject.on_next(rfd)

            # https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression#26853961
            rfd_thresholds: dict = {**rfd.get_left_hand_side(), **rfd.get_right_hand_side()}

            lhs = rfd.get_left_hand_side()
            rhs = rfd.get_right_hand_side()

            lhs_keys = lhs.keys()
            rhs_keys = rhs.keys()

            # https://stackoverflow.com/questions/1720421/how-to-concatenate-two-lists-in-python#answer-35631185
            rfd_columns = [*lhs_keys, *rhs_keys]

            rfd_columns_index: dict = {value: position for (position, value) in enumerate(rfd_columns)}

            rows, columns = self.data_frame.shape

            # Calculate all possible differences.
            # This is an O(N^2) operation and may be costly in terms of time and memory.
            # dist: ndarray = np.abs(rfd_columns_data[:, None] - rfd_columns_data)

            full_dist = DiffDataFrame.full_diff(self.data_frame[rfd_columns])

            dist: np.ndarray = np.array(
                [[full_dist.iloc[row1 * rows + row2] for row2 in range(0, rows)] for row1 in range(0, rows)])

            # Identify the suitable pairs of rows:
            # im: ndarray = (dist[:, :, 0] <= 2) & (dist[:, :, 1] <= 0) & (dist[:, :, 2] <= 1)

            conditions_arrays: np.ndarray = [(dist[:, :, rfd_columns_index[column]] <= rfd_thresholds[column])
                                             for column in rfd_columns]

            adjacency_matrix: np.ndarray = np.array(reduce(lambda a, b: np.bitwise_and(a, b), conditions_arrays))

            # Use them as an adjacency matrix and construct a graph.
            # The graph nodes represent rows in the original dataframe.
            # The nodes are connected if the corresponding rows are in the RFD:

            graph = nx.from_numpy_matrix(adjacency_matrix)

            max_clique = max(nx.clique.find_cliques(graph), key=len)
            df: DataFrame = self.data_frame.loc[max_clique, :]
            t1 = time.time()

            seconds = t1 - t0
            current.setText(self.TIME, str(seconds) + "''")

            percentage = round((df.shape[0] / self.rows_count) * 100)
            current.setText(self.EXTENT, str(percentage) + "%")

            df_indexes = df.index.values.tolist()

            # self.pandas_model.update_data(self.data_frame)

            '''self.rfd_data_set_table.clearSelection()
Example #25
def download(country=['MX', 'CA', 'US'], indicator=['GDPPCKD', 'GDPPCKN'],
             start=2003, end=2005):
    """
    Download data series from the World Bank's World Development Indicators

    Parameters
    ----------

    indicator: string or list of strings
        taken from the ``id`` field in ``WDIsearch()``
    country: string or list of strings.
        ``all`` downloads data for all countries
        ISO-2 character codes select individual countries (e.g.``US``,``CA``)
    start: int
        First year of the data series
    end: int
        Last year of the data series (inclusive)

    Returns
    -------

    ``pandas`` DataFrame with columns: country, iso2c, year, indicator value.
    """

    # Are ISO-2 country codes valid?
    valid_countries = ["AG", "AL", "AM", "AO", "AR", "AT", "AU", "AZ", "BB",
                       "BD", "BE", "BF", "BG", "BH", "BI", "BJ", "BO", "BR", "BS", "BW",
                       "BY", "BZ", "CA", "CD", "CF", "CG", "CH", "CI", "CL", "CM", "CN",
                       "CO", "CR", "CV", "CY", "CZ", "DE", "DK", "DM", "DO", "DZ", "EC",
                       "EE", "EG", "ER", "ES", "ET", "FI", "FJ", "FR", "GA", "GB", "GE",
                       "GH", "GM", "GN", "GQ", "GR", "GT", "GW", "GY", "HK", "HN", "HR",
                       "HT", "HU", "ID", "IE", "IL", "IN", "IR", "IS", "IT", "JM", "JO",
                       "JP", "KE", "KG", "KH", "KM", "KR", "KW", "KZ", "LA", "LB", "LC",
                       "LK", "LS", "LT", "LU", "LV", "MA", "MD", "MG", "MK", "ML", "MN",
                       "MR", "MU", "MW", "MX", "MY", "MZ", "NA", "NE", "NG", "NI", "NL",
                       "NO", "NP", "NZ", "OM", "PA", "PE", "PG", "PH", "PK", "PL", "PT",
                       "PY", "RO", "RU", "RW", "SA", "SB", "SC", "SD", "SE", "SG", "SI",
                       "SK", "SL", "SN", "SR", "SV", "SY", "SZ", "TD", "TG", "TH", "TN",
                       "TR", "TT", "TW", "TZ", "UA", "UG", "US", "UY", "UZ", "VC", "VE",
                       "VN", "VU", "YE", "ZA", "ZM", "ZW", "all"]
    if type(country) == str:
        country = [country]
    bad_countries = np.setdiff1d(country, valid_countries)
    country = np.intersect1d(country, valid_countries)
    country = ';'.join(country)
    # Work with a list of indicators
    if type(indicator) == str:
        indicator = [indicator]
    # Download
    data = []
    bad_indicators = []
    for ind in indicator:
        try:
            tmp = _get_data(ind, country, start, end)
            tmp.columns = ['country', 'iso2c', 'year', ind]
            data.append(tmp)
        except Exception:
            bad_indicators.append(ind)
    # Warn
    if len(bad_indicators) > 0:
        print('Failed to obtain indicator(s): %s' % '; '.join(bad_indicators))
        print('The data may still be available for download at http://data.worldbank.org')
    if len(bad_countries) > 0:
        print('Invalid ISO-2 codes: %s' % ' '.join(bad_countries))
    # Merge WDI series
    if len(data) > 0:
        out = reduce(lambda x, y: x.merge(y, how='outer'), data)
        # Clean
        out = out.drop('iso2c', axis=1)
        out = out.set_index(['country', 'year'])
        out = out.convert_objects(convert_numeric=True)
        return out
Example #26
def download(country=['MX', 'CA', 'US'],
             indicator=['NY.GDP.MKTP.CD', 'NY.GNS.ICTR.ZS'],
             start=2003,
             end=2005,
             errors='warn'):
    """
    Download data series from the World Bank's World Development Indicators

    Parameters
    ----------

    indicator: string or list of strings
        taken from the ``id`` field in ``WDIsearch()``

    country: string or list of strings.
        ``all`` downloads data for all countries
        2 or 3 character ISO country codes select individual
        countries (e.g.``US``,``CA``) or (e.g.``USA``,``CAN``).  The codes
        can be mixed.

        The two ISO lists of countries, provided by Wikipedia, are hardcoded
        into pandas as of 11/10/2014.

    start: int
        First year of the data series

    end: int
        Last year of the data series (inclusive)

    errors: str {'ignore', 'warn', 'raise'}, default 'warn'
        Country codes are validated against a hardcoded list.  This controls
        the outcome of that validation, and also applies, where possible, to
        the results returned by the World Bank.

        errors='raise' will raise a ValueError on a bad country code.

    Returns
    -------

    ``pandas`` DataFrame with columns: country, iso_code, year,
    indicator value.

    """

    if type(country) == str:
        country = [country]

    bad_countries = np.setdiff1d(country, country_codes)

    # Validate the input
    if len(bad_countries) > 0:
        tmp = ", ".join(bad_countries)
        if errors == 'raise':
            raise ValueError("Invalid Country Code(s): %s" % tmp)
        if errors == 'warn':
            warnings.warn('Non-standard ISO country codes: %s' % tmp)

    # Work with a list of indicators
    if type(indicator) == str:
        indicator = [indicator]

    # Download
    data = []
    bad_indicators = {}
    for ind in indicator:
        one_indicator_data, msg = _get_data(ind, country, start, end)
        if msg == "Success":
            data.append(one_indicator_data)
        else:
            bad_indicators[ind] = msg

    if len(bad_indicators.keys()) > 0:
        bad_ind_msgs = [i + " : " + m for i, m in bad_indicators.items()]
        bad_ind_msgs = "\n\n".join(bad_ind_msgs)
        bad_ind_msgs = "\n\nInvalid Indicators:\n\n%s" % bad_ind_msgs
        if errors == 'raise':
            raise ValueError(bad_ind_msgs)
        if errors == 'warn':
            warnings.warn(bad_ind_msgs)

    # Confirm we actually got some data, and build Dataframe
    if len(data) > 0:
        out = reduce(lambda x, y: x.merge(y, how='outer'), data)
        out = out.drop('iso_code', axis=1)
        out = out.set_index(['country', 'year'])
        out = out._convert(datetime=True, numeric=True)
        return out
    else:
        msg = "No indicators returned data."
        if errors == 'ignore':
            msg += "  Set errors='warn' for more information."
        raise ValueError(msg)
Example #27
def _compose(*funcs):
    """Compose 2 or more callables"""
    assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
    return reduce(_compose2, funcs)