Example #1
def main():
    import argparse
    ver = re.search(r'CoffeeScript Compiler v(.+)',
                    open(P('coffee-script.js'), 'rb').read(500)).group(1)
    epilog = 'Copyright Kovid Goyal 2012'
    parser = argparse.ArgumentParser(description='''
            Serve up files under the current directory via HTTP, automatically
            compiling .coffee files to javascript. Can also be used as a
            simple coffeescript compiler.
            ''',
                                     epilog=epilog)
    parser.add_argument('--version',
                        action='version',
                        version='Using coffeescript compiler version: ' + ver)
    subparsers = parser.add_subparsers(help='Compile or serve',
                                       dest='which',
                                       title='Compile or Serve',
                                       description='Compile or serve')
    cc = subparsers.add_parser('compile',
                               help='Compile coffeescript',
                               epilog=epilog)
    cs = subparsers.add_parser(
        'serve',
        help='Serve files under the current '
        'directory, automatically compiling .coffee files to javascript',
        epilog=epilog)

    cc.add_argument('src',
                    type=argparse.FileType('rb'),
                    metavar='path/to/script.coffee',
                    help='The coffee script to compile. Use '
                    '- for stdin')
    cc.add_argument('--highlight',
                    default=False,
                    action='store_true',
                    help='Syntax highlight the output (requires Pygments)')

    cs.add_argument('--port',
                    type=int,
                    default=8000,
                    help='The port on which to serve. Default: %(default)s')
    cs.add_argument(
        '--host',
        default='0.0.0.0',
        help='The IP address on which to listen. Default is to listen on all'
        ' IPv4 addresses (0.0.0.0)')
    args = parser.parse_args()
    if args.which == 'compile':
        ans, errors = compile_coffeescript(args.src.read(),
                                           filename=args.src.name)
        for line in errors:
            print(line, file=sys.stderr)
        if ans:
            if args.highlight:
                from pygments.lexers import JavascriptLexer
                from pygments.formatters import TerminalFormatter
                from pygments import highlight
                print(highlight(ans, JavascriptLexer(), TerminalFormatter()))
            else:
                print(ans.encode(sys.stdout.encoding or 'utf-8'))
    else:
        serve(port=args.port, host=args.host)
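
This snippet is an excerpt: re, sys, P(), compile_coffeescript() and serve() come from the surrounding module. One detail worth noting is that argparse interpolates option defaults into help text with %(default)s; the bare %default form belongs to the older optparse module. A minimal standalone sketch of that behaviour, using only the standard library:

import argparse

parser = argparse.ArgumentParser(description='Serve files over HTTP')
parser.add_argument('--port', type=int, default=8000,
                    help='The port on which to serve. Default: %(default)s')
parser.add_argument('--host', default='0.0.0.0',
                    help='The IP address on which to listen. Default: %(default)s')

# argparse fills the defaults in when rendering --help, e.g.:
#   --port PORT  The port on which to serve. Default: 8000
args = parser.parse_args([])
print(args.host, args.port)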
Example #2
def __highlight(self, data):
    formatted = json.dumps(data, indent=4)
    return highlight(formatted,
                     formatter=TerminalFormatter(),
                     lexer=JavascriptLexer()).rstrip()
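
The method above assumes json, highlight, TerminalFormatter and JavascriptLexer are imported by the enclosing module. A self-contained sketch of the same idea; it uses Pygments' JsonLexer instead, which matches the JSON payload directly:

import json

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import JsonLexer


def highlight_json(data):
    # Pretty-print the object as JSON, then colorize it for the terminal.
    formatted = json.dumps(data, indent=4)
    return highlight(formatted, JsonLexer(), TerminalFormatter()).rstrip()


print(highlight_json({'name': 'example', 'values': [1, 2, 3]}))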
Example #3
def _run(handle_data, initialize, before_trading_start, analyze, algofile,
         algotext, defines, data_frequency, capital_base, data, bundle,
         bundle_timestamp, start, end, output, print_algo, local_namespace,
         environ):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.
    """
    if algotext is not None:
        if local_namespace:
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign, )
            try:
                # evaluate in the same namespace so names may refer to
                # each other
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' %
                    (name, e), )
    elif defines:
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    if bundle is not None:
        bundle_data = load(
            bundle,
            environ,
            bundle_timestamp,
        )

        prefix, connstr = re.split(
            r'sqlite:///',
            str(bundle_data.asset_finder.engine.url),
            maxsplit=1,
        )
        if prefix:
            raise ValueError(
                "invalid url %r, must begin with 'sqlite:///'" %
                str(bundle_data.asset_finder.engine.url), )
        env = TradingEnvironment(asset_db_path=connstr)
        data = DataPortal(
            env,
            equity_minute_reader=bundle_data.minute_bar_reader,
            equity_daily_reader=bundle_data.daily_bar_reader,
            adjustment_reader=bundle_data.adjustment_reader,
        )

    perf = TradingAlgorithm(namespace=namespace,
                            capital_base=capital_base,
                            start=start,
                            end=end,
                            env=env,
                            **{
                                'initialize': initialize,
                                'handle_data': handle_data,
                                'before_trading_start': before_trading_start,
                                'analyze': analyze,
                            } if algotext is None else {
                                'algo_filename': algofile.name,
                                'script': algotext,
                            }).run(
                                data,
                                overwrite_sim_params=False,
                            )

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
Example #4
def delete_ad(uuid, url, head):
    url = url + '/' + uuid
    req = requests.delete(url, headers=head)
    details = json.dumps(req.json(), indent=4)
    print('Unjoining Active Directory')
    print(highlight(details, JsonLexer(), TerminalFormatter()))
Example #5
    def do_api(self, args):
        """
        List and select methods from a given loaded API module

        := api list
        := api select
        := api analyzed list
        := api analyzed select
        """

        # Locals
        class_selection = None
        method_selection = None
        surgical_lib = None

        try:
            # List the available API methods from the target module
            if args.split()[0] == "list":
                if self.target_module:
                    print("\n")
                    for k, v in self.target_module.model.values.items():
                        print("\n")
                        for m in v:
                            print(
                                self.t.cyan("\t--> {} : {} : {}".format(
                                    self.target_module.name,
                                    k.split(".")[-1], m)))
                    print("\n")
                else:
                    self.logger.surgical_log(
                        "info", "Target module has not been loaded (!)")
            # Select an API method from the target module
            elif args.split()[0] == "select":
                if self.target_module:
                    # TODO Consider building a wrapper around raw_input()
                    class_selection = raw_input(
                        self.t.yellow("[{}] ".format(datetime.now())) +
                        "Select class : ")
                    method_selection = raw_input(
                        self.t.yellow("[{}] ".format(datetime.now())) +
                        "Select method : ")
                    for k, v in self.target_module.model.values.items():
                        # This is so we can support classes with identical
                        # method names --> Ex: java.util.zip.ZipFile
                        if class_selection == k.split(".")[-1]:
                            for m in v:
                                if m == method_selection:
                                    self.logger.surgical_log(
                                        "info", "Analyzing ...")
                                    from core.brains.surgical.lib.libsurgical import SurgicalLib
                                    # Begin processing and return the results
                                    # from the selected api
                                    surgical_lib = SurgicalLib(
                                        self.target_module, self.vmx, self.vm,
                                        k, method_selection, self.methods)
                                    # methods_api_usage will contain a list of
                                    # tuples
                                    self.methods_api_usage = surgical_lib.search()
                                else:
                                    self.logger.surgical_log(
                                        "warn", "Method not found (!)")
            # Analyze the processed method list
            elif args.split()[0] == "analyzed":
                # List the methods that have been processed
                if args.split()[1] == "list":
                    if self.methods_api_usage:
                        print("\n")
                        for m in self.methods_api_usage:
                            print(
                                self.t.cyan("\t--> {} -> {} ".format(
                                    m[0].class_name, m[0].name)))
                        print("\n")
                    else:
                        SurgicalError("API usage not found (!)")
                # Select from the processed method list
                elif args.split()[1] == "select":
                    if self.methods_api_usage:
                        selection = raw_input(
                            self.t.yellow("[{}] ".format(datetime.now())) +
                            "Select method : ")
                        for m in self.methods_api_usage:
                            if selection == m[0].name:
                                print("\n")
                                print(
                                    self.t.cyan("\t--> Class : {}".format(
                                        m[0].class_name)))
                                print(
                                    self.t.cyan("\t\t--> Method : {}".format(
                                        m[0].name)))
                                print(
                                    self.t.cyan(
                                        "\t\t\t --> XREFS ###########"))
                                self.u.print_xref("T",
                                                  m[1].method.XREFto.items)
                                self.u.print_xref("F",
                                                  m[1].method.XREFfrom.items)
                                print("\n")
                                print(
                                    highlight(m[2], JavaLexer(),
                                              TerminalFormatter()))
                    else:
                        SurgicalError("API usage not found (!)")
        except Exception as e:
            SurgicalError(e.message)
Example #6
def display_status(status):
    return highlight(yaml.dump(status, sort_keys=True, indent=2), YamlLexer(), TerminalFormatter())
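
display_status() relies on yaml (PyYAML), highlight, YamlLexer and TerminalFormatter being available in its module. A runnable sketch with the imports spelled out:

import yaml  # PyYAML

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import YamlLexer


def display_status(status):
    # Dump the status object to YAML, then colorize it for the terminal.
    return highlight(yaml.dump(status, sort_keys=True, indent=2),
                     YamlLexer(), TerminalFormatter())


print(display_status({'phase': 'Running', 'ready': True, 'restarts': 0}))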
Example #7
def _run(handle_data, initialize, before_trading_start, analyze, algofile,
         algotext, defines, data_frequency, capital_base, data, bundle,
         bundle_timestamp, start, end, output, print_algo, local_namespace,
         environ):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`catalyst.run_algo`.
    """
    if algotext is not None:
        if local_namespace:
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign, )
            try:
                # evaluate in the same namespace so names may refer to
                # each other
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' %
                    (name, e), )
    elif defines:
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    if bundle is not None:
        bundles = bundle.split(',')

        def get_trading_env_and_data(bundles):
            env = data = None

            b = 'poloniex'
            if len(bundles) == 0:
                return env, data
            elif len(bundles) == 1:
                b = bundles[0]

            bundle_data = load(
                b,
                environ,
                bundle_timestamp,
            )

            prefix, connstr = re.split(
                r'sqlite:///',
                str(bundle_data.asset_finder.engine.url),
                maxsplit=1,
            )
            if prefix:
                raise ValueError(
                    "invalid url %r, must begin with 'sqlite:///'" %
                    str(bundle_data.asset_finder.engine.url), )

            open_calendar = get_calendar('OPEN')

            env = TradingEnvironment(
                load=partial(load_crypto_market_data, environ=environ),
                bm_symbol='USDT_BTC',
                trading_calendar=open_calendar,
                asset_db_path=connstr,
                environ=environ,
            )

            first_trading_day = bundle_data.minute_bar_reader.first_trading_day

            data = DataPortal(
                env.asset_finder,
                open_calendar,
                first_trading_day=first_trading_day,
                minute_reader=bundle_data.minute_bar_reader,
                five_minute_reader=bundle_data.five_minute_bar_reader,
                daily_reader=bundle_data.daily_bar_reader,
                adjustment_reader=bundle_data.adjustment_reader,
            )

            return env, data

        def get_loader_for_bundle(b):
            bundle_data = load(
                b,
                environ,
                bundle_timestamp,
            )

            if b == 'poloniex':
                return CryptoPricingLoader(
                    bundle_data,
                    data_frequency,
                    CryptoPricing,
                )
            elif b == 'quandl':
                return USEquityPricingLoader(
                    bundle_data,
                    data_frequency,
                    USEquityPricing,
                )
            raise ValueError("No PipelineLoader registered for bundle %s." % b)

        loaders = [get_loader_for_bundle(b) for b in bundles]
        env, data = get_trading_env_and_data(bundles)

        def choose_loader(column):
            for loader in loaders:
                if column in loader.columns:
                    return loader
            raise ValueError("No PipelineLoader registered for column %s." %
                             column)

    else:
        env = TradingEnvironment(environ=environ)
        choose_loader = None

    perf = TradingAlgorithm(
        namespace=namespace,
        env=env,
        get_pipeline_loader=choose_loader,
        sim_params=create_simulation_parameters(
            start=start,
            end=end,
            capital_base=capital_base,
            data_frequency=data_frequency,
            emission_rate=data_frequency,
        ),
        **{
            'initialize': initialize,
            'handle_data': handle_data,
            'before_trading_start': before_trading_start,
            'analyze': analyze,
        } if algotext is None else {
            'algo_filename': getattr(algofile, 'name', '<algorithm>'),
            'script': algotext,
        }).run(
            data,
            overwrite_sim_params=False,
        )

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the catalyst magic not write any data
        perf.to_pickle(output)

    return perf
Example #8
def genShellcode(args):
    fms = []
    for i in range(0, len(args)):
        fms.append("'%s'" % (args[i]))

    scode = fms[0].replace("'", "") + "(" + ','.join(fms[1:]) + ")"
    scode_tc = fms[0].replace("'", "") + "_tc(" + ','.join(fms[1:]) + ")"
    show = ''

    try:
        if g_arch == 'arm':
            show = eval("armgen.%s" % (scode))
        elif g_arch == 'arm64':
            show = eval("arm64gen.%s" % (scode))
        elif g_arch == 'thumb':
            show = eval("thgen.%s" % (scode))
    except:
        showShellcode(args)

    if g_format == 'asm':
        try:
            from pygments import highlight
            from pygments.lexers import get_lexer_by_name
            from pygments.formatters import TerminalFormatter
            print highlight(show, get_lexer_by_name('asm'),
                            TerminalFormatter())
        except ImportError:
            print show
            return

    scode, count = ks_asm(g_arch, show)

    if g_xorkey == True:
        if g_arch == 'thumb':
            scode = MakeXorShellcode(scode, g_arch)

    if g_format == 'asm':
        return

    if g_format == 'c':
        print _carray(scode)
    elif g_format == 'string':
        print _string(scode)
    elif g_format == 'raw':
        sys.stdout.write(scode)
    elif g_format == 'hex':
        print enhex(scode)
    elif g_format == 'python':
        _xscode = []
        _xscode.append('shellcode = ""\n')
        _xdiv = len(scode) / (16)
        _xmod = len(scode) % (16)
        _x = 0
        for _i in range(0, _xdiv):
            _xscode.append('shellcode += %s' %
                           _string(scode[_i * 16:(_i + 1) * 16]))
            _x = _x + 1
        if _xmod:
            _xscode.append('shellcode += %s' % _string(scode[(_i + 1) * 16:]))
        print "# shellcode's length is : %s" % (len(scode))
        print ''.join(_xscode)

    else:
        print _string(scode)
Example #9
def hint_to_text(text, width=0):
    if text is None:
        return ''

    hint = type(text)

    if issubclass(hint, Hint) and not issubclass(hint, Text):
        raise ValueError('hint_to_text() support only Text messages')
    elif issubclass(hint, Text):
        pass
    elif hint == str:
        try:
            return text.decode('utf-8')
        except UnicodeDecodeError:
            return text.decode('latin1')
    elif hint == unicode:
        return text.encode('utf-8')
    else:
        return obj2utf8(text)

    if hint == NewLine:
        return '\n' * int(text.data)
    elif hint == Title:
        if width <= 0:
            real_width, _ = terminal_size()
            width = real_width + width

        title = hint_to_text(text.data)
        tlen = elen(title)
        ajust = width - tlen - 4
        ljust = 0
        rjust = 0
        if ajust > 0:
            ljust = ajust / 2
            rjust = ajust - ljust

        title = '>>' + (' ' * ljust) + title + (' ' * rjust) + '<<'
        title = ('-' * width) + '\n' + title + '\n' + ('-' * width)

        return colorize(title, 'lightyellow')
    elif hint == MultiPart:
        return '\n\n'.join(hint_to_text(x, width) for x in text.data)
    elif hint == Indent:
        return '\n'.join((' ' * text.indent) + x
                         for x in hint_to_text(text.data, width).split('\n'))

    elif hint == Color:
        return colorize(hint_to_text(text.data, width), text.color)
    elif hint == TruncateToTerm:
        if width <= 0:
            real_width, _ = terminal_size()
            width = real_width + width

        text = hint_to_text(text.data, width)
        if text == str:
            text = text.decode('utf-8', errors='replace')

        return '\n'.join(ejust(x, width) for x in text.split('\n'))
    elif hint == Error:
        header = text.header
        text = text.data
        etype = type(text)
        if issubclass(etype, Exception) and etype.__class__.__name__ != 'type':
            text = '({}) {}'.format(type(text).__class__.__name__, text)
        else:
            text = hint_to_text(text, width).rstrip()

        if header:
            text = '{}: {}'.format(colorize(header, 'yellow'), text)

        return colorize('[-] ', 'red') + text
    elif hint == Log:
        return hint_to_text(text.data, width).rstrip()
    elif hint == Warn:
        return colorize('[!] ', 'yellow') + hint_to_text(text.data,
                                                         width).rstrip()
    elif hint == Success:
        return colorize('[+] ', 'green') + hint_to_text(text.data,
                                                        width).rstrip()
    elif hint == Info:
        return colorize('[%] ', 'grey') + hint_to_text(text.data,
                                                       width).rstrip()
    elif hint == ServiceInfo:
        return ''.join([
            colorize('[*] ', 'blue'),
            hint_to_text(text.data, width).rstrip()
        ])
    elif hint == Section:
        return '\n'.join([
            colorize('#>#>  ', 'green') + hint_to_text(text.header, width) +
            colorize('  <#<#', 'green'),
            hint_to_text(text.data, width)
        ])
    elif hint == Line:
        return text.dm.join(hint_to_text(v, width) for v in text.data)
    elif hint == List:
        return (hint_to_text(text.caption, width) + '\n'
                if text.caption else '') + ('\n'.join([((' ' * text.indent) + (
                    (hint_to_text(text.bullet, width) +
                     ' ') if text.bullet else '') + hint_to_text(x, width))
                                                       for x in text.data]))
    elif hint == Table:
        table_data = [{
            k: hint_to_text(v, width)
            for k, v in record.iteritems()
        } for record in text.data]

        return ('\n' * text.vspace + '{ ' + hint_to_text(text.caption, width) +
                ' }\n' if text.caption else '') + table_format(
                    table_data, wl=text.headers,
                    legend=text.legend) + '\n' * text.vspace

    elif hint == Pygment:
        lexer = text.lexer
        text = hint_to_text(text.data, width)
        return highlight(text, lexer, TerminalFormatter(style=PYGMENTS_STYLE))

    else:
        raise NotImplementedError('hint_to_text not implemented for {}'.format(
            hint.__class__.__name__))
Example #10
def colorin(text, color="red", style="normal"):
    """
    Return the given text, surrounded by the given color ASCII markers.

    If the given color is a name that exists in the available colors,
    an 8-color mode is assumed; otherwise, a 256-color mode is used.

    The given style must exist in the available styles.

    >>> colorin("Fetchez la vache", "red", "bold")
    '\x1b[1;31mFetchez la vache\x1b[0m'
    >>> colout.colorin("Faites chier la vache", 41, "normal")
    '\x1b[0;38;5;41mFaites chier la vache\x1b[0m'
    """

    assert (type(color) is str)

    global colormap_idx
    global debug

    # Special characters.
    start = "\033["
    stop = "\033[0m"

    color_code = ""
    style_code = ""

    # Convert the style code
    if style == "random" or style == "Random":
        style = random.choice(list(styles.keys()))
    else:
        if style in styles:
            style_code = str(styles[style])

    color = color.strip()
    if color == "none":
        # if no color, style cannot be applied
        if not debug:
            return text
        else:
            return "<none>" + text + "</none>"

    elif color == "random":
        mode = 8
        color_code = random.choice(list(colors.values()))
        color_code = str(30 + color_code)

    elif color == "Random":
        mode = 256
        color_nb = random.randint(0, 255)
        color_code = str(color_nb)

    elif color in colormaps.keys():
        if color[0].islower():  # lower case first letter
            mode = 8
            c = colormaps[color][colormap_idx]
            if c.isdigit():
                color_code = str(30 + c)
            else:
                color_code = str(30 + colors[c])

        else:  # upper case
            mode = 256
            color_nb = colormaps[color][colormap_idx]
            color_code = str(color_nb)

        if colormap_idx < len(colormaps[color]) - 1:
            colormap_idx += 1
        else:
            colormap_idx = 0

    elif color.lower() == "scale":  # "scale" or "Scale"

        # filter out everything that does not seem necessary to interpret the string as a number
        # this permits transforming "[ 95%]" to "95" before number conversion,
        # and thus allows coloring a group larger than the matched number
        chars_in_numbers = "-+.,e/*"
        allowed = string.digits + chars_in_numbers
        nb = "".join([i for i in filter(allowed.__contains__, text)])

        # interpret as decimal
        # First, try with the babel module, if available
        # if not, use python itself,
        # if those fail, try to `eval` the string
        # (this allows strings like "1/2+0.9*2")
        try:
            # babel is a specialized module
            import babel.numbers as bn
            try:
                f = float(bn.parse_decimal(nb))
            except NumberFormatError:
                f = eval(
                    nb
                )  # Note: in python2, `eval(2/3)` would produce `0`, in python3 `0.666`
        except ImportError:
            try:
                f = float(nb)
            except ValueError:
                f = eval(nb)

        # if out of scale, do not color
        if f < scale[0] or f > scale[1]:
            return text

        if color[0].islower():
            mode = 8
            # Use the default colormap in lower case = 8-colors mode
            cmap = colormap

            # normalize and scale over the nb of colors in cmap
            i = int(
                math.ceil(
                    (f - scale[0]) / (scale[1] - scale[0]) * (len(cmap) - 1)))

            color = cmap[i]
            color_code = str(30 + colors[color])

        else:
            mode = 256
            cmap = colormap
            i = int(
                math.ceil(
                    (f - scale[0]) / (scale[1] - scale[0]) * (len(cmap) - 1)))
            color = cmap[i]
            color_code = str(color)

    # "hash" or "Hash"; useful to randomly but consistently color strings
    elif color.lower() == "hash":
        hasher = hashlib.md5()
        hasher.update(text.encode('utf-8'))
        hash = hasher.hexdigest()

        f = float(functools.reduce(lambda x, y: x + ord(y), hash, 0) % 101)

        if color[0].islower():
            mode = 8
            cmap = colormap

            # normalize and scale over the nb of colors in cmap
            i = int(
                math.ceil(
                    (f - scale[0]) / (scale[1] - scale[0]) * (len(cmap) - 1)))

            color = cmap[i]
            color_code = str(30 + colors[color])

        else:
            mode = 256
            cmap = colormap
            i = int(
                math.ceil(
                    (f - scale[0]) / (scale[1] - scale[0]) * (len(cmap) - 1)))
            color = cmap[i]
            color_code = str(color)

    # Really useful only when using colout as a library
    # thus you can change the "colormap" variable to your favorite one before calling colorin
    elif color == "colormap":
        color = colormap[colormap_idx]
        if color in colors:
            mode = 8
            color_code = str(30 + colors[color])
        else:
            mode = 256
            color_nb = int(color)
            assert (0 <= color_nb <= 255)
            color_code = str(color_nb)

        if colormap_idx < len(colormap) - 1:
            colormap_idx += 1
        else:
            colormap_idx = 0

    # 8 colors modes
    elif color in colors:
        mode = 8
        color_code = str(30 + colors[color])

    # hexadecimal color
    elif color[0] == "#":
        mode = 256
        color_nb = rgb_to_ansi(*hex_to_rgb(color))
        assert (0 <= color_nb <= 255)
        color_code = str(color_nb)

    # 256 colors mode
    elif color.isdigit():
        mode = 256
        color_nb = int(color)
        assert (0 <= color_nb <= 255)
        color_code = str(color_nb)

    # programming language
    elif color.lower() in lexers:
        lexer = get_lexer_by_name(color.lower())
        # Python => 256 colors, python => 8 colors
        ask_256 = color[0].isupper()
        if ask_256:
            try:
                formatter = Terminal256Formatter(style=style)
            except:  # style not found
                formatter = Terminal256Formatter()
        else:
            if style not in ("light", "dark"):
                style = "dark"  # dark color scheme by default
            formatter = TerminalFormatter(bg=style)
            # We should return all but the last character,
            # because Pygments adds a newline char.
        if not debug:
            return highlight(text, lexer, formatter)[:-1]
        else:
            return "<" + color + ">" + highlight(
                text, lexer, formatter)[:-1] + "</" + color + ">"

    # unrecognized
    else:
        raise UnknownColor(color)

    if not debug:
        return start + style_code + endmarks[
            mode] + color_code + "m" + text + stop
    else:
        return start + style_code + endmarks[mode] + color_code + "m<" + str(
            color) + ">" + text + "</" + str(color) + ">" + stop
Example #11
>>> def foo(a, b):
...     pass
... 
>>> class Foo(object):
...     BAR = 12
...
...     def bar(self):
...             pass
>>> @decorated
... def bar(a, b, c):
...     numbers = [1, 2, 3, 0.2, 0x12]
...     x = sorted(numbers)
... 
...     raise AttributeError('string' + "string")
... 
...     for x in range(1, 10):
...         while(True):
...             break
... 
"""

print()
print(highlight(TEST, PythonConsoleLexer(), TerminalFormatter()))
print()
print(40*'-')
print()
print(highlight(TEST, PythonConsoleLexer(), Terminal256Formatter()))
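
The excerpt above starts partway through a module-level TEST string (its opening assignment and the imports are cut off). A small self-contained version of the same comparison between the 8/16-color and 256-color terminal formatters:

from pygments import highlight
from pygments.formatters import Terminal256Formatter, TerminalFormatter
from pygments.lexers import PythonConsoleLexer

TEST = '''>>> def foo(a, b):
...     pass
...
>>> foo(1, 2)
'''

# The same console session rendered by both formatters.
print(highlight(TEST, PythonConsoleLexer(), TerminalFormatter()))
print(40 * '-')
print(highlight(TEST, PythonConsoleLexer(), Terminal256Formatter()))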
Example #12
        pass
    return lines[fmt]


def highlight_array(array, trailing_nl=True, bg='light', **options):
    fmt_array = highlight_string(''.join(array), bg, **options).split('\n')
    lines = [line + "\n" for line in fmt_array]
    if not trailing_nl: lines[-1] = lines[-1].rstrip('\n')
    return lines


python_lexer = PythonLexer()

# TerminalFormatter uses a colorTHEME with light and dark pairs.
# But Terminal256Formatter uses a colorSTYLE.  Ugh
dark_terminal_formatter = TerminalFormatter(bg='dark')
light_terminal_formatter = TerminalFormatter(bg='light')
terminal_256_formatter = Terminal256Formatter()


def highlight_string(string, bg='light', **options):
    global terminal_256_formatter
    if options.get('style'):
        if terminal_256_formatter.style != options['style']:
            terminal_256_formatter = \
              Terminal256Formatter(style=options['style'])
            del options['style']
        return highlight(string, python_lexer, terminal_256_formatter,
                         **options)
    elif 'light' == bg:
        return highlight(string, python_lexer, light_terminal_formatter,
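
This excerpt is truncated at both ends: it opens inside one function and breaks off inside the final highlight() call. A self-contained sketch of the idea it illustrates, reusing formatter instances and choosing between a light/dark TerminalFormatter and a style-aware Terminal256Formatter; the function signature below is an assumption, not the original module's API:

from pygments import highlight
from pygments.formatters import Terminal256Formatter, TerminalFormatter
from pygments.lexers import PythonLexer

python_lexer = PythonLexer()

# TerminalFormatter works with a light/dark color theme,
# while Terminal256Formatter takes a Pygments style.
dark_terminal_formatter = TerminalFormatter(bg='dark')
light_terminal_formatter = TerminalFormatter(bg='light')


def highlight_string(string, bg='light', style=None):
    # An explicit style routes to the 256-color formatter; otherwise pick
    # the light or dark 8/16-color formatter.
    if style:
        return highlight(string, python_lexer, Terminal256Formatter(style=style))
    formatter = light_terminal_formatter if bg == 'light' else dark_terminal_formatter
    return highlight(string, python_lexer, formatter)


print(highlight_string('x = [i ** 2 for i in range(5)]', bg='dark'))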
Example #13
def show_js(self, raw):
    from pygments.lexers import JavascriptLexer
    from pygments.formatters import TerminalFormatter
    from pygments import highlight
    print(highlight(raw, JavascriptLexer(), TerminalFormatter()))
Example #14
import sys
from pygments import highlight
from pygments.lexers import get_lexer_for_filename
from pygments.formatters import TerminalFormatter
file_names = sys.argv[1:]
for file_name in file_names:
    f = open(file_name, "r+")
    for row in f:
        if len(row.strip()) > 0:
            lexer = get_lexer_for_filename(file_name)
            print(highlight(row, lexer, TerminalFormatter()))
    f.close()
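
The loop above lexes every non-blank line separately, so constructs that span lines (triple-quoted strings, multi-line expressions) lose their context. A variant that reads each file once and highlights it in a single pass, using the same Pygments calls:

import sys

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import get_lexer_for_filename

for file_name in sys.argv[1:]:
    with open(file_name) as f:
        source = f.read()
    # Pick a lexer from the file name and highlight the whole file at once.
    lexer = get_lexer_for_filename(file_name)
    print(highlight(source, lexer, TerminalFormatter()))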

Example #15
def _run(handle_data, initialize, before_trading_start, analyze, algofile,
         algotext, defines, data_frequency, capital_base, data, bundle,
         bundle_timestamp, start, end, output, trading_calendar, print_algo,
         metrics_set, local_namespace, environ, blotter):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.
    """
    if algotext is not None:
        if local_namespace:
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign, )
            try:
                # evaluate in the same namespace so names may refer to
                # each other
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' %
                    (name, e), )
    elif defines:
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    if trading_calendar is None:
        trading_calendar = get_calendar('NYSE')

    # date parameter validation
    if trading_calendar.session_distance(start, end) < 1:
        raise _RunAlgoError(
            'There are no trading days between %s and %s' % (
                start.date(),
                end.date(),
            ), )

    if bundle is not None:
        bundle_data = bundles.load(
            bundle,
            environ,
            bundle_timestamp,
        )

        prefix, connstr = re.split(
            r'sqlite:///',
            str(bundle_data.asset_finder.engine.url),
            maxsplit=1,
        )
        if prefix:
            raise ValueError(
                "invalid url %r, must begin with 'sqlite:///'" %
                str(bundle_data.asset_finder.engine.url), )
        env = TradingEnvironment(
            asset_db_path=connstr,
            environ=environ,
            trading_calendar=trading_calendar,
            trading_day=trading_calendar.day,
            trading_days=trading_calendar.schedule[start:end].index,
        )
        first_trading_day =\
            bundle_data.equity_minute_bar_reader.first_trading_day
        data = DataPortal(
            env.asset_finder,
            trading_calendar=trading_calendar,
            first_trading_day=first_trading_day,
            equity_minute_reader=bundle_data.equity_minute_bar_reader,
            equity_daily_reader=bundle_data.equity_daily_bar_reader,
            adjustment_reader=bundle_data.adjustment_reader,
        )

        pipeline_loader = USEquityPricingLoader(
            bundle_data.equity_daily_bar_reader,
            bundle_data.adjustment_reader,
        )

        def choose_loader(column):
            if column in USEquityPricing.columns:
                return pipeline_loader
            raise ValueError("No PipelineLoader registered for column %s." %
                             column)
    else:
        env = TradingEnvironment(
            environ=environ,
            trading_calendar=trading_calendar,
            trading_day=trading_calendar.day,
            trading_days=trading_calendar.schedule[start:end].index,
        )
        choose_loader = None

    if isinstance(metrics_set, six.string_types):
        try:
            metrics_set = metrics.load(metrics_set)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    if isinstance(blotter, six.string_types):
        try:
            blotter = load(Blotter, blotter)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    perf = TradingAlgorithm(
        namespace=namespace,
        env=env,
        get_pipeline_loader=choose_loader,
        trading_calendar=trading_calendar,
        sim_params=create_simulation_parameters(
            start=start,
            end=end,
            capital_base=capital_base,
            data_frequency=data_frequency,
            trading_calendar=trading_calendar,
        ),
        metrics_set=metrics_set,
        blotter=blotter,
        **{
            'initialize': initialize,
            'handle_data': handle_data,
            'before_trading_start': before_trading_start,
            'analyze': analyze,
        } if algotext is None else {
            'algo_filename': getattr(algofile, 'name', '<algorithm>'),
            'script': algotext,
        }).run(
            data,
            overwrite_sim_params=False,
        )

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
Example #16
def _epd(cmd):
    shell.magic("%clear")
    print('>>>', highlight(cmd, PythonLexer(), TerminalFormatter()), '\n')
    exec(cmd)
    return cmd
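
_epd() depends on an IPython shell object for the %clear magic before it echoes and executes the command. Stripped of that dependency, the highlight-then-exec idea looks like this (the function name is an assumption):

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import PythonLexer


def echo_and_run(cmd):
    # Print the command with syntax highlighting, then execute it.
    print('>>>', highlight(cmd, PythonLexer(), TerminalFormatter()), '\n')
    exec(cmd)
    return cmd


echo_and_run('print(sum(range(10)))')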
Example #17
def color_yo_shit(code, lexer):
    """
    Calls pygments.highlight to color yo shit
    """
    return highlight(code, lexer, TerminalFormatter())
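
A possible call site for the helper above; the sample snippet and lexer choice are illustrative, and the helper is repeated so the sketch runs on its own:

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import PythonLexer


def color_yo_shit(code, lexer):
    """Calls pygments.highlight to color yo shit"""
    return highlight(code, lexer, TerminalFormatter())


print(color_yo_shit('def add(a, b):\n    return a + b\n', PythonLexer()))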
Example #18
def pretty_print_json(raw_json):
    print(
        highlight(json.dumps(raw_json, indent=4), JsonLexer(),
                  TerminalFormatter()))
Example #19
    def do_install(self, url, name, show_install_notes=True):
        """Download and install a plugin."""
        data = self.get_json(url)
        if name in data:
            utils.makedirs(self.output_dir)
            url = data[name]
            LOGGER.info("Downloading '{0}'".format(url))
            try:
                zip_data = requests.get(url).content
            except requests.exceptions.SSLError:
                LOGGER.warning(
                    "SSL error, using http instead of https (press ^C to abort)"
                )
                time.sleep(1)
                url = url.replace('https', 'http', 1)
                zip_data = requests.get(url).content

            zip_file = io.BytesIO()
            zip_file.write(zip_data)
            LOGGER.info('Extracting: {0} into {1}/'.format(
                name, self.output_dir))
            utils.extract_all(zip_file, self.output_dir)
            dest_path = os.path.join(self.output_dir, name)
        else:
            try:
                plugin_path = utils.get_plugin_path(name)
            except:
                LOGGER.error("Can't find plugin " + name)
                return 1

            utils.makedirs(self.output_dir)
            dest_path = os.path.join(self.output_dir, name)
            if os.path.exists(dest_path):
                LOGGER.error("{0} is already installed".format(name))
                return 1

            LOGGER.info('Copying {0} into plugins'.format(plugin_path))
            shutil.copytree(plugin_path, dest_path)

        reqpath = os.path.join(dest_path, 'requirements.txt')
        if os.path.exists(reqpath):
            LOGGER.notice('This plugin has Python dependencies.')
            LOGGER.info('Installing dependencies with pip...')
            try:
                subprocess.check_call(
                    (sys.executable, '-m', 'pip', 'install', '-r', reqpath))
            except subprocess.CalledProcessError:
                LOGGER.error('Could not install the dependencies.')
                print('Contents of the requirements.txt file:\n')
                with io.open(reqpath, 'r', encoding='utf-8') as fh:
                    print(utils.indent(fh.read(), 4 * ' '))
                print('You have to install those yourself or through a '
                      'package manager.')
            else:
                LOGGER.info('Dependency installation succeeded.')

        reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')
        if os.path.exists(reqnpypath):
            LOGGER.notice('This plugin has third-party '
                          'dependencies you need to install '
                          'manually.')
            print('Contents of the requirements-nonpy.txt file:\n')
            with io.open(reqnpypath, 'r', encoding='utf-8') as fh:
                for l in fh.readlines():
                    i, j = l.split('::')
                    print(utils.indent(i.strip(), 4 * ' '))
                    print(utils.indent(j.strip(), 8 * ' '))
                    print()

            print('You have to install those yourself or through a package '
                  'manager.')

        req_plug_path = os.path.join(dest_path, 'requirements-plugins.txt')
        if os.path.exists(req_plug_path):
            LOGGER.notice('This plugin requires other Nikola plugins.')
            LOGGER.info('Installing plugins...')
            try:
                with io.open(req_plug_path, 'r', encoding='utf-8') as inf:
                    for plugname in inf.readlines():
                        self.do_install(url, plugname, show_install_notes)
            except subprocess.CalledProcessError:
                LOGGER.error('Could not install a plugin.')
                print('Contents of the requirements-plugins.txt file:\n')
                with io.open(req_plug_path, 'r', encoding='utf-8') as fh:
                    print(utils.indent(fh.read(), 4 * ' '))
                print('You have to install those yourself manually.')
            else:
                LOGGER.info('Dependency installation succeeded.')

        confpypath = os.path.join(dest_path, 'conf.py.sample')
        if os.path.exists(confpypath) and show_install_notes:
            LOGGER.notice(
                'This plugin has a sample config file.  Integrate it with yours in order to make this plugin work!'
            )
            print('Contents of the conf.py.sample file:\n')
            with io.open(confpypath, 'r', encoding='utf-8') as fh:
                if self.site.colorful:
                    print(
                        utils.indent(
                            pygments.highlight(fh.read(), PythonLexer(),
                                               TerminalFormatter()), 4 * ' '))
                else:
                    print(utils.indent(fh.read(), 4 * ' '))
        return 0
Example #20
    def query(self,
              query,
              data=None,
              fmt='PrettyCompact',
              stream=False,
              verbose=False,
              query_id=None,
              compress=False,
              **kwargs):
        if query.lstrip()[:6].upper().startswith('INSERT'):
            query_split = query.split()
        else:
            query = sqlparse.format(query, strip_comments=True).rstrip(';')

            if verbose and self.cli_settings.get('show_formatted_query'):
                # Highlight & reformat the SQL query
                formatted_query = sqlparse.format(
                    query,
                    reindent_aligned=True,
                    indent_width=2,
                    # keyword_case='upper'  # works poorly in a few cases
                )

                formatter = TerminalFormatter()

                if self.cli_settings.get(
                        'highlight') and self.cli_settings.get(
                            'highlight_truecolor'):
                    formatter = TerminalTrueColorFormatter(
                        style=CHPygmentsStyle)

                print(
                    '\n' +
                    pygments.highlight(formatted_query, CHLexer(), formatter))

            # TODO: use sqlparse's parser instead
            query_split = query.split()

            if not query_split:
                return Response(query, fmt)

            # Since sessions aren't supported over HTTP, we have to make some quirks:
            # USE database;
            if query_split[0].upper() == 'USE' and len(query_split) == 2:
                old_database = self.database
                self.database = query_split[1]
                try:
                    self.test_query()
                except DBException as e:
                    self.database = old_database
                    raise e

                return Response(
                    query,
                    fmt,
                    message='Changed the current database to {0}.'.format(
                        self.database))

            # Set response format
            if query_split[0].upper() in FORMATTABLE_QUERIES and len(query_split) >= 2:
                if query_split[-2].upper() == 'FORMAT':
                    fmt = query_split[-1]
                elif query_split[-2].upper() != 'FORMAT':
                    if query_split[0].upper() != 'INSERT' or data is not None:

                        if query[-2:] in (r'\g', r'\G'):
                            query = query[:-2] + ' FORMAT Vertical'
                        else:
                            query = query + ' FORMAT {fmt}'.format(fmt=fmt)

        params = {
            'database': self.database,
            'stacktrace': int(self.stacktrace)
        }
        if query_id:
            params['query_id'] = query_id

        has_outfile = False
        if query_split[0].upper() == 'SELECT':
            # Detect INTO OUTFILE at the end of the query
            t_query = [
                t.value.upper() if t.ttype == Keyword else t.value
                for t in sqlparse.parse(query)[0]
                if t.ttype not in (Whitespace, Newline)
            ]

            try:
                last_tokens = t_query[-5:]
                into_pos = last_tokens.index('INTO')
                has_outfile = into_pos >= 0 and last_tokens.index(
                    'OUTFILE') == into_pos + 1

                if has_outfile:
                    path = last_tokens[into_pos + 2].strip("'")
                    # Remove `INTO OUTFILE '/path/to/file.out'`
                    last_tokens.pop(into_pos)
                    last_tokens.pop(into_pos)
                    last_tokens.pop(into_pos)
                    query = ' '.join(t_query[:-5] + last_tokens)
            except ValueError:
                has_outfile = False

        method = 'POST'
        response = self._query(method,
                               query,
                               params,
                               fmt=fmt,
                               stream=stream,
                               data=data,
                               compress=compress,
                               **kwargs)

        if has_outfile:
            try:
                with open(path, 'wb') as f:
                    if not f:
                        return response

                    if stream:
                        for line in response.iter_lines():
                            f.write(line)
                    else:
                        f.write(response.data.encode())
            except Exception as e:
                echo.warning(
                    "Caught an exception when writing to file: {0}".format(e))

        return response
Example #21
#!/usr/bin/env python
# vim: set fileencoding=utf-8

from sys import argv, exit

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter

if len(argv) <= 1:
    print("Usage: pygments11_highlight_source.py FILENAME")
    exit(1)

with open(argv[1], "r") as fin:
    code = fin.read()
    print(highlight(code, PythonLexer(), TerminalFormatter()))
Example #22
from crate.client.exceptions import ConnectionError, ProgrammingError

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.data import JsonLexer
from pygments.lexers.sql import SqlLexer
from pygments.token import Token
from pygments.styles.monokai import MonokaiStyle

from .tabulate import TableFormat, Line as TabulateLine, DataRow, tabulate
from .printer import ColorPrinter, PrintWrapper

from appdirs import user_data_dir

_json_lexer = JsonLexer()
_formatter = TerminalFormatter()

NULL = u'NULL'
TRUE = u'TRUE'
FALSE = u'FALSE'

USER_DATA_DIR = user_data_dir("Crate", "Crate")
HISTORY_FILE_NAME = 'crash_history'
HISTORY_PATH = os.path.join(USER_DATA_DIR, HISTORY_FILE_NAME)
MAX_HISTORY_LENGTH = 10000

crate_fmt = TableFormat(lineabove=TabulateLine("+", "-", "+", "+"),
                        linebelowheader=TabulateLine("+", "-", "+", "+"),
                        linebetweenrows=None,
                        linebelow=TabulateLine("+", "-", "+", "+"),
                        headerrow=DataRow("|", "|", "|"),
Example #23
def diff_color(input_file, cfg):
    """
    colorizes a diff file
    """
    cov = cfg.data

    with paged_echo() as pager:
        term_width = click.get_terminal_size()[0]
        modified = []
        measured = cov.get_data().measured_files()
        diff = PatchSet(input_file)
        for thing in diff:
            if thing.is_modified_file or thing.is_added_file:
                target = thing.target_file
                if target.startswith('b/') or target.startswith('a/'):
                    target = target[2:]
                if abspath(target) in measured:
                    covdata = cov._analyze(abspath(target))
                    modified.append((thing, covdata))
#                    pager.echo(abspath(target))
                else:
                    msg = "skip: {}".format(target)
                    msg = msg + (' ' * (term_width - len(msg)))
                    pager.echo(colors.color(msg, bg='yellow', fg='black'))

        total_added_lines = 0
        total_covered_lines = 0

        for (patch, covdata) in modified:
            fname = str(patch.target_file) + (' ' * (term_width - len(patch.target_file)))
            pager.echo(colors.color(fname, bg='cyan', fg='black'))
            for hunk in patch:
                for line in hunk:
                    kw = dict()
                    if line.is_added:
                        total_added_lines += 1
                        if line.target_line_no in covdata.missing:
                            pager.echo(colors.color(u'\u258c', fg='red', bg=52), nl=False, color=True)
                            kw['bg'] = 52
                        else:
                            total_covered_lines += 1
                            pager.echo(colors.color(u'\u258f', fg='green'), nl=False, color=True)
                    else:
                        pager.echo(' ', nl=False)
                    out = u"{}".format(line).strip()
                    if line.is_added:
                        kw['fg'] = 'green'
                    elif line.is_removed:
                        kw['fg'] = 'red'
                    pager.echo(colors.color(out, **kw))

        if total_added_lines == 0:
            raise click.ClickException("No covered lines at all")

        percent_covered = (total_covered_lines / float(total_added_lines))
        msg = u"{} covered of {} added lines".format(total_covered_lines, total_added_lines)
        print_banner(msg, percent_covered, pager=pager)
        return

        target_fname = abspath(target_fname)
        for fname in cov.get_data().measured_files():
            if target_fname == abspath(fname):
                match.append(fname)

        if len(match) != 1:
            if len(match) == 0:
                # this file wasn't in the coverage data, so we just dump
                # it to stdout as-is. (FIXME: ideally, also
                # syntax-highlighted anyway)
                with open(target_fname, 'r') as f:
                    for line in f.readlines():
                        sys.stdout.write(line)
                return
            else:
                raise RuntimeError("Multiple matches: %s" % ', '.join(match))

        fname = match[0]
        covdata = cov._analyze(fname)

        percent = 1.0  # if no statements, it's all covered, right?
        if covdata.numbers.n_statements:
            percent = float(covdata.numbers.n_statements - covdata.numbers.n_missing) / covdata.numbers.n_statements
        total_statements = covdata.numbers.n_statements
        total_missing = covdata.numbers.n_missing

        fill = min(click.get_terminal_size()[0], 80)
        print_banner(fname, percent, fill)

        # it was tempting to write/override/wrap this Formatter and mess
        # with the background color based on our coverage data -- and
        # that's not a terrible idea, but the way TerminalFormatter is
        # written, it's not very nice. Basically, we'd have to wrap the
        # output stream, look for ANSI reset codes, and re-do the
        # background color after each reset (for "uncovered" lines)...  so
        # I didn't do that. Instead we just hack it by splitting on the
        # reset codes (see below)

        formatter = TerminalFormatter(style=style)
        lines = highlight(
            open(fname).read(), get_lexer_by_name('python'),
            formatter=formatter,
        )
        lines = lines.split(u'\n')

        for (i, line) in enumerate(lines):
    #        assert type(line) is unicode
            spaces = fill - len(colors.strip_color(line))
            spaces = u' ' * spaces
            if (i + 1) not in covdata.missing:
                if (i + 1) in covdata.excluded:
                    line = colors.strip_color(line)
                    pager.echo(colors.color(u'\u258f', fg=46, bg=236) + colors.color(line + spaces, bg=236, fg=242), color=True)
                elif cfg.branch and (i + 1) in covdata.branch_lines():
                    line = colors.strip_color(line)
                    pager.echo(colors.color(u'\u258f', bg=52, fg=160) + colors.color(line + spaces, bg=52), color=True)
                else:
                    pager.echo(u'{}{}{}'.format(colors.color(u'\u258f', fg=46), line, spaces), color=True)
            else:
                # HACK-O-MATIC, uhm. Yeah, so what we're doing here is
                # splitting the output from the formatter on the ANSI
                # "reset" code, and the re-assembling it with the "52"
                # (dark red) background color. I apologize in advance;
                # PRs with improvements encouraged!
                reset_code = u"\x1b[39;49;00m"
                segments = (line + spaces).split(reset_code)
                reset_plus_bg = u"\x1b[39;49;00m\x1b[39;49;48;5;52m"
                out = u"\x1b[39;49;48;5;52m" + reset_plus_bg.join(segments)
                pager.echo(colors.color(u'\u258f', bg=52, fg=160) + out, color=True)
Example #24
def print_debug(self):
    json_repr = json.dumps(self._data, indent=4)
    print(highlight(json_repr, JsonLexer(), TerminalFormatter()))
Example #25
def _run(handle_data, initialize, before_trading_start, analyze, algofile,
         algotext, defines, data_frequency, capital_base, bundle,
         bundle_timestamp, start, end, output, trading_calendar, print_algo,
         metrics_set, local_namespace, environ, blotter, benchmark_symbol,
         broker, state_filename):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.

    additions useful for live trading:
    broker - wrapper to connect to a real broker
    state_filename - saving the context of the algo to be able to restart
    """
    log.info("Using bundle '%s'." % bundle)

    if trading_calendar is None:
        trading_calendar = get_calendar('XNYS')

    bundle_data = load_sharadar_bundle(bundle)
    now = pd.Timestamp.utcnow()
    if start is None:
        start = bundle_data.equity_daily_bar_reader.first_trading_day if not broker else now

    if not trading_calendar.is_session(start.date()):
        start = trading_calendar.next_open(start)

    if end is None:
        end = bundle_data.equity_daily_bar_reader.last_available_dt if not broker else start

    # date parameter validation
    if trading_calendar.session_distance(start, end) < 0:
        raise _RunAlgoError(
            'There are no trading days between %s and %s' % (
                start.date(),
                end.date(),
            ), )

    if broker:
        log.info("Live Trading on %s." % start.date())
    else:
        log.info("Backtest from %s to %s." % (start.date(), end.date()))

    if benchmark_symbol:
        benchmark = symbol(benchmark_symbol)
        benchmark_sid = benchmark.sid
        benchmark_returns = load_benchmark_data_bundle(
            bundle_data.equity_daily_bar_reader, benchmark)
    else:
        benchmark_sid = None
        benchmark_returns = pd.Series(index=pd.date_range(start, end,
                                                          tz='utc'),
                                      data=0.0)

    # emission_rate is a string representing the smallest frequency at which metrics should be reported.
    # emission_rate will be either minute or daily. When emission_rate is daily, end_of_bar will not be called at all.
    emission_rate = 'daily'

    if algotext is not None:
        if local_namespace:
            # noinspection PyUnresolvedReferences
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign, )
            try:
                # evaluate in the same namespace so names may refer to
                # each other
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' %
                    (name, e), )
    elif defines:
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    first_trading_day = \
        bundle_data.equity_daily_bar_reader.first_trading_day

    if isinstance(metrics_set, six.string_types):
        try:
            metrics_set = metrics.load(metrics_set)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    if isinstance(blotter, six.string_types):
        try:
            blotter = load(Blotter, blotter)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    # Special defaults for live trading
    if broker:
        data_frequency = 'minute'

        # No benchmark
        benchmark_sid = None
        benchmark_returns = pd.Series(index=pd.date_range(start, end,
                                                          tz='utc'),
                                      data=0.0)

        broker.daily_bar_reader = bundle_data.equity_daily_bar_reader

        if start.date() < now.date():
            backtest_start = start
            backtest_end = bundle_data.equity_daily_bar_reader.last_available_dt

            if not os.path.exists(state_filename):
                log.info("Backtest from %s to %s." %
                         (backtest_start.date(), backtest_end.date()))
                backtest_data = DataPortal(
                    bundle_data.asset_finder,
                    trading_calendar=trading_calendar,
                    first_trading_day=first_trading_day,
                    equity_minute_reader=bundle_data.equity_minute_bar_reader,
                    equity_daily_reader=bundle_data.equity_daily_bar_reader,
                    adjustment_reader=bundle_data.adjustment_reader,
                )
                backtest = create_algo_class(
                    TradingAlgorithm, backtest_start, backtest_end, algofile,
                    algotext, analyze, before_trading_start, benchmark_returns,
                    benchmark_sid, blotter, bundle_data, capital_base,
                    backtest_data, 'daily', emission_rate, handle_data,
                    initialize, metrics_set, namespace, trading_calendar)

                ctx_blacklist = ['trading_client']
                ctx_whitelist = ['perf_tracker']
                ctx_excludes = ctx_blacklist + [
                    e
                    for e in backtest.__dict__.keys() if e not in ctx_whitelist
                ]
                backtest.run()
                # TODO: better logic for the checksum
                checksum = getattr(algofile, 'name', '<algorithm>')
                store_context(state_filename,
                              context=backtest,
                              checksum=checksum,
                              exclude_list=ctx_excludes)
            else:
                log.warn("State file already exists. Do not run the backtest.")

            # Set start and end to now for live trading
            start = pd.Timestamp.utcnow()
            if not trading_calendar.is_session(start.date()):
                start = trading_calendar.next_open(start)
            end = start

    # TODO: start here to create a pre-run of the algo before live trading;
    # use store_context before switching from TradingAlgorithm to LiveTradingAlgorithm
    TradingAlgorithmClass = (partial(
        LiveTradingAlgorithm, broker=broker, state_filename=state_filename)
                             if broker else TradingAlgorithm)

    DataPortalClass = (partial(DataPortalLive, broker)
                       if broker else DataPortal)
    data = DataPortalClass(
        bundle_data.asset_finder,
        trading_calendar=trading_calendar,
        first_trading_day=first_trading_day,
        equity_minute_reader=bundle_data.equity_minute_bar_reader,
        equity_daily_reader=bundle_data.equity_daily_bar_reader,
        adjustment_reader=bundle_data.adjustment_reader,
    )
    algo = create_algo_class(TradingAlgorithmClass, start, end, algofile,
                             algotext, analyze, before_trading_start,
                             benchmark_returns, benchmark_sid, blotter,
                             bundle_data, capital_base, data, data_frequency,
                             emission_rate, handle_data, initialize,
                             metrics_set, namespace, trading_calendar)

    perf = algo.run()

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
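
store_context itself is not shown above; only its call site is. As a rough illustration of what such a helper might do -- pickling every context attribute that is not on the exclude list -- here is a hypothetical sketch (names and behavior inferred from the call store_context(state_filename, context=..., checksum=..., exclude_list=...), so treat it as illustrative only):

# Hypothetical sketch only -- the real store_context is not shown in the example above.
import pickle

def store_context(state_filename, context, checksum, exclude_list):
    # Keep only the attributes that were not excluded at the call site.
    state = {name: value
             for name, value in context.__dict__.items()
             if name not in exclude_list}
    with open(state_filename, 'wb') as f:
        pickle.dump({'checksum': checksum, 'state': state}, f)
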
Example #26
0
            if not val: continue
            if isinstance(val, bool):
                args_strings.append('--' + arg)
            else:
                args_strings.append('--' + arg + ' \'' + str(val) + '\'')

        for package in args.usepackage:
            if package in {'amsmath', 'amssymb'}: continue
            args_strings.append('--usepackage \'' + str(package) + '\'')

        environment['args'] = ' '.join(args_strings)

        print('')
        script = post_commit_template % environment
        try:
            from pygments import highlight
            from pygments.lexers import BashLexer
            from pygments.formatters import TerminalFormatter
            print(highlight(script, BashLexer(), TerminalFormatter()))
        except ImportError:  # Pygments not installed; print without highlighting
            print(script)

        response = input("Would you like to write this to %s? [y/N] " %
                         '.git/hooks/post-commit')
        if response.lower() != 'y':
            exit(1)

        if args.add_git_hook:
            with open('.git/hooks/post-commit', 'w') as f:
                f.write(script)
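
One detail the snippet above leaves out: git only runs a hook if the file is executable, so after writing .git/hooks/post-commit it is worth setting the execute bit, for example:

# Make the freshly written hook executable so git will actually invoke it.
import os
import stat

hook_path = '.git/hooks/post-commit'
mode = os.stat(hook_path).st_mode
os.chmod(hook_path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
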
Example #27
0
File: run_algo.py  Project: zariza/catalyst
def _run(handle_data, initialize, before_trading_start, analyze, algofile,
         algotext, defines, data_frequency, capital_base, data, bundle,
         bundle_timestamp, start, end, output, print_algo, local_namespace,
         environ, live, exchange, algo_namespace, base_currency, live_graph,
         analyze_live, simulate_orders, auth_aliases, stats_output):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`catalyst.run_algo`.
    """
    # TODO: refactor for more granularity
    if algotext is not None:
        if local_namespace:
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign, )
            try:
                # evaluate in the same namespace so names may refer to
                # each other
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' %
                    (name, e), )
    elif defines:
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    log.warn('Catalyst is currently in ALPHA. It is going through rapid '
             'development and it is subject to errors. Please use carefully. '
             'We encourage you to report any issue on GitHub: '
             'https://github.com/enigmampc/catalyst/issues')
    log.info('Catalyst version {}'.format(catalyst.__version__))
    sleep(3)

    if live:
        if simulate_orders:
            mode = 'paper-trading'
        else:
            mode = 'live-trading'
    else:
        mode = 'backtest'

    log.info('running algo in {mode} mode'.format(mode=mode))

    exchange_name = exchange
    if exchange_name is None:
        raise ValueError('Please specify at least one exchange.')

    if isinstance(auth_aliases, string_types):
        aliases = auth_aliases.split(',')
        if len(aliases) < 2 or len(aliases) % 2 != 0:
            raise ValueError(
                'the `auth_aliases` parameter must contain an even list '
                'of comma-delimited values. For example, '
                '"binance,auth2" or "binance,auth2,bittrex,auth2".')

        auth_aliases = dict(zip(aliases[::2], aliases[1::2]))

    exchange_list = [x.strip().lower() for x in exchange.split(',')]
    exchanges = dict()
    for name in exchange_list:
        if auth_aliases is not None and name in auth_aliases:
            auth_alias = auth_aliases[name]
        else:
            auth_alias = None

        exchanges[name] = get_exchange(
            exchange_name=name,
            base_currency=base_currency,
            must_authenticate=(live and not simulate_orders),
            skip_init=True,
            auth_alias=auth_alias,
        )

    open_calendar = get_calendar('OPEN')

    env = TradingEnvironment(
        load=partial(load_crypto_market_data,
                     environ=environ,
                     start_dt=start,
                     end_dt=end),
        environ=environ,
        exchange_tz='UTC',
        asset_db_path=None  # We don't need an asset db, we have exchanges
    )
    env.asset_finder = ExchangeAssetFinder(exchanges=exchanges)

    def choose_loader(column):
        bound_cols = TradingPairPricing.columns
        if column in bound_cols:
            return ExchangePricingLoader(data_frequency)
        raise ValueError("No PipelineLoader registered for column %s." %
                         column)

    if live:
        start = pd.Timestamp.utcnow()

        # TODO: fix the end date.
        if end is None:
            end = start + timedelta(hours=8760)

        data = DataPortalExchangeLive(exchanges=exchanges,
                                      asset_finder=env.asset_finder,
                                      trading_calendar=open_calendar,
                                      first_trading_day=pd.to_datetime(
                                          'today', utc=True))

        sim_params = create_simulation_parameters(start=start,
                                                  end=end,
                                                  capital_base=capital_base,
                                                  emission_rate='minute',
                                                  data_frequency='minute')

        # TODO: use the constructor instead
        sim_params._arena = 'live'

        algorithm_class = partial(
            ExchangeTradingAlgorithmLive,
            exchanges=exchanges,
            algo_namespace=algo_namespace,
            live_graph=live_graph,
            simulate_orders=simulate_orders,
            stats_output=stats_output,
            analyze_live=analyze_live,
            end=end,
        )
    elif exchanges:
        # Removed the existing Poloniex fork to keep things simple
        # We can add back the complexity if required.

        # I don't think that we should have arbitrary price data bundles
        # Instead, we should center this data around exchanges.
        # We still need to support bundles for other misc data, but we
        # can handle this later.

        if start != pd.tslib.normalize_date(start) or \
                        end != pd.tslib.normalize_date(end):
            # todo: add to Sim_Params the option to start & end at specific times
            log.warn(
                "Catalyst currently starts and ends on the start and "
                "end of the dates specified, respectively. We hope to "
                "Modify this and support specific times in a future release.")

        data = DataPortalExchangeBacktest(
            exchange_names=[exchange_name for exchange_name in exchanges],
            asset_finder=None,
            trading_calendar=open_calendar,
            first_trading_day=start,
            last_available_session=end)

        sim_params = create_simulation_parameters(
            start=start,
            end=end,
            capital_base=capital_base,
            data_frequency=data_frequency,
            emission_rate=data_frequency,
        )

        algorithm_class = partial(ExchangeTradingAlgorithmBacktest,
                                  exchanges=exchanges)

    elif bundle is not None:
        bundle_data = load(
            bundle,
            environ,
            bundle_timestamp,
        )

        prefix, connstr = re.split(
            r'sqlite:///',
            str(bundle_data.asset_finder.engine.url),
            maxsplit=1,
        )
        if prefix:
            raise ValueError(
                "invalid url %r, must begin with 'sqlite:///'" %
                str(bundle_data.asset_finder.engine.url), )

        env = TradingEnvironment(asset_db_path=connstr, environ=environ)
        first_trading_day = \
            bundle_data.equity_minute_bar_reader.first_trading_day

        data = DataPortal(
            env.asset_finder,
            open_calendar,
            first_trading_day=first_trading_day,
            equity_minute_reader=bundle_data.equity_minute_bar_reader,
            equity_daily_reader=bundle_data.equity_daily_bar_reader,
            adjustment_reader=bundle_data.adjustment_reader,
        )

    perf = algorithm_class(
        namespace=namespace,
        env=env,
        get_pipeline_loader=choose_loader,
        sim_params=sim_params,
        **{
            'initialize': initialize,
            'handle_data': handle_data,
            'before_trading_start': before_trading_start,
            'analyze': analyze,
        } if algotext is None else {
            'algo_filename': getattr(algofile, 'name', '<algorithm>'),
            'script': algotext,
        }).run(
            data,
            overwrite_sim_params=False,
        )

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the catalyst magic not write any data
        perf.to_pickle(output)

    return perf
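
The auth_aliases parsing above relies on a small slicing-and-zip trick: the comma-delimited string is split, even-indexed entries become exchange names, and odd-indexed entries become their aliases. Worked in isolation:

# Worked example of the alias pairing used above.
raw = 'binance,auth2,bittrex,auth3'
aliases = raw.split(',')
assert len(aliases) >= 2 and len(aliases) % 2 == 0
auth_aliases = dict(zip(aliases[::2], aliases[1::2]))
print(auth_aliases)  # {'binance': 'auth2', 'bittrex': 'auth3'}
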
Example #28
0
    if outfn:
        if not fmter:
            try:
                fmter = get_formatter_for_filename(outfn, **parsed_opts)
            except (OptionError, ClassNotFound), err:
                print >> sys.stderr, 'Error:', err
                return 1
        try:
            outfile = open(outfn, 'wb')
        except Exception, err:
            print >> sys.stderr, 'Error: cannot open outfile:', err
            return 1
    else:
        if not fmter:
            fmter = TerminalFormatter(**parsed_opts)
        outfile = sys.stdout

    # select lexer
    lexer = opts.pop('-l', None)
    if lexer:
        try:
            lexer = get_lexer_by_name(lexer, **parsed_opts)
        except (OptionError, ClassNotFound), err:
            print >> sys.stderr, 'Error:', err
            return 1

    if args:
        if len(args) > 1:
            print >> sys.stderr, usage
            return 2
Example #29
0
def main(args=sys.argv):
    """
    Main command line entry point.
    """
    # pylint: disable-msg=R0911,R0912,R0915

    usage = USAGE % ((args[0], ) * 6)

    try:
        popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHgs")
    except getopt.GetoptError:
        print(usage, file=sys.stderr)
        return 2
    opts = {}
    O_opts = []
    P_opts = []
    F_opts = []
    for opt, arg in popts:
        if opt == '-O':
            O_opts.append(arg)
        elif opt == '-P':
            P_opts.append(arg)
        elif opt == '-F':
            F_opts.append(arg)
        opts[opt] = arg

    if opts.pop('-h', None) is not None:
        print(usage)
        return 0

    if opts.pop('-V', None) is not None:
        print('Pygments version %s, (c) 2006-2014 by Georg Brandl.' %
              __version__)
        return 0

    # handle ``pygmentize -L``
    L_opt = opts.pop('-L', None)
    if L_opt is not None:
        if opts:
            print(usage, file=sys.stderr)
            return 2

        # print version
        main(['', '-V'])
        if not args:
            args = ['lexer', 'formatter', 'filter', 'style']
        for arg in args:
            _print_list(arg.rstrip('s'))
        return 0

    # handle ``pygmentize -H``
    H_opt = opts.pop('-H', None)
    if H_opt is not None:
        if opts or len(args) != 2:
            print(usage, file=sys.stderr)
            return 2

        what, name = args
        if what not in ('lexer', 'formatter', 'filter'):
            print(usage, file=sys.stderr)
            return 2

        _print_help(what, name)
        return 0

    # parse -O options
    parsed_opts = _parse_options(O_opts)
    opts.pop('-O', None)

    # parse -P options
    for p_opt in P_opts:
        try:
            name, value = p_opt.split('=', 1)
        except ValueError:
            parsed_opts[p_opt] = True
        else:
            parsed_opts[name] = value
    opts.pop('-P', None)

    # encodings
    inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
    outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))

    # handle ``pygmentize -N``
    infn = opts.pop('-N', None)
    if infn is not None:
        try:
            lexer = get_lexer_for_filename(infn, **parsed_opts)
        except ClassNotFound as err:
            lexer = TextLexer()
        except OptionError as err:
            print('Error:', err, file=sys.stderr)
            return 1

        print(lexer.aliases[0])
        return 0

    # handle ``pygmentize -S``
    S_opt = opts.pop('-S', None)
    a_opt = opts.pop('-a', None)
    if S_opt is not None:
        f_opt = opts.pop('-f', None)
        if not f_opt:
            print(usage, file=sys.stderr)
            return 2
        if opts or args:
            print(usage, file=sys.stderr)
            return 2

        try:
            parsed_opts['style'] = S_opt
            fmter = get_formatter_by_name(f_opt, **parsed_opts)
        except ClassNotFound as err:
            print(err, file=sys.stderr)
            return 1

        arg = a_opt or ''
        try:
            print(fmter.get_style_defs(arg))
        except Exception as err:
            print('Error:', err, file=sys.stderr)
            return 1
        return 0

    # if no -S is given, -a is not allowed
    if a_opt is not None:
        print(usage, file=sys.stderr)
        return 2

    # parse -F options
    F_opts = _parse_filters(F_opts)
    opts.pop('-F', None)

    # select formatter
    outfn = opts.pop('-o', None)
    fmter = opts.pop('-f', None)
    if fmter:
        try:
            fmter = get_formatter_by_name(fmter, **parsed_opts)
        except (OptionError, ClassNotFound) as err:
            print('Error:', err, file=sys.stderr)
            return 1

    if outfn:
        if not fmter:
            try:
                fmter = get_formatter_for_filename(outfn, **parsed_opts)
            except (OptionError, ClassNotFound) as err:
                print('Error:', err, file=sys.stderr)
                return 1
        try:
            outfile = open(outfn, 'wb')
        except Exception as err:
            print('Error: cannot open outfile:', err, file=sys.stderr)
            return 1
    else:
        if not fmter:
            fmter = TerminalFormatter(**parsed_opts)
        if sys.version_info > (3, ):
            # Python 3: we have to use .buffer to get a binary stream
            outfile = sys.stdout.buffer
        else:
            outfile = sys.stdout

    # determine output encoding if not explicitly selected
    if not outencoding:
        if outfn:
            # output file? -> encoding pass-through
            fmter.encoding = inencoding
        else:
            # else use terminal encoding
            fmter.encoding = terminal_encoding(sys.stdout)

    # provide coloring under Windows, if possible
    if not outfn and sys.platform in ('win32', 'cygwin') and \
       fmter.name in ('Terminal', 'Terminal256'):
        # unfortunately colorama doesn't support binary streams on Py3
        if sys.version_info > (3, ):
            import io
            outfile = io.TextIOWrapper(outfile, encoding=fmter.encoding)
            fmter.encoding = None
        try:
            import colorama.initialise
        except ImportError:
            pass
        else:
            outfile = colorama.initialise.wrap_stream(outfile,
                                                      convert=None,
                                                      strip=None,
                                                      autoreset=False,
                                                      wrap=True)

    # select lexer
    lexer = opts.pop('-l', None)
    if lexer:
        try:
            lexer = get_lexer_by_name(lexer, **parsed_opts)
        except (OptionError, ClassNotFound) as err:
            print('Error:', err, file=sys.stderr)
            return 1

    # read input code
    if args:
        if len(args) > 1:
            print(usage, file=sys.stderr)
            return 2

        if '-s' in opts:
            print('Error: -s option not usable when input file specified',
                  file=sys.stderr)
            return 1

        infn = args[0]
        try:
            with open(infn, 'rb') as infp:
                code = infp.read()
        except Exception as err:
            print('Error: cannot read infile:', err, file=sys.stderr)
            return 1
        if not inencoding:
            code, inencoding = guess_decode(code)

        # do we have to guess the lexer?
        if not lexer:
            try:
                lexer = get_lexer_for_filename(infn, code, **parsed_opts)
            except ClassNotFound as err:
                if '-g' in opts:
                    try:
                        lexer = guess_lexer(code, **parsed_opts)
                    except ClassNotFound:
                        lexer = TextLexer(**parsed_opts)
                else:
                    print('Error:', err, file=sys.stderr)
                    return 1
            except OptionError as err:
                print('Error:', err, file=sys.stderr)
                return 1

    elif '-s' not in opts:  # treat stdin as full file (-s support is later)
        # read code from terminal, always in binary mode since we want to
        # decode ourselves and be tolerant with it
        if sys.version_info > (3, ):
            # Python 3: we have to use .buffer to get a binary stream
            code = sys.stdin.buffer.read()
        else:
            code = sys.stdin.read()
        if not inencoding:
            code, inencoding = guess_decode_from_terminal(code, sys.stdin)
            # else the lexer will do the decoding
        if not lexer:
            try:
                lexer = guess_lexer(code, **parsed_opts)
            except ClassNotFound:
                lexer = TextLexer(**parsed_opts)

    # When using the LaTeX formatter and the option `escapeinside` is
    # specified, we need a special lexer which collects escaped text
    # before running the chosen language lexer.
    escapeinside = parsed_opts.get('escapeinside', '')
    if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
        left = escapeinside[0]
        right = escapeinside[1]
        lexer = LatexEmbeddedLexer(left, right, lexer)

    # ... and do it!
    try:
        # process filters
        for fname, fopts in F_opts:
            lexer.add_filter(fname, **fopts)

        if '-s' not in opts:
            # process whole input as per normal...
            highlight(code, lexer, fmter, outfile)
        else:
            if not lexer:
                print(
                    'Error: when using -s a lexer has to be selected with -l',
                    file=sys.stderr)
                return 1
            # line by line processing of stdin (eg: for 'tail -f')...
            try:
                while 1:
                    if sys.version_info > (3, ):
                        # Python 3: we have to use .buffer to get a binary stream
                        line = sys.stdin.buffer.readline()
                    else:
                        line = sys.stdin.readline()
                    if not line:
                        break
                    if not inencoding:
                        line = guess_decode_from_terminal(line, sys.stdin)[0]
                    highlight(line, lexer, fmter, outfile)
                    if hasattr(outfile, 'flush'):
                        outfile.flush()
            except KeyboardInterrupt:
                return 0

    except Exception:
        import traceback
        info = traceback.format_exception(*sys.exc_info())
        msg = info[-1].strip()
        if len(info) >= 3:
            # extract relevant file and position info
            msg += '\n   (f%s)' % info[-2].split('\n')[0].strip()[1:]
        print(file=sys.stderr)
        print('*** Error while highlighting:', file=sys.stderr)
        print(msg, file=sys.stderr)
        return 1

    return 0
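
The lexer-selection logic above -- try the filename first, fall back to content guessing when -g is given, and finally fall back to plain text -- can be condensed into a small helper. A sketch using the public Pygments API:

# Sketch of the filename -> guess -> plain-text lexer fallback used above.
from pygments.lexers import get_lexer_for_filename, guess_lexer
from pygments.lexers.special import TextLexer
from pygments.util import ClassNotFound

def pick_lexer(filename, code, allow_guessing=True):
    try:
        return get_lexer_for_filename(filename, code)
    except ClassNotFound:
        if allow_guessing:
            try:
                return guess_lexer(code)
            except ClassNotFound:
                pass
        return TextLexer()
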
Example #30
0
def _run(handle_data, initialize, before_trading_start, analyze, algofile,
         algotext, defines, data_frequency, capital_base, bundle,
         bundle_timestamp, start, end, output, trading_calendar, print_algo,
         metrics_set, local_namespace, environ, blotter, benchmark_spec):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.
    """

    bundle_data = bundles.load(
        bundle,
        environ,
        bundle_timestamp,
    )

    if trading_calendar is None:
        trading_calendar = get_calendar('XNYS')

    # date parameter validation
    if trading_calendar.session_distance(start, end) < 1:
        raise _RunAlgoError(
            'There are no trading days between %s and %s' % (
                start.date(),
                end.date(),
            ), )

    benchmark_sid, benchmark_returns = benchmark_spec.resolve(
        asset_finder=bundle_data.asset_finder,
        start_date=start,
        end_date=end,
    )

    if algotext is not None:
        if local_namespace:
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign, )
            try:
                # evaluate in the same namespace so names may refer to
                # each other
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' %
                    (name, e), )
    elif defines:
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    first_trading_day = \
        bundle_data.equity_minute_bar_reader.first_trading_day

    data = DataPortal(
        bundle_data.asset_finder,
        trading_calendar=trading_calendar,
        first_trading_day=first_trading_day,
        equity_minute_reader=bundle_data.equity_minute_bar_reader,
        equity_daily_reader=bundle_data.equity_daily_bar_reader,
        adjustment_reader=bundle_data.adjustment_reader,
    )

    pipeline_loader = USEquityPricingLoader.without_fx(
        bundle_data.equity_daily_bar_reader,
        bundle_data.adjustment_reader,
    )

    def choose_loader(column):
        if column in USEquityPricing.columns:
            return pipeline_loader
        raise ValueError("No PipelineLoader registered for column %s." %
                         column)

    if isinstance(metrics_set, six.string_types):
        try:
            metrics_set = metrics.load(metrics_set)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    if isinstance(blotter, six.string_types):
        try:
            blotter = load(Blotter, blotter)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    try:
        perf = TradingAlgorithm(
            namespace=namespace,
            data_portal=data,
            get_pipeline_loader=choose_loader,
            trading_calendar=trading_calendar,
            sim_params=SimulationParameters(
                start_session=start,
                end_session=end,
                trading_calendar=trading_calendar,
                capital_base=capital_base,
                data_frequency=data_frequency,
            ),
            metrics_set=metrics_set,
            blotter=blotter,
            benchmark_returns=benchmark_returns,
            benchmark_sid=benchmark_sid,
            **{
                'initialize': initialize,
                'handle_data': handle_data,
                'before_trading_start': before_trading_start,
                'analyze': analyze,
            } if algotext is None else {
                'algo_filename': getattr(algofile, 'name', '<algorithm>'),
                'script': algotext,
            }).run()
    except NoBenchmark:
        raise _RunAlgoError(
            ('No ``benchmark_spec`` was provided, and'
             ' ``zipline.api.set_benchmark`` was not called in'
             ' ``initialize``.'),
            ("Neither '--benchmark-symbol' nor '--benchmark-sid' was"
             " provided, and ``zipline.api.set_benchmark`` was not called"
             " in ``initialize``. Did you mean to pass '--no-benchmark'?"),
        )

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
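
Each _run variant above repeats the same -D/--define handling: split each definition on '=', then eval the right-hand side in a shared namespace so later defines can reference earlier ones. A minimal standalone sketch (splitting on the first '=' only):

# Standalone sketch of the name=value define handling shared by the _run variants above.
def parse_defines(defines):
    namespace = {}
    for assign in defines:
        try:
            name, value = assign.split('=', 1)  # split on the first '=' only
        except ValueError:
            raise ValueError(
                'invalid define %r, should be of the form name=value' % assign)
        # Evaluate in the shared namespace so later defines can use earlier names.
        namespace[name] = eval(value, namespace)
    return namespace

ns = parse_defines(['base=10', 'double=base * 2'])
print(ns['base'], ns['double'])  # 10 20
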