Code example #1
File: agendas.py  Project: edulou13/enlaces
 def get_api(self, start=7, end=30):
     now = _utc.now().date()
     backward, forward = (now - _td(days=start)), (now + _td(days=end))
     return _Agenda.select(
         lambda ag: (ag.fecha_con >= backward and ag.fecha_con <= forward
                     ) and (ag.mensaje.tipo >= 1 and ag.mensaje.tipo <= 5)
         and not (ag.sms_estado and ag.lmd_estado)).order_by(
             lambda ag: (ag.persona.comunidad.nombre, ag.persona.nombres, ag
                         .persona.apellidos))
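The query above uses Pony ORM's entity.select(lambda ...) / order_by(lambda ...) API. As a point of reference, here is a minimal, self-contained sketch of the same date-window pattern against a hypothetical one-column Agenda entity; the real project's entities, relations and the _utc/_td/_Agenda aliases are not reproduced.

# Minimal Pony ORM sketch (assumed schema): a [now-start, now+end] window
# on fecha_con, mirroring the shape of get_api() above.
from datetime import date, timedelta
from pony.orm import Database, Required, db_session

db = Database()

class Agenda(db.Entity):
    fecha_con = Required(date)

db.bind(provider='sqlite', filename=':memory:')
db.generate_mapping(create_tables=True)

def get_window(start=7, end=30):
    now = date.today()
    backward, forward = now - timedelta(days=start), now + timedelta(days=end)
    return Agenda.select(
        lambda ag: ag.fecha_con >= backward and ag.fecha_con <= forward
    ).order_by(lambda ag: ag.fecha_con)

with db_session:
    Agenda(fecha_con=date.today())
    print(get_window().count())  # -> 1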
Code example #2
File: agendas.py  Project: edulou13/enlaces
 def radio_operator(cls):
     #return cls.get_api(start=15, end=15).filter(lambda ag: ag.persona.cobertura==2)
     now = _utc.now().date()
     backward, forward = (now - _td(days=15)), (now + _td(days=15))
     return _Agenda.select(lambda ag: ag.persona.cobertura == 2 and (
         ag.fecha_con >= backward and ag.fecha_con <= forward) and (
             ag.mensaje.tipo >= 1 and ag.mensaje.tipo <= 5
         ) and not ag.rad_estado).order_by(
             lambda ag: (ag.persona.comunidad.nombre, ag.persona.nombres, ag
                         .persona.apellidos, _desc(ag.fecha_con)))
Code example #3
    def _schedule(self):
        """
        Initializes the scheduler for HCI device no-data checking
        """
        logger.debug(
            f'>>> enter {type(self._scheduler)} device_timeout:{self._device_timeout}'
        )

        if not self._scheduler:
            return

        if self._device_timeout:
            l_jobid = f'bleak_timeout'
            try:
                self._scheduler.add_job(
                    self._do_bleak_timeout,
                    'interval',
                    seconds=1,
                    kwargs={
                        'jobid': l_jobid,
                        'reset': self._device_reset
                    },
                    id=l_jobid,
                    replace_existing=True,
                    max_instances=self.SCHEDULER_MAX_INSTANCES,
                    coalesce=True,
                    next_run_time=_dt.now() +
                    _td(seconds=self._device_timeout))
                logger.info(f'>>> jobid:{l_jobid} scheduled')
            except:
                logger.exception(f'>>> jobid:{l_jobid}')
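The _schedule() methods in this and the following examples all go through APScheduler's add_job() with an 'interval' trigger. Below is a minimal runnable sketch of that call shape; the scheduler type, job id and timings are illustrative assumptions, not the project's configuration.

# Minimal APScheduler sketch (assumed values): an interval job with a
# delayed first run, mirroring the add_job() call above.
import time
from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler

def check_timeout(jobid, reset):
    print(f'{jobid} fired (reset={reset})')

scheduler = BackgroundScheduler()
scheduler.start()
scheduler.add_job(
    check_timeout,
    'interval',
    seconds=1,                    # run every second once started
    kwargs={'jobid': 'bleak_timeout', 'reset': False},
    id='bleak_timeout',
    replace_existing=True,        # re-adding the same id replaces the old job
    max_instances=1,
    coalesce=True,                # collapse missed runs into a single run
    next_run_time=datetime.now() + timedelta(seconds=3))  # delayed first run

time.sleep(5)
scheduler.shutdown()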
Code example #4
    def _schedule(self, *, scheduler):
        logger.debug(f'{self._name} enter {type(scheduler)}')

        l_lwt = bool(self._cfg.get('lwt', _def.MQTT_LWT))
        l_lwtperiod = int(self._cfg.get('lwtperiod', _def.MQTT_LWTPERIOD))
        if l_lwt and l_lwtperiod:
            try:
                l_jobid = f'{self._name}_publish_lwt'
                scheduler.add_job(self._publish_lwt,
                                  'interval',
                                  seconds=l_lwtperiod,
                                  kwargs={
                                      'payload':
                                      self._cfg.get('lwtonline',
                                                    _def.MQTT_LWTONLINE)
                                  },
                                  id=l_jobid,
                                  replace_existing=True,
                                  max_instances=self.SCHEDULER_MAX_INSTANCES,
                                  coalesce=True,
                                  next_run_time=_dt.now() +
                                  _td(seconds=l_lwtperiod))
                logger.info(
                    f'{self._name} {l_jobid} scheduled lwtperiod:{l_lwtperiod}'
                )
            except:
                logger.exception(f'*** {self._name}')
Code example #5
	def get_forDashboard(cls, days=30, id_red=None, id_mup=None, id_cen=None):
		time_ago = _utc.now().date() - _td(days=days)
		query = _newBorn.select(lambda nb: nb.embarazo.parto_inst>=time_ago).order_by(lambda nb: (nb.embarazo.embarazada.comunidad.nombre, nb.embarazo.parto_inst, nb.nombres, nb.apellidos))
		if id_red:
			municipios = _networksCrt.get_byId(id_red).municipios
			return query.filter(lambda nb: nb.embarazo.embarazada.comunidad.municipio in municipios)
		elif id_mup:
			comunidades = _townshipsCrt.get_byId(id_mup).comunidades
			return query.filter(lambda nb: nb.embarazo.embarazada.comunidad in comunidades)
		elif id_cen:
			comunidades = _hospitalsCrt.get_byId(id_cen).comunidades
			return query.filter(lambda nb: nb.embarazo.embarazada.comunidad in comunidades)
		else:
			return query
Code example #6
File: childrens.py  Project: edulou13/enlaces
	def get_forDashboard(cls, days=30, id_red=None, id_mup=None, id_cen=None):
		time_ago = _utc.now().date() - _td(days=days)
		query = _newBorn.select(lambda nb: nb.embarazo.parto_inst>=time_ago).order_by(lambda nb: (nb.embarazo.embarazada.comunidad.nombre, nb.embarazo.parto_inst, nb.nombres, nb.apellidos))
		if id_red:
			municipios = _networksCrt.get_byId(id_red).municipios
			return query.filter(lambda nb: nb.embarazo.embarazada.comunidad.municipio in municipios)
		elif id_mup:
			comunidades = _townshipsCrt.get_byId(id_mup).comunidades
			return query.filter(lambda nb: nb.embarazo.embarazada.comunidad in comunidades)
		elif id_cen:
			comunidades = _hospitalsCrt.get_byId(id_cen).comunidades
			return query.filter(lambda nb: nb.embarazo.embarazada.comunidad in comunidades)
		else:
			return query
Code example #7
	def save(self, persona, mensaje, fecha_con=None, days=7):
		try:
			with _db_session:
				if fecha_con is None:
					now = _utc.now().date().isoformat()
					ag = _Agenda(persona=persona, mensaje=mensaje, fecha_msj=now, fecha_con=now, enviado=True)
				else:
					fecha_msj = (fecha_con - _td(days=days)).isoformat()
					fecha_con = fecha_con.isoformat()
					ag = _Agenda(persona=persona, mensaje=mensaje, fecha_msj=fecha_msj, fecha_con=fecha_con)
				_commit()
				return True if ag else False
		except Exception as e:
			print(e)
			return False
Code example #8
File: ruuvigw_aioclient.py  Project: hulttis/ruuvigw
    def _schedule(self, *, scheduler):
        logger.debug(f'{self._name} enter {type(scheduler)}')

        if self._write_lastdata_int:
            try:
                l_jobid = f'{self._name}_lastdata'
                scheduler.add_job(
                    self._check_lastdata,
                    'interval',
                    seconds = 1,
                    id = l_jobid,
                    replace_existing = True,
                    max_instances = self.SCHEDULER_MAX_INSTANCES,
                    coalesce = True,
                    next_run_time = _dt.now()+_td(seconds=_def.RUUVI_WRITE_LASTDATA_DELAY)
                )
                logger.info(f'{self._name} {l_jobid} scheduled')
            except:
                logger.exception(f'*** {self._name}')
Code example #9
File: conftest.py  Project: tkphd/NexusLIMS
def fix_mountain_time(monkey_session):
    """
    Hack to determine if we need to adjust our datetime objects for the time
    difference between Boulder and G'burg
    """
    def currenttz():
        if time.daylight:
            return _tz(_td(seconds=-time.altzone), time.tzname[1])
        else:  # pragma: no cover
            return _tz(_td(seconds=-time.timezone), time.tzname[0])

    tz_string = currenttz().tzname(_dt.now())

    # if timezone is MST or MDT, we're 2 hours behind, so we need to adjust
    # datetime objects to match file store
    if tz_string in ['MST', 'MDT']:
        # get current timezone, and adjust tz_offset as needed
        monkey_session.setattr(nexusLIMS.utils, "tz_offset", _td(hours=-2))
        monkey_session.setenv('ignore_mib', 'True')
        monkey_session.setenv('is_mountain_time', 'True')
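The currenttz() helper above is plain standard library: it builds a fixed-offset timezone from time.altzone/time.timezone and then asks it for its name. A standalone sketch of that pattern, without the NexusLIMS/pytest pieces:

# Stdlib-only sketch: derive the local timezone name the same way
# currenttz() does above.
import time
from datetime import datetime, timedelta, timezone

def currenttz():
    # Negating the C-library offsets gives the local UTC offset.
    if time.daylight:
        return timezone(timedelta(seconds=-time.altzone), time.tzname[1])
    return timezone(timedelta(seconds=-time.timezone), time.tzname[0])

tz_string = currenttz().tzname(datetime.now())
print(tz_string)                    # e.g. 'MDT' on a Mountain Time host
print(tz_string in ['MST', 'MDT'])  # the check the fixture keys off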
Code example #10
def get_regions_parallel(positions, genome_file, base=0, count=7):
    """Return a list of regions surrounding a position.

    Will loop through each chromosome and search all positions in that
    chromosome in one batch. Lookup is serial per chromosome.

    Args:
        positions (dict):  Dictionary of {chrom->positions}
        genome_file (str): Location of a genome fasta file or directory of
                           files. If directory, file names must be
                           <chrom_name>.fa[.gz]. Gzipped OK.
        base (int):        Either 0 or 1, base of positions in your list
        count (int):       Distance + and - the position to extract

    Returns:
        dict: {chrom->{position->sequence}}
    """
    outs = []
    for chrom in positions.keys():
        if os.path.isdir(genome_file):
            fa_file = get_fasta_file(genome_file, chrom)
        else:
            # genome_file is a single FASTA file covering all chromosomes
            fa_file = genome_file
        if not os.path.isfile(fa_file):
            raise FileNotFoundError('{} not found.'.format(genome_file))
        mins = int(len(positions[chrom]) / 2000) + 60
        time = str(_td(minutes=mins))
        outs.append(
            fyrd.submit(
                get_regions,
                ({
                    chrom: positions[chrom]
                }, fa_file, base, count),
                cores=1,
                mem='6GB',
                time=time,
            ))

    final = {}
    for out in outs:
        final.update(out.get())
    return final
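The time=str(_td(minutes=mins)) line above converts a timedelta into the H:MM:SS walltime string handed to fyrd; the sizing heuristic (positions divided by 2000, plus a fixed margin) is the snippet's own. A quick standard-library illustration of the conversion:

# How the walltime string above is built: str() of a timedelta is H:MM:SS.
from datetime import timedelta

positions_in_chrom = 150000
mins = int(positions_in_chrom / 2000) + 60   # the snippet's sizing heuristic
print(str(timedelta(minutes=mins)))          # -> '2:15:00'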
Code example #11
File: agendas.py  Project: edulou13/enlaces
 def save(self, persona, mensaje, fecha_con=None, days=0):
     try:
         with _db_session:
             if fecha_con is None:
                 now = _utc.now().date().isoformat()
                 ag = _Agenda(persona=persona,
                              mensaje=mensaje,
                              fecha_msj=now,
                              fecha_con=now,
                              sms_estado=True)
             else:
                 fecha_msj = (fecha_con - _td(days=days)).isoformat()
                 fecha_con = fecha_con.isoformat()
                 ag = _Agenda(persona=persona,
                              mensaje=mensaje,
                              fecha_msj=fecha_msj,
                              fecha_con=fecha_con)
             _commit()
             return True if ag else False
     except Exception as e:
         print(e)
         return False
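For the write path above, here is a minimal Pony ORM sketch of the same db_session-plus-commit pattern, again against a made-up two-column Agenda entity rather than the project's real schema.

# Minimal Pony ORM write sketch (assumed schema): create a row inside
# db_session and commit explicitly, as save() does above.
from datetime import date
from pony.orm import Database, Required, commit, db_session

db = Database()

class Agenda(db.Entity):
    fecha_msj = Required(str)
    fecha_con = Required(str)

db.bind(provider='sqlite', filename=':memory:')
db.generate_mapping(create_tables=True)

def save(fecha_con=None):
    try:
        with db_session:
            now = date.today().isoformat()
            ag = Agenda(fecha_msj=now, fecha_con=fecha_con or now)
            commit()                  # explicit commit, as in save() above
            return ag is not None
    except Exception as e:
        print(e)
        return False

print(save())  # -> True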
Code example #12
File: influx_aioclient.py  Project: hulttis/ruuvigw
    def _schedule(self, *, scheduler):
        logger.debug(f'{self._name} enter {type(scheduler)}')

        try:
            l_jobid = f'{self._name}_do_connect'
            scheduler.add_job(self._do_connect,
                              'interval',
                              seconds=self._cfg.get(
                                  'supervision_interval',
                                  _def.INFLUX_SUPERVISION_INTERVAL),
                              kwargs={
                                  'cfg': self._cfg,
                                  'jobid': l_jobid
                              },
                              id=l_jobid,
                              replace_existing=True,
                              max_instances=self.SCHEDULER_MAX_INSTANCES,
                              coalesce=True,
                              next_run_time=_dt.now() +
                              _td(seconds=self.INFLUX_CONNECT_DELAY))
        except:
            logger.exception(f'*** {self._name}')
Code example #13
File: get_region.py  Project: MikeDacre/mike_tools
def get_regions_parallel(positions, genome_file, base=0, count=7):
    """Return a list of regions surrounding a position.

    Will loop through each chromosome and search all positions in that
    chromosome in one batch. Lookup is serial per chromosome.

    Args:
        positions (dict):  Dictionary of {chrom->positions}
        genome_file (str): Location of a genome fasta file or directory of
                           files. If directory, file names must be
                           <chrom_name>.fa[.gz]. Gzipped OK.
        base (int):        Either 0 or 1, base of positions in your list
        count (int):       Distance + and - the position to extract

    Returns:
        dict: {chrom->{position->sequence}}
    """
    outs = []
    for chrom in positions.keys():
        if os.path.isdir(genome_file):
            fa_file = get_fasta_file(genome_file, chrom)
        else:
            # genome_file is a single FASTA file covering all chromosomes
            fa_file = genome_file
        if not os.path.isfile(fa_file):
            raise FileNotFoundError('{} not found.'.format(genome_file))
        mins = int(len(positions[chrom])/2000)+60
        time = str(_td(minutes=mins))
        outs.append(
            fyrd.submit(
                get_regions,
                ({chrom: positions[chrom]}, fa_file, base, count),
                cores=1, mem='6GB', time=time,
            )
        )

    final = {}
    for out in outs:
        final.update(out.get())
    return final
Code example #14
 def dst(self, dt):
     return _td()
Code example #15
File: config.py  Project: abreen/socrates.py
hooks_dir = _parser.get('socrates', 'hooks_dir',
                        fallback=SOCRATES_DIR + os.sep + 'hooks')
scripts_dir = _parser.get('socrates', 'scripts_dir',
                          fallback=SOCRATES_DIR + os.sep + 'scripts')
static_dir = _parser.get('socrates', 'static_dir',
                         fallback=SOCRATES_DIR + os.sep + 'static')
dropbox_dir = _parser.get('socrates', 'dropbox_dir',
                          fallback=SOCRATES_DIR + os.sep + 'dropbox')
criteria_dir = _parser.get('socrates', 'criteria_dir',
                           fallback=SOCRATES_DIR + os.sep + 'criteria')

from datetime import timedelta as _td
if _parser.has_option('socrates', 'grace_period'):
    _grace_str = _parser.get('socrates', 'grace_period')
    grace_period = _td(seconds=int(_grace_str))

else:
    grace_period = _td(seconds=0)


_f = False
if not os.path.isdir(hooks_dir):
    _f = True
    util.error("hooks directory does not exist or cannot be accessed")

if not os.path.isdir(scripts_dir):
    _f = True
    util.error("scripts directory does not exist or cannot be accessed")

if not os.path.isdir(static_dir):
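The grace_period handling above is plain configparser plus timedelta. A tiny self-contained sketch of the same fallback pattern with an in-memory config (section and option names mirror the snippet, the value is made up):

# configparser + timedelta sketch: read grace_period in seconds with a
# zero-second default, as config.py does above.
import configparser
from datetime import timedelta as _td

_parser = configparser.ConfigParser()
_parser.read_string('[socrates]\ngrace_period = 300\n')

if _parser.has_option('socrates', 'grace_period'):
    grace_period = _td(seconds=int(_parser.get('socrates', 'grace_period')))
else:
    grace_period = _td(seconds=0)

print(grace_period)  # -> 0:05:00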
Code example #16
File: childrens.py  Project: edulou13/enlaces
	def get_allChildrens(cls, days=30):
		time_ago = _utc.now().date() - _td(days=days)
		return _newBorn.select(lambda nb: nb.embarazo.parto_inst>=time_ago)
Code example #17
def metrics(returns,
            benchmark=None,
            rf=0.,
            display=True,
            mode='basic',
            sep=False,
            compounded=True,
            periods_per_year=252,
            prepare_returns=True,
            match_dates=False,
            **kwargs):

    win_year, _ = _get_trading_periods(periods_per_year)

    if benchmark is not None \
            and isinstance(benchmark, _pd.DataFrame) and len(benchmark.columns) > 1:
        raise ValueError("`benchmark` must be a pandas Series, "
                         "but a multi-column DataFrame was passed")

    blank = ['']

    if isinstance(returns, _pd.DataFrame):
        if len(returns.columns) > 1:
            raise ValueError(
                "`returns` needs to be a pandas Series or a one-column "
                "DataFrame; a multi-column DataFrame was passed")
        returns = returns[returns.columns[0]]

    if prepare_returns:
        returns = _utils._prepare_returns(returns)

    df = _pd.DataFrame({"returns": returns})

    if benchmark is not None:
        blank = ['', '']
        benchmark = _utils._prepare_benchmark(benchmark, returns.index, rf)
        if match_dates is True:
            returns, benchmark = _match_dates(returns, benchmark)
        df["returns"] = returns
        df["benchmark"] = benchmark

    df = df.fillna(0)

    # pct multiplier
    pct = 100 if display or "internal" in kwargs else 1
    if kwargs.get("as_pct", False):
        pct = 100

    # return df
    dd = _calc_dd(df,
                  display=(display or "internal" in kwargs),
                  as_pct=kwargs.get("as_pct", False))

    metrics = _pd.DataFrame()

    s_start = {'returns': df['returns'].index.strftime('%Y-%m-%d')[0]}
    s_end = {'returns': df['returns'].index.strftime('%Y-%m-%d')[-1]}
    s_rf = {'returns': rf}

    if "benchmark" in df:
        s_start['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[0]
        s_end['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[-1]
        s_rf['benchmark'] = rf

    metrics['Start Period'] = _pd.Series(s_start)
    metrics['End Period'] = _pd.Series(s_end)
    metrics['Risk-Free Rate %'] = _pd.Series(s_rf)
    metrics['Time in Market %'] = _stats.exposure(df,
                                                  prepare_returns=False) * pct

    metrics['~'] = blank

    if compounded:
        metrics['Cumulative Return %'] = (_stats.comp(df) * pct).map(
            '{:,.2f}'.format)
    else:
        metrics['Total Return %'] = (df.sum() * pct).map('{:,.2f}'.format)

    metrics['CAGR﹪%'] = _stats.cagr(df, rf, compounded) * pct

    metrics['~~~~~~~~~~~~~~'] = blank

    metrics['Sharpe'] = _stats.sharpe(df, rf, win_year, True)
    if mode.lower() == 'full':
        metrics['Smart Sharpe'] = _stats.smart_sharpe(df, rf, win_year, True)
    metrics['Sortino'] = _stats.sortino(df, rf, win_year, True)
    if mode.lower() == 'full':
        metrics['Smart Sortino'] = _stats.smart_sortino(df, rf, win_year, True)
    metrics['Sortino/√2'] = metrics['Sortino'] / _sqrt(2)
    if mode.lower() == 'full':
        metrics['Smart Sortino/√2'] = metrics['Smart Sortino'] / _sqrt(2)
    metrics['Omega'] = _stats.omega(df, rf, 0., win_year)

    metrics['~~~~~~~~'] = blank
    metrics['Max Drawdown %'] = blank
    metrics['Longest DD Days'] = blank

    if mode.lower() == 'full':
        ret_vol = _stats.volatility(
            df['returns'], win_year, True, prepare_returns=False) * pct
        if "benchmark" in df:
            bench_vol = _stats.volatility(
                df['benchmark'], win_year, True, prepare_returns=False) * pct
            metrics['Volatility (ann.) %'] = [ret_vol, bench_vol]
            metrics['R^2'] = _stats.r_squared(df['returns'],
                                              df['benchmark'],
                                              prepare_returns=False)
        else:
            metrics['Volatility (ann.) %'] = [ret_vol]

        metrics['Calmar'] = _stats.calmar(df, prepare_returns=False)
        metrics['Skew'] = _stats.skew(df, prepare_returns=False)
        metrics['Kurtosis'] = _stats.kurtosis(df, prepare_returns=False)

        metrics['~~~~~~~~~~'] = blank

        metrics['Expected Daily %%'] = _stats.expected_return(
            df, prepare_returns=False) * pct
        metrics['Expected Monthly %%'] = _stats.expected_return(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Expected Yearly %%'] = _stats.expected_return(
            df, aggregate='A', prepare_returns=False) * pct
        metrics['Kelly Criterion %'] = _stats.kelly_criterion(
            df, prepare_returns=False) * pct
        metrics['Risk of Ruin %'] = _stats.risk_of_ruin(df,
                                                        prepare_returns=False)

        metrics['Daily Value-at-Risk %'] = -abs(
            _stats.var(df, prepare_returns=False) * pct)
        metrics['Expected Shortfall (cVaR) %'] = -abs(
            _stats.cvar(df, prepare_returns=False) * pct)

    metrics['~~~~~~'] = blank

    metrics['Gain/Pain Ratio'] = _stats.gain_to_pain_ratio(df, rf)
    metrics['Gain/Pain (1M)'] = _stats.gain_to_pain_ratio(df, rf, "M")
    # if mode.lower() == 'full':
    #     metrics['GPR (3M)'] = _stats.gain_to_pain_ratio(df, rf, "Q")
    #     metrics['GPR (6M)'] = _stats.gain_to_pain_ratio(df, rf, "2Q")
    #     metrics['GPR (1Y)'] = _stats.gain_to_pain_ratio(df, rf, "A")
    metrics['~~~~~~~'] = blank

    metrics['Payoff Ratio'] = _stats.payoff_ratio(df, prepare_returns=False)
    metrics['Profit Factor'] = _stats.profit_factor(df, prepare_returns=False)
    metrics['Common Sense Ratio'] = _stats.common_sense_ratio(
        df, prepare_returns=False)
    metrics['CPC Index'] = _stats.cpc_index(df, prepare_returns=False)
    metrics['Tail Ratio'] = _stats.tail_ratio(df, prepare_returns=False)
    metrics['Outlier Win Ratio'] = _stats.outlier_win_ratio(
        df, prepare_returns=False)
    metrics['Outlier Loss Ratio'] = _stats.outlier_loss_ratio(
        df, prepare_returns=False)

    # returns
    metrics['~~'] = blank
    comp_func = _stats.comp if compounded else _np.sum

    today = df.index[-1]  # _dt.today()
    metrics['MTD %'] = comp_func(
        df[df.index >= _dt(today.year, today.month, 1)]) * pct

    d = today - _td(3 * 365 / 12)
    metrics['3M %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    d = today - _td(6 * 365 / 12)
    metrics['6M %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    metrics['YTD %'] = comp_func(df[df.index >= _dt(today.year, 1, 1)]) * pct

    d = today - _td(12 * 365 / 12)
    metrics['1Y %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct
    d = today - _td(3 * 365)
    metrics['3Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct
    d = today - _td(5 * 365)
    metrics['5Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct
    d = today - _td(10 * 365)
    metrics['10Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct
    metrics['All-time (ann.) %'] = _stats.cagr(df, 0., compounded) * pct

    # best/worst
    if mode.lower() == 'full':
        metrics['~~~'] = blank
        metrics['Best Day %'] = _stats.best(df, prepare_returns=False) * pct
        metrics['Worst Day %'] = _stats.worst(df, prepare_returns=False) * pct
        metrics['Best Month %'] = _stats.best(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Worst Month %'] = _stats.worst(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Best Year %'] = _stats.best(
            df, aggregate='A', prepare_returns=False) * pct
        metrics['Worst Year %'] = _stats.worst(
            df, aggregate='A', prepare_returns=False) * pct

    # dd
    metrics['~~~~'] = blank
    for ix, row in dd.iterrows():
        metrics[ix] = row
    metrics['Recovery Factor'] = _stats.recovery_factor(df)
    metrics['Ulcer Index'] = _stats.ulcer_index(df)
    metrics['Serenity Index'] = _stats.serenity_index(df, rf)

    # win rate
    if mode.lower() == 'full':
        metrics['~~~~~'] = blank
        metrics['Avg. Up Month %'] = _stats.avg_win(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Avg. Down Month %'] = _stats.avg_loss(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Win Days %%'] = _stats.win_rate(df,
                                                 prepare_returns=False) * pct
        metrics['Win Month %%'] = _stats.win_rate(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Win Quarter %%'] = _stats.win_rate(
            df, aggregate='Q', prepare_returns=False) * pct
        metrics['Win Year %%'] = _stats.win_rate(
            df, aggregate='A', prepare_returns=False) * pct

        if "benchmark" in df:
            metrics['~~~~~~~'] = blank
            greeks = _stats.greeks(df['returns'],
                                   df['benchmark'],
                                   win_year,
                                   prepare_returns=False)
            metrics['Beta'] = [str(round(greeks['beta'], 2)), '-']
            metrics['Alpha'] = [str(round(greeks['alpha'], 2)), '-']

    # prepare for display
    for col in metrics.columns:
        try:
            metrics[col] = metrics[col].astype(float).round(2)
            if display or "internal" in kwargs:
                metrics[col] = metrics[col].astype(str)
        except Exception:
            pass
        if (display or "internal" in kwargs) and "%" in col:
            metrics[col] = metrics[col] + '%'
    try:
        metrics['Longest DD Days'] = _pd.to_numeric(
            metrics['Longest DD Days']).astype('int')
        metrics['Avg. Drawdown Days'] = _pd.to_numeric(
            metrics['Avg. Drawdown Days']).astype('int')

        if display or "internal" in kwargs:
            metrics['Longest DD Days'] = metrics['Longest DD Days'].astype(str)
            metrics['Avg. Drawdown Days'] = metrics[
                'Avg. Drawdown Days'].astype(str)
    except Exception:
        metrics['Longest DD Days'] = '-'
        metrics['Avg. Drawdown Days'] = '-'
        if display or "internal" in kwargs:
            metrics['Longest DD Days'] = '-'
            metrics['Avg. Drawdown Days'] = '-'

    metrics.columns = [
        col if '~' not in col else '' for col in metrics.columns
    ]
    metrics.columns = [
        col[:-1] if '%' in col else col for col in metrics.columns
    ]
    metrics = metrics.T

    if "benchmark" in df:
        metrics.columns = ['Strategy', 'Benchmark']
    else:
        metrics.columns = ['Strategy']

    if display:
        print(_tabulate(metrics, headers="keys", tablefmt='simple'))
        return None

    if not sep:
        metrics = metrics[metrics.index != '']
    return metrics
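A hedged usage sketch for the metrics() function above: it expects a pandas Series of periodic returns indexed by date and prints a table when display=True. The synthetic random returns below are illustrative only, and the call assumes the module-level imports the function relies on (_pd, _np, _stats, _utils and friends) are in place.

# Illustrative call shape for metrics() above, using synthetic daily returns.
import numpy as np
import pandas as pd

idx = pd.date_range('2020-01-01', periods=500, freq='B')
returns = pd.Series(np.random.normal(0.0005, 0.01, len(idx)), index=idx)

metrics(returns, mode='basic', display=True)          # prints the table
full = metrics(returns, mode='full', display=False)   # returns a DataFrame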
Code example #18
File: test_date.py  Project: marrow/mongo
		def utcoffset(self, dt):
			return _td(hours=-7)
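The utcoffset()/dst() one-liners in these test examples belong to custom tzinfo subclasses. A compact standalone sketch of such a fixed-offset class (the -7 hour offset matches the snippet; the class name is invented):

# Fixed-offset tzinfo sketch: the utcoffset()/dst() pair above in context.
from datetime import datetime, timedelta, tzinfo

class FixedOffset(tzinfo):
    def utcoffset(self, dt):
        return timedelta(hours=-7)

    def dst(self, dt):
        return timedelta()          # no DST adjustment

    def tzname(self, dt):
        return 'UTC-07:00'

print(datetime(2024, 1, 1, 12, 0, tzinfo=FixedOffset()).isoformat())
# -> 2024-01-01T12:00:00-07:00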
Code example #19
def get_dinucleotides_parallel(positions, genome_file, base=0, return_as='list'):
    """Return a list of all + and - strand dinucleotides around each position.

    Will loop through each chromosome and search all positions in that
    chromosome in one batch. Lookup is parallel per chromosome.

    Args:
        positions (dict):  Dictionary of {chrom->positions}
        genome_file (str): Location of a genome fasta file or directory of
                           files. If directory, file names must be
                           <chrom_name>.fa[.gz]. Gzipped OK. Directory is
                           preferred in parallel mode.
        base (int):        Either 0 or 1, base of positions in your list
        return_as (str):   dict: Return a dictionary of:
                           {chrom->{position->{'ref': str, '+': tuple, '-': tuple}}}
                           list: just returns two lists with no positions.
                           df: return DataFrame

    Returns:
        (list, list): + strand dinucleotides, - strand dinucleotides. Returns
                      a dict or DataFrame instead if requested through return_as.
    """
    outs = []
    for chrom in positions.keys():
        if os.path.isdir(genome_file):
            fa_file = get_fasta_file(genome_file, chrom)
        else:
            # genome_file is a single FASTA file covering all chromosomes
            fa_file = genome_file
        if not os.path.isfile(fa_file):
            raise FileNotFoundError('{} not found.'.format(genome_file))
        mins = int(len(positions[chrom])/2000)+45
        time = str(_td(minutes=mins))
        outs.append(
            fyrd.submit(
                get_dinucleotides,
                ({chrom: positions[chrom]}, fa_file, base, return_as),
                cores=1, mem='6GB', time=time,
            )
        )

    if return_as == 'df':
        final = []
    elif return_as == 'dict':
        final = {}
    else:
        final = ([], [])

    fyrd.wait(outs)
    print('Getting results')
    for out in outs:
        res = out.get()
        if return_as == 'df':
            if isinstance(res, dict):
                res = dict_to_df(res, base)
            final.append(res)
        elif return_as == 'dict':
            final.update(res)
        else:
            plus, minus = res
            final[0] += plus
            final[1] += minus

    if return_as == 'df':
        print('Joining dataframe')
        final = pd.concat(final)

    return final
Code example #20
	def get_api(self, start=7, end=30):
		now = _utc.now().date()
		backward, forward = (now - _td(days=start)), (now + _td(days=end))
		return _Agenda.select(lambda ag: (ag.fecha_con>=backward and ag.fecha_con<=forward) and (ag.mensaje.tipo>=1 and ag.mensaje.tipo<=5) and not(ag.sms_estado and ag.lmd_estado)).order_by(lambda ag: (ag.persona.comunidad.nombre, ag.persona.nombres, ag.persona.apellidos))
Code example #21
 def utcoffset(self, dt):
     return _td(hours=-7)
Code example #22
	def get_all(cls):
		backward = _utc.now().date() - _td(days=30)
		return _Agenda.select(lambda ag: ag.fecha_con>=backward).order_by(lambda ag: (ag.persona.comunidad.nombre, ag.persona.nombres, ag.persona.apellidos, _desc(ag.fecha_con)))
Code example #23
File: test_date.py  Project: marrow/mongo
		def dst(self, dt):
			return _td()
Code example #24
File: conftest.py  Project: tkphd/NexusLIMS
 def currenttz():
     if time.daylight:
         return _tz(_td(seconds=-time.altzone), time.tzname[1])
     else:  # pragma: no cover
         return _tz(_td(seconds=-time.timezone), time.tzname[0])
Code example #25
File: test_date.py  Project: marrow/mongo
	def test_timedelta_assignment(self, Sample):
		fuzz = _td(seconds=5)
		delta = _td(days=2, hours=12)
		now = _dt.utcnow()
		instance = Sample(delta)
		assert abs(instance.field.replace(tzinfo=None) - (now + delta)) < fuzz
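The fuzz-window assertion above is a common way to compare "now-ish" datetimes without flaking on execution time. The same tolerance check, stripped of the marrow.mongo Sample document:

# Tolerance-based datetime comparison, as in the test above.
from datetime import datetime, timedelta

fuzz = timedelta(seconds=5)
delta = timedelta(days=2, hours=12)
now = datetime.utcnow()

stored = now + delta                       # stand-in for the field's stored value
assert abs(stored - (now + delta)) < fuzz
print('within tolerance')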
Code example #26
	def get_allChildrens(cls, days=30):
		time_ago = _utc.now().date() - _td(days=days)
		return _newBorn.select(lambda nb: nb.embarazo.parto_inst>=time_ago)
Code example #27
File: agendas.py  Project: edulou13/enlaces
 def get_all(cls):
     backward = _utc.now().date() - _td(days=30)
     return _Agenda.select(lambda ag: ag.fecha_con >= backward).order_by(
         lambda ag: (ag.persona.comunidad.nombre, ag.persona.nombres, ag.
                     persona.apellidos, _desc(ag.fecha_con)))
Code example #28
File: reports.py  Project: kuangtu/quantstats
def metrics(returns,
            benchmark=None,
            rf=0.,
            display=True,
            mode='basic',
            sep=False,
            **kwargs):

    if isinstance(returns, _pd.DataFrame) and len(returns.columns) > 1:
        raise ValueError("`returns` must be a pandas Series, "
                         "but a multi-column DataFrame was passed")

    if benchmark is not None:
        if isinstance(returns, _pd.DataFrame) and len(returns.columns) > 1:
            raise ValueError("`benchmark` must be a pandas Series, "
                             "but a multi-column DataFrame was passed")

    blank = ['']
    df = _pd.DataFrame({"returns": _utils._prepare_returns(returns, rf)})
    if benchmark is not None:
        blank = ['', '']
        df["benchmark"] = _utils._prepare_benchmark(benchmark, returns.index,
                                                    rf)

    df = df.dropna()

    # pct multiplier
    pct = 100 if display or "internal" in kwargs else 1

    # return df
    dd = _calc_dd(df, display=(display or "internal" in kwargs))

    metrics = _pd.DataFrame()

    s_start = {'returns': df['returns'].index.strftime('%Y-%m-%d')[0]}
    s_end = {'returns': df['returns'].index.strftime('%Y-%m-%d')[-1]}
    s_rf = {'returns': rf}

    if "benchmark" in df:
        s_start['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[0]
        s_end['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[-1]
        s_rf['benchmark'] = rf

    metrics['Start Period'] = _pd.Series(s_start)
    metrics['End Period'] = _pd.Series(s_end)
    metrics['Risk-free rate %'] = _pd.Series(s_rf)
    metrics['Exposure %%'] = _stats.exposure(df) * pct

    metrics['~'] = blank

    metrics['Cumulative Return %'] = _stats.comp(df) * pct
    metrics['CAGR%%'] = _stats.cagr(df, rf) * pct
    metrics['Sharpe'] = _stats.sharpe(df, rf)
    metrics['Sortino'] = _stats.sortino(df, rf)
    metrics['Max Drawdown %'] = blank
    metrics['Longest DD Days'] = blank

    if mode.lower() == 'full':
        ret_vol = _stats.volatility(df['returns']) * pct
        if "benchmark" in df:
            bench_vol = _stats.volatility(df['benchmark']) * pct
            metrics['Volatility (ann.) %'] = [ret_vol, bench_vol]
            metrics['R^2'] = _stats.r_squared(df['returns'], df['benchmark'])
        else:
            metrics['Volatility (ann.) %'] = [ret_vol]

        metrics['Calmar'] = _stats.calmar(df)
        metrics['Skew'] = _stats.skew(df)
        metrics['Kurtosis'] = _stats.kurtosis(df)

    if mode.lower() == 'full':
        metrics['~~~~~~~~~~'] = blank

        metrics['Expected Daily %%'] = _stats.expected_return(df) * pct
        metrics['Expected Monthly %%'] = _stats.expected_return(
            df, aggregate='M') * pct
        metrics['Expected Yearly %%'] = _stats.expected_return(
            df, aggregate='A') * pct
        metrics['Kelly Criterion %'] = _stats.kelly_criterion(df) * pct
        metrics['Risk of Ruin %'] = _stats.risk_of_ruin(df)

        metrics['Daily Value-at-Risk %'] = -abs(_stats.var(df) * pct)
        metrics['Expected Shortfall (cVaR) %'] = -abs(_stats.cvar(df) * pct)

    metrics['~~~~~~'] = blank

    metrics['Payoff Ratio'] = _stats.payoff_ratio(df)
    metrics['Profit Factor'] = _stats.profit_factor(df)
    metrics['Common Sense Ratio'] = _stats.common_sense_ratio(df)
    metrics['CPC Index'] = _stats.cpc_index(df)
    metrics['Tail Ratio'] = _stats.tail_ratio(df)
    metrics['Outlier Win Ratio'] = _stats.outlier_win_ratio(df)
    metrics['Outlier Loss Ratio'] = _stats.outlier_loss_ratio(df)

    # returns
    metrics['~~'] = blank

    today = _dt.today()
    metrics['MTD %'] = _stats.comp(
        df[df.index >= _dt(today.year, today.month, 1)]) * pct

    d = today - _td(3 * 365 / 12)
    metrics['3M %'] = _stats.comp(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    d = today - _td(6 * 365 / 12)
    metrics['6M %'] = _stats.comp(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    metrics['YTD %'] = _stats.comp(df[df.index >= _dt(today.year, 1, 1)]) * pct

    d = today - _td(12 * 365 / 12)
    metrics['1Y %'] = _stats.comp(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct
    metrics['3Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(today.year - 3, today.month, today.day)]) * pct
    metrics['5Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(today.year - 5, today.month, today.day)]) * pct
    metrics['10Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(today.year - 10, today.month, today.day)]) * pct
    metrics['All-time (ann.) %'] = _stats.cagr(df) * pct

    # best/worst
    if mode.lower() == 'full':
        metrics['~~~'] = blank
        metrics['Best Day %'] = _stats.best(df) * pct
        metrics['Worst Day %'] = _stats.worst(df) * pct
        metrics['Best Month %'] = _stats.best(df, aggregate='M') * pct
        metrics['Worst Month %'] = _stats.worst(df, aggregate='M') * pct
        metrics['Best Year %'] = _stats.best(df, aggregate='A') * pct
        metrics['Worst Year %'] = _stats.worst(df, aggregate='A') * pct

    # dd
    metrics['~~~~'] = blank
    for ix, row in dd.iterrows():
        metrics[ix] = row
    metrics['Recovery Factor'] = _stats.recovery_factor(df)
    metrics['Ulcer Index'] = _stats.ulcer_index(df, rf)

    # win rate
    if mode.lower() == 'full':
        metrics['~~~~~'] = blank
        metrics['Avg. Up Month %'] = _stats.avg_win(df, aggregate='M') * pct
        metrics['Avg. Down Month %'] = _stats.avg_loss(df, aggregate='M') * pct
        metrics['Win Days %%'] = _stats.win_rate(df) * pct
        metrics['Win Month %%'] = _stats.win_rate(df, aggregate='M') * pct
        metrics['Win Quarter %%'] = _stats.win_rate(df, aggregate='Q') * pct
        metrics['Win Year %%'] = _stats.win_rate(df, aggregate='A') * pct

    if mode.lower() == "full" and "benchmark" in df:
        metrics['~~~~~~~'] = blank
        greeks = _stats.greeks(df['returns'], df['benchmark'])
        metrics['Beta'] = [str(round(greeks['beta'], 2)), '-']
        metrics['Alpha'] = [str(round(greeks['alpha'], 2)), '-']

    # prepare for display
    for col in metrics.columns:
        try:
            metrics[col] = metrics[col].astype(float).round(2)
            if display or "internal" in kwargs:
                metrics[col] = metrics[col].astype(str)
        except Exception:
            pass
        if (display or "internal" in kwargs) and "%" in col:
            metrics[col] = metrics[col] + '%'

        metrics['Longest DD Days'] = _pd.to_numeric(
            metrics['Longest DD Days']).astype('int')
        metrics['Avg. Drawdown Days'] = _pd.to_numeric(
            metrics['Avg. Drawdown Days']).astype('int')

        if display or "internal" in kwargs:
            metrics['Longest DD Days'] = metrics['Longest DD Days'].astype(str)
            metrics['Avg. Drawdown Days'] = metrics[
                'Avg. Drawdown Days'].astype(str)

    metrics.columns = [
        col if '~' not in col else '' for col in metrics.columns
    ]
    metrics.columns = [
        col[:-1] if '%' in col else col for col in metrics.columns
    ]
    metrics = metrics.T

    if "benchmark" in df:
        metrics.columns = ['Strategy', 'Benchmark']
    else:
        metrics.columns = ['Strategy']

    if display:
        print(_tabulate(metrics, headers="keys", tablefmt='simple'))
        return

    if not sep:
        metrics = metrics[metrics.index != '']
    return metrics
Code example #29
	def radio_operator(cls):
		#return cls.get_api(start=15, end=15).filter(lambda ag: ag.persona.cobertura==2)
		now = _utc.now().date()
		backward, forward = (now - _td(days=15)), (now + _td(days=15))
		return _Agenda.select(lambda ag: ag.persona.cobertura==2 and (ag.fecha_con>=backward and ag.fecha_con<=forward) and (ag.mensaje.tipo>=1 and ag.mensaje.tipo<=5) and not ag.rad_estado).order_by(lambda ag: (ag.persona.comunidad.nombre, ag.persona.nombres, ag.persona.apellidos, _desc(ag.fecha_con)))
Code example #30
def get_dinucleotides_parallel(positions,
                               genome_file,
                               base=0,
                               return_as='list'):
    """Return a list of all + and - strand dinucleotides around each position.

    Will loop through each chromosome and search all positions in that
    chromosome in one batch. Lookup is parallel per chromosome.

    Args:
        positions (dict):  Dictionary of {chrom->positions}
        genome_file (str): Location of a genome fasta file or directory of
                           files. If directory, file names must be
                           <chrom_name>.fa[.gz]. Gzipped OK. Directory is
                           preferred in parallel mode.
        base (int):        Either 0 or 1, base of positions in your list
        return_as (str):   dict: Return a dictionary of:
                           {chrom->{position->{'ref': str, '+': tuple, '-': tuple}}}
                           list: just returns two lists with no positions.
                           df: return DataFrame

    Returns:
        (list, list): + strand dinucleotides, - strand dinucleotides. Returns
                      a dict or DataFrame instead if requested through return_as.
    """
    outs = []
    for chrom in positions.keys():
        if os.path.isdir(genome_file):
            fa_file = get_fasta_file(genome_file, chrom)
        else:
            # genome_file is a single FASTA file covering all chromosomes
            fa_file = genome_file
        if not os.path.isfile(fa_file):
            raise FileNotFoundError('{} not found.'.format(genome_file))
        mins = int(len(positions[chrom]) / 2000) + 45
        time = str(_td(minutes=mins))
        outs.append(
            fyrd.submit(
                get_dinucleotides,
                ({
                    chrom: positions[chrom]
                }, fa_file, base, return_as),
                cores=1,
                mem='6GB',
                time=time,
            ))

    if return_as == 'df':
        final = []
    elif return_as == 'dict':
        final = {}
    else:
        final = ([], [])

    fyrd.wait(outs)
    print('Getting results')
    for out in outs:
        res = out.get()
        if return_as == 'df':
            if isinstance(res, dict):
                res = dict_to_df(res, base)
            final.append(res)
        elif return_as == 'dict':
            final.update(res)
        else:
            plus, minus = res
            final[0] += plus
            final[1] += minus

    if return_as == 'df':
        print('Joining dataframe')
        final = pd.concat(final)

    return final
Code example #31
 def test_timedelta_assignment(self, Sample):
     fuzz = _td(seconds=5)
     delta = _td(days=2, hours=12)
     now = _dt.utcnow()
     instance = Sample(delta)
     assert abs(instance.field.replace(tzinfo=None) - (now + delta)) < fuzz