Example #1
def plot_experiment(experiment_spec, experiment_df):
    '''
    Plot the variable specs vs fitness vector of an experiment, where each point is a trial.
    ref colors: https://plot.ly/python/heatmaps-contours-and-2dhistograms-tutorial/#plotlys-predefined-color-scales
    '''
    y_cols = ['fitness'] + FITNESS_COLS
    x_cols = ps.difference(experiment_df.columns.tolist(), y_cols)

    fig = viz.tools.make_subplots(rows=len(y_cols), cols=len(x_cols), shared_xaxes=True, shared_yaxes=True)
    fitness_sr = experiment_df['fitness']
    min_fitness = fitness_sr.values.min()
    max_fitness = fitness_sr.values.max()
    for row_idx, y in enumerate(y_cols):
        for col_idx, x in enumerate(x_cols):
            x_sr = experiment_df[x]
            guard_cat_x = x_sr.astype(str) if x_sr.dtype == 'object' else x_sr
            trace = viz.go.Scatter(
                y=experiment_df[y], yaxis=f'y{row_idx+1}',
                x=guard_cat_x, xaxis=f'x{col_idx+1}',
                showlegend=False, mode='markers',
                marker={
                    'symbol': 'circle-open-dot', 'color': experiment_df['fitness'], 'opacity': 0.5,
                    # dump first quarter of colorscale that is too bright
                    'cmin': min_fitness - 0.25 * (max_fitness - min_fitness), 'cmax': max_fitness,
                    'colorscale': 'YlGnBu', 'reversescale': True
                },
            )
            fig.append_trace(trace, row_idx + 1, col_idx + 1)
            fig.layout[f'xaxis{col_idx+1}'].update(title='<br>'.join(ps.chunk(x, 20)), zerolinewidth=1, categoryarray=sorted(guard_cat_x.unique()))
        fig.layout[f'yaxis{row_idx+1}'].update(title=y, rangemode='tozero')
    fig.layout.update(title=f'experiment graph: {experiment_spec["name"]}', width=max(600, len(x_cols) * 300), height=700)
    viz.plot(fig)
    return fig
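The '<br>'.join(ps.chunk(x, 20)) idiom above works because pydash.chunk slices any sequence, so applying it to a string yields fixed-width substrings that can be rejoined with <br> to wrap long column names in axis titles. A minimal sketch of that behavior (the column name is illustrative):

import pydash as ps

# chunk slices any sequence, so a string splits into 20-character pieces
name = 'agent.0.algorithm.explore_var_spec.start_val'  # hypothetical column name
print(ps.chunk(name, 20))
# ['agent.0.algorithm.ex', 'plore_var_spec.start', '_val']
print('<br>'.join(ps.chunk(name, 20)))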
Example #2
def add_identical_offers(collection_name,
                         offer_limit,
                         n_highest,
                         provenance=None):
    collection = get_collection("mpnoffers")
    model = load_model_from_s3(collection_name)
    now = datetime.now()
    mongo_filter = {
        "validThrough": {
            "$gt": now
        },
        "siteCollection": collection_name
    }
    if provenance:
        mongo_filter["provenance"] = provenance
    offers = collection.find(
        mongo_filter,
        projection=MONGO_PROJECTION,
        limit=offer_limit,
    )
    offers_list = list(offers)
    result = [
        add_identical_offers_to_batch(batch, model, collection_name, n_highest)
        for batch in pydash.chunk(offers_list, CHUNK_SIZE)
    ]
    return result
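The batching above relies on pydash.chunk splitting a list into fixed-size pieces, with a shorter final piece when the length is not a multiple of the chunk size. A quick illustration with arbitrary values:

import pydash

print(pydash.chunk([1, 2, 3, 4, 5], 2))
# [[1, 2], [3, 4], [5]]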
Example #3
def plot_experiment(experiment_spec, experiment_df):
    '''
    Plot the variable specs vs fitness vector of an experiment, where each point is a trial.
    ref colors: https://plot.ly/python/heatmaps-contours-and-2dhistograms-tutorial/#plotlys-predefined-color-scales
    '''
    y_cols = ['fitness'] + FITNESS_COLS
    x_cols = ps.difference(experiment_df.columns.tolist(), y_cols)

    fig = viz.tools.make_subplots(rows=len(y_cols), cols=len(x_cols), shared_xaxes=True, shared_yaxes=True)
    fitness_sr = experiment_df['fitness']
    min_fitness = fitness_sr.values.min()
    max_fitness = fitness_sr.values.max()
    for row_idx, y in enumerate(y_cols):
        for col_idx, x in enumerate(x_cols):
            x_sr = experiment_df[x]
            guard_cat_x = x_sr.astype(str) if x_sr.dtype == 'object' else x_sr
            trace = viz.go.Scatter(
                y=experiment_df[y], yaxis=f'y{row_idx+1}',
                x=guard_cat_x, xaxis=f'x{col_idx+1}',
                showlegend=False, mode='markers',
                marker={
                    'symbol': 'circle-open-dot', 'color': experiment_df['fitness'], 'opacity': 0.5,
                    # dump first half of colorscale that is too bright
                    'cmin': min_fitness - 0.50 * (max_fitness - min_fitness), 'cmax': max_fitness,
                    'colorscale': 'YlGnBu', 'reversescale': True
                },
            )
            fig.append_trace(trace, row_idx + 1, col_idx + 1)
            fig.layout[f'xaxis{col_idx+1}'].update(title='<br>'.join(ps.chunk(x, 20)), zerolinewidth=1, categoryarray=sorted(guard_cat_x.unique()))
        fig.layout[f'yaxis{row_idx+1}'].update(title=y, rangemode='tozero')
    fig.layout.update(title=f'experiment graph: {experiment_spec["name"]}', width=max(600, len(x_cols) * 300), height=700)
    viz.plot(fig)
    return fig
Example #4
def read_spec_and_run(spec_file, spec_name, lab_mode):
    '''Read a spec and run it in lab mode'''
    logger.info(
        f'Running lab spec_file:{spec_file} spec_name:{spec_name} in mode:{lab_mode}'
    )
    if lab_mode in TRAIN_MODES:
        spec = spec_util.get(spec_file, spec_name)
    else:  # eval mode
        if '@' in lab_mode:
            lab_mode, prename = lab_mode.split('@')
            spec = spec_util.get_eval_spec(spec_file, spec_name, prename)
        else:
            spec = spec_util.get(spec_file, spec_name)

    if 'spec_params' not in spec:
        run_spec(spec, lab_mode)
    else:  # spec is parametrized; run them in parallel
        param_specs = spec_util.get_param_specs(spec)
        num_pro = spec['meta']['param_spec_process']
        # can't use Pool since it cannot spawn nested Process, which is needed for VecEnv and parallel sessions. So these will run and wait by chunks
        workers = [
            mp.Process(target=run_spec, args=(spec, lab_mode))
            for spec in param_specs
        ]
        for chunk_w in ps.chunk(workers, num_pro):
            for w in chunk_w:
                w.start()
            for w in chunk_w:
                w.join()
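The run-and-wait-by-chunks pattern from the comment above can be isolated into a small sketch; work and its arguments are hypothetical stand-ins for run_spec and the param specs:

import multiprocessing as mp
import pydash as ps

def work(n):  # hypothetical stand-in for run_spec
    print(n * n)

if __name__ == '__main__':
    num_pro = 3  # maximum number of processes alive at once
    workers = [mp.Process(target=work, args=(n,)) for n in range(8)]
    # start one chunk of processes, wait for all of them to finish,
    # then move on to the next chunk
    for chunk_w in ps.chunk(workers, num_pro):
        for w in chunk_w:
            w.start()
        for w in chunk_w:
            w.join()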
Example #5
def process_in_chunks(keys, process, chunk_size=10000):
    docs = []
    chunks = py_.chunk(keys, chunk_size)
    pbar = tqdm(total=len(keys))
    for chunk in chunks:
        docs.extend(process(chunk))
        pbar.update(len(chunk))
    pbar.close()
    return docs
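A minimal usage sketch, assuming process accepts a list of keys and returns a list of documents; fetch_docs here is a hypothetical stand-in:

def fetch_docs(key_chunk):  # hypothetical per-chunk fetcher
    return [{'key': k} for k in key_chunk]

docs = process_in_chunks(list(range(25000)), fetch_docs, chunk_size=10000)
# processes chunks of 10000, 10000 and 5000 keys, updating the tqdm bar per chunk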
Example #6
def plot_experiment(experiment_spec, experiment_df, metrics_cols):
    '''
    Plot the metrics vs. specs parameters of an experiment, where each point is a trial.
    ref colors: https://plot.ly/python/heatmaps-contours-and-2dhistograms-tutorial/#plotlys-predefined-color-scales
    '''
    y_cols = metrics_cols
    x_cols = ps.difference(experiment_df.columns.tolist(), y_cols + ['trial'])
    fig = subplots.make_subplots(rows=len(y_cols),
                                 cols=len(x_cols),
                                 shared_xaxes=True,
                                 shared_yaxes=True,
                                 print_grid=False)
    strength_sr = experiment_df['strength']
    min_strength, max_strength = strength_sr.min(), strength_sr.max()
    for row_idx, y in enumerate(y_cols):
        for col_idx, x in enumerate(x_cols):
            x_sr = experiment_df[x]
            guard_cat_x = x_sr.astype(str) if x_sr.dtype == 'object' else x_sr
            trace = go.Scatter(
                y=experiment_df[y],
                yaxis=f'y{row_idx+1}',
                x=guard_cat_x,
                xaxis=f'x{col_idx+1}',
                showlegend=False,
                mode='markers',
                marker={
                    'symbol': 'circle-open-dot',
                    'color': strength_sr,
                    'opacity': 0.5,
                    # dump first portion of colorscale that is too bright
                    'cmin': min_strength - 0.5 * (max_strength - min_strength),
                    'cmax': max_strength,
                    'colorscale': 'YlGnBu',
                    'reversescale': False
                },
            )
            fig.add_trace(trace, row_idx + 1, col_idx + 1)
            fig.update_xaxes(title_text='<br>'.join(ps.chunk(x, 20)),
                             zerolinewidth=1,
                             categoryarray=sorted(guard_cat_x.unique()),
                             row=len(y_cols),
                             col=col_idx + 1)
        fig.update_yaxes(title_text=y,
                         rangemode='tozero',
                         row=row_idx + 1,
                         col=1)
    fig.layout.update(title=f'experiment graph: {experiment_spec["name"]}',
                      width=100 + 300 * len(x_cols),
                      height=200 + 300 * len(y_cols))
    plot(fig)
    graph_prepath = experiment_spec['meta']['graph_prepath']
    save_image(fig, f'{graph_prepath}_experiment_graph.png')
    # save important graphs in prepath directly
    prepath = experiment_spec['meta']['prepath']
    save_image(fig, f'{prepath}_experiment_graph.png')
    return fig
Example #7
def get_latex_im_body(envs):
    latex_ims = []
    for env in envs:
        env = guard_env_name(env)
        latex_im = rf'\subfloat{{\includegraphics[width=1.22in]{{images/{env}_multi_trial_graph_mean_returns_ma_vs_frames.png}}}}'
        latex_ims.append(latex_im)

    im_matrix = ps.chunk(latex_ims, 4)
    latex_im_body = '\\\\\n'.join([' & \n'.join(row) for row in im_matrix])
    return latex_im_body
Example #8
    def average_implied_volatility(self, tenor: str, strike_reference: EdrDataReference, relative_strike: Real, *,
                                   real_time: bool = False, request_id: Optional[str] = None,
                                   source: Optional[str] = None) -> pd.Series:
        """
        Weighted average implied volatility

        :param tenor: relative date representation of expiration date e.g. 1m
        :param strike_reference: reference for strike level
        :param relative_strike: strike relative to reference
        :param real_time: whether to retrieve intraday data instead of EOD
        :param request_id: service request id, if any
        :param source: name of function caller
        :return: time series of the average implied volatility
        """

        if real_time:
            raise NotImplementedError('real-time basket implied vol not implemented')

        ref_string, relative_strike = preprocess_implied_vol_strikes_eq(VolReference(strike_reference.value),
                                                                        relative_strike)

        log_debug(request_id, _logger, 'where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, ref_string,
                  relative_strike)
        where = dict(tenor=tenor, strikeReference=ref_string, relativeStrike=relative_strike)
        tasks = []
        for i, chunked_assets in enumerate(chunk(self.get_marquee_ids(), 3)):
            query = GsDataApi.build_market_data_query(
                chunked_assets,
                QueryType.IMPLIED_VOLATILITY,
                where=where,
                source=source,
                real_time=real_time)

            tasks.append(partial(GsDataApi.get_market_data, query, request_id))

        results = ThreadPoolManager.run_async(tasks)
        vol_data = pd.concat(results)

        actual_weights = self.get_actual_weights(request_id)

        # Add in today's data
        if not real_time and DataContext.current.end_date >= datetime.date.today():
            vol_data = ts.append_last_for_measure(vol_data, self.get_marquee_ids(), QueryType.IMPLIED_VOLATILITY, where,
                                                  source=source, request_id=request_id)
            vol_data.index.rename('date', inplace=True)

        vols = vol_data.pivot_table('impliedVolatility', ['date'], 'assetId')
        vols = vols.reindex(self.get_marquee_ids(), axis=1)
        vols.index.name = None

        # Necessary when current values appended - set weights index to match vols index
        actual_weights = actual_weights.reindex(vols.index).ffill()

        return actual_weights.mul(vols).sum(axis=1, skipna=False)
Example #9
    def send_message(self) -> List[SlackResponse]:
        responses = []
        parent_message = self.client.chat_postMessage(channel=self.channel,
                                                      text="*Snow Peak 入荷情報*")
        responses.append(parent_message)

        # Slack allows at most 50 blocks per message, so post in chunks
        blocks_list = chunk(self.blocks, 50)
        parent_message_ts = parent_message['ts']
        for blocks in blocks_list:
            message = Message(channel=self.channel, blocks=blocks)
            responses.append(self.client.chat_postMessage(**message,
                                                          thread_ts=parent_message_ts))

        return responses
Example #10
    def bulk_compliant_status(cls, imeis):
        """Method to get IMEIs status from CORE in bulk."""
        non_compliant_list, compliant_list = [], []
        imei_list = pydash.flatten_deep(imeis)
        imei_chunks = pydash.chunk(imei_list, 1000)

        url = cls.core_api_v2 + '/imei-batch'
        for chunk in imei_chunks:
            response = requests.post(url, json={'imeis': chunk})
            compliant, non_compliant = Utilities.get_bulk_compliant_imeis(response)
            non_compliant_list = pydash.interleave(non_compliant_list, non_compliant)
            compliant_list = pydash.interleave(compliant_list, compliant)
        return compliant_list, non_compliant_list
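The flatten-then-chunk combination above is easy to see in isolation: flatten_deep collapses arbitrarily nested lists before chunk cuts the result into batches. A quick illustration with made-up values:

import pydash

imeis = [['a', ['b', 'c']], ['d']]   # arbitrarily nested input
flat = pydash.flatten_deep(imeis)    # ['a', 'b', 'c', 'd']
print(pydash.chunk(flat, 3))         # [['a', 'b', 'c'], ['d']]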
Example #11
def generate_interpolation_frames(trainer, latents_low, latents_high, num_frames, batch_size=50, device="cuda:0"):
    trainer.GAN.eval()
    num_rows = 1

    latent_dim = trainer.GAN.latent_dim
    image_size = trainer.GAN.image_size

    # latents and noise

    #latents_low = torch.randn(num_rows ** 2, latent_dim).cuda(self.rank)
    #latents_high = torch.randn(num_rows ** 2, latent_dim).cuda(self.rank)
    ratios = torch.linspace(0., 1., num_frames)

    chunks = list(py_.chunk(ratios, size=batch_size))

    latents_low = latents_low.unsqueeze(0)
    latents_high = latents_high.unsqueeze(0)

    ret = None

    for i, chunk_ratios in enumerate(chunks):
        batch_latents = []
        for j, ratio in enumerate(chunk_ratios):
            # overall frame index across chunks, not just within this chunk
            frame_idx = i * batch_size + j
            if frame_idx == 0:
                interp_latents = latents_low
            elif frame_idx == len(ratios) - 1:
                interp_latents = latents_high
            else:
                interp_latents = slerp(ratio, latents_low, latents_high)
            batch_latents.append(interp_latents)

        stacked_latents = torch.vstack(batch_latents).to(device)
        generated_images = trainer.generate_truncated(
            trainer.GAN.GE, stacked_latents).cpu()

        if ret is None:
            ret = generated_images
        else:
            # torch tensors have no .stack method; concatenate along the batch dim
            ret = torch.cat((ret, generated_images), dim=0)

    return ret
Example #12
def test_chunk(case, expected):
    assert _.chunk(*case) == expected
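The test body assumes externally supplied parameters; a self-contained version with explicit, illustrative cases might look like this:

import pytest
import pydash as _

@pytest.mark.parametrize('case,expected', [
    (([1, 2, 3, 4, 5], 2), [[1, 2], [3, 4], [5]]),
    (([1, 2, 3],), [[1], [2], [3]]),  # default size is 1
])
def test_chunk(case, expected):
    assert _.chunk(*case) == expected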
Example #13
def list_case_books():
    casebooks = chunk(list(CaseBook.select()), 4)  # materialize the query before chunking
    return render_template('list_case_books.html', casebooks=casebooks)
Example #14
async def command_clear(message, client, *args, **kwargs):
    if message.content.startswith('!clear'):
        logger.info('Run command !clear')

        # If no arguments were found for the command, clear the entire channel
        command_args = kwargs.pop('command_args', [])
        if not command_args:

            # Get all the messages in the channel
            messages = client.logs_from(message.channel)
            messages = [m async for m in messages]

            # Try bulk deleting the messages in chunks of 100
            for chunk in pydash.chunk(messages, 100):
                await client.delete_messages(chunk)
                messages = pydash.difference(messages, chunk)

            # Individually delete each message which could not be deleted
            # via bulk
            for message_ in messages:
                await client.delete_message(message_)

            return True

        # If arguments were found for the command, do different things
        else:
            command_args = list(command_args)

            # If there is more than one argument, do nothing
            if len(command_args) > 1:
                return True

            # Save the first argument to a variable for readability
            clear_amount = command_args[0]

            # If the first argument is an integer, only clear so many messages
            if type(clear_amount) is int:

                # TODO: Support retrieving more than 100 messages
                # If clear_amount is greater than 100, send an apology
                if clear_amount > 100:
                    await client.send_message(
                        message.channel,
                        'I can only clear 100 messages at a time.'
                    )
                    return True

                # Get the messages to be deleted
                messages = client.logs_from(
                    message.channel, limit=clear_amount)
                messages = [m async for m in messages]

                # Try bulk deleting the messages in chunks of 100
                for chunk in pydash.chunk(messages, 100):
                    await client.delete_messages(chunk)
                    messages = pydash.difference(messages, chunk)

                # Individually delete each message which could not be deleted
                # via bulk
                for message_ in messages:
                    await client.delete_message(message_)

                return True

    return False