import scipy.stats

import log_processor


def reduce_mean_over_time(exps, expected_len=300):
    """Aggregate a list of experiments into one trimmed-mean-over-time log."""
    new_data = {}
    for data_key in exps[0].progress.keys():
        agg_data = [exp.progress[data_key] for exp in exps]
        # Drop runs that did not record exactly `expected_len` steps so the
        # remaining series stack cleanly along the time axis.
        filtered_agg_data = [
            data for data in agg_data if len(data) == expected_len
        ]
        # Trimmed mean across runs, cutting 10% from each end at every step.
        new_data[data_key] = scipy.stats.trim_mean(filtered_agg_data,
                                                   0.1,
                                                   axis=0)
    return log_processor.ExperimentLog({}, new_data, None)
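
# A minimal usage sketch for reduce_mean_over_time, assuming each experiment
# exposes a `.progress` dict mapping metric names to per-step value sequences.
# `FakeExperiment` and the 'loss' metric are hypothetical stand-ins, not part
# of log_processor; running this also requires the imports above.
#
#     import numpy as np
#
#     class FakeExperiment:
#         def __init__(self, progress):
#             self.progress = progress
#
#     exps = [FakeExperiment({'loss': np.random.rand(300)}) for _ in range(5)]
#     reduced = reduce_mean_over_time(exps, expected_len=300)
#     # `reduced` bundles one length-300 trimmed-mean curve per metric.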


def reduce_mean_partitions_over_time(partitions, split_key, expected_len=300):
    """Yield (partition_key, trimmed-mean ExperimentLog) per partition."""
    for partition_key in partitions:
        # Record the split value(s) as the parameters of the reduced log.
        if isinstance(split_key, (list, tuple)):
            new_params = dict(zip(split_key, partition_key))
        else:
            new_params = {split_key: partition_key}

        exps = partitions[partition_key]
        new_data = {}
        for data_key in exps[0].progress.keys():
            # Skip keys that should not be reduced over time.
            if data_key in ['validation_stop_step']:
                continue
            agg_data = [exp.progress[data_key] for exp in exps]
            # Keep only runs of the expected length, then take a 10% trimmed
            # mean across runs at every time step.
            filtered_agg_data = [
                data for data in agg_data if len(data) == expected_len
            ]
            new_data[data_key] = scipy.stats.trim_mean(filtered_agg_data,
                                                       0.1,
                                                       axis=0)
        yield partition_key, log_processor.ExperimentLog(new_params, new_data, None)
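

# A hypothetical end-to-end sketch of reduce_mean_partitions_over_time. The
# FakeExperiment class, the ('lr', 'batch_size') split keys, and the 'loss'
# metric are illustrative assumptions, not part of the real pipeline, and the
# call still needs log_processor.ExperimentLog to be importable as above.
if __name__ == '__main__':
    import numpy as np

    class FakeExperiment:
        def __init__(self, progress):
            self.progress = progress

    # Each partition key is a (lr, batch_size) tuple shared by its runs.
    partitions = {
        (0.001, 32): [FakeExperiment({'loss': np.random.rand(300)})
                      for _ in range(4)],
        (0.01, 64): [FakeExperiment({'loss': np.random.rand(300)})
                     for _ in range(4)],
    }
    for key, reduced_log in reduce_mean_partitions_over_time(
            partitions, ('lr', 'batch_size')):
        print(key, reduced_log)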