def plot(self):
    """Render a demo chart containing three randomly generated series."""
    settings = {
        'width': 640,
        'height': 480,
        'graph_title': "Plot",
        'show_graph_title': True,
        'no_css': True,
        'key': True,
        'scale_x_integers': True,
        'scale_y_integers': True,
        'min_x_value': 0,
        'min_y_value': 0,
        'show_data_labels': True,
        'show_x_guidelines': True,
        'show_x_title': True,
        'x_title': "Time",
        'show_y_title': True,
        'y_title': "Ice Cream Cones",
        'y_title_text_direction': 'bt',
    }
    graph = Plot(settings)
    # attach a few random datasets
    for index in range(1, 4):
        series = dict(
            data=flatten(get_data_set()),
            title='series %d' % index,
        )
        graph.add_data(series)
    return render(chart=XML(graph.burn()))
def min_value(self):
    """
    Return the smallest data value across all series.

    An explicit ``min_scale_value`` overrides the computed minimum;
    for stacked charts the cumulative series are used instead of the
    raw ones.
    """
    if self.min_scale_value:
        return self.min_scale_value
    series = (
        self.get_cumulative_data()
        if self.stacked
        else map(itemgetter('data'), self.data)
    )
    return min(flatten(series))
def plot(self):
    """Render a demo chart containing three randomly generated series."""
    # PEP 8: keyword arguments take no spaces around '='.
    g = Plot(dict(
        width=640,
        height=480,
        graph_title="Plot",
        show_graph_title=True,
        no_css=True,
        key=True,
        scale_x_integers=True,
        scale_y_integers=True,
        min_x_value=0,
        min_y_value=0,
        show_data_labels=True,
        show_x_guidelines=True,
        show_x_title=True,
        x_title="Time",
        show_y_title=True,
        y_title="Ice Cream Cones",
        y_title_text_direction='bt',
    ))
    # add a few random datasets
    for n in range(1, 4):
        g.add_data(dict(
            data=flatten(get_data_set()),
            title='series %d' % n,
        ))
    res = XML(g.burn())
    return render(chart=res)
def test_lines_completes(gzip_stream):
    """
    Reading lines from a gzip stream must terminate once the stream
    is exhausted (no hang, no raised error).
    """
    streams = gzip.load_streams(gzip.read_chunks(gzip_stream))
    all_lines = flatten(map(gzip.lines_from_stream, streams))
    consume(all_lines)
def test_iterable_data_flat(self):
    """A flattened iterable of coordinate pairs is accepted as series data."""
    chart = Plot()
    chart.add_data(dict(
        data=flatten(self.get_data()),
        title='labels',
    ))
    rendered = chart.burn()
    assert 'text="(1.00, 0.00)"' in rendered
def test_iterable_data_flat(self):
    """A flattened iterable of coordinate pairs is accepted as series data."""
    chart = Plot()
    chart.add_data(dict(
        data=flatten(self.get_data()),
        title='labels',
    ))
    rendered = chart.burn()
    # burn() yields bytes here, so the probe is a bytes literal
    assert b'text="(1.00, 0.00)"' in rendered
def test_lines_from_stream(gzip_stream):
    """The first line is the opening bracket; the second parses as a JSON object."""
    chunks = gzip.read_chunks(gzip_stream)
    lines = flatten(map(gzip.lines_from_stream, gzip.load_streams(chunks)))
    assert next(lines) == '['
    # strip the trailing newline/comma so the line parses as standalone JSON
    record = json.loads(next(lines).rstrip('\n,'))
    assert isinstance(record, dict)
    assert 'id' in record
def nearest_weekday(self, calendar):
    """
    Return the nearest weekday to self.
    """
    weekend = calendar.get_weekend_days()
    # Fan out from self in both directions:
    # self, self, self-1, self+1, self-2, self+2, ...
    offsets = (timedelta(days=n) for n in itertools.count())
    candidates = recipes.flatten(
        (self - step, self + step) for step in offsets
    )
    return next(
        day for day in candidates if day.weekday() not in weekend
    )
def __main():
    """Fetch Geekbench results for a set of CPU models and write them to Excel."""
    # NOTE(review): an earlier hard-coded 14-model list here was dead code —
    # it was immediately overwritten by the assignment below, so only these
    # three models were ever queried. The dead assignment has been removed.
    models = "4450u,4650u,4900HS".split(",")
    query_data = __prepare_query_data(models)
    pprint(query_data)
    data = flatten(concurrent_map(__get_geekbench_results, query_data))
    DataFrame(data).to_excel(
        os.path.join(os.path.dirname(__file__), "result.xlsx"),
        index=False,
    )
def __init__(
    self,
    df: pd.DataFrame,
    exposure_path: Path,
    raw_dir: Path,
    name_list: Path,
    transform=transforms.ToTensor(),
    metric="mse",
):
    """
    Build a dataset over per-image exposure scores.

    `df` holds (name, metric, ev, score) rows; it is pivoted into a
    name-by-(metric, ev) table and restricted to the image names listed
    in `name_list`.
    """
    self.exposure_path = exposure_path
    self.transform = transform
    # keep only the rows whose image name appears in the name list
    wanted = flatten(pd.read_csv(name_list).to_numpy())
    table = df.pivot_table(index="name", columns=["metric", "ev"], values="score")
    table = table.loc[table.index.intersection(wanted)]
    # EV grid from -3 to +6 in 0.25 steps, with 0 excluded
    lo = -3
    hi = 6
    step = 0.25
    grid: np.ndarray = np.linspace(lo, hi, int((hi - lo) / step + 1))
    self.evs = np.array([*grid[grid < 0], *grid[grid > 0]])
    self.ev_indices = {ev: i for (i, ev) in enumerate(self.evs)}
    # per-image EV that minimises the chosen metric
    self.opt_choices = table[metric].idxmin(axis=1)
    self.metric = metric
    self.data = table
    self.names = pd.Series(self.data.index)
    self.generator = DataGenerator(
        raw_path=raw_dir,
        out_path=exposure_path.parent,
        store_path=None,
        compute_scores=False,
    )
def max_value(self):
    """
    Return the largest data value across all series; for stacked charts
    the cumulative series are used instead of the raw ones.
    """
    if self.stacked:
        series = self.get_cumulative_data()
    else:
        series = map(itemgetter('data'), self.data)
    return max(flatten(series))