Example #1
from creme import compose


def test_pipeline_funcs():
    def a(x):
        pass

    def b(x):
        pass

    pipelines = [
        compose.FuncTransformer(a) | b,
        compose.FuncTransformer(a) | ('b', b),
        compose.FuncTransformer(a) | ('b', compose.FuncTransformer(b)),
        a | compose.FuncTransformer(b),
        ('a', a) | compose.FuncTransformer(b),
        ('a', compose.FuncTransformer(a)) | compose.FuncTransformer(b)
    ]

    for pipeline in pipelines:
        assert str(pipeline) == 'a | b'
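Each construction above yields the same two-step pipeline. As a rough illustration of what such a pipeline does to a single sample, here is a minimal sketch assuming creme's transform_one API (the functions and feature names are made up for illustration, not part of the test):

from creme import compose


def add_intercept(x):
    # add a constant intercept feature to the sample
    return {**x, 'intercept': 1.}


def double(x):
    # double every feature value
    return {k: 2 * v for k, v in x.items()}


pipeline = compose.FuncTransformer(add_intercept) | double
print(str(pipeline))                     # 'add_intercept | double'
print(pipeline.transform_one({'x': 3}))  # {'x': 6, 'intercept': 2.0}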
Example #2
from creme import compose


def test_union_funcs():
    def a(x):
        pass

    def b(x):
        pass

    pipelines = [
        compose.FuncTransformer(a) + b,
        compose.FuncTransformer(a) + ('b', b),
        compose.FuncTransformer(a) + ('b', compose.FuncTransformer(b)),
        a + compose.FuncTransformer(b),
        ('a', a) + compose.FuncTransformer(b),
        ('a', compose.FuncTransformer(a)) + compose.FuncTransformer(b)
    ]

    for pipeline in pipelines:
        assert str(pipeline) == 'a + b'
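Here the + operator builds a transformer union, which runs both functions on the same input and merges their outputs into a single feature dict. A minimal sketch, again assuming creme's transform_one API and with made-up feature extractors:

from creme import compose


def min_max(x):
    # smallest and largest feature value in the sample
    return {'min': min(x.values()), 'max': max(x.values())}


def mean(x):
    # average feature value in the sample
    return {'mean': sum(x.values()) / len(x)}


union = compose.FuncTransformer(min_max) + mean
print(str(union))                             # 'min_max + mean'
print(union.transform_one({'a': 1, 'b': 3}))  # {'min': 1, 'max': 3, 'mean': 2.0}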
Example #3
from creme import compose


def test_union_funcs():
    def a(x):
        pass

    def b(x):
        pass

    pipelines = [
        compose.FuncTransformer(a) + b,
        compose.FuncTransformer(a) + ('b', b),
        compose.FuncTransformer(a) + ('b', compose.FuncTransformer(b)),
        a + compose.FuncTransformer(b),
        ('a', a) + compose.FuncTransformer(b),
        ('a', compose.FuncTransformer(a)) + compose.FuncTransformer(b)
    ]

    for i, pipeline in enumerate(pipelines):
        print(i, str(pipeline))
        assert str(pipeline) == 'a + b'
Example #4
                 optimizers.SGD(lr=LR, momentum=.1))
}


def add_intercept(x):
    return {**x, 'intercept': 1.}


for name, (creme_optim, torch_optim, keras_optim) in OPTIMIZERS.items():

    X_y = stream.iter_sklearn_dataset(dataset=datasets.load_boston(),
                                      shuffle=True,
                                      random_state=42)
    n_features = 13

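    # creme: add an intercept feature, then fit a linear regression online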
    creme_lin_reg = (compose.FuncTransformer(add_intercept)
                     | linear_model.LinearRegression(
                         optimizer=creme_optim, l2=0, intercept_lr=0))

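    # PyTorch: wrap the network in an online regressor trained with MSE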
    torch_model = PyTorchNet(n_features=n_features)
    torch_lin_reg = PyTorchRegressor(network=torch_model,
                                     loss_fn=torch.nn.MSELoss(),
                                     optimizer=torch_optim(
                                         torch_model.parameters()))

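    # Keras: a single dense output unit, zero-initialised, compiled with MSE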
    inputs = layers.Input(shape=(n_features, ))
    predictions = layers.Dense(1,
                               kernel_initializer='zeros',
                               bias_initializer='zeros')(inputs)
    keras_model = models.Model(inputs=inputs, outputs=predictions)
    keras_model.compile(optimizer=keras_optim, loss='mean_squared_error')
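The benchmark's actual training loop is not part of this excerpt. As a minimal, self-contained sketch of how the creme pipeline alone could be trained and evaluated on the same stream (the MAE metric and the loop itself are assumptions, not the original benchmark code):

from creme import compose, linear_model, metrics, optim, stream
from sklearn import datasets


def add_intercept(x):
    return {**x, 'intercept': 1.}


model = (compose.FuncTransformer(add_intercept)
         | linear_model.LinearRegression(
             optimizer=optim.VanillaSGD(0.01), l2=0, intercept_lr=0))

mae = metrics.MAE()
for x, y in stream.iter_sklearn_dataset(dataset=datasets.load_boston(),
                                        shuffle=True,
                                        random_state=42):
    y_pred = model.predict_one(x)  # predict before learning (progressive validation)
    mae.update(y, y_pred)
    model.fit_one(x, y)            # then update the model with this sample
print(mae)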
Example #5
    # Ranking ratio
    blue_rank = safe_mean(filter(None, [get_ranking(p) for p in blue_side]))
    red_rank = safe_mean(filter(None, [get_ranking(p) for p in red_side]))
    rank_ratio = safe_ratio(max(blue_rank, red_rank), min(blue_rank, red_rank))

    return {
        'mode': match['gameMode'],
        'type': match['gameType'],
        'champion_mastery_points_ratio': champion_points_ratio,
        'total_mastery_points_ratio': total_points_ratio,
        'rank_ratio': rank_ratio
    }


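# v0: extract match features, keep the ratio features and one-hot encode the
# game mode/type, standardise, then fit a linear regression with vanilla SGD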
MODELS = {
    'v0': (compose.FuncTransformer(process_match) | compose.TransformerUnion([
        compose.Whitelister(
            'champion_mastery_points_ratio',
            'total_mastery_points_ratio',
            'rank_ratio',
        ),
        preprocessing.OneHotEncoder('mode', sparse=False),
        preprocessing.OneHotEncoder('type', sparse=False)
    ]) | preprocessing.StandardScaler()
       | linear_model.LinearRegression(optim.VanillaSGD(0.005)))
}


class Command(base.BaseCommand):
    def handle(self, *args, **options):