Example No. 1
    # loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    # loss_function=partial(scaled_cost, loss_func=mse),
    # loss_function=ignore_inactive,
    # loss_function=partial(scaled_cost3, ignore_inactive=False),
    # updates_func=momentum,
    updates_func=clipped_nesterov_momentum,
    updates_kwargs={'clip_range': (0, 10)},
    learning_rate=1e-2,
    learning_rate_changes_by_iteration={
        2000: 1e-3,
        10000: 1e-4
    },
    do_save_activations=True,
    auto_reshape=False,
    # plotter=CentralOutputPlotter
    plotter=Plotter(n_seq_to_plot=32)
)
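`clipped_nesterov_momentum` is not a stock Lasagne update rule, and `updates_kwargs={'clip_range': (0, 10)}` only makes sense once you see where the clip is applied. A minimal sketch of one plausible implementation, assuming element-wise clipping before the standard Nesterov step (the real project code may clip differently):

import theano.tensor as T
from lasagne.updates import nesterov_momentum

def clipped_nesterov_momentum(loss, all_params, learning_rate,
                              clip_range, momentum=0.9):
    # Element-wise clip of every gradient into `clip_range`,
    # then hand the clipped gradients to Lasagne's built-in rule.
    low, high = clip_range
    all_grads = [T.clip(g, low, high) for g in T.grad(loss, all_params)]
    return nesterov_momentum(all_grads, all_params, learning_rate, momentum)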


def exp_a(name):
    global source
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    net_dict_copy['layers_config'] = [
        {
            'type': DimshuffleLayer,
Example No. 2
        # 800: 1e-4
        # 500: 1e-3
        # 4000: 1e-3,
        # 6000: 5e-6,
        # 7000: 1e-6
        # 2000: 5e-6
        # 3000: 1e-5
        # 7000: 5e-6,
        # 10000: 1e-6,
        # 15000: 5e-7,
        # 50000: 1e-7
    },
    do_save_activations=True,
    # auto_reshape=False,
    # plotter=CentralOutputPlotter
    plotter=Plotter(n_seq_to_plot=10)
)


def exp_a(name):
    # ReLU hidden layers
    # linear output
    # output one appliance
    # 0% skip prob for first appliance
    # 100% skip prob for other appliances
    # input is diff
    global source
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy['lag'] = 30
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
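The comments above say the input is the first difference of the aggregate signal, offset by `lag=30` samples. A tiny illustration of the differencing idea (my own sketch; the actual transform is applied inside `RealApplianceSource`):

import numpy as np

aggregate = np.array([100., 100., 2400., 2400., 150.])
diff_input = np.diff(aggregate)  # [0., 2300., 0., -2250.] -- power *changes*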
Example No. 3
def exp_a(name):
    logger = logging.getLogger(name)
    real_appliance_source1 = RealApplianceSource(
        logger=logger,
        filename=UKDALE_FILENAME,
        appliances=[
            TARGET_APPLIANCE, ['fridge freezer', 'fridge', 'freezer'],
            'dish washer', 'kettle', ['washer dryer', 'washing machine']
        ],
        max_appliance_powers=[MAX_TARGET_POWER, 300, 2500, 2600, 2400],
        on_power_thresholds=[ON_POWER_THRESHOLD] + [10] * 4,
        min_on_durations=[MIN_ON_DURATION, 60, 1800, 12, 1800],
        min_off_durations=[MIN_OFF_DURATION, 12, 1800, 12, 600],
        divide_input_by_max_input_power=False,
        window_per_building=WINDOW_PER_BUILDING,
        seq_length=SEQ_LENGTH,
        output_one_appliance=True,
        train_buildings=TRAIN_BUILDINGS,
        validation_buildings=VALIDATION_BUILDINGS,
        n_seq_per_batch=N_SEQ_PER_BATCH,
        skip_probability=0.75,
        skip_probability_for_first_appliance=SKIP_PROBABILITY_FOR_TARGET,
        # target_is_start_and_end_and_mean=True,
        standardise_input=True,
        input_stats=INPUT_STATS,
        independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS)

    #     same_location_source1 = SameLocation(
    #         logger=logger,
    #         filename=UKDALE_FILENAME,
    #         target_appliance=TARGET_APPLIANCE,
    #         window_per_building=WINDOW_PER_BUILDING,
    #         seq_length=SEQ_LENGTH,
    #         train_buildings=TRAIN_BUILDINGS,
    #         validation_buildings=VALIDATION_BUILDINGS,
    #         n_seq_per_batch=N_SEQ_PER_BATCH,
    #         skip_probability=SKIP_PROBABILITY_FOR_TARGET,
    # #        target_is_start_and_end_and_mean=True,
    #         standardise_input=True,
    #         offset_probability=1,
    #         divide_target_by=MAX_TARGET_POWER,
    #         input_stats=INPUT_STATS,
    #         independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,
    #         on_power_threshold=ON_POWER_THRESHOLD,
    #         min_on_duration=MIN_ON_DURATION,
    #         min_off_duration=MIN_OFF_DURATION
    #     )

    # multi_source = MultiSource(
    #     sources=[
    #         {
    #             'source': real_appliance_source1,
    #             'train_probability': 0.5,
    #             'validation_probability': 0
    #         },
    #         {
    #             'source': same_location_source1,
    #             'train_probability': 0.5,
    #             'validation_probability': 1
    #         }
    #     ],
    #     standardisation_source=same_location_source1
    # )

    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(
        dict(experiment_name=name,
             source=real_appliance_source1,
             plotter=Plotter(n_seq_to_plot=32,
                             n_training_examples_to_plot=16)))
    net = Net(**net_dict_copy)
    return net
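The two skip probabilities control how often an appliance is left out of a synthetic aggregate sequence: the target appliance is skipped with probability `SKIP_PROBABILITY_FOR_TARGET`, every other appliance with probability 0.75. An illustrative sketch of that selection logic (function and argument names here are assumptions, not the source's API):

import numpy as np

def choose_appliances(appliances, skip_probability,
                      skip_probability_for_first_appliance):
    # Keep each appliance with probability (1 - skip); the first
    # (target) appliance gets its own skip probability.
    chosen = []
    for i, appliance in enumerate(appliances):
        p_skip = (skip_probability_for_first_appliance if i == 0
                  else skip_probability)
        if np.random.rand() >= p_skip:
            chosen.append(appliance)
    return chosen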
Example No. 4
def net_dict_ae(seq_length):
    NUM_FILTERS = 8
    return dict(
        epochs=100000,
        save_plot_interval=25000,
        loss_function=lambda x, t: squared_error(x, t).mean(),
        updates_func=nesterov_momentum,
        learning_rate=1e-1,
        learning_rate_changes_by_iteration={
            50000: 1e-2,
            75000: 1e-3
        },
        epoch_callbacks={
#            40000: only_train_on_real_data
        },
        do_save_activations=True,
        auto_reshape=False,
        plotter=Plotter(
            n_seq_to_plot=32,
            n_training_examples_to_plot=16
        ),
        layers_config=[
            {
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1)  # (batch, features, time)
            },
            {
                'label': 'conv0',
                'type': Conv1DLayer,  # convolve over the time axis
                'num_filters': NUM_FILTERS,
                'filter_size': 4,
                'stride': 1,
                'nonlinearity': None,
                'border_mode': 'valid'
            },
            {
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1)  # back to (batch, time, features)
            },
            {
                'type': DenseLayer,
                'num_units': (seq_length - 3) * NUM_FILTERS,
                'nonlinearity': rectify
            },
            {
                'type': DenseLayer,
                'num_units': 128,
                'nonlinearity': rectify
            },
            {
                'type': DenseLayer,
                'num_units': (seq_length - 3) * NUM_FILTERS,
                'nonlinearity': rectify
            },
            {
                'type': ReshapeLayer,
                'shape': (-1, (seq_length - 3), NUM_FILTERS)
            },
            {
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1)  # (batch, features, time)
            },
            {   # deconvolution: the 'full'-mode conv grows the time axis back
                'type': Conv1DLayer,
                'num_filters': 1,
                'filter_size': 4,
                'stride': 1,
                'nonlinearity': None,
                'border_mode': 'full'
            },
            {
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1)  # back to (batch, time, features)
            }
        ]
    )
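The `(seq_length - 3) * NUM_FILTERS` sizes fall out of the convolution arithmetic: a `'valid'` convolution with `filter_size=4` shortens the time axis by 3, and the closing `'full'` convolution lengthens it by 3 again, so the net maps `seq_length` inputs back to `seq_length` outputs. A quick check:

# Shape bookkeeping for net_dict_ae; seq_length=128 chosen for illustration.
seq_length, filter_size, num_filters = 128, 4, 8

conv_out = seq_length - filter_size + 1   # 'valid' conv: 125 time steps
flat = conv_out * num_filters             # width of the big dense layers: 1000
deconv_out = conv_out + filter_size - 1   # 'full' conv restores 128 steps

assert conv_out == seq_length - 3
assert deconv_out == seq_length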
Example No. 5
def net_dict_rnn(seq_length):
    if seq_length <= 300:
        learning_rate = 1e-2
        learning_rate_changes_by_iteration = {
            1000: 1e-3,
            10000: 1e-4
        }
    elif seq_length < 1500:
        learning_rate = 1e-4
        learning_rate_changes_by_iteration = {
            5000: 1e-5,
            9000: 1e-6
        }
    else:
        learning_rate = 1e-5
        learning_rate_changes_by_iteration = {
            5000: 1e-6,
            9000: 1e-7
        }
    return dict(
        epochs=10000,
        save_plot_interval=1000,
        loss_function=lambda x, t: squared_error(x, t).mean(),
        updates_func=nesterov_momentum,
        learning_rate=learning_rate,
        learning_rate_changes_by_iteration=learning_rate_changes_by_iteration,
        do_save_activations=True,
        auto_reshape=True,
        plotter=Plotter(
            n_seq_to_plot=32,
            n_training_examples_to_plot=16
        ),
        layers_config=[
            {
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1)  # (batch, features, time)
            },
            {
                'type': Conv1DLayer,  # convolve over the time axis
                'num_filters': 16,
                'filter_size': 4,
                'stride': 1,
                'nonlinearity': None,
                'border_mode': 'same'
            },
            {
                'type': DimshuffleLayer,
                'pattern': (0, 2, 1),  # back to (batch, time, features)
                'label': 'dimshuffle3'
            },
            {
                'type': BLSTMLayer,
                'num_units': 128,
                'merge_mode': 'concatenate',
                'grad_clipping': 10.0,
                'gradient_steps': 500
            },
            {
                'type': BLSTMLayer,
                'num_units': 256,
                'merge_mode': 'concatenate',
                'grad_clipping': 10.0,
                'gradient_steps': 500
            },
            {
                'type': DenseLayer,
                'num_units': 128,
                'nonlinearity': tanh
            },
            {
                'type': DenseLayer,
                'num_units': 1,
                'nonlinearity': None
            }
        ]
    )
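`learning_rate_changes_by_iteration` maps iteration numbers to new learning rates, with the starting rate chosen by sequence length (longer sequences get smaller rates to keep BLSTM training stable). A minimal sketch of how a training loop might consume the schedule (the loop is an assumption; the real `Net` class applies it internally):

net_dict = net_dict_rnn(seq_length=512)  # falls into the 1e-4 branch
learning_rate = net_dict['learning_rate']
schedule = net_dict['learning_rate_changes_by_iteration']

for iteration in range(net_dict['epochs']):
    if iteration in schedule:
        learning_rate = schedule[iteration]
    # ... compute gradients and apply updates_func at `learning_rate` ...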
Example No. 6
def exp_g(name):
    # conv at the beginning
    # like exp_b, but with dropout added
    logger = logging.getLogger(name)
    global multi_source

    SEQ_LENGTH = 256
    N_SEQ_PER_BATCH = 64

    real_appliance_source1 = RealApplianceSource(
        logger=logger,
        filename=UKDALE_FILENAME,
        appliances=[
            TARGET_APPLIANCE, ['fridge freezer', 'fridge', 'freezer'],
            'dish washer', 'kettle', ['washer dryer', 'washing machine']
        ],
        max_appliance_powers=[MAX_TARGET_POWER, 300, 2500, 2600, 2400],
        on_power_thresholds=[ON_POWER_THRESHOLD] + [10] * 4,
        min_on_durations=[MIN_ON_DURATION, 60, 1800, 12, 1800],
        min_off_durations=[MIN_OFF_DURATION, 12, 1800, 12, 600],
        divide_input_by_max_input_power=False,
        window_per_building=WINDOW_PER_BUILDING,
        seq_length=SEQ_LENGTH,
        output_one_appliance=True,
        train_buildings=TRAIN_BUILDINGS,
        validation_buildings=VALIDATION_BUILDINGS,
        n_seq_per_batch=N_SEQ_PER_BATCH,
        skip_probability=0.75,
        skip_probability_for_first_appliance=SKIP_PROBABILITY_FOR_TARGET,
        standardise_input=True,
        input_stats=INPUT_STATS,
        independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,
        subsample_target=SUBSAMPLE_TARGET,
        input_padding=INPUT_PADDING)

    same_location_source1 = SameLocation(
        logger=logger,
        filename=UKDALE_FILENAME,
        target_appliance=TARGET_APPLIANCE,
        window_per_building=WINDOW_PER_BUILDING,
        seq_length=SEQ_LENGTH,
        train_buildings=TRAIN_BUILDINGS,
        validation_buildings=VALIDATION_BUILDINGS,
        n_seq_per_batch=N_SEQ_PER_BATCH,
        skip_probability=SKIP_PROBABILITY_FOR_TARGET,
        standardise_input=True,
        offset_probability=1,
        divide_target_by=MAX_TARGET_POWER,
        input_stats=INPUT_STATS,
        independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,
        on_power_threshold=ON_POWER_THRESHOLD,
        min_on_duration=MIN_ON_DURATION,
        min_off_duration=MIN_OFF_DURATION,
        include_all=True,
        allow_incomplete=True,
        subsample_target=SUBSAMPLE_TARGET,
        input_padding=INPUT_PADDING)

    multi_source = MultiSource(
        sources=[
            {
                'source': real_appliance_source1,
                'train_probability': 0.5,
                'validation_probability': 0
            },
            {
                'source': same_location_source1,
                'train_probability': 0.5,
                'validation_probability': 1
            }
        ],
        standardisation_source=same_location_source1
    )

    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(
        dict(
            auto_reshape=True,
            experiment_name=name,
            source=multi_source,
            plotter=Plotter(n_seq_to_plot=32, n_training_examples_to_plot=16),
            layers_config=[
                {
                    'type': DimshuffleLayer,
                    'pattern': (0, 2, 1)  # (batch, features, time)
                },
                {
                    'type': Conv1DLayer,  # convolve over the time axis
                    'num_filters': 16,
                    'filter_size': 4,
                    'stride': 1,
                    'nonlinearity': None,
                    'border_mode': 'same'
                },
                {
                    'type': DimshuffleLayer,
                    'pattern': (0, 2, 1),  # back to (batch, time, features)
                    'label': 'dimshuffle3'
                },
                {
                    'type': DropoutLayer
                },
                {
                    'type': BLSTMLayer,
                    'num_units': 128,
                    'merge_mode': 'concatenate'
                },
                {
                    'type': DropoutLayer
                },
                {
                    'type': BLSTMLayer,
                    'num_units': 256,
                    'merge_mode': 'concatenate'
                },
                {
                    'type': DropoutLayer
                },
                {
                    'type': DenseLayer,
                    'num_units': 128,
                    'nonlinearity': tanh
                },
                {
                    'type': DenseLayer,
                    'num_units': 1,
                    'nonlinearity': None
                }
            ]))
    net = Net(**net_dict_copy)
    return net
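Each `DropoutLayer` here uses Lasagne's default `p=0.5`. Dropout must be active during training and bypassed at validation time, which in Lasagne is selected per compilation rather than per layer; a standalone illustration of that idiom (not tied to the `Net` wrapper above):

import lasagne

l_in = lasagne.layers.InputLayer(shape=(64, 256, 1))   # (batch, time, features)
l_drop = lasagne.layers.DropoutLayer(l_in)             # p defaults to 0.5

train_out = lasagne.layers.get_output(l_drop)                      # dropout on
valid_out = lasagne.layers.get_output(l_drop, deterministic=True)  # dropout off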
Example No. 7
def exp_a(name):
    # longer seq length
    logger = logging.getLogger(name)
    global multi_source

    SEQ_LENGTH = 2048
    N_SEQ_PER_BATCH = 8

    # real_appliance_source1 = RealApplianceSource(
    #     logger=logger,
    #     filename=UKDALE_FILENAME,
    #     appliances=[
    #         TARGET_APPLIANCE,
    #         ['fridge freezer', 'fridge', 'freezer'],
    #         'dish washer',
    #         'kettle',
    #         ['washer dryer', 'washing machine']
    #     ],
    #     max_appliance_powers=[MAX_TARGET_POWER, 300, 2500, 2600, 2400],
    #     on_power_thresholds=[ON_POWER_THRESHOLD] + [10] * 4,
    #     min_on_durations=[MIN_ON_DURATION, 60, 1800, 12, 1800],
    #     min_off_durations=[MIN_OFF_DURATION, 12, 1800, 12, 600],
    #     divide_input_by_max_input_power=False,
    #     window_per_building=WINDOW_PER_BUILDING,
    #     seq_length=SEQ_LENGTH,
    #     output_one_appliance=True,
    #     train_buildings=TRAIN_BUILDINGS,
    #     validation_buildings=VALIDATION_BUILDINGS,
    #     n_seq_per_batch=N_SEQ_PER_BATCH,
    #     skip_probability=0.75,
    #     skip_probability_for_first_appliance=SKIP_PROBABILITY_FOR_TARGET,
    #     standardise_input=True,
    #     input_stats=INPUT_STATS,
    #     independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,
    #     subsample_target=SUBSAMPLE_TARGET,
    #     input_padding=INPUT_PADDING
    # )

    # same_location_source1 = SameLocation(
    #     logger=logger,
    #     filename=UKDALE_FILENAME,
    #     target_appliance=TARGET_APPLIANCE,
    #     window_per_building=WINDOW_PER_BUILDING,
    #     seq_length=SEQ_LENGTH,
    #     train_buildings=TRAIN_BUILDINGS,
    #     validation_buildings=VALIDATION_BUILDINGS,
    #     n_seq_per_batch=N_SEQ_PER_BATCH,
    #     skip_probability=SKIP_PROBABILITY_FOR_TARGET,
    #     standardise_input=True,
    #     offset_probability=1,
    #     divide_target_by=MAX_TARGET_POWER,
    #     input_stats=INPUT_STATS,
    #     independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,
    #     on_power_threshold=ON_POWER_THRESHOLD,
    #     min_on_duration=MIN_ON_DURATION,
    #     min_off_duration=MIN_OFF_DURATION,
    #     include_all=True,
    #     allow_incomplete=True,
    #     subsample_target=SUBSAMPLE_TARGET,
    #     input_padding=INPUT_PADDING
    # )

    # multi_source = MultiSource(
    #     sources=[
    #         {
    #             'source': real_appliance_source1,
    #             'train_probability': 0.5,
    #             'validation_probability': 0
    #         },
    #         {
    #             'source': same_location_source1,
    #             'train_probability': 0.5,
    #             'validation_probability': 1
    #         }
    #     ],
    #     standardisation_source=same_location_source1
    # )

    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(
        dict(
            experiment_name=name,
            source=multi_source,
            plotter=Plotter(n_seq_to_plot=32, n_training_examples_to_plot=16),
            learning_rate=1e-7,
            learning_rate_changes_by_iteration={
                1000: 1e-8,
                10000: 1e-9
            },
            layers_config=[
                {
                    'type': DenseLayer,
                    'num_units': 64,
                    'nonlinearity': tanh
                },
                {
                    'type': BLSTMLayer,
                    'num_units': 128,
                    'merge_mode': 'concatenate',
                    'grad_clipping': 1.0
                    #                'gradient_steps': 200
                },
                {
                    'type': BLSTMLayer,
                    'num_units': 256,
                    'merge_mode': 'concatenate',
                    'grad_clipping': 1.0
                    #                'gradient_steps': 200
                },
                {
                    'type': DenseLayer,
                    'num_units': 128,
                    'nonlinearity': tanh
                },
                {
                    'type': DenseLayer,
                    'num_units': 1,
                    'nonlinearity': None
                }
            ]))
    net = Net(**net_dict_copy)
    return net
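With a 2048-step sequence, the tiny starting rate (1e-7) and `grad_clipping=1.0` both guard against exploding gradients through the recurrence; `gradient_steps` (commented out above) would additionally truncate backpropagation through time. `BLSTMLayer` is the project's own bidirectional wrapper, but both knobs come straight from Lasagne's recurrent layers:

import lasagne

l_in = lasagne.layers.InputLayer(shape=(8, 2048, 64))  # (batch, time, features)
l_lstm = lasagne.layers.LSTMLayer(
    l_in, num_units=128,
    grad_clipping=1.0,   # clip gradients flowing through each step
    gradient_steps=-1    # -1 = full backpropagation through time
)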
Example No. 8
    #    loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    #    loss_function=partial(scaled_cost, loss_func=mse),
    #    loss_function=ignore_inactive,
    #    loss_function=partial(scaled_cost3, ignore_inactive=False),
    #    updates_func=momentum,
    updates_func=clipped_nesterov_momentum,
    updates_kwargs={'clip_range': (0, 10)},
    learning_rate=1e-1,
    learning_rate_changes_by_iteration={
        2000: 1e-2,
        10000: 1e-3
    },
    do_save_activations=True,
    auto_reshape=False,
    #    plotter=CentralOutputPlotter
    plotter=Plotter(n_seq_to_plot=20))
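The commented-out loss functions above are project-specific alternatives; `ignore_inactive`, for instance, suggests masking timesteps where the target appliance is off so the abundant zero-power periods don't dominate the cost. A hedged sketch of such a masked MSE (the masking rule is my assumption, not the project's actual definition):

import theano.tensor as T

def masked_mse(x, t, on_threshold=0.0):
    # Score only timesteps where the target is above `on_threshold`.
    mask = T.gt(t, on_threshold)
    squared_error = (x - t) ** 2 * mask
    return squared_error.sum() / T.maximum(mask.sum(), 1)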


def exp_a(name):
    global source
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(experiment_name=name, source=source))
    net_dict_copy['layers_config'] = [
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            'type': Conv1DLayer,  # convolve over the time axis
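A closing note on the recurring `DimshuffleLayer` pairs throughout these examples: Lasagne's `Conv1DLayer` convolves over the last axis and expects `(batch, channels, time)`, while the dense and recurrent layers expect `(batch, time, features)`, so `pattern=(0, 2, 1)` swaps the last two axes on the way in and back out. The NumPy equivalent:

import numpy as np

batch = np.zeros((16, 512, 1))          # (batch, time, features)
for_conv = batch.transpose(0, 2, 1)     # (batch, features, time) for Conv1DLayer
restored = for_conv.transpose(0, 2, 1)  # back to (batch, time, features)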