def exp_d(name):
    """Build a Net whose first (target) appliance is the dish washer.

    Copies the module-level ``source_dict`` / ``net_dict`` templates,
    overrides the appliance list and power/duration limits, and rebinds the
    module-level ``source``.
    """
    global source
    MAX_TARGET_POWER = 2500  # dish washer peak power (watts)

    src_cfg = deepcopy(source_dict)
    src_cfg.update(dict(
        logger=logging.getLogger(name),
        appliances=[
            'dish washer',
            ['fridge freezer', 'fridge', 'freezer'],
            ['washer dryer', 'washing machine'],
            'kettle',
            'HTPC'
        ],
        # One entry per appliance above, in the same order.
        max_appliance_powers=[MAX_TARGET_POWER, 300, 2400, 2600, 200],
        on_power_thresholds=[5] * 5,
        min_on_durations=[1800, 60, 1800, 30, 60],
        min_off_durations=[1800, 12, 600, 1, 12],
        seq_length=2048
    ))
    source = RealApplianceSource(**src_cfg)

    net_cfg = deepcopy(net_dict)
    net_cfg.update(dict(
        experiment_name=name,
        source=source,
        plotter=StartEndMeanPlotter(
            n_seq_to_plot=32, max_target_power=MAX_TARGET_POWER)
    ))
    return Net(**net_cfg)
def exp_c(name):
    """Build a Net targeting the HTPC (200 W cap) and resume training.

    Same template-copy pattern as the sibling experiments, plus a
    learning-rate schedule; the returned net has parameters restored from
    iteration 146758.
    """
    global source
    MAX_TARGET_POWER = 200  # HTPC peak power (watts)

    src_cfg = deepcopy(source_dict)
    src_cfg.update(dict(
        logger=logging.getLogger(name),
        appliances=[
            'HTPC',
            'dish washer',
            ['fridge freezer', 'fridge', 'freezer'],
            ['washer dryer', 'washing machine'],
            'kettle'
        ],
        # One entry per appliance above, in the same order.
        max_appliance_powers=[MAX_TARGET_POWER, 2500, 300, 2400, 2600],
        on_power_thresholds=[5] * 5,
        min_on_durations=[60, 1800, 60, 1800, 30],
        min_off_durations=[12, 1800, 12, 600, 1],
        seq_length=2048
    ))
    source = RealApplianceSource(**src_cfg)

    net_cfg = deepcopy(net_dict)
    net_cfg.update(dict(
        experiment_name=name,
        source=source,
        plotter=StartEndMeanPlotter(
            n_seq_to_plot=32, max_target_power=MAX_TARGET_POWER),
        learning_rate_changes_by_iteration={
            150000: 1e-4,
            275000: 1e-5
        }
    ))
    net = Net(**net_cfg)
    net.load_params(146758)  # resume from a previous run's checkpoint
    return net
def exp_a(name):
    """Build a Net over the module-level ``multi_source``.

    Only the experiment name, the source, and a start/end/mean plotter are
    overridden on a copy of ``net_dict``.
    """
    cfg = deepcopy(net_dict)
    cfg.update(dict(
        experiment_name=name,
        source=multi_source,
        plotter=StartEndMeanPlotter(
            n_seq_to_plot=32, max_target_power=MAX_TARGET_POWER)))
    return Net(**cfg)
# loss_function=partial(scaled_cost, loss_func=mse), # loss_function=ignore_inactive, # loss_function=partial(scaled_cost3, ignore_inactive=False), # updates_func=momentum, updates_func=clipped_nesterov_momentum, updates_kwargs={'clip_range': (0, 10)}, learning_rate=1e-2, learning_rate_changes_by_iteration={ 1000: 1e-3, 50000: 1e-4 }, do_save_activations=True, auto_reshape=False, # plotter=CentralOutputPlotter # plotter=Plotter(n_seq_to_plot=32) plotter=StartEndMeanPlotter(n_seq_to_plot=32, max_target_power=MAX_TARGET_POWER)) def exp_a(name): # conv, conv global source source_dict_copy = deepcopy(source_dict) source_dict_copy.update(dict(logger=logging.getLogger(name))) source = RealApplianceSource(**source_dict_copy) net_dict_copy = deepcopy(net_dict) net_dict_copy.update(dict(experiment_name=name, source=source)) NUM_FILTERS = 16 target_seq_length = source.output_shape_after_processing()[1] net_dict_copy['layers_config'] = [ { 'type': DimshuffleLayer,
def exp_a(name):
    """Build a Net trained on a 50/50 mix of two sequence sources.

    Training batches are drawn equally from a ``RealApplianceSource`` and a
    ``SameLocation`` source; validation batches come exclusively from the
    ``SameLocation`` source (``validation_probability`` 0 vs 1).  The
    returned net has parameters restored from iteration 730532.
    """
    logger = logging.getLogger(name)

    real_source = RealApplianceSource(
        logger=logger,
        filename=UKDALE_FILENAME,
        appliances=[
            TARGET_APPLIANCE,
            ['fridge freezer', 'fridge', 'freezer'],
            'dish washer',
            'kettle',
            ['washer dryer', 'washing machine']
        ],
        # One entry per appliance above, in the same order.
        max_appliance_powers=[MAX_TARGET_POWER, 300, 2500, 2600, 2400],
        on_power_thresholds=[5] * 5,
        min_on_durations=[12, 60, 1800, 12, 1800],
        min_off_durations=[12, 12, 1800, 12, 600],
        divide_input_by_max_input_power=False,
        window_per_building=WINDOW_PER_BUILDING,
        seq_length=SEQ_LENGTH,
        output_one_appliance=True,
        train_buildings=TRAIN_BUILDINGS,
        validation_buildings=VALIDATION_BUILDINGS,
        n_seq_per_batch=N_SEQ_PER_BATCH,
        skip_probability=0.75,
        skip_probability_for_first_appliance=SKIP_PROBABILITY_FOR_TARGET,
        target_is_start_and_end_and_mean=True,
        standardise_input=True,
        input_stats=INPUT_STATS,
        independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS)

    same_location = SameLocation(
        logger=logger,
        filename=UKDALE_FILENAME,
        target_appliance=TARGET_APPLIANCE,
        window_per_building=WINDOW_PER_BUILDING,
        seq_length=SEQ_LENGTH,
        train_buildings=TRAIN_BUILDINGS,
        validation_buildings=VALIDATION_BUILDINGS,
        n_seq_per_batch=N_SEQ_PER_BATCH,
        skip_probability=SKIP_PROBABILITY_FOR_TARGET,
        target_is_start_and_end_and_mean=True,
        standardise_input=True,
        offset_probability=1,
        divide_target_by=MAX_TARGET_POWER,
        input_stats=INPUT_STATS,
        independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS)

    multi_source = MultiSource(
        sources=[
            {
                'source': real_source,
                'train_probability': 0.5,
                'validation_probability': 0
            },
            {
                'source': same_location,
                'train_probability': 0.5,
                'validation_probability': 1
            }
        ],
        standardisation_source=same_location)

    cfg = deepcopy(net_dict)
    cfg.update(dict(
        experiment_name=name,
        source=multi_source,
        plotter=StartEndMeanPlotter(
            n_seq_to_plot=32,
            n_training_examples_to_plot=16,
            max_target_power=MAX_TARGET_POWER)))
    net = Net(**cfg)
    net.load_params(730532)  # resume from a previous run's checkpoint
    return net
# loss_function=partial(scaled_cost, loss_func=mse), # loss_function=ignore_inactive, # loss_function=partial(scaled_cost3, ignore_inactive=False), # updates_func=momentum, updates_func=clipped_nesterov_momentum, updates_kwargs={'clip_range': (0, 10)}, learning_rate=1e-2, learning_rate_changes_by_iteration={ 1000: 1e-3, 5000: 1e-4 }, do_save_activations=True, auto_reshape=False, # plotter=CentralOutputPlotter # plotter=Plotter(n_seq_to_plot=32) plotter=StartEndMeanPlotter()) def exp_a(name, target_appliance, seq_length): global source source_dict_copy = deepcopy(source_dict) source_dict_copy.update( dict(target_appliance=target_appliance, logger=logging.getLogger(name), seq_length=seq_length)) source = SameLocation(**source_dict_copy) net_dict_copy = deepcopy(net_dict) net_dict_copy.update(dict(experiment_name=name, source=source)) NUM_FILTERS = 4 target_seq_length = source.output_shape_after_processing()[1] net_dict_copy['layers_config'] = [
def net_dict_rectangles(seq_length):
    """Return the network configuration dict for the 'rectangles' net.

    Architecture: two valid-mode Conv1D layers over the time axis (with
    Dimshuffle/Pad plumbing), followed by a funnel of dense layers down to a
    3-unit linear output (start, end, mean).

    NOTE(review): ``seq_length`` is not referenced in this body — confirm
    whether it is intentionally unused.
    NOTE(review): the ``epoch_callbacks`` key (350000) exceeds ``epochs``
    (300000), so ``only_train_on_real_data`` never fires as configured —
    possibly a deliberate way of disabling it; confirm.
    """
    # Convolution stage: shuffle to (batch, features, time), pad, convolve
    # twice, shuffle back to (batch, time, features).
    conv_stage = [
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            'type': PadLayer,
            'width': 4
        },
        {
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': 16,
            'filter_size': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'valid'
        },
        {
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': 16,
            'filter_size': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'valid'
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # back to (batch, time, features)
        }
    ]
    # Dense funnel: 4096 -> 2048 -> 512 -> 3 (linear output).
    dense_stage = [
        {
            'type': DenseLayer,
            'num_units': 512 * 8,
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': 512 * 4,
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': 512,
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': 3,
            'nonlinearity': None
        }
    ]
    return dict(
        epochs=300000,
        save_plot_interval=25000,
        loss_function=lambda x, t: squared_error(x, t).mean(),
        updates_func=nesterov_momentum,
        learning_rate=1e-4,
        learning_rate_changes_by_iteration={
            200000: 1e-5,
            250000: 1e-6
        },
        epoch_callbacks={350000: only_train_on_real_data},
        do_save_activations=True,
        auto_reshape=False,
        plotter=StartEndMeanPlotter(
            n_seq_to_plot=32, n_training_examples_to_plot=16),
        layers_config=conv_stage + dense_stage)