def optimize(postfix):
    # sg.utils.redirect(sys.stdout, "gridopt_output_%s.txt" % postfix)
    
    user_id = 55864860
    (dataset, test) = load_prediction.prepare_datasets(user_id)
    
    day = 24
    freerun = day
    today = 4600
    
    # [len_data, res_size, leak, input, bias, spectral, 
    #  seed, ridge, tmp_sm, load_sm]
    train_hours = 336
    
    datas = \
        [sg.utils.Normalizer(dataset[today-train_hours:today+day-freerun,:], axis=0)
         for today in (1000, 2000, 3000, 4000)]
    
    input_data = []
    for data in datas:
        temps, loads = zip(*data.normalized)
        input_data.append([np.array((temps[24:], loads[:-24], loads[24:])).T])

    reservoir = Oger.nodes.LeakyReservoirNode(output_dim=400,
                                              leak_rate=1,
                                              input_scaling=0.5,
                                              bias_scaling=0.75,
                                              spectral_radius=1,
                                              reset_states=False)
    readout = Oger.nodes.RidgeRegressionNode(ridge_param = 0.001)
    flow = Oger.nodes.FreerunFlow(reservoir + readout,
                                  freerun_steps = freerun,
                                  external_input_range= \
                                  np.array([0, 1]))

    # gridsearch_parameters = {reservoir: {'_instance': range(5), 
    #                                      'spectral_radius': [0.6, 0.8, 1],
    #                                      'input_scaling': [0.1, 0.5, 0.9],
    #                                      'bias_scaling': [0.1, 0.5, 0.9],
    #                                      'leak_rate': [0.1, 0.5, 0.9]},
    #                          readout: {'_instance': range(5),
    #                                    'ridge_param': [0.1, 0.5, 0.9]}}
    
    gridsearch_parameters = {reservoir: {'_instance': range(20)},
                             readout: {'ridge_param': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}}

    print "gridsearch_parameters = " + str(gridsearch_parameters)
    optimizer = Oger.evaluation.Optimizer(gridsearch_parameters, 
                                          Oger.utils.nrmse)

    optimizer.grid_search([[], input_data], flow,
                          cross_validate_function=Oger.evaluation.leave_one_out)

    return (optimizer, reservoir)
import numpy as np
import Oger, mdp
import matplotlib.pyplot as plt
import scikits.timeseries as ts

import pywt
import sg.utils
from sg.data.sintef.create_full_temp_data import data as read_temperatures
import sg.data.sintef.userloads as ul
import load_prediction

from static import StaticNode

# --- Module-level setup (runs as an import side effect) ---------------
# NOTE(review): loading the dataset at import time duplicates work done
# inside optimize(); consider moving this under a __main__ guard.

# SINTEF user whose load series is analysed throughout this module.
user_id = 55864860

# Second argument False presumably toggles some preprocessing option in
# prepare_datasets -- differs from the call inside optimize(); confirm
# against load_prediction.  `test` is the held-out split.
(dataset, test) = load_prediction.prepare_datasets(user_id, False)

#day = 24
#today = random.randint(1000, dataset.shape[0]-day*2)
#today = 4600

#See if we can predict 24 times based on instances, learned from the training set.

# Normalize each column of the full dataset to a common range.
data_raw = sg.utils.Normalizer(dataset, axis=0)

# Keep only the first 2**14 = 16384 hours of column 1 (presumably the
# load series; column 0 would then be temperature -- verify upstream).
data = data_raw.normalized[:2**14,1]

# One year is 365*24 = 8760 datapoints. If we round down to 8192, we will get
# the maximum amount of scales for the decomposition (13), i.e. math.pow(2,13)
# The number of levels/scales determine how far we look back.
# Wavelet decomposition depth used below (pywt is imported above).
level = 4
def optimize(postfix):
    # sg.utils.redirect(sys.stdout, "gridopt_output_%s.txt" % postfix)

    user_id = 55864860
    (dataset, test) = load_prediction.prepare_datasets(user_id)

    day = 24
    freerun = day
    today = 4600

    # [len_data, res_size, leak, input, bias, spectral,
    #  seed, ridge, tmp_sm, load_sm]
    train_hours = 336

    datas = \
        [sg.utils.Normalizer(dataset[today-train_hours:today+day-freerun,:], axis=0)
         for today in (1000, 2000, 3000, 4000)]

    input_data = []
    for data in datas:
        temps, loads = zip(*data.normalized)
        input_data.append([np.array((temps[24:], loads[:-24], loads[24:])).T])

    reservoir = Oger.nodes.LeakyReservoirNode(output_dim=400,
                                              leak_rate=1,
                                              input_scaling=0.5,
                                              bias_scaling=0.75,
                                              spectral_radius=1,
                                              reset_states=False)
    readout = Oger.nodes.RidgeRegressionNode(ridge_param=0.001)
    flow = Oger.nodes.FreerunFlow(reservoir + readout,
                                  freerun_steps = freerun,
                                  external_input_range= \
                                  np.array([0, 1]))

    # gridsearch_parameters = {reservoir: {'_instance': range(5),
    #                                      'spectral_radius': [0.6, 0.8, 1],
    #                                      'input_scaling': [0.1, 0.5, 0.9],
    #                                      'bias_scaling': [0.1, 0.5, 0.9],
    #                                      'leak_rate': [0.1, 0.5, 0.9]},
    #                          readout: {'_instance': range(5),
    #                                    'ridge_param': [0.1, 0.5, 0.9]}}

    gridsearch_parameters = {
        reservoir: {
            '_instance': range(20)
        },
        readout: {
            'ridge_param': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        }
    }

    print "gridsearch_parameters = " + str(gridsearch_parameters)
    optimizer = Oger.evaluation.Optimizer(gridsearch_parameters,
                                          Oger.utils.nrmse)

    optimizer.grid_search(
        [[], input_data],
        flow,
        cross_validate_function=Oger.evaluation.leave_one_out)

    return (optimizer, reservoir)