import numpy as np

# Note: timeIt, get_df, and trainrnn are helpers/imports defined elsewhere in
# this repo (not shown here).


def get_model(num_timesteps, num_input_vars, num_output_vars, p_params=None):
    params = dict(archtype='rnn', optimizer='rmsprop', discount=0.9,
                  hidden_layer_dims=[])
    if p_params:
        params.update(p_params)
    # Alternative configurations tried during experimentation:
    #params.update( dict(macro_dims=500, archtype='RNN', hidden_activation='leakyrelu') )
    #params.update( dict(macro_dims=500, hidden_activation='leakyrelu') )  # gets better than mean performance on ieee300
    #params.update( dict(macro_dims=trajs_trn.shape[2], archtype='RNNIdentity', hidden_activation='leakyrelu') )  # gets better than mean performance on ieee300
    #params.update( dict(macro_dims=trajs_trn.shape[2], archtype='RNNIdentity', hidden_activation='linear') )  # gets better than mean performance on ieee300
    #params.update( dict(macro_dims=500, hidden_activation='srelu') )  # , hidden_layer_dims=[500,]

    # The model name encodes the non-empty parameter values (in sorted-key order)
    # plus the data dimensions, so saved weights can be matched to their config.
    model_name = "models/" + "_".join(["%s" % x[1] for x in sorted(params.items()) if x[1]]) + \
        "-" + "-".join(map(str, [num_timesteps, num_input_vars, num_output_vars]))

    if False:  # elsewhere: reload a previously saved model instead of building a new one
        from keras.models import model_from_json
        with timeIt("Loading model object"):
            model = model_from_json(open('%s.json' % model_name).read())
            model.load_weights('%s_weights.h5' % model_name)
    else:
        with timeIt("Creating model object"):
            model = trainrnn.get_rnn_model(num_timesteps, num_input_vars,
                                           num_output_vars=num_output_vars,
                                           output_type='real', **params)

    model.model_name = model_name
    return model
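# Worked example of the model_name scheme used in get_model above. The
# 15/300/300 dimensions are hypothetical, chosen only for illustration: the
# empty hidden_layer_dims entry is falsy and gets filtered out, the remaining
# default values are joined in sorted-key order, and the data dimensions are
# appended.
def _example_model_name():
    params = dict(archtype='rnn', optimizer='rmsprop', discount=0.9,
                  hidden_layer_dims=[])
    name = "models/" + "_".join(["%s" % x[1] for x in sorted(params.items()) if x[1]]) + \
        "-" + "-".join(map(str, [15, 300, 300]))
    assert name == "models/rnn_0.9_rmsprop-15-300-300"
    return name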
def get_data(DIRNAME='ieee300'):
    #DIRNAME = 'alpha1.0r0.0'
    TOPNUM = None
    with timeIt():
        df = get_df(DIRNAME, TOPNUM)

    # Canonicalize perturbation ids into an order-independent tuple so that
    # equivalent perturbation sets compare equal.
    df['PertId'] = df.PertId.apply(lambda x: tuple(sorted(str(x).split(','))))

    def split_mx(df, unique_perts=False):
        done_perts = set()
        clocs = None
        pertDF = df[df.PertId != -1]

        def emit(clocs):
            cPerts = pertDF.PertId.iloc[clocs[0]]
            if unique_perts and cPerts in done_perts:
                return None
            done_perts.add(cPerts)
            return np.vstack(pertDF.Effs2.iloc[clocs]), np.vstack(pertDF.Eff.iloc[clocs])

        # Group rows into trajectories: a new trajectory starts at every t == 0,
        # and only complete 15-step trajectories are emitted.
        for ndx, t in enumerate(pertDF.t):
            if t == 0:
                if clocs is not None:
                    if len(clocs) == 15:
                        d = emit(clocs)
                        if d is not None:
                            yield d
                clocs = []
            clocs.append(ndx)
        if len(clocs) == 15:
            d = emit(clocs)
            if d is not None:
                yield d

    with timeIt("Loading trajectories"):
        mxs = zip(*split_mx(df, unique_perts=True))
        trajs = np.stack(mxs[0])
        #trajs = trajs[0:1000]

    # Training input: only the initial timestep is kept, later steps are zeroed.
    trajs_trn = trajs.copy()
    trajs_trn[:, 1:, :] = 0.0

    if False:
        num_mostvaried = 1
        #mostvaried = np.argsort(trajs[:, -1, :].var(axis=0))[-num_mostvaried:]
        #observable = trajs[:, :, mostvaried]
        observable = trajs - trajs.mean(axis=0)[None, :, :]
        print trajs.shape, observable.shape

    observable = trajs
    #observable = np.stack(mxs[1]) * 1000

    # Baseline: squared error of always predicting the per-timestep mean trajectory.
    v = (observable - observable.mean(axis=0)[None, :, :]) ** 2
    print "Mean error for avg              : %0.7f" % v.mean()
    print "Mean error for avg (last 10%%)  : %0.7f" % v[int(len(v) * 0.9):].mean()
    print "Mean error for avg (first 10%%) : %0.7f" % v[:int(len(v) * 0.1)].mean()

    return trajs, trajs_trn, observable
def get_data(DIRNAME="ieee300"): # DIRNAME = 'alpha1.0r0.0' TOPNUM = None with timeIt(): df = get_df(DIRNAME, TOPNUM) df["PertId"] = df.PertId.apply(lambda x: tuple(sorted(str(x).split(",")))) def split_mx(df, unique_perts=False): done_perts = set() clocs = None pertDF = df[df.PertId != -1] def emit(clocs): cPerts = pertDF.PertId.iloc[clocs[0]] if unique_perts and cPerts in done_perts: return None done_perts.add(cPerts) return np.vstack(pertDF.Effs2.iloc[clocs]), np.vstack(pertDF.Eff.iloc[clocs]) for ndx, t in enumerate(pertDF.t): if t == 0: if clocs is not None: if len(clocs) == 15: d = emit(clocs) if d is not None: yield d clocs = [] clocs.append(ndx) if len(clocs) == 15: d = emit(clocs) if d is not None: yield d with timeIt("Loading trajectories"): mxs = zip(*split_mx(df, unique_perts=True)) trajs = np.stack(mxs[0]) # trajs = trajs[0:1000] trajs_trn = trajs.copy() trajs_trn[:, 1:, :] = 0.0 if False: num_mostvaried = 1 # mostvaried=np.argsort(trajs[:,-1,:].var(axis=0))[-num_mostvaried:] # observable = trajs[:,:,mostvaried] observable = trajs - trajs.mean(axis=0)[None, :, :] print trajs.shape, observable.shape observable = trajs # observable = np.stack(mxs[1])*1000 v = (observable - observable.mean(axis=0)[None, :, :]) ** 2 print "Mean error for avg : %0.7f" % v.mean() print "Mean error for avg (last10%%) : %0.7f" % v[int(len(v) * 0.9) :].mean() print "Mean error for avg (frst10%%) : %0.7f" % v[: int(len(v) * 0.1)].mean() return trajs, trajs_trn, observable