def runSimpleModelWithParams(model_params,data):
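	"""Train a customModels.SimpleNNMultiOutputModel and evaluate it on all splits.

	Illustrative layout of the arguments (key names are the ones read in this
	function; the values shown are placeholders, not defaults):

		model_params = {"batchSize": 32, "num_epochs": 100,
		                "DirectionForward": True, "scaling": True,
		                "phase_loss": "mse", "use_weights": False,
		                "weights_factor": 1, "InputNames": [...],
		                "OutputNames": [...], "model_name": "...",
		                "email": False}
		data = [(AP_train, TRP_train), (AP_dev, TRP_dev)]

	Returns per-output (train, dev, test) MSE slices from run_eval_base.
	"""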

	
	#model_params
	BATCH_SIZE = model_params["batchSize"]
	np.random.seed(RANDOM_SEED)
	NUM_EPOCHS = model_params['num_epochs']
	#data
	AP_train,TRP_train = data[0]
	AP_dev,TRP_dev = data[1]

	# Direction: when DirectionForward, train TRP -> AP; otherwise train
	# AP -> TRP and swap the input/output name lists to match
	if model_params["DirectionForward"]:
		X_train,Y_train,X_dev,Y_dev = TRP_train,AP_train,TRP_dev,AP_dev
	else:
		X_train,Y_train,X_dev,Y_dev = AP_train,TRP_train,AP_dev,TRP_dev
		model_params["OutputNames"],model_params["InputNames"] = model_params["InputNames"],model_params["OutputNames"]


	model = customModels.SimpleNNMultiOutputModel(model_params)

	if model_params["scaling"]:
		X_train,X_dev,_ = prep.scaleData(X_train,X_dev)


	metrics_vect = {}
	for k in model_params["OutputNames"]:
		if "ph" in k:
			metrics_vect[k] = [atan_mse, "mse", modulo_2pi_error, phase_mse, smape]
		else:
			metrics_vect[k] = ["mse", smape, custom_loss]

	losses_vect = {}
	losses_weights = {}
	for k in model_params["OutputNames"]:
		if "ph" in k:
			if model_params["phase_loss"] == "mse":
				losses_vect[k] = "mse"
			elif model_params["phase_loss"] == "phase_mse":
				losses_vect[k] = phase_mse
			elif model_params["phase_loss"] == "atan_mse":
				losses_vect[k] = atan_mse
			else:
				raise NotImplementedError("invalid phase_loss: %s" % model_params["phase_loss"])
		else:
			losses_vect[k] = "mse"
			#losses_vect[k] = custom_loss

	if model_params["use_weights"]:
		weights = (Y_train.mean(axis = 0)).tolist()
	else:
		weights = [1]*len(model_params["OutputNames"])

	f = model_params["weights_factor"]  # currently unused by the weighting below
	# Inverse-square weighting: outputs with a larger mean target magnitude
	# get a proportionally smaller loss weight
	for i,k in enumerate(model_params["OutputNames"]):
		losses_weights[k] = 1/weights[i]**2


	#losses_vect["phi_1"] = atan_mse
	#losses_vect["phi_2"] = atan_mse
	#losses_vect["phi_3"] = phase_mse



	model.compile(
	 optimizer = "adam",
	 loss = losses_vect,
	 loss_weights = losses_weights,
	 metrics = metrics_vect
	)

	# CALLBACKS SETUP
	print("Local Training Params:")
	customUtils.print_dic_params(model_params,True,delimitor = "_",kvdelimitor = "_" )
	print("="*50)


	callback_list = customCallbacks.addCallBacks(model_params)

	results = model.fit(
	 X_train,
	 dict(zip(model_params["OutputNames"], Y_train.T)),
	 epochs = NUM_EPOCHS,
	 batch_size = BATCH_SIZE,
	 validation_data = (
	 X_dev,
	 dict(zip(model_params["OutputNames"], Y_dev.T))
	 ),
	 verbose = 0,
	 callbacks = callback_list,
	 shuffle = True
	)

	# Persist model_params, then evaluate the saved model on every split
	model_location = os.path.join('models',model_params["model_name"] +  '.hdf5')
	with open(os.path.join('model_params',model_params["model_name"] +  '.json'), 'w') as fp:
		json.dump(model_params, fp, sort_keys=True)


	#eval_ensemble.run_ensemble_eval(models_locations = [model_location])


	mse_total_train = run_eval_base(model_location,dataset = "train",email = model_params["email"])
	mse_total_dev = run_eval_base(model_location,dataset = "dev",email = model_params["email"])
	mse_total_test = run_eval_base(model_location,dataset = "test",email = model_params["email"])

	return (
		mse_total_train[0:len(model_params["OutputNames"])],
		mse_total_dev[0:len(model_params["OutputNames"])],
		mse_total_test[0:len(model_params["OutputNames"])]
		)
def runCustomAutpEncoderWithParams(model_params,data):
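	"""Train a forward model whose outputs are passed through a frozen,
	pre-trained reverse model, so the combined network both predicts Y and
	reconstructs its own inputs (a cycle-consistency "autoencoder").

	Expects the same model_params/data layout as runSimpleModelWithParams.
	"""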


	#model_params
	np.random.seed(RANDOM_SEED)
	NUM_EPOCHS = model_params['num_epochs']
	#data
	AP_train,TRP_train = data[0]
	AP_dev,TRP_dev = data[1]

	# Direction: when DirectionForward, train TRP -> AP; otherwise train
	# AP -> TRP and swap the input/output name lists to match
	if model_params["DirectionForward"]:
		X_train,Y_train,X_dev,Y_dev = TRP_train,AP_train,TRP_dev,AP_dev
	else:
		X_train,Y_train,X_dev,Y_dev = AP_train,TRP_train,AP_dev,TRP_dev
		model_params["OutputNames"],model_params["InputNames"] = model_params["InputNames"],model_params["OutputNames"]

	if model_params["scaling"]:
		X_train,X_dev,_ = prep.scaleData(X_train,X_dev)


	model_params["InverseModelLocation"]  = "models/Forward_BaseLine_MSE_run_0_time_2018_11_07_00_01_09.hdf5" # mse based
	#model_params["InverseModelLocation"] = "models/Fowrard_BaseLine_run_0_time_2018_11_05_19_05_19.hdf5" # atanmse based
	model_params["ForwardModelLocation"] = os.path.join('models',model_params["model_name"] + "forward" +  '.hdf5')

	model_location = os.path.join('models',model_params["model_name"] +  '.hdf5')
	weights_location = os.path.join('weights',model_params["model_name"] +  '.h5')

	#model_forward = customModels.SimpleNNMultiOutputModelConcat(model_params)
	model_forward = customModels.DistNNMultiOutputModelCustom(model_params)
	model_reverse = run_eval.loadBestModel(model_params["InverseModelLocation"])
	model_reverse.trainable = False
	#model_reverse.compile(optimizer = "adam", loss = ["mse","mse","mse",atan_mse,atan_mse,phase_mse])

	# Chain the frozen reverse model onto the forward outputs so the combined
	# model emits the forward prediction plus the reconstructed inputs
	inputs = model_forward.inputs
	outputs = model_forward.outputs + model_reverse(model_forward.outputs)

	combined_model = keras.models.Model(inputs = inputs, outputs = outputs)

	keras.utils.plot_model(combined_model,to_file=os.path.join('images',model_params["model_name"] +  '.png'),show_shapes=True)
	#email_logger.send_email(text_mesage = "Test", title_text = "ModelImage",image_path = 'demo.png')

	model_forward.save(model_params["ForwardModelLocation"])
	"""
	combined_model.compile(
	 optimizer = keras.optimizers.adam(),
	 loss = ["mse","mse","mse","mse","mse","mse",atan_mse,atan_mse,phase_mse],
	 loss_weights = [10,10,10,1,1,1,10,10,10],
	 #metrics = "mse"
	)
	"""

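	# Live compile below: custom_loss drives the forward "out_TRP" head; the
	# six MSE terms score the input columns reconstructed by the frozen
	# reverse model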
	combined_model.compile(
	 #optimizer = keras.optimizers.Adadelta(lr = 1.0),
	 optimizer = keras.optimizers.RMSprop(lr = 0.001),
	 #optimizer = keras.optimizers.Nadam(),
	 #optimizer = keras.optimizers.Adam(lr=0.001),
	 loss = [custom_loss,"mse","mse","mse","mse","mse","mse"],
	 #custom loss - zero
	 loss_weights = [1,0.1,0.1,0.1,0.3,0.3,0.3],
	 metrics = {"out_TRP":[custom_metroc_TRP,custom_loss_TRP]}
	)
	combined_model.save(model_location)



	# CALLBACKS SETUP
	print("Local Training Params:")
	customUtils.print_dic_params(model_params,False,delimitor = "_",kvdelimitor = "_" )
	print("="*50)



	callback_list = customCallbacks.addCallBacks(model_params)


	
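	# Targets: the forward prediction Y plus each of the six input columns,
	# which the frozen reverse model must reconstruct from the forward output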
	input_train = [X_train]
	output_train = [Y_train,X_train[:,0],X_train[:,1],X_train[:,2],X_train[:,3],X_train[:,4],X_train[:,5]]
	input_dev = [X_dev]
	output_dev = [Y_dev,X_dev[:,0],X_dev[:,1],X_dev[:,2],X_dev[:,3],X_dev[:,4],X_dev[:,5]]
	


	"""
	input_train = [X_train]
	output_train = [Y_train[:,0],Y_train[:,1],Y_train[:,2],X_train[:,0],X_train[:,1],X_train[:,2],X_train[:,3],X_train[:,4],X_train[:,5]]
	input_dev = [X_dev]
	output_dev = [Y_dev[:,0],Y_dev[:,1],Y_dev[:,2],X_dev[:,0],X_dev[:,1],X_dev[:,2],X_dev[:,3],X_dev[:,4],X_dev[:,5]]
	"""

	"""
	input_train = [X_train]
	output_train = [X_train[:,0],X_train[:,1],X_train[:,2],X_train[:,3],X_train[:,4],X_train[:,5]]
	input_dev = [X_dev]
	output_dev = [X_dev[:,0],X_dev[:,1],X_dev[:,2],X_dev[:,3],X_dev[:,4],X_dev[:,5]]
	"""


	results = combined_model.fit(
	 input_train, output_train,
	 epochs = NUM_EPOCHS,
	 batch_size = model_params["batchSize"],
	 validation_data = (input_dev, output_dev),
	 verbose = 2,
	 #callbacks = [customCallbacks.CustomSaveModel(model_params = model_params,model = combined_model)],
	 callbacks = callback_list,
	 shuffle = True
	)

	print("*"*50)
	print("Finished Training - Saving Models")
	print("*"*50)


	with open(os.path.join('model_params',model_params["model_name"] +  '.json'), 'w') as fp:
		json.dump(model_params, fp, sort_keys=True)

	mse_total_dev = run_eval_base(model_location,dataset = "dev",email = model_params["email"])
	mse_total_train = run_eval_base(model_location,dataset = "train",email = model_params["email"])
	mse_total_test = run_eval_base(model_location,dataset = "test",email = model_params["email"])



	return (
		mse_total_train[0:len(model_params["OutputNames"])],
		mse_total_dev[0:len(model_params["OutputNames"])],
		mse_total_test[0:len(model_params["OutputNames"])]
		)
def run_ensemble_eval(models_locations=None):
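    """Evaluate each saved model in models_locations on the train/dev/test
    splits and, when more than one model is given, score the
    prediction-averaging ensemble as well.

    Each model's config is read from model_params/<model_name>.json.
    """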

    mse_total_list = []
    smape_total_list = []
    rmse_total_list = []

    for dataset in ["train", "dev", "test"]:

        Y_pred_list = []
        # Ground truth for this split; set inside the model loop below
        Y_true_dev = None

        for ix, model_location in enumerate(models_locations):

            best_model = run_eval.loadBestModel(model_location)

            path, model_name_with_ext = os.path.split(model_location)
            model_name, file_extension = os.path.splitext(model_name_with_ext)

            model_params_loc = os.path.join('model_params',
                                            model_name + ".json")

            with open(model_params_loc) as jf:
                model_params = json.load(jf)

            X_train, Y_train, X_dev, Y_dev = run_eval.loadData(model_params,
                                                               dataset=dataset)
            Y_true_dev = np.transpose(Y_dev)

            if model_params["scaling"]:
                X_train, X_dev, scaler = prep.scaleData(X_train, X_dev)

            Y_pred_dev = best_model.predict(X_dev)

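            # predict() returns a list of (n_samples, 1) arrays, one per
            # output head; collapse them into a single (n_samples, n_outputs)
            # matrix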
            output = np.zeros(
                (Y_pred_dev[0].shape[0], len(model_params["OutputNames"])))
            for i in range(len(model_params["OutputNames"])):
                temp = Y_pred_dev[i]
                output[:, i] = temp[:, 0]

            Y_pred_dev = np.transpose(output)

            # Per-model errors (one value per output row)
            mse = ((Y_pred_dev - Y_true_dev)**2).mean(axis=-1)
            mape = 100 * (np.abs(Y_pred_dev - Y_true_dev) /
                          (Y_true_dev + EPSILON)).mean(axis=-1)
            smape = 2 * 100 * (np.abs(Y_pred_dev - Y_true_dev) /
                               np.abs(Y_true_dev + Y_pred_dev + EPSILON)).mean(
                                   axis=-1)
            rmse = np.sqrt(((Y_pred_dev - Y_true_dev)**2).mean(axis=-1))

            # Percentage of samples where predicted row 1 exceeds predicted
            # row 2 ("non-valid P")
            non_valid_p_percent = 100 * np.where(
                Y_pred_dev[1, :] > Y_pred_dev[2, :])[0].shape[
                    0] / Y_pred_dev[1, :].shape[0]

            #Print Errors

            print("Dataset: ", dataset)
            print("Model: ", ix, ":", model_location)
            print("non Valid P: ", non_valid_p_percent)
            print("MSE: ", mse)
            print("SMAPE: ", smape)
            #print("RMSE: ", rmse)

            Y_pred_list.append(Y_pred_dev)


        if len(models_locations) > 1:

            Y_pred_ensemble = sum(Y_pred_list) / len(Y_pred_list)

            mse = ((Y_pred_ensemble - Y_true_dev)**2).mean(axis=-1)
            smape = 2 * 100 * (
                np.abs(Y_pred_ensemble - Y_true_dev) /
                np.abs(Y_true_dev + Y_pred_ensemble + EPSILON)).mean(axis=-1)
            rmse = np.sqrt(((Y_pred_ensemble - Y_true_dev)**2).mean(axis=-1))
            non_valid_p_percent = 100 * np.where(
                Y_pred_ensemble[1, :] > Y_pred_ensemble[2, :])[0].shape[
                    0] / Y_pred_ensemble[1, :].shape[0]
            mse_total_list.append(mse)
            smape_total_list.append(smape)
            rmse_total_list.append(rmse)
            print("Dataset: ", dataset)
            print("Model: Ensemble")
            print("non Valid P: ", non_valid_p_percent)
            print("MSE: ", mse)
            print("SMAPE: ", smape)
def runDistModelWithParams(model_params,data):
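	"""Train a customModels.DistNNMultiOutputModel whose T/R/P heads are
	distributional (trained with log_likelihood_normal_cost) and evaluate the
	saved model on all splits.

	Expects the same model_params/data layout as runSimpleModelWithParams.
	"""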


	#model_params
	np.random.seed(RANDOM_SEED)
	NUM_EPOCHS = model_params['num_epochs']
	#data
	AP_train,TRP_train = data[0]
	AP_dev,TRP_dev = data[1]

	# Direction: when DirectionForward, train TRP -> AP; otherwise train
	# AP -> TRP and swap the input/output name lists to match
	if model_params["DirectionForward"]:
		X_train,Y_train,X_dev,Y_dev = TRP_train,AP_train,TRP_dev,AP_dev
	else:
		X_train,Y_train,X_dev,Y_dev = AP_train,TRP_train,AP_dev,TRP_dev
		model_params["OutputNames"],model_params["InputNames"] = model_params["InputNames"],model_params["OutputNames"]

	if model_params["scaling"]:
		X_train,X_dev,_ = prep.scaleData(X_train,X_dev)


	model = customModels.DistNNMultiOutputModel(model_params)

	metrics_vect = {}
	for k in model_params["OutputNames"]:
		if "ph" in k:
			metrics_vect[k] = [atan_mse, "mse", modulo_2pi_error, mean_squared_error_mu, smape]
		elif k in ["T","R","P"]:
			metrics_vect[k] = [nll, log_likelihood_normal_cost, mean_squared_error_mu, acc_dist, cv_test, "mse", smape]
		else:
			metrics_vect[k] = ["mse", mean_squared_error_mu, smape]

	losses_vect = {}
	losses_weights = {}

	for k in model_params["OutputNames"]:
		if "ph" in k:
			losses_vect[k] = "mse"
		elif k in ["T","R","P"]:
			# Distributional heads train on a normal log-likelihood cost
			losses_vect[k] = log_likelihood_normal_cost
			#losses_vect[k] = custom_loss
		else:
			losses_vect[k] = "mse"


	if model_params["use_weights"]:
		weights = (Y_train.mean(axis = 0)).tolist()
	else:
		weights = [1]*len(model_params["OutputNames"])

	f = model_params["weights_factor"]  # currently unused by the weighting below
	# Unlike the inverse-square scheme in runSimpleModelWithParams, each
	# output is weighted directly by its mean target magnitude
	for i,k in enumerate(model_params["OutputNames"]):
		losses_weights[k] = weights[i]  # approx 0.4/0.12/0.4

	model.compile(
	 optimizer = "adam",
	 loss = losses_vect,
	 loss_weights = losses_weights,
	 metrics = metrics_vect
	)

	# CALLBACKS SETUP
	print("Local Training Params:")
	customUtils.print_dic_params(model_params,True,delimitor = "_",kvdelimitor = "_" )
	print("="*50)



	callback_list = customCallbacks.addCallBacks(model_params)


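	# Each target column is duplicated via np.stack so every y_true has two
	# identical columns, matching the two-unit output (e.g. mean/spread) of
	# the distributional heads defined in customModels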
	results = model.fit(
	 X_train,
	 dict(zip(model_params["OutputNames"],[np.stack((row,row), axis = 0).T for row in Y_train.T])),
	 epochs = NUM_EPOCHS,
	 batch_size = model_params["batchSize"],
	 validation_data = (
	 X_dev,
	 dict(zip(model_params["OutputNames"],[np.stack((row,row), axis = 0).T for row in Y_dev.T]))
	 ),
	 verbose = 0,
	 callbacks = callback_list,
	 shuffle = True
	)

	model_location = os.path.join('models',model_params["model_name"] +  '.hdf5')
	with open(os.path.join('model_params',model_params["model_name"] +  '.json'), 'w') as fp:
		json.dump(model_params, fp, sort_keys=True)


	mse_total_train = run_eval_base(model_location,dataset = "train",email = model_params["email"])
	mse_total_dev = run_eval_base(model_location,dataset = "dev",email = model_params["email"])
	mse_total_test = run_eval_base(model_location,dataset = "test",email = model_params["email"])

	return (
		mse_total_train[0:len(model_params["OutputNames"])],
		mse_total_dev[0:len(model_params["OutputNames"])],
		mse_total_test[0:len(model_params["OutputNames"])]
		)
def run_ensemble_eval_reverse_model(models_locations=None):
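    """Evaluate models by cycle consistency: map each model's predictions
    back through a fixed pre-trained reverse model and compare the inputs
    reconstructed from the predictions (amplitude RMSE, wrapped phase MAE)
    against the inputs reconstructed from the true outputs.
    """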

    # Load the frozen reverse model used to map outputs back to inputs
    reverse_model_loc = "models/Fowrard_BaseLine_run_0_time_2018_11_05_19_05_19.hdf5"
    best_reverse_model = run_eval.loadBestModel(reverse_model_loc)
    path, model_name_with_ext = os.path.split(reverse_model_loc)
    model_name, file_extension = os.path.splitext(model_name_with_ext)
    model_params_loc = os.path.join('model_params', model_name + ".json")
    with open(model_params_loc) as jf:
        reverse_model_params = json.load(jf)

    #for dataset in ["train","dev","test"]:
    for dataset in ["dev"]:

        Y_pred_list = []

        for ix, model_location in enumerate(models_locations):

            best_model = run_eval.loadBestModel(model_location)

            path, model_name_with_ext = os.path.split(model_location)
            model_name, file_extension = os.path.splitext(model_name_with_ext)

            model_params_loc = os.path.join('model_params',
                                            model_name + ".json")

            with open(model_params_loc) as jf:
                model_params = json.load(jf)

            X_train, Y_train, X_dev, Y_dev = run_eval.loadData(model_params,
                                                               dataset=dataset)

            if model_params["scaling"]:
                X_train, X_dev, scaler = prep.scaleData(X_train, X_dev)

            Y_pred_dev = best_model.predict(X_dev)

            output = np.zeros(
                (Y_pred_dev[0].shape[0], len(model_params["OutputNames"])))
            for i in range(len(model_params["OutputNames"])):
                temp = Y_pred_dev[i]
                output[:, i] = temp[:, 0]

            Y_pred_dev = output

            # Map both the true and the predicted outputs back through the
            # reverse model, scaling them first if it expects scaled inputs
            if reverse_model_params["scaling"]:
                _, Y_dev, scaler_reverse = prep.scaleData(Y_train, Y_dev)
                _, Y_pred_dev, scaler_reverse = prep.scaleData(
                    Y_train, Y_pred_dev)

            X_from_true = best_reverse_model.predict(Y_dev)
            X_from_pred = best_reverse_model.predict(Y_pred_dev)


            output1 = np.zeros((X_from_true[0].shape[0],
                                len(reverse_model_params["OutputNames"])))
            output2 = np.zeros((X_from_true[0].shape[0],
                                len(reverse_model_params["OutputNames"])))
            for i in range(len(reverse_model_params["OutputNames"])):
                temp1 = X_from_true[i]
                temp2 = X_from_pred[i]
                output1[:, i] = temp1[:, 0]
                output2[:, i] = temp2[:, 0]

            X_from_true = output1
            X_from_pred = output2

            # TODO: add convert to NumPy

            # Wrapped phase error: arctan2(sin d, cos d) maps each difference
            # d into (-pi, pi] before taking the mean absolute error
            angle = np.arctan2(
                np.sin(X_from_true[:, 3:6] - X_from_pred[:, 3:6]),
                np.cos(X_from_true[:, 3:6] - X_from_pred[:, 3:6]))
            mae_angs = np.abs(angle).mean(axis=0)

            mse_amp = ((X_from_pred[:, 0:3] -
                        X_from_true[:, 0:3])**2).mean(axis=0)

            errors = np.sqrt(mse_amp).tolist() + mae_angs.tolist()

            print("Dataset: ", dataset)
            print("Model: ", ix, ":", model_location)
            print("Errors: ", errors)

        # Ensemble aggregation over Y_pred_list is not implemented for the
        # reverse-model evaluation; only the header is printed
        if len(models_locations) > 1:

            print("Dataset: ", dataset)
            print("Model: Ensemble")