import time

import numpy as np
from scipy.spatial.distance import pdist, squareform


def MLP_reg_leave_one_out(x, t, T=50000, n1=3, n2=1, ita=0.3):
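    """Leave-one-out regression with an MLP.

    For each of the N samples, trains a fresh MLP on the other N - 1
    samples and predicts the held-out one. `trainMLP` and `MLP_reg` are
    assumed to be defined elsewhere in this project; their signatures
    are inferred from the calls below.

    Returns the list of N held-out predictions.
    """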
    N = len(x)
    vals = [None] * N
    for i in range(N):
        traind = np.array(list(x[:i]) + list(x[i + 1 :]))
        testd = np.array(x[i])
        traint = np.array(list(t[:i]) + list(t[i + 1 :]))

        wih, who = trainMLP(x=traind, t=traint, T=T, n1=n1, n2=n2, ita=ita)
        val = MLP_reg(testd, wih, who)
        vals[i] = val
    return vals


def MLP_label_leave_one_out(x, t, T=50000, n1=3, n2=1, ita=0.3, th=0.5):
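    """Leave-one-out classification with an MLP.

    Trains one model per held-out sample, thresholds the network output
    at `th`, and accumulates tp/fp/tn/fn over the N test points.
    `trainMLP` and `MLP_getLabel` are assumed to be defined elsewhere.

    Returns (labels, accuracy, precision, recall, sensitivity,
    specificity, F1, confusion matrix), or None if a metric denominator
    is zero.
    """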
    tp = 0
    fp = 0
    tn = 0
    fn = 0
    N = len(x)
    labels = [None] * N
    for i in range(N):
        print("Training MLP model #:", i + 1)
        traind = np.array(list(x[:i]) + list(x[i + 1 :]))
        testd = np.array(x[i])
        traint = np.array(list(t[:i]) + list(t[i + 1 :]))
        testt = t[i]

        wih, who = trainMLP(x=traind, t=traint, T=T, n1=n1, n2=n2, ita=ita)
        testlabel = MLP_getLabel(testd, wih, who, th)
        labels[i] = testlabel
        if testlabel == 1:
            if testt[0] == 1:
                tp += 1
            else:
                fp += 1
        elif testlabel == 0:
            if testt[0] == 0:
                tn += 1
            else:
                fn += 1

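    # Summary metrics over the N leave-one-out predictions. The confusion
    # matrix is laid out as [[tn, fp], [fn, tp]]: rows are actual
    # negative/positive, columns are predicted negative/positive.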
    try:
        acc = (tp + tn) / N
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        sensitivity = recall
        specificity = tn / (tn + fp)
        f1_measure = (2 * (precision * recall)) / (precision + recall)
        conf_matrix = np.array([[tn, fp], [fn, tp]])
        return labels, acc, precision, recall, sensitivity, specificity, f1_measure, conf_matrix
    except ZeroDivisionError:
        print("A metric denominator is zero; the evaluation metrics are undefined")


def UA_leave_one_out(path, features1, features2, T=50000, k=1, ita=0.3, th=0.5):
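    """Leave-one-out evaluation of the stacked (unified) architecture.

    Per held-out sample i: (1) train MLP1 on `features1`; (2) append
    MLP1's absolute error as an extra feature and fit a KNN regressor
    on the augmented `features2`; (3) feed both models' outputs into a
    small second MLP that produces the final label. `import_data`,
    `trainMLP`, `MLP_reg`, `MLP_getLabel`, `getKDTree` and `KNN_reg`
    are assumed to come from the surrounding project.
    """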
    tp = 0
    fp = 0
    tn = 0
    fn = 0
    xMLP, t = import_data(path, features1)
    xKNN, t = import_data(path, features2)

    N = len(xMLP)
    m = len(xKNN[0])  # width of the KNN feature matrix (insert position for the error column)
    labels = [None] * N
    
    for i in range(N):
        print("Training MLP model #:", i + 1)
        traind = np.array(list(xMLP[:i]) + list(xMLP[i + 1 :]))
        traint = np.array(list(t[:i]) + list(t[i + 1 :]))
        testt = t[i]

        # train MLP1 on the training split
        wih, who = trainMLP(x=traind, t=traint, T=T, n1=3, n2=1, ita=ita)

        # get MLP1 values for the entire data set (the inner index must
        # not be i, or it would clobber the outer loop variable)
        valsMLP = [None] * N
        for j in range(N):
            valsMLP[j] = MLP_reg(testd=np.array(xMLP[j]), wih=wih, who=who, n1=3, n2=1)

        # insert the absolute MLP1 errors as an extra feature; build a
        # fresh augmented copy so xKNN does not grow on every iteration
        targets = np.array([row[0] for row in t])
        err = np.abs(np.array(valsMLP) - targets)
        xKNN_err = np.insert(arr=xKNN, obj=m - 1, values=err, axis=1)
        traind = np.array(list(xKNN_err[:i]) + list(xKNN_err[i + 1 :]))
        traint = np.array(list(t[:i]) + list(t[i + 1 :]))

        # build a k-d tree from the training split, then query all points
        kd_tree = getKDTree(x=traind, t=traint)
        valsKNN = [None] * N
        for j in range(N):
            valsKNN[j] = KNN_reg(testd=np.array(xKNN_err[j]), kd_tree=kd_tree, k=k)

        # the two model outputs are the inputs of MLP2
        x2 = np.column_stack((valsMLP, valsKNN))

        traind = np.array(list(x2[:i]) + list(x2[i + 1 :]))
        traint = np.array(list(t[:i]) + list(t[i + 1 :]))
        testd = np.array(x2[i])

        # train MLP2 on the training split
        wih, who = trainMLP(x=traind, t=traint, T=T, n1=1, n2=1, ita=ita)

        # get the label for the held-out point
        label = MLP_getLabel(testd, wih=wih, who=who, th=th, n1=1, n2=1)
        labels[i] = label
        
        if label == 1:
            if testt[0] == 1:
                tp += 1
            else:
                fp += 1
        elif label == 0:
            if testt[0] == 0:
                tn += 1
            else:
                fn += 1
    
    try:
        acc = (tp + tn) / N
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        sensitivity = recall
        specificity = tn / (tn + fp)
        f1_measure = (2 * (precision * recall)) / (precision + recall)
        conf_matrix = np.array([[tn, fp], [fn, tp]])
        return labels, acc, precision, recall, sensitivity, specificity, f1_measure, conf_matrix
    except ZeroDivisionError:
        print("A metric denominator is zero; the evaluation metrics are undefined")
    def train(self, X, Y):
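        """Fit the model on a batch of multivariate time series.

        Note: this is a method of a reservoir-computing model class; it
        relies on attributes (`self._reservoir`, `self._dim_red`,
        `self._ridge_embedding`, `self.readout`, ...) configured by the
        enclosing class, which is not shown here. X is a tensor of shape
        (N, T, V); for the 'svm' and 'mlp' readouts Y is one-hot encoded.
        Returns the training time in minutes.
        """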

        time_start = time.time()

        # ============ Compute reservoir states ============
        res_states = self._reservoir.get_states(X,
                                                n_drop=self.n_drop,
                                                bidir=self.bidir)

        # ============ Dimensionality reduction of the reservoir states ============
        if self.dimred_method.lower() == 'pca':
            # matricize
            N_samples = res_states.shape[0]
            res_states = res_states.reshape(-1, res_states.shape[2])
            # ..transform..
            red_states = self._dim_red.fit_transform(res_states)
            # ..and put back in tensor form
            red_states = red_states.reshape(N_samples, -1, red_states.shape[1])
        elif self.dimred_method.lower() == 'tenpca':
            red_states = self._dim_red.fit_transform(res_states)
        else:  # Skip dimensionality reduction
            red_states = res_states

        # ============ Generate representation of the MTS ============
        coeff_tr = []
        biases_tr = []

        # Output model space representation
        if self.mts_rep == 'output':
            if self.bidir:
                X = np.concatenate((X, X[:, ::-1, :]), axis=2)

            for i in range(X.shape[0]):
                self._ridge_embedding.fit(red_states[i, 0:-1, :],
                                          X[i, self.n_drop + 1:, :])
                coeff_tr.append(self._ridge_embedding.coef_.ravel())
                biases_tr.append(self._ridge_embedding.intercept_.ravel())
            input_repr = np.concatenate(
                (np.vstack(coeff_tr), np.vstack(biases_tr)), axis=1)

        # Reservoir model space representation
        elif self.mts_rep == 'reservoir':
            for i in range(X.shape[0]):
                self._ridge_embedding.fit(red_states[i, 0:-1, :],
                                          red_states[i, 1:, :])
                coeff_tr.append(self._ridge_embedding.coef_.ravel())
                biases_tr.append(self._ridge_embedding.intercept_.ravel())
            input_repr = np.concatenate(
                (np.vstack(coeff_tr), np.vstack(biases_tr)), axis=1)

        # Last state representation
        elif self.mts_rep == 'last':
            input_repr = red_states[:, -1, :]

        # Mean state representation
        elif self.mts_rep == 'mean':
            input_repr = np.mean(red_states, axis=1)

        else:
            raise RuntimeError('Invalid representation ID')

        # ============ Apply readout ============
        if self.readout_type == 'lin':  # Ridge regression
            self.readout.fit(input_repr, Y)

        elif self.readout_type == 'svm':  # SVM readout
            Ktr = squareform(pdist(input_repr, metric='sqeuclidean'))
            Ktr = np.exp(-self.svm_gamma * Ktr)
            self.readout.fit(Ktr, np.argmax(Y, axis=1))
            self.input_repr_tr = input_repr  # store them to build test kernel

        elif self.readout_type == 'mlp':  # MLP (deep readout)

            g = make_MLPgraph(input_dim=input_repr.shape[1],
                              output_dim=Y.shape[1],
                              mlp_layout=self.mlp_layout,
                              nonlinearity=self.nonlinearity,
                              init='he',
                              learning_rate=0.001,
                              w_l2=self.w_l2,
                              max_gradient_norm=1.0,
                              seed=self.seed)

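            # Note: this `trainMLP` is the deep-readout trainer that pairs
            # with `make_MLPgraph` (a different function from the
            # leave-one-out helper above, despite the shared name). Here
            # the same (input_repr, Y) pair is passed as both training
            # and validation data.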
            trainMLP(input_repr,
                     Y,
                     input_repr,
                     Y,
                     batch_size=25,
                     num_epochs=self.num_epochs,
                     dropout_prob=self.p_drop,
                     save_id='default',
                     input_graph=g)

        tot_time = (time.time() - time_start) / 60
        return tot_time