Example #1
def u_next_lax_wendroff(u_last, u_halfstep, delta_t, delta_x, j, time,
                        position):
    u_halfstep[j] = u_next_half_step(u_last, delta_t, delta_x, j, time,
                                     position)
    return u_last[j] - delta_t/delta_x*(func.f2(u_halfstep[j]) - func.f2(u_halfstep[j-1])) \
           + delta_t*func.g2(u_halfstep, delta_x, j) \
           + (delta_t/2)*(func.s(time, position, u_halfstep, j) + func.s(time, position, u_halfstep, j-1))
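Read together with the `u_next_half_step` helper shown in a later example, this appears to be the two-step (Richtmyer) Lax-Wendroff scheme with an additional `func.g2` term and a source term `func.s`. Taking `func.f2` as the flux f and `u_halfstep[j]` as the staggered predictor value, the corrector step computed here reads roughly as

$$ u_j^{n+1} = u_j^n - \frac{\Delta t}{\Delta x}\Bigl(f\bigl(u_{j+1/2}^{n+1/2}\bigr) - f\bigl(u_{j-1/2}^{n+1/2}\bigr)\Bigr) + \Delta t\, g_j^{n+1/2} + \frac{\Delta t}{2}\Bigl(s_{j+1/2}^{n+1/2} + s_{j-1/2}^{n+1/2}\Bigr), $$

where the half-step values come from the predictor in `u_next_half_step`.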
Example #2
 def evaluate(self, numFuncion):
     if numFuncion == 1:
         for agent in self.agents:
             f1(agent)
     elif numFuncion == 2:
         for agent in self.agents:
             f2(agent)
     else:
         for agent in self.agents:
             f3(agent)
Example #3
def showConstant():
    print "Adding Constant"
    var = Tk.BooleanVar()
    x = func.T2()
    y = func.f2(func.T2())
    constant = Tk.Checkbutton(root,
                              text="Constant",
                              variable=var,
                              command=lambda: plot(var, x, y, "Constant"))
    constant.pack(side=Tk.TOP, anchor=Tk.W)
Example #4
def main():
    print("Solution of the system using the analytical method: ")
    x = newton1(1, 1, 20, 1e-9, 1e-9)
    print("\tx0 =", x)
    print("Value of the 1st function at this point: ",
          format(f1(x[0], x[1]), '.9f'))
    print("Value of the 2nd function at this point: ",
          format(f2(x[0], x[1]), '.9f'))
    print("\nРешение системы при численном методе: ")
    xM1 = newton2(1, 1, 20, 1e-9, 1e-9, 0.01)
    xM2 = newton2(1, 1, 20, 1e-9, 1e-9, 0.05)
    xM3 = newton2(1, 1, 20, 1e-9, 1e-9, 0.1)
    print("M = 0.01\tx0 =", xM1)
    print("M = 0.05\tx0 =", xM2)
    print("M = 0.10\tx0 =", xM3)
    print("Значение 1-ой функции в данной точке: ",
          format(f1(xM1[0], xM1[1]), '.9f'))
    print("Значение 2-ой функции в данной точке: ",
          format(f2(xM1[0], xM1[1]), '.9f'))
Example #5
 def test_plot_data(self):
     plotter = LatticePlot("./test_data/test_plot_data.pdf")
     # create the plot interval
     x = np.linspace(0., 24., 24)
     # some sample arguments, similar to the pion on A40.24
     args = [100., 0.14463]
     # time extent similar to A40.24
     add = [48.]
     # generate data
     y = f2(args, x, add)
     # error of 1%
     dy = y * 0.01
     # plot data
     plotter.plot_data(x, y, dy, "data")
     plotter.save()
Example #6
def compute(fname, fpath, winsize=30):
    df = pd.read_csv(fpath + fname,
                     sep=',', header=0, parse_dates=[0], dayfirst=True,
                     index_col=0, skiprows=[0, 1, 2, 4])

    df1 = df
    outs_ratio = 0.005  # this is input param

    # xstart, xend = 0, 500  # xstart, xend = 1, 20 # this is for testing
    # df1 = (df.ix[xstart:xend, 1:5])


    """ calculate returns
        NOTE: make sure the last day is the first row in the input dataset is sorted"""

    df_in = df1.sort_index()
    df3 = df_in.copy()
    df3 = (df3 / df3.shift(1)) - 1

    df_in = df_in.round(4)
    df3 = df3.round(4)  # round dataframes to 4 decimals

    df_no_outs = df3.copy()
    df_with_outs = df3.copy()

    df_outs_ind = df_in.copy()
    df_outs_ind[pd.notnull(df_outs_ind)] = 0
    df_outs_ind[df_outs_ind.isnull()] = 0

    # set outlier index for all prediction methods to 0
    CAD_mean_pred_outs_inds = df_outs_ind.copy()
    CAD_median_pred_outs_inds = df_outs_ind.copy()
    CAD_mode_pred_outs_inds = df_outs_ind.copy()
    CAD_max_prob_pred_outs_inds = df_outs_ind.copy()
    df_knn_inds = df_outs_ind.copy()
    df_arima_inds = df_outs_ind.copy()

    win_size = winsize  # set by the input to the algorithm (very early tests used winsize 9)

    n_chops = int(len(df_no_outs.index) / win_size)

    df4 = df3.iloc[:-win_size, :]
    lst_sub_df_starts = np.random.choice(df4.index, n_chops)

    for sub in lst_sub_df_starts:
        df_sub_start = np.where(df3.index == sub)[0][0]  # we assume the index is unique (i.e. no duplicate datetime)
        df_sub_end = df_sub_start + win_size
        df_sub_in = df_no_outs.iloc[df_sub_start:df_sub_end, :].copy()

        df_sub_out, df_outs_ind = fn.replace_outs2(df_sub_in, df_outs_ind, outs_ratio)
        # print("number of outliers injected: ",df_outs_ind.sum(axis=1).sum())

        for inx in df_sub_out.index:
            df_with_outs.loc[inx] = df_sub_out.loc[inx]


    # store and retrieve the input data with artificial outliers
    """import pickle
    data1 = df_with_outs, df_outs_ind
    output = open('data.pkl', 'wb')
    pickle.dump(data1, output)
    pkl_file = open('data.pkl', 'rb')
    data1 = pickle.load(pkl_file)"""


    # CAD_mean_pred_outs_inds = df_in.copy() # we set the indexes for all prediction algs in the begining of the program
    # CAD_mean_pred_outs_inds[pd.notnull(CAD_mean_pred_outs_inds)] = 0

    strt = 6  # start from the 4th row (i.e. 6 - 2) of the input dataframe

    while strt < len(df3.index) - win_size:
        strt -= 2
        # call CAD prediction for current df
        df_sub = df3.iloc[strt: strt + win_size, :]
        CAD_mean_preds_inds = fn.CAD_pred(df_sub, centroid_modes['mean'])[0]
        CAD_median_preds_inds = fn.CAD_pred(df_sub, centroid_modes['median'])[0]
        CAD_mode_preds_inds = fn.CAD_pred(df_sub, centroid_modes['mode'])[0]
        CAD_max_prob_preds_inds = fn.CAD_pred(df_sub, centroid_modes['max_prob'])[0]

        # update df_inds (all four prediction frames share df_sub's index, so one loop updates them all)
        for inx in CAD_mean_preds_inds.index:
            CAD_mean_pred_outs_inds.loc[inx] = CAD_mean_preds_inds.loc[inx]
            CAD_median_pred_outs_inds.loc[inx] = CAD_median_preds_inds.loc[inx]
            CAD_mode_pred_outs_inds.loc[inx] = CAD_mode_preds_inds.loc[inx]
            CAD_max_prob_pred_outs_inds.loc[inx] = CAD_max_prob_preds_inds.loc[inx]


        # call kNN and Random Walk for current df
        df_knn_inds = fn.knn_preds(df_sub, df_knn_inds, k=4)
        df_arima_inds = fn.arima_pred2(df_sub, df_arima_inds)

        # print("at indx = {0} date = {1} out of {2}".format(strt, df3.iloc[strt].index, len(df3.index)/win_size))
        # print("at indx = ", strt)
        strt += win_size

    CAD_mean_prec, CAD_mean_rec = fn.get_fmeasure(df_outs_ind, CAD_mean_pred_outs_inds)
    CAD_mean_f2 = fn.f2(CAD_mean_prec, CAD_mean_rec)

    CAD_median_prec, CAD_median_rec = fn.get_fmeasure(df_outs_ind, CAD_median_pred_outs_inds)
    CAD_median_f2 = fn.f2(CAD_median_prec, CAD_median_rec)

    CAD_mode_prec, CAD_mode_rec = fn.get_fmeasure(df_outs_ind, CAD_mode_pred_outs_inds)
    CAD_mode_f2 = fn.f2(CAD_mode_prec, CAD_mode_rec)

    CAD_max_prob_prec, CAD_max_prob_rec = fn.get_fmeasure(df_outs_ind, CAD_max_prob_pred_outs_inds)
    CAD_max_prob_f2 = fn.f2(CAD_max_prob_prec, CAD_max_prob_rec)

    knn_prec, knn_rec = fn.get_fmeasure(df_outs_ind, df_knn_inds)
    knn_f2 = fn.f2(knn_prec, knn_rec)

    arima_prec, arima_rec = fn.get_fmeasure(df_outs_ind, df_arima_inds)
    arima_f2 = fn.f2(arima_prec, arima_rec)

    # print("               precision\t\t\trecall\t\t\tF2") # this is for testing mode
    print("kg             {0}\t\t{1}\t\t{2}\n"
          "kNN            {3}\t\t{4}\t\t{5}\n"
          "RandomWalk     {6}\t\t{7}\t\t{8}\n".format(CAD_mean_prec, CAD_mean_rec, CAD_mean_f2,
                                                      knn_prec, knn_rec, knn_f2,
                                                      arima_prec, arima_rec, arima_f2))

    # print("kg             {0}\t\t{1}\t\t{2}\n".format(CAD_mode_prec, CAD_mode_rec, CAD_mode_f2)) # this is for testing mode
    print("kg             {0}\t\t{1}\t\t{2}\n".format(CAD_max_prob_prec, CAD_max_prob_rec, CAD_max_prob_f2))
Example #7
def u_next_lax_friedrichs(u_last, delta_t, delta_x, j, time, position):
    return (u_last[j+1] + u_last[j-1])/2 - delta_t/(2*delta_x)*(func.f2(u_last[j+1]) - func.f2(u_last[j-1])) \
           + delta_t*func.g2(u_last, delta_x, j) \
           + delta_t*func.s(time, position, u_last, j)
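Reading `func.f2` as the flux f and keeping the extra `func.g2` and `func.s` contributions as g and s, this update appears to be the standard Lax-Friedrichs step

$$ u_j^{n+1} = \frac{u_{j+1}^n + u_{j-1}^n}{2} - \frac{\Delta t}{2\,\Delta x}\Bigl(f(u_{j+1}^n) - f(u_{j-1}^n)\Bigr) + \Delta t\, g_j^n + \Delta t\, s_j^n. $$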
Example #8
def getNewY(x, y):
    return y - (f.f2(x, y) * f.f1_dx(x, y) - f.f1(x, y) * f.f2_dx(x, y)) / (
        f.f1_dx(x, y) * f.f2_dy(x, y) - f.f2_dx(x, y) * f.f1_dy(x, y))
Example #9
def getNewX(x, y):
    return x - (f.f1(x, y) * f.f2_dy(x, y) - f.f2(x, y) * f.f1_dy(x, y)) / (
        f.f1_dx(x, y) * f.f2_dy(x, y) - f.f2_dx(x, y) * f.f1_dy(x, y))
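Together with `getNewY` from the previous example, `getNewX` gives the two components of a single Newton step for a system of two nonlinear equations, written out via Cramer's rule on the 2x2 Jacobian. A minimal driver loop might look like the sketch below; the sample system and its partial derivatives are illustrative stand-ins, not the project's actual `f` module.

# Minimal sketch of a full 2x2 Newton iteration built from the updates above.
# The system (x + y = 3, x*y = 2) and its derivatives are illustrative only.
def f1(x, y):
    return x + y - 3.0

def f2(x, y):
    return x * y - 2.0

def f1_dx(x, y):
    return 1.0

def f1_dy(x, y):
    return 1.0

def f2_dx(x, y):
    return y

def f2_dy(x, y):
    return x

def newton_step(x, y):
    # Same Cramer's-rule update as getNewX/getNewY above.
    det = f1_dx(x, y) * f2_dy(x, y) - f2_dx(x, y) * f1_dy(x, y)
    new_x = x - (f1(x, y) * f2_dy(x, y) - f2(x, y) * f1_dy(x, y)) / det
    new_y = y - (f2(x, y) * f1_dx(x, y) - f1(x, y) * f2_dx(x, y)) / det
    return new_x, new_y

x, y = 3.0, 1.0                    # starting guess
for _ in range(20):                # cap the number of Newton steps
    new_x, new_y = newton_step(x, y)
    converged = abs(new_x - x) < 1e-9 and abs(new_y - y) < 1e-9
    x, y = new_x, new_y
    if converged:
        break
print(x, y)                        # converges to the root (2, 1)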
Example #10
while True:
    print("Press i to run function number i (e.g. 1 for fun1)\nPress 6 to exit")
    c = int(input())
    if c == 1:
        print("Enter x and y:")
        x, y = input().split()
        x = int(x)
        y = int(y)
        print("Result=", f.f1(x, y))
    elif c == 2:
        print("Enter n and r:")
        n, r = input().split()
        n = int(n)
        r = int(r)
        print("Result=", f.f2(n, r))
    elif c == 3:
        n = int(input("Enter n:"))
        print("Result=", f.f3(n))
    elif c == 4:
        print("Enter m and n")
        m, n = input().split()
        m = int(m)
        n = int(n)
        print("Result=", f.f4(m, n))
    elif c == 5:
        print("Enter m and x")
        m, x = input().split()
        m = int(m)
        x = int(x)
        print("Result=", f.f5(m, x))
Example #11
def u_next_half_step(u_last, delta_t, delta_x, j, time, position):
    return (u_last[j+1] + u_last[j] - delta_t/delta_x*(func.f2(u_last[j+1]) - func.f2(u_last[j])) \
            + delta_t*func.g2(u_last, delta_x, j) \
            + delta_t/2*(func.s(time, position, u_last, j+1) \
            + func.s(time, position, u_last, j)))/2
def aggsub(x, prob, eps, mit, tau, sig1, delta, sig2, c1, c2, tlimit):
    """ Aggregate subgradient method for nonsmooth DC optimization.
    
    Input:
        x       - a starting point;
        prob    - selection of problem:
                0 - PWLRL1 problem
                1 - auxiliary min-problem
        eps     - optimality tolerance;
        mit     - maximum number of outer iterations;
        tau     - proximity parameter, tau > 0;
        sig1    - decrease parameter for tau, sig1 in (0,1);
        delta   - inner iteration tolerance, delta > 0; 
        sig2    - decrease parameter for delta, sig2 in (0,1];
        c1,c2   - line search parameters, 0 < c2 <= c1 < 1; 
        tlimit  - time limit for aggsub.
        
    Global parameters:    
        a       - an input matrix;
        
    Output:
        x       - a solution obtained;
        f       - the objective value at x;   
        nit     - number of iterations;
        nf      - number of function evaluations;        
        ng      - number of subgradient evaluations;
    """

    # Import DC functions and their subgradients
    import functions
    import numpy as np
    import config    # global problem data (config.nfea) used below
    import time

    # Starting time for aggsub
    usedtime0 = time.clock()

    fdiff = -99.0
    # Initialization
    if prob == 0:
        f1 = functions.f1(x)
        f2 = functions.f2(x)
    else:
        f1 = functions.auxminf1(x)
        f2 = functions.auxminf2(x)

    fold = f1 - f2  # Original function value
    print "f original is", fold
    print "Computing..."
    nf = 1  # Number of function evaluations.
    ng = 0  # Number of subgradient evaluations.
    nit = 0  # Number of iterations.
    maxii = max(min(x.size, 500), 50)  # Maximum number of inner iterations.
    stopii = -1  # reason to stop the inner iteration:
    # stopii = -1 - not stopped yet;
    # stopii =  0 - small gradient: candidate solution;
    # stopii =  1 - descent direction found;
    # stopii =  2 - too many inner iterations without progress;
    # stopii =  3 - no descent direction found (exit).
    small = 1e-5
    nrmnew = 1e+10
    ii = 0

    # Outer iteration
    while True:
        # Step 1
        nii = 0  # Number of inner iterations.
        dd = np.ones_like(x) / np.sqrt(
            x.size)  # Initialization of the direction dd.
        if prob == 0:
            f1 = functions.f1(x)  # Needed for correct confic tables.
            g2 = functions.df2(x)  # Subgradient of the DC component f2.
            f1 = functions.f1(x + tau * dd)  # test for bug fix
            g1 = functions.df1(x +
                               tau * dd)  # Subgradient of the DC component f1.
        else:
            f1 = functions.auxminf1(x)  # Needed for correct confic tables.
            g2 = functions.dauxminf2(x)  # Subgradient of the DC component f2.
            f1 = functions.auxminf1(x + tau * dd)  # test for bug fix
            g1 = functions.dauxminf1(
                x + tau * dd)  # Subgradient of the DC component f1.

        # No need to recompute g2 if returning from Step 6.
        ng += 1
        sg = g1 - g2  # Approx. subgradient of the function.
        asg = sg  # Aggregate subgradient.
        nrmasg = 0.0  # Norm of the aggregate subgradient.

        # Inner iteration
        while True:
            nii += 1

            # Step 2
            sgdiff = np.dot(sg - asg, sg - asg)
            if sgdiff > small:
                #lam = (nrmasg*nrmasg - np.dot(sg,asg))/sgdiff
                lam = -np.dot(
                    asg, sg - asg) / sgdiff  # this should be the same as above
            else:
                lam = 0.50

            if lam > 1.0 or lam < 0.0:
                print "Projecting lambda = ", lam, "to [0,1]."
                if lam < 0:
                    lam = 0.0
                else:
                    lam = 1.0

            # If lam = 0 nothing changes and we end up at inner iteration termination 2.

            # Step 3
            asg = lam * sg + (1.0 -
                              lam) * asg  # The new aggregate subgradient.
            nrmasg = np.linalg.norm(asg)  # Norm of the aggregate subgradient.

            if nrmasg < delta:  # Inner iteration termination
                stopii = 0
                break  # With this we should go to Step 6.

            if nii % 5 == 0:  # Inner iteration termination 2
                nrmold = nrmnew
                nrmnew = nrmasg
                if np.abs(nrmold - nrmnew) < 0.0001:
                    #print "Norm is not changing."
                    stopii = 0
                    break  # With this we should go to Step 6.

            # Step 4: Search direction
            dd = -asg / nrmasg

            # Step 5
            xtau = x + tau * dd
            if prob == 0:
                f1 = functions.f1(xtau)
                f2 = functions.f2(xtau)
            else:
                f1 = functions.auxminf1(xtau)
                f2 = functions.auxminf2(xtau)

            fnew = f1 - f2
            nf += 1
            dt = fnew - fold

            #if (dt > -c1*tau*nrmasg and -dt/fold > 0.1): # makes results worse (limited testing)
            if (dt > -c1 * tau * nrmasg):  # Not a descent direction
                #if (dt < 0 and -dt/fold > 0.05): # just for testing purposes
                #    print "No descent but should it be?",-dt/fold,dt,-c1*tau*nrmasg,fold,fnew
                if prob == 0:
                    g1 = functions.df1(xtau)
                else:
                    g1 = functions.dauxminf1(xtau)
                sg = g1 - g2
                ng += 1
            else:
                stopii = 1
                break  # with this we should go to Step 7.

            # Additional termination from inner iteration
            if nii > maxii:
                if tau > eps:
                    #print "Too many inner iterations. Adjusting tau."
                    stopii = 2
                else:
                    print "Too many inner iterations with no descent direction found."  #,nit,fnew,fold
                    if ii == 0:
                        ii = 1
                        nii = 0  # Number of inner iterations starts again from zero.
                        #dd = -np.ones_like(x)/np.sqrt(x.size)  # Initialization of the direction dd.
                        dd = -dd  # Opposite of the direction dd.
                        # Reset tau > 0: default 10 if n < 200, 50 otherwise.
                        if config.nfea < 200:
                            tau = 10.0
                        else:
                            tau = 50.0
                        if prob == 0:
                            f1 = functions.f1(x + tau * dd)  # test for bug fix
                            g1 = functions.df1(
                                x + tau *
                                dd)  # Subgradient of the DC component f1.
                        else:
                            f1 = functions.auxminf1(
                                x + tau * dd)  # test for bug fix
                            g1 = functions.dauxminf1(
                                x + tau *
                                dd)  # Subgradient of the DC component f1.

                        ng += 1
                        sg = g1 - g2  # Approx. subgradient of the function.
                        asg = sg  # Aggregate subgradient.
                        nrmasg = 0.0  # Norm of the aggregate subgradient.
                        print "Trying opposite direction."
                        print "Computing..."
                        continue

                    else:
                        print "Exit."
                        stopii = 3
                break

        # Step 6: Stopping criterion

        if stopii == 0:  # Small norm
            if tau <= eps:
                print "Critical point found."
                break  # Critical point found
            else:
                tau *= sig1
                delta *= sig2
                stopii = -1
                nit += 1
                continue

        # Step 7: Line search
        elif stopii == 1:  # Descent direction
            if ii == 1:
                print "Descent direction found."
                print "Computing..."
                ii = 0

            step = tau  # Here fnew = f(xtau), fold = f(x)
            count = 0
            while count < 10:  # a maximum of 10 line search steps are made
                count += 1
                step *= 2.0
                xnew = x + step * dd
                if prob == 0:
                    f1 = functions.f1(xnew)
                    f2 = functions.f2(xnew)
                else:
                    f1 = functions.auxminf1(xnew)
                    f2 = functions.auxminf2(xnew)

                nf += 1
                fdiff = f1 - f2 - fold

                if fdiff <= -c2 * step * nrmasg:
                    xtau = xnew
                    fnew = f1 - f2
                else:
                    break

            # Step 8: Update step
            # The last f1 and f2 are computed at xnew != xtau; need to compute f1(xtau) to get max_line correctly

            x = xtau
            fold = fnew

        if tau > eps:
            tau *= sig1  # tau too small is not good, needs safeguard
        elif np.abs(fdiff) < 1e-7:
            print "Termination with small change in function values."  #,fdiff
            break

        delta *= sig2

        nit += 1

        # Termination with maximum number of iterations.
        if nit >= mit:  # Number of iterations > mit
            print "Termination with maximum number of iterations."
            break

        # Termination with the time limit for AggSub.
        usedtime = time.clock() - usedtime0
        if usedtime > tlimit:
            print "Termination with the time limit for AggSub."
            break

        if stopii == 3:
            #print "No descent direction found in inner iteration."
            break

        stopii = -1  # To next inner iteration.

    # Outer iteration ends

    f = fold

    return x, f, nit, nf, ng
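A call to `aggsub` might look like the sketch below. The starting point and parameter values are placeholders chosen only to satisfy the constraints listed in the docstring (tau > 0, sig1 in (0,1), sig2 in (0,1], 0 < c2 <= c1 < 1); they are not taken from the original project, and the surrounding `functions` and `config` modules must be importable for the call to run.

import numpy as np

# Hypothetical driver; every value below is a placeholder, not a project default.
x0 = np.zeros(100)             # starting point
x_sol, f_sol, nit, nf, ng = aggsub(x0,
                                   0,        # prob: 0 = PWLRL1 problem
                                   1e-5,     # eps: optimality tolerance
                                   1000,     # mit: maximum number of outer iterations
                                   10.0,     # tau: proximity parameter, tau > 0
                                   0.5,      # sig1: decrease parameter for tau, in (0, 1)
                                   1e-4,     # delta: inner iteration tolerance
                                   1.0,      # sig2: decrease parameter for delta, in (0, 1]
                                   0.2,      # c1: line search parameter
                                   0.05,     # c2: line search parameter, 0 < c2 <= c1 < 1
                                   1800.0)   # tlimit: time limit in seconds
print "f =", f_sol, "nit =", nit, "nf =", nf, "ng =", ng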