def get_step(w_new,w_old,x,error_old,error_old_old,lamb):
    # Barzilai-Borwein (BB2) curvature estimate from the last two iterates:
    #   R = grad(w_new) - grad(w_old),  S = w_new - w_old,  alpha = (R^T R)/(R^T S)
    # The caller then uses step_size = sigma/alpha.
    # Note: grad_old must be evaluated with the error of the *previous*
    # iterate (error_old_old), not error_old.
    grad_new=grad_beta.get_gradient(error_old,x,w_new,lamb)
    grad_old=grad_beta.get_gradient(error_old_old,x,w_old,lamb)
    R=grad_new-grad_old
    S=w_new-w_old
    # Reduce the 1x1 sparse products to scalars before testing for zero.
    A=(R.transpose()*R).todense()[0,0]
    B=(R.transpose()*S).todense()[0,0]
    if A==0 or B==0:
        return 0
    else:
        return A/B
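# Sketch of the intended call pattern (sigma is assumed to be a scalar
# safeguard factor defined elsewhere in this script):
#   alpha = get_step(w_old, w_old_old, Xtrain, error_old, error_old_old, lamb)
#   step_size = sigma / alpha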
        
        
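        # --- Main iteration body (excerpt) ---
        # Assumed to run inside the outer loop over i, with Xtrain, Y, w_old,
        # w_old_old, lamb, sigma, sigma2 and y_old defined by the surrounding code.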
        # Residual at the current iterate.
        error=(Xtrain*w_old)-Y[3:7,0]
        if i==0:
            # First iteration: no previous iterate yet, so use a small fixed step.
            step_size=0.0000001
        else:
            # Barzilai-Borwein step built from the last two iterates.
            error_old=error
            error_old_old=(Xtrain*w_old_old)-Y[3:7,0]
            alpha=get_step(w_old,w_old_old,Xtrain,error_old,error_old_old,lamb)
            if alpha==0:
                print "**ERROR**"
                print "alpha=0, cannot continue the algorithm"
                break
            step_size=sigma/alpha
        grad1=grad_beta.get_gradient(error,Xtrain,w_old,lamb)

        # Gradient step and updated residual.
        w_new = w_old - (step_size * grad1)
        error=(Xtrain*w_new)-Y[3:7,0]
        # Squared norm of the update, used in the sufficient-decrease test below.
        dif=w_new-w_old
        dif=dif.transpose()*dif
        y_new=grad_beta.get_func(error,w_new,lamb)[0,0] # objective (error function) value at w_new
        
        
        count=0
        if i!=0:
            # Backtracking: shrink the step while the sufficient-decrease
            # (Armijo-type) condition is not met.
            while y_new>=y_old-sigma2*alpha*dif[0,0]:
                count=count+1