Example #1
    def __init__(self,
                 affinity='euclidean',
                 linkage='complete',
                 cut_mode='n_clusters',
                 cut_value=3):
        assert affinity in ['euclidean', 'l2', 'l1', 'cosine']
        self.affinity = affinity
        if affinity == 'euclidean':
            self.affinity_func = lambda a, b: npnorm(a - b)
        elif affinity == 'l2':
            self.affinity_func = lambda a, b: npnorm(a - b)**2
        elif affinity == 'l1':
            self.affinity_func = lambda a, b: npnorm(a - b, ord=1)
        elif affinity == 'cosine':
            self.affinity_func = lambda a, b: 1. - (np.dot(a, b) /
                                                    (npnorm(a) * npnorm(b)))

        assert linkage in [
            'complete',
            'single',
            'average',
            #'ward'
        ]
        self.linkage = linkage

        assert cut_mode in ['n_clusters', 'dist_thres']
        self.cut_mode = cut_mode

        assert cut_value > 0
        self.cut_value = cut_value
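
This constructor assumes `numpy` is imported as `np` and `numpy.linalg.norm` is aliased as `npnorm` at module level. A minimal standalone sketch of the distance lambdas, on made-up inputs rather than the original class:

import numpy as np
from numpy.linalg import norm as npnorm

a, b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
euclidean = lambda a, b: npnorm(a - b)
cosine = lambda a, b: 1. - np.dot(a, b) / (npnorm(a) * npnorm(b))
print(euclidean(a, b))  # sqrt(2) ~= 1.4142
print(cosine(a, b))     # 1.0 for orthogonal vectors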
Example #2
 def test_sparse_vector_norms(self):
     for sparse_type in self._sparse_types:
         for M in self._test_matrices:
             S = sparse_type(M)
             for axis in (0, 1, -1, -2, (0,), (1,), (-1,), (-2,)):
                 assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
                 for ord in None, 2, np.inf, -np.inf, 1, 0.5, 0.42:
                     assert_allclose(spnorm(S, ord, axis=axis), npnorm(M, ord, axis=axis))
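
The test assumes `spnorm` is `scipy.sparse.linalg.norm`, `npnorm` is `numpy.linalg.norm`, and `assert_allclose` comes from `numpy.testing`. A self-contained sketch of the same equivalence check on a single matrix:

import numpy as np
from numpy.linalg import norm as npnorm
from numpy.testing import assert_allclose
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import norm as spnorm

M = np.array([[1.0, 0.0, 2.0],
              [0.0, 3.0, 0.0]])
S = csr_matrix(M)
assert_allclose(spnorm(S, axis=0), npnorm(M, axis=0))        # column-wise 2-norms
assert_allclose(spnorm(S, 1, axis=1), npnorm(M, 1, axis=1))  # row-wise 1-norms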
Example #3
def _angle_between(a, b):
    """Compute angle between two 3d vectors."""
    import math
    from numpy.linalg import norm as npnorm
    arccosInput = np.dot(a, b) / npnorm(a) / npnorm(b)
    arccosInput = 1.0 if arccosInput > 1.0 else arccosInput
    arccosInput = -1.0 if arccosInput < -1.0 else arccosInput
    return math.acos(arccosInput)
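
`_angle_between` relies on `np` being imported at module level; a quick hypothetical usage check:

import numpy as np

a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 0.0])
print(_angle_between(a, b))  # ~1.5708 rad (pi/2) for perpendicular vectors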
Example #4
 def test_sparse_matrix_norms_with_axis(self):
     for sparse_type in self._sparse_types:
         for M in self._test_matrices:
             S = sparse_type(M)
             for axis in None, (0, 1), (1, 0):
                 assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
                 for ord in "fro", np.inf, -np.inf, 1, -1:
                     assert_allclose(spnorm(S, ord, axis=axis), npnorm(M, ord, axis=axis))
             # Some numpy matrix norms are allergic to negative axes.
             for axis in (-2, -1), (-1, -2), (1, -2):
                 assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
                 assert_allclose(spnorm(S, "f", axis=axis), npnorm(M, "f", axis=axis))
                 assert_allclose(spnorm(S, "fro", axis=axis), npnorm(M, "fro", axis=axis))
Example #5
    def init(cls,
             colors,
             image_size,
             max_shift_percent=0.1,
             max_radius_percent=0.5,
             alpha_range=(127, 255),
             max_alpha_shift=5):
        cls.colors = colors
        cls.image_size = image_size

        cls.polygon_max_shift = int(npnorm(image_size) * max_shift_percent)
        cls.polygon_max_radius = int(npnorm(image_size) * max_radius_percent)
        cls.alpha_range = alpha_range
        cls.max_alpha_shift = max_alpha_shift
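
Here `npnorm(image_size)` is the length of the image diagonal, so the shift and radius limits scale with image size. A small sketch, assuming a hypothetical 800x600 image:

from numpy.linalg import norm as npnorm

image_size = (800, 600)
print(int(npnorm(image_size) * 0.1))  # 100: max shift is 10% of the 1000-pixel diagonal
print(int(npnorm(image_size) * 0.5))  # 500: max radius is half the diagonal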
Example #6
def calc_en_gaussian(pos, pos_cm, a, b, sigma, epsilon, u, u_inv):
    from numpy.linalg import norm as npnorm
    F = np.zeros(shape=(pos.shape[0], 2))
    # Unit cell mapping
    posp = np.dot(u, pos.T).T
    posp -= np.floor(posp + 0.5)
    pospp = np.dot(u_inv, posp.T).T
    posR = npnorm(pospp, axis=1)
    # Mask positions
    bulk = posR <= a
    tail = np.logical_and(posR > a, posR < b)
    Rtail = posR[tail]
    xx = (Rtail - a) / (b - a)  # Reduce coordinate rho in [0,1]
    ftail = (1 - 10 * xx**3 + 15 * xx**4 - 6 * xx**5)  # Damping f [1,0]
    dftail = (-30 * xx**2 + 60 * xx**3 - 30 * xx**4) / (b - a
                                                        )  # Derivative of f
    # Energy
    en = -epsilon * np.sum(gaussian(posR[bulk], 0, sigma))
    en += -epsilon * np.sum(gaussian(Rtail, 0, sigma) * ftail)
    # Forces bulk
    bulk = np.logical_and(
        posR <= a, posR != 0)  # Exclude singular point in origin where F=0
    F[bulk, 0] = -epsilon * gaussian(posR[bulk], 0, sigma) * (
        posR[bulk] / np.power(sigma, 2.)) * pospp[bulk, 0] / posR[bulk]
    F[bulk, 1] = -epsilon * gaussian(posR[bulk], 0, sigma) * (
        posR[bulk] / np.power(sigma, 2.)) * pospp[bulk, 1] / posR[bulk]
    # Forces tail F = d(E*f)/dx = E'*f + E*f'
    f1 = epsilon * gaussian(Rtail, 0, sigma) * dftail  # E f'
    f2 = -ftail * epsilon * gaussian(Rtail, 0, sigma) * (
        Rtail / np.power(sigma, 2.))  # E' f
    F[tail, 0] = (f1 + f2) * pospp[tail, 0] / posR[tail]
    F[tail, 1] = (f1 + f2) * pospp[tail, 1] / posR[tail]
    # Torque
    tau = np.cross(pos - pos_cm, F)
    return en, np.sum(F, axis=0), np.sum(tau)
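
The key use of `npnorm` here is `npnorm(pospp, axis=1)`, which turns an (N, 2) array of mapped positions into N radial distances. A minimal sketch of that row-wise norm, with made-up coordinates:

import numpy as np
from numpy.linalg import norm as npnorm

pospp = np.array([[3.0, 4.0],
                  [0.0, 1.0]])
print(npnorm(pospp, axis=1))  # [5. 1.]: one distance per particle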
Example #7
 def test_sparse_matrix_norms(self):
     for sparse_type in self._sparse_types:
         for M in self._test_matrices:
             S = sparse_type(M)
             assert_allclose(spnorm(S), npnorm(M))
             assert_allclose(spnorm(S, 'fro'), npnorm(M, 'fro'))
             assert_allclose(spnorm(S, np.inf), npnorm(M, np.inf))
             assert_allclose(spnorm(S, -np.inf), npnorm(M, -np.inf))
             assert_allclose(spnorm(S, 1), npnorm(M, 1))
             assert_allclose(spnorm(S, -1), npnorm(M, -1))
Example #8
 def forward(self):
     """
     Calculates the mean squared error.
     """
     # NOTE: We reshape these to avoid possible matrix/vector broadcast
     # errors.
     #
     # For example, if we subtract an array of shape (3,) from an array of shape
     # (3,1) we get an array of shape(3,3) as the result when we want
     # an array of shape (3,1) instead.
     #
     # Making both arrays (3,1) insures the result is (3,1) and does
     # an elementwise subtraction as expected.
     y = self.inbound_nodes[0].value.reshape(-1, 1)
     a = self.inbound_nodes[1].value.reshape(-1, 1)
     # TODO: your code here
     from numpy import square as npsquare
     from numpy.linalg import norm as npnorm
     self.value = npsquare(npnorm(y - a)) / len(y)
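
Since the squared Euclidean norm of `y - a` is the sum of squared errors, dividing by `len(y)` gives the mean squared error. A quick check against `np.mean`, on hypothetical values:

import numpy as np
from numpy import square as npsquare
from numpy.linalg import norm as npnorm

y = np.array([1.0, 2.0, 3.0]).reshape(-1, 1)
a = np.array([1.5, 2.0, 2.0]).reshape(-1, 1)
mse = npsquare(npnorm(y - a)) / len(y)
assert np.isclose(mse, np.mean((y - a) ** 2))  # both give ~0.4167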
Example #9
def calc_en_tan(pos, pos_cm, a, b, ww, epsilon, u, u_inv):
    """Calculate energy and forces. Well is approximated with tanh function."""
    from numpy.linalg import norm as npnorm
    en = 0
    F = np.zeros(shape=(pos.shape[0], 2))
    # map to substrate cell
    posp = np.dot(
        u, pos.T).T  # Fast numpy dot with different convention on row/cols
    posp -= np.floor(posp + 0.5)
    # back to real space
    pospp = np.dot(u_inv, posp.T).T
    posR = npnorm(pospp, axis=1)
    # colloids inside the curve region: numpy mask and relative R
    inside = np.logical_and(posR < b, posR > a)
    Rin = posR[inside]
    # energy inside flat bottom region
    en = -epsilon * np.sum(posR <= a)
    # calculation of energy and force. See X. Cao Phys. Rev. E 103, 1 (2021).
    xx = (Rin - a) / (b - a)  # Reduce coordinate rho in [0,1]
    # energy
    en += np.sum(epsilon / 2. * (np.tanh((xx - ww) / xx / (1 - xx)) - 1.))
    # force F = - grad(E)
    ff = (xx - ww) / xx / (1 - xx)
    ass = (np.cosh(ff) * (1 - xx) * xx) * (np.cosh(ff) * (1 - xx) * xx)
    vecF = -epsilon / 2 * (xx * xx + ww - 2 * ww * xx) / ass
    # Go from rho to r again
    vecF /= (b - a)
    # Project to x and y
    F[inside, 0] = vecF * pospp[inside, 0] / Rin
    F[inside, 1] = vecF * pospp[inside, 1] / Rin
    # Torque = r vec F. Applied to CM pos
    tau = np.cross(pos - pos_cm, F)
    # Return energy, F and torque on CM
    return en, np.sum(F, axis=0), np.sum(tau)
Example #10
def minimize_cg(self,x0, epsilon, args=(), jac=None, callback=None,
                 gtol=1e-5, norm=Inf, maxiter=None,
                 disp=False, return_all=False):
    """
    Minimization of scalar function of one or more variables using the
    conjugate gradient algorithm.
    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter : int
        Maximum number of iterations to perform.
    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.
    norm : float
        Order of norm (Inf is max, -Inf is min).
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.
        
    BCH note:  in nonlinear CG minimization, -grad(f(x)) takes the place of the 
    residual rk in en.wikipedia.org/wiki/Conjugate_gradient_method
    """

    retall = return_all
    xk = asarray(x0).flatten() #initial
    if maxiter is None:
        maxiter = len(x0) * 20
#         maxiter = 10
#     print 'Forcing maxiter to be maxiter = len(x0) * 20!!!!!!!!!!!!!!!!!!!!!!!!1'
#     maxiter = len(x0) * 20
#     func_calls, f = wrap_function(f, args)
#     grad_calls, myfprime = approx_fprime, (f, epsilon))
#     grad = self.approx_fprime(x0,epsilon)
    print('Start Minimization'); self.IBZ.mesh = self.points; self.facetsMeshMathPrint(self.IBZ); print(';Show[p,q]\n')
    tryAgain = True
    while tryAgain: #if error occurs, start again without any history
        self.error = None
        k = 1
        self.points = xk.reshape((len(self.points),3))
        old_fval,grad = self.enerGrad(xk)
        old_old_fval = old_fval + npnorm(grad) / 2
    
        if retall:
            allvecs = [xk]
        warnflag = 0
        self.pk = -grad #intial search direction
        gnorm = vecnorm(grad, ord=norm)
        methodMin = 'conjGrad'
        while (gnorm > gtol) and (k < maxiter) and self.error == None:# and (abs(old_fval - old_old_fval)>0.01):
            if k==2:
                'pause'
            if methodMin == 'conjGrad':       
                deltak = dot(grad, grad)
                stp_k, old_fval, old_old_fval, gradp1 = \
                             self.line_search_wolfe1(xk, epsilon, grad, old_fval,
                                                  old_old_fval, c2=0.4, amin=1e-100, amax=1e100) 
                print('\n++k,energy/NN,gnorm,grad ', k, old_fval/len(self.points), npnorm(gradp1))  # ,gradp1
   
#                 if stp_k is None: #BCH
#     #                 return
                xk = xk + stp_k * self.pk
    #             print 'delta_r', (stp_k * self.pk).reshape((len(self.points),3))
                self.points = xk.reshape((len(self.points),3))
                self.IBZ.mesh = self.points; self.facetsMeshMathPrint(self.IBZ); print(';Show[p,q]\n')
#                 print 'new points',self.points
                if retall:
                    allvecs.append(xk)
                yk = gradp1 - grad
                beta_k = max(0, dot(yk, gradp1) / deltak)
                self.pk = -gradp1 + beta_k * self.pk
#                 print 'new pk',k, self.pk
#                 N=100
#                 for i in range(N):
#                     step = i*stp_k/N
#                     en1,gr1 = self.enerGrad(xk+step * self.pk)
#                     print '\tstep',step,'energy/N',en1/len(self.points)
                    
                grad = gradp1
            elif methodMin == 'steepest':
                stp_k = 0.0001
    #             print 'force step to be', stp_k
                xk = xk + stp_k * self.pk
    #             print 'xk  ',k, xk
                self.points = xk.reshape((len(self.points),3))
                self.IBZ.mesh = self.points; self.facetsMeshMathPrint(self.IBZ); print(';Show[p,q]\n')
    #             self.plotPos(array(self.points),len(self.points),'_{}'.format(str(k)))
                fnew,grad = self.enerGrad(xk)  
    #             print 'grad', grad  
    #             print
                self.pk = -grad         
            gnorm = vecnorm(grad, ord=norm)
            if callback is not None:
                callback(xk)
            k += 1
        if self.error is None:
            tryAgain = False
            
#         else:
#             'pause'
    fval = old_fval
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])

    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
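
The convergence test `gnorm > gtol` uses `vecnorm(grad, ord=norm)` with `norm=Inf` by default, i.e. the largest absolute gradient component. A small sketch of that check, assuming plain numpy in place of the scipy helper:

import numpy as np

grad = np.array([1e-6, -3e-6, 2e-6])
gnorm = np.linalg.norm(grad, ord=np.inf)  # max |g_i|
print(gnorm < 1e-5)  # True: gradient small enough to stop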
Example #11
def view_gradient_descent(expression,
                          position,
                          epsilon,
                          alpha_bar=3,
                          rho=0.4,
                          sigma=0.7):
    """
	carry out the gradient descent method to find the minimal point of the given function
	para::expression: the expression of the function
	para::position: the initial search point
	para::epsilon: the termination error
	para::alpha_bar: the initial length of the line search
	para::rho: parameter in line search, in (0, 1/2)
	para::sigma: parameter in line search, in (rho, 1)
	..note: to use this method, you'd better construct the expression in this way:
		claim all the variables
		write the expression

		for example:
		>>> import sympy 
		>>> import numpy as np
		>>> x, y, z = sympy.symbols('x y z')
		>>> expr = sympy.cos(x) + sympy.exp(y*z)
	"""
    # convert the sympy expression to a numpy function for plotting
    show_func = sympy.lambdify(expression.free_symbols, expression, 'numpy')
    # the new points in each iteration
    show_points = [position.tolist()]
    iter_time = 0
    while True:
        expr_gradient = get_gradient(expression, position)
        if npnorm(expr_gradient) < epsilon:
            break
        direction = -expr_gradient
        # get the step length by line search
        alpha = line_search(expression, position, direction, alpha_bar, rho,
                            sigma)
        position = position + alpha * direction
        show_points.append(position.tolist())
        iter_time += 1
    print('iterations: ', iter_time)
    print('point: ', show_points[-1])
    # draw the contour and line segments between the iterate points
    fig = plt.figure()
    ccx = np.linspace(-50, 50, 1000)
    ccy = np.linspace(-50, 50, 1000)
    X, Y = np.meshgrid(ccx, ccy)
    Z = show_func(X, Y)
    plt.contour(X, Y, Z, colors='black')
    """
	for k in range(len(show_points) -1):
		plt.plot((show_points[k], show_points[k+1]),
			color='brown', marker='o')
	"""
    # draw the lines
    segs = [[k, k + 1] for k in range(len(show_points) - 1)]
    lines = [[tuple(show_points[j]) for j in i] for i in segs]
    lc = matplotlib.collections.LineCollection(lines)
    ax = fig.add_subplot(111)
    ax.add_collection(lc)
    plt.xlim([-50, 50])
    plt.ylim([-50, 50])
    plt.show()
Example #12
def ramp_F_fixTau(driving_FsT, MD_inputs, ramp_inputs, name=None, debug=False):
    """        """

    t0 = time()

    # Unpack external forces
    F_range, Tau = driving_FsT
    NF = len(F_range)

    if name == None: name = 'ramp_F-Tau_%.4g' % Tau

    #-------- SET UP LOGGER -------------
    # For this threads and children
    c_log = logging.getLogger(name)
    c_log.setLevel(logging.INFO)
    if debug: c_log.setLevel(logging.DEBUG)
    # Adopted format: level - current function name - message. Width is fixed as visual aid.
    log_format = logging.Formatter(
        '[%(levelname)5s - %(funcName)10s] %(message)s')
    console = open('console-%s.log' % name, 'w')
    handler = logging.StreamHandler(console)
    handler.setFormatter(log_format)
    c_log.addHandler(handler)

    #-------- READ INPUTS -------------
    if type(
            MD_inputs
    ) == str:  # Inputs passed as path to json file instead of dictionary
        with open(MD_inputs) as inj:
            MD_inputs = json.load(inj)
    else:
        MD_inputs = MD_inputs.copy()  # Get a copy so all threads can edit
    MD_inputs['Tau'] = Tau  # Set current torque in all MD of this process
    Nsteps = MD_inputs['Nsteps']
    # First run might be longer
    Nsteps0 = Nsteps  # Default
    try:
        Nsteps0 = ramp_inputs['Nsteps0']  # Update if found
        c_log.debug('Update Nstep0: %i -> %i' % (Nsteps, Nsteps0))
    except KeyError:
        pass
    # Angle of the driving force in xy
    Fphi = ramp_inputs['Fphi']
    # To be extra careful, make MD breaking conditions stricter
    MD_min_mobfrac, MD_max_mobfrac = 0.08, 0.5

    iF = 0
    for F in F_range:
        c_log.info("On F=%.3g (%i of %i %.2f%%)" % (F, iF, NF, iF / NF * 100))

        if F == 0 and Tau == 0:
            c_log.info("Skip not driven config")
            continue

        # Adjust Nsteps
        if iF == 0: MD_inputs['Nsteps'] = Nsteps0
        else: MD_inputs['Nsteps'] = Nsteps

        # Set min/max vel and omega to exit MD run at current torque and force
        MD_inputs['vel_min'], MD_inputs[
            'vel_max'] = F * tmob0 * MD_min_mobfrac, F * tmob0 * MD_max_mobfrac
        MD_inputs['omega_min'], MD_inputs[
            'omega_max'] = Tau * rmob0 * MD_min_mobfrac, Tau * rmob0 * MD_max_mobfrac
        # Set correct logical function to check break
        if F == 0:
            c_log.debug("Check roto dep only")
            break_check = lambda rmob, tmob: rmob > rmob0 * rmob_frac
            MD_inputs['vel_min'], MD_inputs['vel_max'] = -1, 1e30
        elif Tau == 0:
            c_log.debug("Check trasl dep only")
            break_check = lambda rmob, tmob: tmob > tmob0 * tmob_frac
            MD_inputs['omega_min'], MD_inputs['omega_max'] = -1, 1e30
        else:
            c_log.debug("Check both dep")
            break_check = lambda rmob, tmob: np.logical_or(
                rmob > rmob0 * rmob_frac, tmob > tmob0 * tmob_frac)

        # Set current force in MD inputs
        MD_inputs['Fx'], MD_inputs['Fy'] = F * np.cos(
            Fphi * np.pi / 180), F * np.sin(Fphi * np.pi / 180)

        # ------ Start MD ------
        c_log.propagate = False  # Don't print update in driver logger
        c_log.info('-' * 50)
        c_outf = "out-%s-F_%.4g.dat" % (name, F)
        c_outinfo = "info-%s-F_%.4g.json" % (name, F)
        with open(c_outf, 'w') as c_out:
            MD_rigid_rototrasl([MD_inputs],
                               outstream=c_out,
                               info_fname=c_outinfo,
                               logger=c_log,
                               debug=debug)

        c_log.propagate = True

        # ------ Get stationary Omega(T) ------
        pos_cm0, th0 = MD_inputs['pos_cm'], MD_inputs['angle']
        Rcm0 = npnorm(pos_cm0)
        data = pd.read_fwf(
            c_outf, infer_nrows=1e30
        )  # Pandas has the tendency of underestimate column width
        tail_len = 100  # Average over this many prints. Oscillations in a depinned config make this tricky.
        # Careful with the labels here: if they change, this breaks. Weird syntax needed from pandas. Am I doing something wrong?
        pos_cm1 = np.reshape(
            [data['02)pos_cm[0]'].tail(1), data['03)pos_cm[1]'].tail(1)],
            newshape=(2))
        Rcm1 = npnorm(pos_cm1)
        th1, omega1 = data['06)angle'].tail(1).mean(), data['07)omega'].tail(
            tail_len).mean()
        Vx1, Vy1 = data['04)Vcm[0]'].tail(1).mean(), data['05)Vcm[1]'].tail(
            tail_len).mean()
        V1 = npnorm([Vx1, Vy1])

        rmob, tmob = omega1 / Tau, V1 / F
        c_log.debug(
            "rmob %10.4g (thold %10.4g, break: %s) tmob %10.4g (thold %10.4g, break: %s)."
            % (rmob, rmob0 * rmob_frac, rmob > rmob0 * rmob_frac, tmob,
               tmob0 * tmob_frac, tmob > tmob0 * tmob_frac))
        c_log.debug("Break condition %s" % break_check(rmob, tmob))

        if break_check(rmob, tmob):
            c_log.info("Above threshold. Exit.")
            if (Rcm1 - Rcm0 < MD_inputs['R']):
                c_log.warning('Cluster translated less than R: %.3g < %.3g' %
                              (Rcm1 - Rcm0, MD_inputs['R']))
            th_warn = 3  # [deg] Arbitrary
            if (th1 - th0 < th_warn):
                c_log.warning('Cluster rotated less than threshold: %.3g < %.3g' %
                              (th1 - th0, th_warn))
            break

        # ------ UPDATE MD INPUTS ------
        # Update angle and CM pos for next run. Only variable determining the sys config in Overdamped Langevin.
        MD_inputs['angle'] = float(th1)
        c_log.debug(pos_cm1)
        MD_inputs['pos_cm'] = pos_cm1.tolist(
        )  # Json doesn't like numpy arrays
        iF += 1

    # Print execution time
    t_exec = time() - t0
    c_log.info("Done in %is (%.2fmin or %.2fh)" %
               (t_exec, t_exec / 60, t_exec / 3600))

    console.close()
    return 0
Example #13
            except FileNotFoundError:
                c_log.info("Not computed")
                c_log.info("-"*30+'\n')
                continue
            transient = int(np.floor(len(data)/3)) # Discard first third of simulation

            omega, Tau, theta = [data[label].to_numpy()
                                 for label in ['07)omega', '10)torque', '06)angle']
            ]
            Vx, Vy, Fx, Fy, x, y = [data[label].to_numpy()
                                    for label in ['04)Vcm[0]', '05)Vcm[1]',
                                                  '08)forces[0]', '09)forces[1]',
                                                  '02)pos_cm[0]', '03)pos_cm[1]']
            ]
            V = npnorm([Vx, Vy], axis=0)
            F = npnorm([Fx, Fy], axis=0)
            R = npnorm([x, y], axis=0)
#            print(V.shape)
            rmobility = (omega/Tau)[transient:].mean()*bool(exTau)
            tmobility = (V/F)[transient:].mean()*bool(exF)
            rdep, tdep = rmobility>rmob0*rmob_frac, tmobility>tmob0*tmob_frac
            c_log.info("Rmob %.5g (thold %.5g), Tmob %.5g (thold %.5g)", rmobility, rmob0*rmob_frac, tmobility, tmob0*tmob_frac)
            if R[-1] > inputs['MD_inputs']['rcm_max']: tdep = 1
            if theta[-1] > inputs['MD_inputs']['theta_max']: rdep = 1
            TauF_grid[i,j] = rdep + 2*tdep # 0=pinned, 1=roto, dep 2=trasl dep, 3=full dep

            if TauF_grid[i,j] == 0: c_log.info('Pinned')
            elif TauF_grid[i,j] == 1: c_log.info('R dep')
            elif TauF_grid[i,j] == 2: c_log.info('T dep')
            elif TauF_grid[i,j] == 3: c_log.info('RT dep')
def MD_rigid_rototrasl(argv, outstream=sys.stdout, name=None, info_fname=None, pos_fname=None, logger=None, debug=False):
    """Overdamped Langevin Molecular Dynamics of rigid cluster over a substrate"""

    t0 = time() # Start clock

    if name == None: name = 'MD_rigid_rototrasl'
    if info_fname == None: info_fname = "info-%s.json" % name
    if pos_fname == None: pos_fname = "pos-%s.dat" % name

    #-------- SET UP LOGGER -------------
    if logger == None:
        c_log = logging.getLogger("MD_rigid_rototrasl") # Set name of the function
        # Adopted format: level - current function name - message. Width is fixed as visual aid.
        logging.basicConfig(format='[%(levelname)5s - %(funcName)10s] %(message)s')
        c_log.setLevel(logging.INFO)
        if debug: c_log.setLevel(logging.DEBUG)
    else:
        c_log = logger

    #-------- INPUTS -------------
    if type(argv[0]) == dict: # Inputs passed as python dictionary
        inputs = argv[0]
    elif type(argv[0]) == str: # Inputs passed as path to json file
        with open(argv[0]) as inj:
            inputs = json.load(inj)
    else:
        raise TypeError('Unrecognized input structure (no dict or filename str)', argv[0])
    c_log.debug("Input dict \n%s", "\n".join(["%10s: %10s" % (k, str(v)) for k, v in inputs.items()]))

    # -- Cluster --
    input_cluster = inputs['cluster_hex'] # Geom as lattice and Bravais points
    pos_cm = np.zeros(2, dtype=float) # If not given, start from centre
    angle = 0 # If not given, start aligned
    try: angle = inputs['angle'] # Starting angle [deg]
    except KeyError: pass
    try: pos_cm = np.array(inputs['pos_cm'], dtype=float) # Start pos [micron]
    except KeyError: pass
    # create cluster
    pos = create_cluster(input_cluster, angle)[:,:2]

    np.savetxt(pos_fname, pos)
    N = pos.shape[0] # Size of the cluster
    c_log.info("Cluster N=%i start at (x,y,theta)=(%.3g,%.3g,%.3g)" % (N, *pos_cm, angle))

    # -- Substrate --
    # define substrate metric
    sub_symmetry = inputs['sub_symm'] # Substrate symmetry (triangle or square)
    well_shape = inputs['well_shape'] # Substrate well shape (Gaussian or Tanh)
    R = inputs['R'] # Well lattice spacing [micron]
    epsilon =  inputs['epsilon'] # Well depth [zJ]

    if sub_symmetry == 'square':
        calc_matrices = calc_matrices_square
    elif sub_symmetry == 'triangle':
        calc_matrices = calc_matrices_triangle
    else:
        raise ValueError("Symmetry %s not implemented" % sub_symmetry)
    u, u_inv = calc_matrices(R)

    if well_shape == 'tanh':
        # Realistic well energy landscape
        calc_en_f = calc_en_tan
        a = inputs['a'] # Well end radius [micron]
        b = inputs['b'] # Well slope radius [micron]
        wd = inputs['wd'] # Well asymmetry. 0.29 is a good value
        en_params = [a, b, wd, epsilon, u, u_inv]
    elif well_shape == 'gaussian':
        # Gaussian energy landscape
        #sigma = inputs['sigma'] # Width of Gaussian
        #en_params = [sigma, epsilon, u, u_inv]
        #calc_en_f = calc_en_gaussian
        # Gaussian energy landscape
        #a = R/2*inputs['at'] # Tempered tail as fraction of R
        #b = R/2*inputs['bt'] # Flat end as fraction of R
        a = inputs['a'] # Well end radius [micron]
        b = inputs['b'] # Well slope radius [micron]
        sigma = inputs['sigma'] # Width of Gaussian
        en_params = [a, b, sigma, epsilon, u, u_inv]
        calc_en_f = calc_en_gaussian
    else:
        raise ValueError("Form %s not implemented" % well_shape)
    c_log.info("%s substrate R=%.3g %s well shape depth=%.3g" % (sub_symmetry, R, well_shape, epsilon))

    # -- MD params --
    T = inputs['T'] # kBT [zJ]
    Tau = inputs['Tau'] # [fN*micron]
    Fx, Fy = inputs['Fx'], inputs['Fy'] # [fN]
    F = np.array([Fx, Fy])
    Nsteps = inputs['Nsteps']
    dt = inputs['dt'] # [ms]
    print_skip = 100 # Timesteps to skip between prints
    try: print_skip = inputs['print_skip']
    except KeyError: pass
    printprog_skip = int(Nsteps/20) # Progress output frequency
    c_log.debug("Print every %i timesteps. Status update every %i." % (print_skip, printprog_skip))
    # initialise variable
    forces, torque = np.zeros(2), 0.

    # -- Simulation break conditions --
    both_breaks = True # Break if both V and omega satisfy conditions
    try: both_breaks = bool(inputs['both_breaks'])
    except KeyError: pass
    break_omega, break_V = False, False
    omega_avg, vel_avg = 0, 0 # store average of omega and velox over given timesteps
    avglen = 100 # timesteps
    min_Nsteps = 1e30 # min steps for average. E.g. 1e5
    omega_min, omega_max = -1, 1e30 # tolerance (>0) to consider the system stuck or depinned.
    vel_min, vel_max = -1, 1e30     # If not given, continue indefinitely: max huge, min <0
    rcm_max, theta_max = 1e30, 1e30

    # Set Stuck config exit
    try: min_Nsteps = inputs['min_Nsteps']
    except KeyError: pass # If not given, continue indefinitely: min steps huge.
    try: omega_min = inputs['omega_min']
    except KeyError: pass
    try: vel_min = inputs['vel_min']
    except KeyError: pass

    c_log.debug("Stuck %s: Nmin=%g (tmin=%g) avglen %i omega_min=%.4g deg/ms velox_min=%.4g micron/ms" % ('both' if both_breaks else 'single', min_Nsteps, min_Nsteps*dt, avglen, omega_min, vel_min))
    if min_Nsteps < avglen: raise ValueError("Omega/Velocity average length larger than the minimum number of steps!")

    # Set pinning config exit
    try: theta_max = inputs['theta_max']+angle # Exit if cluster rotates more than this
    except KeyError: pass
    try: rcm_max = inputs['rcm_max']+npnorm(pos_cm) # Exit if cluster slides more than this
    except KeyError: pass
    try: omega_max = inputs['omega_max'] # Exit if cluster rotates faster than this
    except KeyError: pass
    try: vel_max = inputs['vel_max'] # Exit if cluster slides faster than this
    except KeyError: pass
    c_log.debug("Depin: theta_max=%.4g omega_max=%.4g rcm_max=%.4g vel_max = %.4g" % (theta_max, rcm_max, omega_max, vel_max))

    #-------- LANGEVIN ----------------
    # Assumes rotation and translation are independent. We just care about the scaling, not the exact number.
    eta = 1   # Translational damping of single colloid
    try: eta = inputs['eta']
    except KeyError: pass
    # CM translational viscosity [fKg/ms], CM rotational viscosity [micron^2*fKg/ms]
    etat_eff, etar_eff = calc_cluster_langevin(eta, pos)
    # Amplitude of random numbers. Translational follows nicely from CLT, rotational is assumed from A. E. Filippov, M. Dienwiebel, J. W. M. Frenken, J. Klafter, and M. Urbakh, Phys. Rev. Lett. 100, 046102 (2008).
    brandt, brandr = np.sqrt(2*T*etat_eff), np.sqrt(2*T*etar_eff)
    kBTroom = 4.069767441860465 #zj
    c_log.info("Number of particles %i Eta trasl %.5g Eta tras eff %.5g Eta roto eff %.5g Ratio roto/tras %.5g kBT=%.3g (kBT/kBTroom=%.3g)" % (N, eta, etat_eff, etar_eff, etar_eff/etat_eff, T, T/kBTroom))

    c_log.info("Tau = %.4g fN*micron (Tau/N=%.4g)" % (Tau, Tau/N))
    c_log.info("Fx=%.4g fN (Fx/N=%.4g), Fy=%.4g fN (Fy/N=%.4g), |F| = %.4g fN (|F|/N=%.4g)",
               Fx, Fx/N, Fy, Fy/N, np.sqrt(Fx**2+Fy**2), np.sqrt(Fx**2+Fy**2)/N)
    c_log.info("Omega free=%.4g  Vfree=(%.4g,%.4g) |Vfree|=%.4g" % (Tau/etar_eff, Fx/etat_eff, Fy/etat_eff, np.sqrt(Fx**2+Fy**2)/etat_eff))
    c_log.debug("Free cluster would rotate %.2f deg", Tau/etar_eff * Nsteps * dt)
    c_log.debug("Free cluster would translate %.2f micron", np.sqrt(Fx**2+Fy**2)/etat_eff * Nsteps * dt)
    c_log.debug("Amplitude of random number trasl %.2g and roto %.2g" % (brandt, brandr))

    #-------- INFO FILE ---------------
    with open(info_fname, 'w') as infof:
        infod = {'eta': eta, 'etat_eff': etat_eff, 'etar_eff': etar_eff, 'brandt': brandt, 'brandr': brandr,
                 'N': N, 'theta_max': theta_max, 'print_skip': print_skip,
                 'min_Nsteps': min_Nsteps, 'avglen': avglen, 'omega_min': omega_min, 'omega_max': omega_max,
                 'pos_cm': pos_cm.tolist()
        }
        infod.update(inputs)
        #if debug: c_log.debug("Info dict\n %s" % ("\n".join(["%s: %s" % (k, type(v)) for k, v in infod.items()])))
        json.dump(infod, infof, indent=True)

    #-------- OUTPUT SETUP -----------
    # !! Labels and print_status data structures must be coherent !!
    num_space = 30 # Width printed numerical values
    indlab_space = 2 # Header index width
    lab_space = num_space-indlab_space-1 # Match width of printed number, including parenthesis
    header_labels = ['e_pot', 'pos_cm[0]', 'pos_cm[1]', 'Vcm[0]', 'Vcm[1]',
                     'angle', 'omega', 'forces[0]', 'forces[1]', 'torque']
    # Gnuplot-compatible (leading #) fix-width output file
    first = '#{i:0{ni}d}){s: <{n}}'.format(i=0, s='dt*it', ni=indlab_space, n=lab_space-1,c=' ')
    print(first+"".join(['{i:0{ni}d}){s: <{n}}'.format(i=il+1, s=lab, ni=indlab_space, n=lab_space,c=' ')
                       for il, lab in zip(range(len(header_labels)), header_labels)]), file=outstream)

    # Inner-scope shortcut for printing
    def print_status():
        data = [dt*it, e_pot, pos_cm[0], pos_cm[1], Vcm[0], Vcm[1],
                angle, omega, forces[0], forces[1], torque]
        print("".join(['{n:<{nn}.16g}'.format(n=val, nn=num_space)
                       for val in data]), file=outstream)

    # Print setup time
    t_exec = time()-t0
    c_log.debug("Setup in %is (%.2fmin or %.2fh)", t_exec, t_exec/60, t_exec/3600)

    #-------- START MD ----------------
    t0 = time() # Start clock
    for it in range(Nsteps):

        # ENERGY LANDSCAPE
        e_pot, forces, torque = calc_en_f(pos + pos_cm, pos_cm, *en_params)

        # UPDATE VELOCITIES
        # First order Langevin equation
        noise = normal(0, 1, size=3)
        Vcm = (forces + F + brandt*noise[0:2])/etat_eff
        omega = (torque + Tau + brandr*noise[2])/etar_eff

        # Print progress
        if it % printprog_skip == 0:
            c_log.info("t=%8.3g of %5.2g (%2i%%) E=%10.3g  x=%9.3g y=%9.3g theta=%9.3g omega=%10.3g |Vcm|=%8.3g",
                       it*dt, Nsteps*dt, 100.*it/Nsteps, e_pot, pos_cm[0], pos_cm[1], angle, omega, npnorm(Vcm))
            #c_log.debug("Noise %.2g %.2g %.2g, thermal kick Fxy=(%.2g %.2g) torque=%.2g" % (noise[0],noise[1], noise[2], *(brandt*noise[0:2]),brandr*noise[2]))
            #c_log.debug("Thermal displ scaled (Fx, Fy)/etat=(%.2g %.2g) torque/etar=%.2g" % (*(brandt*noise[0:2]/etat_eff),brandr*noise[2]/etar_eff))
            c_log.debug("Break: v %.5g (vmin %.5g vmax %.5g); omega %.5g (omegamin %.5g omegamax %.5g)" % (npnorm(Vcm), vel_min, vel_max,
                                                                                               omega, omega_min, omega_max))
        # Print step results
        if it % print_skip == 0: print_status()

        # UPDATE DEGREES OF FREEDOM
        # center of mass follows local forces.
        pos_cm += dt * Vcm
        # angle of cluster follows local torque.
        dangle = dt*omega
        angle += dangle
        # positions are further rotated
        pos = rotate(pos,dangle)

        # CHECK FOR STOPPING CONDITIONS
        # Compute omega average and check for exit conditions
        omega_avg += omega # Average omega to check if system is stuck. See avglen.
        vel_avg += npnorm(Vcm) # Average velocity to check if system is stuck. See avglen.
        if it % avglen == 0:
            omega_avg /= avglen
            vel_avg /= avglen
            rcm = npnorm(pos_cm)

            # If system is stuck, set flag to exit
            if np.abs(omega_avg) < omega_min and it >= min_Nsteps: break_omega = True
            if vel_avg < vel_min and it >= min_Nsteps: break_V = True

            # If system is rotating or sliding without stopping, set flag to exit
            if (angle >= theta_max or np.abs(omega_avg) >= omega_max) and it >= min_Nsteps: break_omega = True
            if (rcm >= rcm_max or vel_avg >= vel_max) and it >= min_Nsteps: break_V = True

            # Break if either or both conditions are satisfied
            if ((break_omega and break_V) and both_breaks) or ((break_omega or break_V) and not both_breaks):
                # Values
                c_log.info("Rotational: angle=%10.4g <omega>_%i=%10.4g" % (angle, avglen, omega_avg))
                c_log.info("Translational: rcm=%10.4g <v>_%i=%10.4g" % (rcm, avglen, vel_avg))
                # Pinned check
                if np.abs(omega_avg) < omega_min: c_log.info("System is roto-pinned (omega min=%.4g)." % (omega_min))
                if np.abs(vel_avg) < vel_min: c_log.info("System is trasl-pinned (vel_min=%.4g)." % (vel_min))
                # Depinned check
                if (angle >= theta_max or np.abs(omega_avg) >= omega_max):
                    c_log.info("System is roto-depinned (theta max=%10.4g omega max=%10.4g)" % (theta_max, omega_max))
                if (rcm >= rcm_max or vel_avg >= vel_max):
                    c_log.info("System is trasl-depinned (rcm max=%10.4g vel max=%10.4g)" % (rcm_max, vel_max))
                # Exit conditions
                c_log.info("Breaking condition (%s) satisfied. Exit" % ('both omega and Vcm' if both_breaks else 'single'))
                break

            omega_avg = 0 # Reset average
            vel_avg = 0
    #-----------------------

    # Print last step, if needed
    c_log.info("t=%7.3g of %5.2g (%2i%%) E=%10.7g  x=%9.3g y=%9.3g theta=%9.3g omega=%8.3g |Vcm|=%8.3g",
               it*dt, Nsteps*dt, 100.*it/Nsteps, e_pot, pos_cm[0], pos_cm[1], angle, omega, npnorm(Vcm))
    print_status()

    # Print execution time
    t_exec = time()-t0
    c_log.info("Done in %is (%.2fmin or %.2fh)" % (t_exec, t_exec/60, t_exec/3600))