def IMPLICATION(condition, *args):
    """Logical implication ``condition => args``.

    A literal ``False`` condition makes the implication vacuously true.
    A literal ``True`` condition passes the consequent(s) through unchanged.
    Otherwise a symbolic form ``NOT(condition & NOT(arg))`` is built,
    mapped elementwise over sequence consequents.
    """
    # Python constant False: implication holds regardless of consequents.
    if condition is False:
        return True
    literal_true = condition is True
    if len(args) == 1 and isinstance(args[0], (tuple, set, list, ndarray)):
        # Single sequence consequent: recurse over its elements.
        return args[0] if literal_true else ooarray(
            [IMPLICATION(condition, item) for item in args[0]])
    if len(args) > 1:
        # Several consequents supplied positionally: recurse over each.
        return args if literal_true else ooarray(
            [IMPLICATION(condition, item) for item in args])
    # Single non-sequence consequent.
    return args[0] if literal_true else NOT(condition & NOT(args[0]))
def oovars(*args, **kw):
    """Create an ooarray of oovar instances.

    Accepted forms:
      * a single int (number of anonymous oovars),
      * a list/tuple of names,
      * a whitespace-separated string of names,
      * several positional name arguments.
    Optional ``lb`` / ``ub`` keywords assign bounds — either a scalar
    (applied to every variable) or a per-variable sequence.
    Remaining keywords are forwarded to each oovar constructor.
    """
    if isPyPy:
        raise FuncDesignerException('''
        for PyPy using oovars() is impossible yet. 
        You could use oovar(size=n), 
        also you can create list or tuple of oovars in a cycle, e.g.
        a = [oovar('a'+str(i)) for i in range(100)]
        but you should ensure you haven't operations like k*a or a+val in your code, 
        it may work in completely different way 
        (e.g. k*a will produce Python list of k a instances)
        ''')
    lb = kw.pop('lb', None)
    ub = kw.pop('ub', None)
    if len(args) == 1:
        arg = args[0]
        if type(arg) in (int, int16, int32, int64):
            r = ooarray([oovar(**kw) for _ in range(arg)])
        elif type(arg) in [list, tuple]:
            r = ooarray([oovar(name=arg[i], **kw) for i in range(len(arg))])
        elif type(arg) == str:
            r = ooarray([oovar(name=s, **kw) for s in arg.split()])
        else:
            raise FuncDesignerException('incorrect args number for oovars constructor')
    else:
        r = ooarray([oovar(name=args[i], **kw) for i in range(len(args))])
    # Apply the two bound kinds uniformly: scalar bounds broadcast to every
    # variable, sequence bounds are assigned positionally.
    for attr, bound in (('lb', lb), ('ub', ub)):
        if bound is None:
            continue
        if np.isscalar(bound) or (isinstance(bound, np.ndarray) and bound.size == 1):
            for v in r.view(np.ndarray):
                setattr(v, attr, bound)
        else:
            assert type(bound) in (list, tuple, ndarray)
            for i, v in enumerate(r):
                setattr(v, attr, bound[i])
    r._is_array_of_oovars = True
    return r
def exp(inp):
    """Elementwise exponential for ooarray / stochastic / oofun / plain inputs.

    Plain numeric inputs fall through to ``numpy.exp``; oofun inputs are
    wrapped in a new oofun with the analytic derivative d/dx exp(x) = exp(x).
    """
    # ooarray: recurse elementwise. NOTE(review): unlike most wrappers in this
    # file, this one does not first check for oofun elements — presumably
    # intentional; confirm against sibling definitions.
    if isinstance(inp, ooarray):
        return ooarray([exp(component) for component in inp])
    # Stochastic distribution: map the values, keep the probabilities.
    if hasStochastic and isinstance(inp, distribution.stochasticDistribution):
        return distribution.stochasticDistribution(
            exp(inp.values), inp.probabilities.copy())._update(inp)
    # Ordinary scalars/arrays: defer to numpy.
    if not isinstance(inp, oofun):
        return np.exp(inp)
    return oofun(st_exp, inp, d=lambda x: Diag(np.exp(x)),
                 vectorized=True, criticalPoints=False)
def arcsinh(inp):
    """Elementwise inverse hyperbolic sine for FD objects and plain data."""
    if isinstance(inp, ooarray) and any(isinstance(item, oofun)
                                        for item in atleast_1d(inp)):
        return ooarray([arcsinh(item) for item in inp])
    if hasStochastic and isinstance(inp, distribution.stochasticDistribution):
        return distribution.stochasticDistribution(
            arcsinh(inp.values), inp.probabilities.copy())._update(inp)
    if not isinstance(inp, oofun):
        return np.arcsinh(inp)
    # d/dx arcsinh(x) = 1 / sqrt(1 + x^2); monotone, so no critical points.
    return oofun(st_arcsinh, inp, d=lambda x: Diag(1.0 / np.sqrt(1 + x**2)),
                 vectorized=True, criticalPoints=False)
def oovars(*args, **kw):
    """Create an ooarray of oovar instances.

    NOTE(review): this definition is byte-identical in behavior to an earlier
    ``oovars`` in this file; being later, it is the one bound at import time.
    Consider removing one copy.

    Accepts a single int (count), a list/tuple of names, a whitespace-separated
    string of names, or several positional name arguments. Optional ``lb``/``ub``
    keywords set scalar or per-variable bounds.
    """
    if isPyPy:
        raise FuncDesignerException('''
        for PyPy using oovars() is impossible yet. 
        You could use oovar(size=n), 
        also you can create list or tuple of oovars in a cycle, e.g.
        a = [oovar('a'+str(i)) for i in range(100)]
        but you should ensure you haven't operations like k*a or a+val in your code, 
        it may work in completely different way 
        (e.g. k*a will produce Python list of k a instances)
        ''')
    lb, ub = kw.pop('lb', None), kw.pop('ub', None)
    if len(args) != 1:
        # Several positional names (or zero, which yields an empty ooarray).
        r = ooarray([oovar(name=args[i], **kw) for i in range(len(args))])
    else:
        spec = args[0]
        if type(spec) in (int, int16, int32, int64):
            r = ooarray([oovar(**kw) for _ in range(spec)])
        elif type(spec) in [list, tuple]:
            r = ooarray([oovar(name=spec[i], **kw) for i in range(len(spec))])
        elif type(spec) == str:
            r = ooarray([oovar(name=token, **kw) for token in spec.split()])
        else:
            raise FuncDesignerException('incorrect args number for oovars constructor')
    if lb is not None:
        if np.isscalar(lb) or (isinstance(lb, np.ndarray) and lb.size == 1):
            # Scalar lower bound broadcast to every variable.
            for v in r.view(np.ndarray):
                v.lb = lb
        else:
            assert type(lb) in (list, tuple, ndarray)
            for i, v in enumerate(r):
                v.lb = lb[i]
    if ub is not None:
        if np.isscalar(ub) or (isinstance(ub, np.ndarray) and ub.size == 1):
            # Scalar upper bound broadcast to every variable.
            for v in r.view(np.ndarray):
                v.ub = ub
        else:
            assert type(ub) in (list, tuple, ndarray)
            for i, v in enumerate(r):
                v.ub = ub[i]
    r._is_array_of_oovars = True
    return r
def floor(inp):
    """Elementwise floor; the symbolic derivative is deliberately unimplemented."""
    if isinstance(inp, ooarray) and any(isinstance(item, oofun)
                                        for item in atleast_1d(inp)):
        return ooarray([floor(item) for item in inp])
    if not isinstance(inp, oofun):
        return np.floor(inp)
    r = oofun(lambda x: np.floor(x), inp, vectorized=True)
    # floor is piecewise constant, so any differentiation attempt is routed to
    # raise_except (project helper — presumably raises; confirm its behavior).
    r._D = lambda *args, **kwargs: raise_except('derivative for FD floor is unimplemented yet')
    r.criticalPoints = False
    return r
def abs(inp):
    """Elementwise absolute value (deliberately shadows the builtin ``abs``
    at this module's level to overload it for FD objects)."""
    if isinstance(inp, ooarray) and any(isinstance(item, oofun)
                                        for item in atleast_1d(inp)):
        return ooarray([abs(item) for item in inp])
    if hasStochastic and isinstance(inp, distribution.stochasticDistribution):
        return distribution.stochasticDistribution(
            abs(inp.values), inp.probabilities.copy())._update(inp)
    if not isinstance(inp, oofun):
        return np.abs(inp)
    # Derivative sign(x); the kink at 0 is handled via the zero-critical-points
    # interval helper.
    return oofun(st_abs, inp, d=lambda x: Diag(np.sign(x)), vectorized=True,
                 _interval_=ZeroCriticalPointsInterval(inp, np.abs))
def log2(inp):
    """Elementwise base-2 logarithm with an attached domain constraint x > 0."""
    if isinstance(inp, ooarray) and any(isinstance(item, oofun)
                                        for item in atleast_1d(inp)):
        return ooarray([log2(item) for item in inp])
    if hasStochastic and isinstance(inp, distribution.stochasticDistribution):
        return distribution.stochasticDistribution(
            log2(inp.values), inp.probabilities.copy())._update(inp)
    if not isinstance(inp, oofun):
        return np.log2(inp)
    # d/dx log2(x) = 1/(x * ln 2) = INV_LOG_2 / x
    r = oofun(st_log2, inp, d=lambda x: Diag(INV_LOG_2 / x), vectorized=True,
              _interval_=log_interval(np.log2, inp))
    # Keep the argument away from (near-)zero via an attached constraint.
    r.attach((inp > 1e-300)('log2_domain_zero_bound_%d' % r._id, tol=-1e-7))
    return r
def sin(inp):
    """Elementwise sine for ooarray / stochastic / oofun / plain inputs."""
    if isinstance(inp, ooarray) and any(isinstance(item, oofun)
                                        for item in atleast_1d(inp)):
        return ooarray([sin(item) for item in inp])
    if hasStochastic and isinstance(inp, distribution.stochasticDistribution):
        return distribution.stochasticDistribution(
            sin(inp.values), inp.probabilities.copy())._update(inp)
    if not isinstance(inp, oofun):
        return np.sin(inp)
    # d/dx sin(x) = cos(x); interval analysis uses the shared trigonometry
    # critical-points helper.
    return oofun(st_sin, inp, d=lambda x: Diag(np.cos(x)), vectorized=True,
                 criticalPoints=TrigonometryCriticalPoints)
def sign(inp):
    """Elementwise sign function for FD objects and plain data."""
    if isinstance(inp, ooarray) and any(isinstance(item, oofun)
                                        for item in atleast_1d(inp)):
        return ooarray([sign(item) for item in inp])
    if hasStochastic and isinstance(inp, distribution.stochasticDistribution):
        return distribution.stochasticDistribution(
            sign(inp.values), inp.probabilities.copy())._update(inp)
    if not isinstance(inp, oofun):
        return np.sign(inp)
    # Derivative is zero almost everywhere; the jump at 0 is ignored here.
    r = oofun(st_sign, inp, vectorized=True, d=lambda x: 0.0)
    r.criticalPoints = False
    return r
def arccosh(inp):
    """Elementwise inverse hyperbolic cosine (domain x >= 1)."""
    if isinstance(inp, ooarray) and any(isinstance(item, oofun)
                                        for item in atleast_1d(inp)):
        return ooarray([arccosh(item) for item in inp])
    if hasStochastic and isinstance(inp, distribution.stochasticDistribution):
        return distribution.stochasticDistribution(
            arccosh(inp.values), inp.probabilities.copy())._update(inp)
    if not isinstance(inp, oofun):
        return np.arccosh(inp)
    # d/dx arccosh(x) = 1 / sqrt(x^2 - 1)
    r = oofun(st_arccosh, inp, d=lambda x: Diag(1.0 / np.sqrt(x**2 - 1.0)),
              vectorized=True)
    # F0 = f at the domain edge, shift = domain lower bound (x >= 1).
    F0, shift = 0.0, 1.0
    r._interval_ = lambda domain, dtype: nonnegative_interval(
        inp, np.arccosh, domain, dtype, F0, shift)
    return r
def arctanh(inp):
    """Elementwise inverse hyperbolic tangent (domain -1 < x < 1)."""
    if isinstance(inp, ooarray) and any(isinstance(item, oofun)
                                        for item in atleast_1d(inp)):
        return ooarray([arctanh(item) for item in inp])
    if hasStochastic and isinstance(inp, distribution.stochasticDistribution):
        return distribution.stochasticDistribution(
            arctanh(inp.values), inp.probabilities.copy())._update(inp)
    if not isinstance(inp, oofun):
        return np.arctanh(inp)
    # d/dx arctanh(x) = 1 / (1 - x^2); strictly monotone on (-1, 1).
    r = oofun(st_arctanh, inp, d=lambda x: Diag(1.0 / (1.0 - x**2)),
              vectorized=True, criticalPoints=False)
    r.getDefiniteRange = get_box1_DefiniteRange
    # Range is unbounded: f -> -inf/+inf at the domain edges.
    r._interval_ = lambda domain, dtype: box_1_interval(
        inp, np.arctanh, domain, dtype, -np.inf, np.inf)
    return r
def tan(inp):
    """Elementwise tangent for ooarray / stochastic / oofun / plain inputs.

    Interval analysis for tan is not implemented; attempting it raises
    FuncDesignerException.
    """
    if isinstance(inp, ooarray) and any([isinstance(elem, oofun) for elem in atleast_1d(inp)]):
        return ooarray([tan(elem) for elem in inp])
    if hasStochastic and isinstance(inp, distribution.stochasticDistribution):
        return distribution.stochasticDistribution(tan(inp.values), inp.probabilities.copy())._update(inp)
    if not isinstance(inp, oofun):
        return np.tan(inp)
    # TODO: move it outside of tan definition
    def interval(*args):
        # BUG FIX: the original did `raise 'interval for tan is unimplemented yet'`.
        # Raising a plain string is a TypeError in Python 3 (string exceptions
        # were removed), so the intended message never surfaced. Raise the
        # project's exception type instead, as sibling functions here do.
        raise FuncDesignerException('interval for tan is unimplemented yet')
    # d/dx tan(x) = 1 / cos(x)^2
    r = oofun(st_tan, inp, d=lambda x: Diag(1.0 / np.cos(x) ** 2),
              vectorized=True, interval=interval)
    return r
def arccos(inp):
    """Elementwise inverse cosine with attached domain constraints -1 <= x <= 1."""
    if isinstance(inp, ooarray) and any(isinstance(item, oofun)
                                        for item in atleast_1d(inp)):
        return ooarray([arccos(item) for item in inp])
    if hasStochastic and isinstance(inp, distribution.stochasticDistribution):
        return distribution.stochasticDistribution(
            arccos(inp.values), inp.probabilities.copy())._update(inp)
    if not isinstance(inp, oofun):
        return np.arccos(inp)
    # d/dx arccos(x) = -1 / sqrt(1 - x^2)
    r = oofun(st_arccos, inp, d=lambda x: Diag(-1.0 / np.sqrt(1.0 - x**2)),
              vectorized=True)
    r.getDefiniteRange = get_box1_DefiniteRange
    # Function values at the domain edges: arccos(-1) = pi, arccos(1) = 0.
    F_l, F_u = np.arccos((-1, 1))
    r._interval_ = lambda domain, dtype: box_1_interval(
        inp, np.arccos, domain, dtype, F_l, F_u)
    r.attach((inp > -1)('arccos_domain_lower_bound_%d' % r._id, tol=-1e-7),
             (inp < 1)('arccos_domain_upper_bound_%d' % r._id, tol=-1e-7))
    return r
def sqrt(inp, attachConstraints = True):
    """Elementwise square root.

    When ``attachConstraints`` is true (the default), a domain constraint
    x > 0 is attached to the resulting oofun.
    """
    if isinstance(inp, ooarray) and any(isinstance(item, oofun)
                                        for item in atleast_1d(inp)):
        return ooarray([sqrt(item) for item in inp])
    if hasStochastic and isinstance(inp, distribution.stochasticDistribution):
        return distribution.stochasticDistribution(
            sqrt(inp.values), inp.probabilities.copy())._update(inp)
    if not isinstance(inp, oofun):
        return np.sqrt(inp)
    # d/dx sqrt(x) = 0.5 / sqrt(x)
    r = oofun(st_sqrt, inp, d=lambda x: Diag(0.5 / np.sqrt(x)), vectorized=True)
    # F0: function value at the domain edge x = 0.
    F0 = 0.0
    r._interval_ = lambda domain, dtype: nonnegative_interval(
        inp, np.sqrt, domain, dtype, F0)
    if attachConstraints:
        r.attach((inp > 0)('sqrt_domain_zero_bound_%d' % r._id, tol=-1e-7))
    return r
def hstack(tup):
    """Horizontal stack overload that is aware of oofun/ooarray members.

    Dispatch (in order):
      * any ooarray member  -> ooarray wrapping numpy.hstack of the tuple;
      * no oofun/ooarray at all -> plain numpy.hstack;
      * otherwise -> an oofun whose value is numpy.hstack of the evaluated
        members and whose Jacobian (_D) vertically stacks the members'
        Jacobians, padding non-dependent members with zero blocks.
    """
    # overload for oofun[ind]
    # c[i] marks which members of tup are symbolic (oofun/ooarray).
    c = [isinstance(t, (oofun, ooarray)) for t in tup]
    if any([isinstance(t, ooarray) for t in tup]):
        return ooarray(np.hstack(tup))
    if not any(c):
        return np.hstack(tup)
    #an_oofun_ind = np.where(c)[0][0]
    f = lambda *x: np.hstack(x)
    # (historical commented-out derivative drafts removed from active code:
    #  a per-member d(*x) builder and an index-extraction f/d pair using
    #  SparseMatrixConstructor / zeros_like — superseded by _D below)
    def getOrder(*args, **kwargs):
        # Order of the stack is the max order over members; 0 covers constants.
        orders = [0]+[inp.getOrder(*args, **kwargs) for inp in tup]
        return np.max(orders)
    r = oofun(f, tup, getOrder = getOrder)
    #!!!!!!!!!!!!!!!!! TODO: sparse
    def _D(*args, **kwargs):
        # TODO: rework it, especially if sizes are fixed and known
        # TODO: get rid of fixedVarsScheduleID
        # sizes[i]: output length contributed by member i (evaluate oofuns on
        # the point args[0]; take np.asarray(...).size for constants).
        sizes = [(t(args[0], fixedVarsScheduleID = kwargs.get('fixedVarsScheduleID', -1)) if c[i] else np.asarray(t)).size for i, t in enumerate(tup)]
        # tmp[i]: member i's derivative dict {var: jacobian}, or None for constants.
        tmp = [elem._D(*args, **kwargs) if c[i] else None for i, elem in enumerate(tup)]
        res = {}
        # For each variable the stacked oofun depends on, build its Jacobian
        # by stacking per-member blocks vertically.
        for v in r._getDep():
            Temp = []
            for i, t in enumerate(tup):
                if c[i]:
                    temp = tmp[i].get(v, None)
                    if temp is not None:
                        # Materialize diagonal-typed derivatives per the
                        # caller's useSparse preference.
                        Temp.append(temp if type(temp) != DiagonalType else temp.resolve(kwargs['useSparse']))
                    else:
                        # Member doesn't depend on v: zero block of shape
                        # (member output size, var size); sparse Zeros for
                        # large blocks (> 1000 entries heuristic).
                        # T = next(iter(tmp[i].values()))
                        # sz = T.shape[0] if type(T) == DiagonalType else np.atleast_1d(T).shape[0]
                        Temp.append((Zeros if sizes[i] * np.asarray(args[0][v]).size > 1000 else np.zeros)((sizes[i], np.asarray(args[0][v]).size)))
                else:
                    # Constant member: zero block; sparse when the member is
                    # long (> 100 heuristic).
                    sz = np.atleast_1d(t).shape[0]
                    Temp.append(Zeros((sz, 1)) if sz > 100 else np.zeros(sz))
            rr = Vstack([elem for elem in Temp])
            # Densify when the sparse result is not actually sparse enough
            # (fewer than ~30% structural zeros).
            res[v] = rr if not isspmatrix(rr) or 0.3 * prod(rr.shape) > rr.size else rr.toarray()
        return res
    r._D = _D
    return r