Example #1
def maximize(params,npts,bounds):

  from mystic.math.measures import split_param
  from mystic.math.discrete import product_measure
  from mystic.math import almostEqual
  from numpy import inf
  atol = 1e-18 # default is 1e-18
  rtol = 1e-7  # default is 1e-7
  target,error = params
  lb,ub = bounds

  # split lower & upper bounds into weight-only & sample-only
  w_lb, x_lb = split_param(lb, npts)
  w_ub, x_ub = split_param(ub, npts)

  # NOTE: rv, lb, ub are of the form:
  #    rv = [wxi]*nx + [xi]*nx + [wyi]*ny + [yi]*ny + [wzi]*nz + [zi]*nz

  # generate primary constraints function
  from mystic import suppressed
  @suppressed(1e-2)
  def constraints(rv):
    c = product_measure()
    c.load(rv, npts)
    # NOTE: bounds wi in [0,1] enforced by filtering
    # impose norm on each discrete measure
    for measure in c:
      if not almostEqual(float(measure.mass), 1.0, tol=atol, rel=rtol):
        measure.normalize()
    # impose expectation on product measure
    ##################### begin function-specific #####################
    E = float(c.expect(model))
    if not (E <= float(target[0] + error[0])) \
    or not (float(target[0] - error[0]) <= E):
      # if debug: print(c)
      c.set_expect((target[0],error[0]), model, (x_lb,x_ub))
    ###################### end function-specific ######################
    # extract weights and positions
    return c.flatten()

  # generate maximizing function
  def cost(rv):
    c = product_measure()
    c.load(rv, npts)
    E = float(c.expect(model))
    if E > (target[0] + error[0]) or E < (target[0] - error[0]):
      if debug: print("skipping expect: %s" % E)
      return inf  #XXX: FORCE TO SATISFY E CONSTRAINTS
    return MINMAX * c.pof(model)

  # maximize
  solved, func_max, func_evals = optimize(cost,(lb,ub),constraints)

  if MINMAX == 1:
    print("func_minimum: %s" % func_max)  # inf
  else:
    print("func_maximum: %s" % func_max)  # sup
  print("func_evals: %s" % func_evals)

  return solved, func_max
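
Each variant references `model`, `debug`, `MINMAX`, and `optimize`, which are expected to be defined at module level in the surrounding example script rather than inside `maximize`. A minimal sketch of that surrounding context, assuming mystic's `diffev2` solver as the backend for `optimize` and using placeholder values throughout (none of these specifics come from the examples themselves):

from mystic.solvers import diffev2

debug = False
MINMAX = -1  # -1 maximizes c.pof(model) (the sup case); 1 minimizes it (the inf case)

def model(x):
    # placeholder response function of the concatenated (x, y, z) coordinates
    return sum(xi * xi for xi in x)

def optimize(cost, _bounds, _constraints):
    # hypothetical driver: minimize 'cost' subject to '_constraints' within '_bounds'
    lb, ub = _bounds
    results = diffev2(cost, x0=list(zip(lb, ub)), bounds=list(zip(lb, ub)),
                      constraints=_constraints, npop=40, maxiter=1000,
                      ftol=1e-8, full_output=True, disp=False)
    # with full_output=True, diffev2 returns (xopt, fopt, iterations, evaluations, warnflag)
    return results[0], results[1], results[3]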
Example #2
def maximize(params, npts, bounds):

    from mystic.math.measures import split_param
    from mystic.math.discrete import product_measure
    from mystic.math import almostEqual
    from numpy import inf
    atol = 1e-18  # default is 1e-18
    rtol = 1e-7  # default is 1e-7
    target, error = params
    lb, ub = bounds

    # split lower & upper bounds into weight-only & sample-only
    w_lb, x_lb = split_param(lb, npts)
    w_ub, x_ub = split_param(ub, npts)

    # NOTE: rv, lb, ub are of the form:
    #    rv = [wxi]*nx + [xi]*nx + [wyi]*ny + [yi]*ny + [wzi]*nz + [zi]*nz

    # generate primary constraints function
    from mystic import suppressed

    @suppressed(1e-2)
    def constraints(rv):
        c = product_measure().load(rv, npts)
        # NOTE: bounds wi in [0,1] enforced by filtering
        # impose norm on each discrete measure
        for measure in c:
            if not almostEqual(float(measure.mass), 1.0, tol=atol, rel=rtol):
                measure.normalize()
        # impose expectation on product measure
        ##################### begin function-specific #####################
        E = float(c.expect(model))
        if not (E <= float(target[0] + error[0])) \
        or not (float(target[0] - error[0]) <= E):
            # if debug: print(c)
            c.set_expect(target[0], model, (x_lb, x_ub), tol=error[0])
        ###################### end function-specific ######################
        # extract weights and positions
        return c.flatten()

    # generate maximizing function
    def cost(rv):
        c = product_measure().load(rv, npts)
        E = float(c.expect(model))
        if E > (target[0] + error[0]) or E < (target[0] - error[0]):
            if debug: print("skipping expect: %s" % E)
            return inf  #XXX: FORCE TO SATISFY E CONSTRAINTS
        return MINMAX * c.pof(model)

    # maximize
    solved, func_max, func_evals = optimize(cost, (lb, ub), constraints)

    if MINMAX == 1:
        print("func_minimum: %s" % func_max)  # inf
    else:
        print("func_maximum: %s" % func_max)  # sup
    print("func_evals: %s" % func_evals)

    return solved, func_max
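
The `rv` layout noted in the comments (all weights for an axis, then all positions for that axis, repeated per axis) can be checked directly with `product_measure`. A small sketch with placeholder values, assuming two support points on the x axis and one each on y and z:

from mystic.math.discrete import product_measure

npts = (2, 1, 1)                      # nx, ny, nz support points (placeholders)
rv = [0.5, 0.5, 1.0, 3.0,             # x axis: [wx1, wx2] + [x1, x2]
      1.0, 2.0,                       # y axis: [wy1] + [y1]
      1.0, 5.0]                       # z axis: [wz1] + [z1]

c = product_measure().load(rv, npts)
print([m.mass for m in c])            # each discrete measure carries mass 1.0 here
print(c.flatten())                    # recovers the same flat weight/position layout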
Example #3
def maximize(params, npts, bounds):

    from mystic.math.measures import split_param
    from mystic.math.discrete import product_measure
    from mystic.math import almostEqual
    from numpy import inf
    atol = 1e-18  # default is 1e-18
    rtol = 1e-7  # default is 1e-7
    target, error = params
    lb, ub = bounds

    # split lower & upper bounds into weight-only & sample-only
    w_lb, x_lb = split_param(lb, npts)
    w_ub, x_ub = split_param(ub, npts)

    # NOTE: rv, lb, ub are of the form:
    #    rv = [wxi]*nx + [xi]*nx + [wyi]*ny + [yi]*ny + [wzi]*nz + [zi]*nz

    # generate secondary constraints function
    def more_constraints(c):  #XXX: move within 'def constraints'?
        ##################### begin function-specific #####################
        #   E = float(c[0].var)  # var(h)
        #   if not (E <= float(target[1] + error[1])) \
        #   or not (float(target[1] - error[1]) <= E):
        #     c[0].var = target[1]

        E = float(c[0].mean)  # mean(h)
        if not (E <= float(target[2] + error[2])) \
        or not (float(target[2] - error[2]) <= E):
            c[0].mean = target[2]

        E = float(c[2].mean)  # mean(v)
        if not (E <= float(target[3] + error[3])) \
        or not (float(target[3] - error[3]) <= E):
            c[2].mean = target[3]
        ###################### end function-specific ######################
        return c

    # generate primary constraints function
    def constraints(rv):
        c = product_measure().load(rv, npts)
        # NOTE: bounds wi in [0,1] enforced by filtering
        # impose norm on each discrete measure
        for measure in c:
            if not almostEqual(float(measure.mass), 1.0, tol=atol, rel=rtol):
                measure.normalize()
        # impose expectation on product measure
        ##################### begin function-specific #####################
        E = float(c.expect(model))
        if not (E <= float(target[0] + error[0])) \
        or not (float(target[0] - error[0]) <= E):
            c.set_expect(target[0],
                         model, (x_lb, x_ub),
                         more_constraints,
                         tol=error[0])

        # c = more_constraints(c) #XXX: impose constraints again (necessary ?)
        ###################### end function-specific ######################
        # extract weights and positions
        return c.flatten()

    # generate maximizing function
    def cost(rv):
        c = product_measure().load(rv, npts)
        #XXX: apply 'filters' to catch errors in constraints solver (necessary ?)
        ##################### begin function-specific #####################
        E = float(c.expect(model))
        if E > (target[0] + error[0]) or E < (target[0] - error[0]):
            if debug: print("skipping expect: %s" % E)
            return inf  #XXX: FORCE TO SATISFY E CONSTRAINTS

        # E = float(c[0].var)  # var(h)
        # if E > (target[1] + error[1]) or E < (target[1] - error[1]):
        #     if debug: print("skipping variance: %s" % E)
        #     return inf  #XXX: FORCE TO SATISFY E CONSTRAINTS

        E = float(c[0].mean)  # mean(h)
        if E > (target[2] + error[2]) or E < (target[2] - error[2]):
            if debug: print("skipping expect: %s" % E)
            return inf  #XXX: FORCE TO SATISFY E CONSTRAINTS

        E = float(c[2].mean)  # mean(v)
        if E > (target[3] + error[3]) or E < (target[3] - error[3]):
            if debug: print("skipping expect: %s" % E)
            return inf  #XXX: FORCE TO SATISFY E CONSTRAINTS
        ###################### end function-specific ######################
        return MINMAX * c.pof(model)

    # maximize
    solved, func_max, func_evals = optimize(cost, (lb, ub), constraints)

    if MINMAX == 1:
        print("func_minimum: %s" % func_max)  # inf
    else:
        print("func_maximum: %s" % func_max)  # sup
    print("func_evals: %s" % func_evals)

    return solved, func_max
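
A hypothetical call to this third variant, assuming the module-level `model`, `debug`, `MINMAX`, and `optimize` sketched earlier. The bounds, point counts, and targets below are placeholders, and `target[1]`/`error[1]` (the variance slot) stay unused because that constraint is commented out above:

nx, ny, nz = 2, 1, 1
npts = (nx, ny, nz)

# bounds follow the rv layout: weights in [0,1], positions in placeholder ranges
lb = [0.0]*nx + [0.0]*nx + [0.0]*ny + [0.0]*ny + [0.0]*nz + [0.0]*nz
ub = [1.0]*nx + [10.0]*nx + [1.0]*ny + [10.0]*ny + [1.0]*nz + [10.0]*nz

target = [5.0, None, 3.0, 2.0]   # [E[model], var(h) (unused), mean(h), mean(v)]
error  = [1.0, None, 0.5, 0.5]   # allowed deviations from each target

solved, func_max = maximize((target, error), npts, (lb, ub))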