Example #1
0
File: util.py Project: wyolum/mmM
def optimize__test__():
    assert numpy.abs(optimize([1,-2, 1], -4) - 1) < 1e-4
    assert numpy.abs(optimize([0, 0, 1], -4) - 0) < 1e-4
    poly = [-1, 1, -1]
    dydx = poly_der(poly)
    x = optimize(poly,  2.2)
    assert abs(poly_eval(dydx, x)) < 1e-4
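The assertions above imply that optimize(poly, x0) returns a stationary point of the polynomial (a root of its derivative, as the dydx check shows). A minimal sketch consistent with those tests, assuming poly_der and poly_eval come from the same util.py; the project's actual implementation may differ:

def optimize(poly, x0, tol=1e-8, max_iter=100):
    # Newton iteration on the derivative: find x where d/dx poly(x) = 0.
    dydx = poly_der(poly)
    d2ydx2 = poly_der(dydx)
    x = float(x0)
    for _ in range(max_iter):
        g = poly_eval(dydx, x)
        if abs(g) < tol:
            break
        h = poly_eval(d2ydx2, x)
        if h == 0:
            break
        x -= g / h
    return x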
Example #3
0
    def doStep(w_p):

        n_tot = x.itervalues().next().shape[1]
        idx_minibatch = np.random.randint(0, n_tot, n_batch)
        x_minibatch = {i: x[i][:, idx_minibatch] for i in x}

        def optimize(w, gw, gw_ss, stepsize):
            if do_adagrad:
                for i in gw:
                    gw_ss[i] += gw[i]**2
                    if nsteps[0] > warmup:
                        w[i] += stepsize / np.sqrt(gw_ss[i] + reg) * gw[i]
                    #print (stepsize / np.sqrt(gw_ss[i]+reg)).mean()
            else:
                for i in gw:
                    w[i] += 1e-4 * gw[i]

        # Wake phase: use z ~ q(z|x) to update model_p
        _, z, _ = model_q.gen_xz(w_q, x_minibatch, {}, n_batch)
        _, logpz_q = model_q.logpxz(w_q, x_minibatch, z)
        logpx_p, logpz_p, gw_p, gz_p = model_p.dlogpxz_dwz(w_p, x_minibatch, z)
        _, gw_prior = model_p.dlogpw_dw(w_p)
        gw = {i: gw_p[i] + float(n_batch) / n_tot * gw_prior[i] for i in gw_p}
        optimize(w_p, gw, gw_p_ss, ada_stepsize)

        # Sleep phase: use x ~ p(x|z) to update model_q
        x_p, z_p, _ = model_p.gen_xz(w_p, {}, {}, n_batch)
        _, _, gw_q, _ = model_q.dlogpxz_dwz(w_q, x_p, z_p)
        _, gw_prior = model_q.dlogpw_dw(w_q)
        gw = {i: gw_q[i] + float(n_batch) / n_tot * gw_prior[i] for i in gw_q}
        optimize(w_q, gw, gw_q_ss, ada_stepsize)

        nsteps[0] += 1

        return z.copy(), logpx_p + logpz_p - logpz_q
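The nested optimize above is an AdaGrad-style ascent step: each parameter's effective step size shrinks with its accumulated squared gradient, and updates are skipped during a warm-up period. A self-contained sketch of the same update rule (names mirror the snippet; reg is the usual numerical regularizer):

import numpy as np

def adagrad_step(w, gw, gw_ss, stepsize, reg=1e-8, step_count=1, warmup=0):
    # Accumulate squared gradients, then scale each update by 1/sqrt(sum g^2).
    for i in gw:
        gw_ss[i] += gw[i] ** 2
        if step_count > warmup:
            w[i] += stepsize / np.sqrt(gw_ss[i] + reg) * gw[i]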
Example #5
0
    def doStep(w):

        grad = ndict.cloneZeros(v)
        gw = ndict.cloneZeros(w)

        for l in range(n_batch):
            n_tot = x.itervalues().next().shape[1]
            idx_minibatch = np.random.randint(0, n_tot, n_subbatch)
            x_minibatch = {i: x[i][:, idx_minibatch] for i in x}
            if convertImgs:
                x_minibatch = {i: x_minibatch[i] / 256. for i in x_minibatch}

            # Use z ~ q(z|x) to compute d[LB]/d[gw]
            _, z, _ = model_q.gen_xz(v, x_minibatch, {}, n_subbatch)
            _, logpz_q = model_q.logpxz(v, x_minibatch, z)
            logpx_p, logpz_p, _gw, gz_p = model_p.dlogpxz_dwz(
                w, x_minibatch, z)
            for i in _gw:
                gw[i] += _gw[i]

            # Compute d[LB]/d[gv]  where gv = v (variational params)
            _, _, gv, _ = model_q.dlogpxz_dwz(v, x_minibatch, z)
            weight = np.sum(logpx_p) + np.sum(logpz_p) - np.sum(logpz_q)

            for i in v:
                f = gv[i] * weight
                h = gv[i]
                cv_cov[i] = cv_cov[i] + cv_lr * (f * h - cv_cov[i])
                cv_var[i] = cv_var[i] + cv_lr * (h**2 - cv_var[i])
                grad[i] += f - (cv_cov[i] / (cv_var[i] + 1e-8)) * h

        _, gwprior = model_p.dlogpw_dw(w)
        for i in gw:
            gw[i] += float(n_subbatch * n_batch) / n_tot * gwprior[i]

        def optimize(_w, _gw, gw_ss, stepsize):
            reg = 1e-8
            for i in _gw:
                gw_ss[i] += _gw[i]**2
                if nsteps[0] > warmup:
                    _w[i] += stepsize / np.sqrt(gw_ss[i] + reg) * _gw[i]

        optimize(w, gw, gw_ss, ada_stepsize)
        optimize(v, grad, gv_ss, ada_stepsize)

        nsteps[0] += 1

        if ndict.hasNaN(grad):
            raise Exception()
        if ndict.hasNaN(v):
            raise Exception()

        return z.copy(), logpx_p + logpz_p - logpz_q
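The cv_cov / cv_var bookkeeping above is a control variate for the score-function gradient: with h = grad_v log q and f = weight * h, subtracting a*h with a ≈ Cov(f, h)/Var(h) leaves the expected gradient unchanged (since E[h] = 0) while reducing its variance. A minimal sketch of that update for one parameter array:

import numpy as np

def control_variate_grad(f, h, cv_cov, cv_var, cv_lr, eps=1e-8):
    # Exponential moving estimates of Cov(f, h) and Var(h), as in the loop above.
    cv_cov = cv_cov + cv_lr * (f * h - cv_cov)
    cv_var = cv_var + cv_lr * (h ** 2 - cv_var)
    # Variance-reduced gradient contribution.
    grad = f - (cv_cov / (cv_var + eps)) * h
    return grad, cv_cov, cv_var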
Example #7
0
    def doStep(w_p):

        #def fgrad(_z):
        #    logpx, logpz, gw, gz = model_p.dlogpxz_dwz(w, x, _z)
        #    return logpx + logpz, gz
        n_tot = x.itervalues().next().shape[1]
        idx_minibatch = np.random.randint(0, n_tot, n_batch)
        x_minibatch = {i: x[i][:, idx_minibatch] for i in x}
        if convertImgs:
            x_minibatch = {i: x_minibatch[i] / 256. for i in x_minibatch}

        # step 1A: sample z ~ p(z|x) from model_q
        _, z, _ = model_q.gen_xz(w_q, x_minibatch, {}, n_batch)

        # step 1B: update z using HMC
        def fgrad(_z):
            logpx, logpz, gw, gz = model_p.dlogpxz_dwz(w_p, _z, x_minibatch)
            return logpx + logpz, gz

        if (hmc_steps > 0):
            logpxz, _, _ = hmc_dostep(fgrad, z)

        def optimize(w, gw, gw_ss, stepsize):
            if do_adagrad:
                for i in gw:
                    gw_ss[i] += gw[i]**2
                    if nsteps[0] > warmup:
                        w[i] += stepsize / np.sqrt(gw_ss[i] + reg) * gw[i]
                    #print (stepsize / np.sqrt(gw_ss[i]+reg)).mean()
            else:
                for i in gw:
                    w[i] += 1e-4 * gw[i]

        # step 2: use z to update model_p
        logpx_p, logpz_p, gw_p, gz_p = model_p.dlogpxz_dwz(w_p, x_minibatch, z)
        _, gw_prior = model_p.dlogpw_dw(w_p)
        gw = {i: gw_p[i] + float(n_batch) / n_tot * gw_prior[i] for i in gw_p}
        optimize(w_p, gw, gw_p_ss, ada_stepsize)

        # step 3: use gradients of model_p to update model_q
        _, logpz_q, fd, gw_q = model_q.dfd_dw(w_q, x_minibatch, z, gz_p)
        _, gw_prior = model_q.dlogpw_dw(w_q)
        gw = {i: -gw_q[i] + float(n_batch) / n_tot * gw_prior[i] for i in gw_q}
        optimize(w_q, gw, gw_q_ss, ada_stepsize)

        nsteps[0] += 1

        return z.copy(), logpx_p + logpz_p - logpz_q
Example #8
0
def optimizer(demand):
    # returns optimized order schedule + order_POD schedule
    def optimize_me(orders):
        pass

    # scipy.optimize is a module, not a callable; the intended call is
    # presumably scipy.optimize.minimize (guess and bounds come from elsewhere)
    orders = scipy.optimize.minimize(optimize_me, guess, bounds=bounds)
    pass
Example #9
0
def find_min(phys_params):
    
    Free_en = lambda var_s: Free_energy(var_s, phys_params, coefficients)

    optimize = lambda function, variable: minimize(function, variable, method='CG',
                                                        options={'gtol': 1e-12, 'disp': False, 'maxiter': 50000})
    
    # precalculations
    coefficients = get_prefactors(phys_params)
    
    # simulated annealing
    Energy_no_mem, solution_no_mem = init(phys_params, coefficients)
    
    # extra optimization
    num_of_attempts = 5 
    
    Energy = np.zeros([num_of_attempts])
    Solution = np.zeros([num_of_attempts, len(solution_no_mem)])
    
    # generate a bunch of initial conditions and minimize
    for i in range(num_of_attempts):
        try_min = optimize(Free_en, solution_no_mem + np.random.rand(len(solution_no_mem))/10 )
        Energy[i] = try_min.fun
        Solution[i,:] = try_min.x
    
    i_min = np.argmin(Energy)
    min_energy = Energy[i_min]
    min_sol = Solution[i_min]
    
    return min_energy, min_sol
Example #10
0
        def optimize_smaller(solve_smaller_factor, large_arr, large_Y0,
                             large_img_spatial_static_target):
            ## Terminate recursion if the image is too small.
            if (large_arr.shape[0] // solve_smaller_factor < too_small
                    or large_arr.shape[1] // solve_smaller_factor < too_small):
                return large_Y0

            ## small_arr = downsample( large_arr )
            small_arr = large_arr[::solve_smaller_factor, ::solve_smaller_factor]
            ## small_Y0 = downsample( large_Y0 )
            small_Y0 = large_Y0.reshape(
                large_arr.shape[0], large_arr.shape[1],
                -1)[::solve_smaller_factor, ::solve_smaller_factor].ravel()
            ## small_img_spatial_static_target = downsample( large_img_spatial_static_target )
            small_img_spatial_static_target = None
            if large_img_spatial_static_target is not None:
                small_img_spatial_static_target = large_img_spatial_static_target.reshape(
                    arr.shape[0], arr.shape[1],
                    -1)[::solve_smaller_factor, ::solve_smaller_factor].ravel()

            ## get an improved Y by recursively shrinking
            small_Y1 = optimize_smaller(solve_smaller_factor, small_arr,
                                        small_Y0,
                                        small_img_spatial_static_target)

            ## solve on the downsampled problem
            print '==> Optimizing on a smaller image:', small_arr.shape, 'instead of', large_arr.shape
            reset_saver(small_arr.shape)
            small_Y = optimize(
                small_arr,
                colors,
                small_Y1,
                weights,
                img_spatial_static_target=small_img_spatial_static_target,
                saver=saver)

            ## save the intermediate solution.
            saver(small_Y)

            ## large_Y1 = upsample( small_Y )
            ### 1 Make a copy
            large_Y1 = array(large_Y0).reshape(
                large_arr.shape[0], large_arr.shape[1], -1)
            ### 2 Fill in as much as will fit using numpy.repeat()
            small_Y = small_Y.reshape(small_arr.shape[0], small_arr.shape[1], -1)
            small_Y_upsampled = repeat(
                repeat(small_Y, solve_smaller_factor, 0), solve_smaller_factor, 1)
            large_Y1[:, :] = small_Y_upsampled[:large_Y1.shape[0], :large_Y1.shape[1]]
            # large_Y1[ :small_Y.shape[0]*solve_smaller_factor, :small_Y.shape[1]*solve_smaller_factor ] = repeat( repeat( small_Y, solve_smaller_factor, 0 ), solve_smaller_factor, 1 )
            ### 3 The right and bottom edges may have been missed due to rounding
            # large_Y1[ small_Y.shape[0]*solve_smaller_factor:, : ] = large_Y1[ small_Y.shape[0]*solve_smaller_factor - 1 : small_Y.shape[0]*solve_smaller_factor, : ]
            # large_Y1[ :, small_Y.shape[1]*solve_smaller_factor: ] = large_Y1[ :, small_Y.shape[1]*solve_smaller_factor - 1 : small_Y.shape[1]*solve_smaller_factor ]

            return large_Y1.ravel()
Example #13
0
    def doStep(w):

        n_tot = x.itervalues().next().shape[1]
        idx_minibatch = np.random.randint(0, n_tot, n_batch)
        x_minibatch = {i: x[i][:, idx_minibatch] for i in x}
        if convertImgs:
            x_minibatch = {i: x_minibatch[i] / 256. for i in x_minibatch}

        def optimize(w, gw, gw_ss, stepsize):
            if do_adagrad:
                for i in gw:
                    gw_ss[i] += gw[i]**2
                    if nsteps[0] > warmup:
                        w[i] += stepsize / np.sqrt(gw_ss[i] + reg) * gw[i]
                    #print (stepsize / np.sqrt(gw_ss[i]+reg)).mean()
            else:
                for i in gw:
                    w[i] += 1e-4 * gw[i]

        # Phase 1: use z ~ q(z|x) to update model_p
        _, z, _ = model_q.gen_xz(v, x_minibatch, {}, n_batch)
        _, logpz_q = model_q.logpxz(v, x_minibatch, z)
        logpx_p, logpz_p, gw, _ = model_p.dlogpxz_dwz(w, x_minibatch, z)
        _, gw_prior = model_p.dlogpw_dw(w)
        gw = {i: gw[i] + float(n_batch) / n_tot * gw_prior[i] for i in gw}

        # Phase 2: use x ~ p(x|z) to update model_q
        _, _, gv, _ = model_q.dlogpxz_dwz(v, x_minibatch, z)
        #_, gw_prior = model_q.dlogpw_dw(w_q)
        #gw_q = {i: gw_q[i] + float(n_batch)/n_tot * gw_prior[i] for i in gw_q}
        weight = np.sum(logpx_p) + np.sum(logpz_p) - np.sum(logpz_q) - float(
            n_batch)
        gv = {i: gv[i] * weight for i in gv}

        optimize(w, gw, gw_ss, ada_stepsize)
        optimize(v, gv, gv_ss, ada_stepsize)

        nsteps[0] += 1

        return z.copy(), logpx_p + logpz_p - logpz_q
Example #14
0
def main():
    parser = build_parser()
    options = parser.parse_args()  # parse the arguments once the parser is built
    check_opts(options)  # validate the options

    # load the style image
    style_target = get_img(options.style)
    # decide whether this is a training or a test run
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir != False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, img)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example #16
0
def is_feasible(model):

    constraints = []
    for symbol, value in model.iteritems():
        if isinstance(symbol, Constraint):
            if value:
                constraints.append(symbol)
            else:
                if isinstance(symbol, EQ):
                    raise ValueError(
                        'Something went wrong. The SAT solver should not assign False to EQ constraints. Encountered model {}.'
                        .format(model))
                constraints.append(~symbol)
    if len(constraints) == 0:
        return True

    return optimize(*constraints).status != 2
Example #17
0
def wrapper_function(image):
    bestguess = [0, 0, 0, 0]
    bestEnt = 0
    bestArea = 0
    for i in range(20):
        guess = [0, 0, 0, 0]

        guess[0] = np.random.choice(range(255))
        guess[1] = np.random.choice(range(255))
        guess[2] = np.random.choice(range(256, 511))
        guess[3] = np.random.choice(range(256, 511))
        tempguess, tempEnt, area = optimize(guess, image)
        if (tempEnt > bestEnt):
            init = guess
            bestEnt = tempEnt
            bestguess = tempguess
            bestArea = area
    return bestArea, bestguess
Example #18
0
    def test_optimizer(self):
        """Generate random orthogonal CVs and a random separating surface.

        Accept CVs closer to the generated surface with a normally distributed
        probability, centered on the surface. Confirm that the optimizer is able
        to find this underlying surface as a minimum. All CVs considered are
        significant.
        """
        np.random.seed(2)
        std = 0.3

        for i in range(5):
            n_states = np.random.choice(range(1000, 5000))
            m_colvars = np.random.choice(range(2, 4))

            # Use all CVs as significant for this test
            cvs, is_accepted, _, comparable_surf = _generate_test(n_states,
                                                                  m_colvars,
                                                                  m_colvars,
                                                                  std)

            sol = optimize(cvs, is_accepted, use_jac=True)[1]
            final_jac = obj_func(sol, cvs, is_accepted, True)[1]

            # Check that derivatives are close to 0, i.e. at a minimum
            # The p0 jacobian can be non-zero because it's bounded.
            self.assertFalse(np.any(np.abs(final_jac[1:]) > 1e-3),
                             msg=f"{n_states} states, {m_colvars} colvars failed"
                                 f" to have a 0 jacobian: {final_jac} (first entry okay)")

            # Both the constant offset and vector need to be normalized by the
            # distance of the vector to compare
            comparable_opt = sol[1:] / np.linalg.norm(sol[2:])

            # Compare that all values are close
            self.assertTrue(np.allclose(comparable_surf, comparable_opt,
                                        rtol=0.1, atol=1e-2),
                            msg=f"{n_states} states, {m_colvars} colvars failed, "
                                f"true: {comparable_surf}, actual: {comparable_opt}")
Example #20
0
def NNLS(Y, A):
    # ============================================ #
    # this NNLS from DSP package is different from #
    # that from NMF package.    NMF.NNLS returns 2 #
    # matrices namely A and S which is a complete  #
    # result of the NMF process NNLS from DSP      #
    # packages performs a 1-time NNLS process such #
    # that people can break down large scale of    #
    # matrix factorization into many small pieces  #
    # ============================================ #
    #
    # mathematical model : Y = A * S
    # S <- DSP.NNLS( || Y - A*S || )
    #
    # =============================================== #
    # for a mass data Y, when memory is insufficient, #
    # the following breakdown can solve this issue    #
    # S[k] = DSP.NNLS( Y[k] , A_init , S_init[k] )    #
    #                                                 #
    # if A is to be updated instead,                  #
    # reform the input such that the model shifts to  #
    # A' <- DSP.NNLS( || Y' - S'*A' || )              #
    # =============================================== #
    if Y.shape[0] != A.shape[0] or Y.shape[1] != A.shape[1]:
        print "Error @ DSP.NNLS() : dimensimismatch !!!"
        print "Press any key to stop ..."
        raw_input()
        err
        return -1
    # ===================== #
    # initialize modelOrder #
    # ===================== #
    modelOrder = A.shape[1]
    # scipy.optimize is a module, not a callable; the per-column solver here is
    # presumably scipy.optimize.nnls, collecting one column of S per solve
    S = np.zeros((modelOrder, Y.shape[1]))
    for j in range(0, Y.shape[1]):
        S[:, j] = scipy.optimize.nnls(A, Y[:, j])[0]
    return S
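The comment block describes breaking a large Y into column blocks so each non-negative least-squares solve fits in memory. A hedged sketch of that driver using scipy.optimize.nnls (the block size and function name are illustrative, not part of the original package):

import numpy as np
import scipy.optimize

def nnls_blockwise(Y, A, block=1024):
    # Solve Y ~= A @ S with S >= 0, one column block at a time:
    # S[:, k:k+block] depends only on Y[:, k:k+block].
    S = np.zeros((A.shape[1], Y.shape[1]))
    for k in range(0, Y.shape[1], block):
        for j in range(k, min(k + block, Y.shape[1])):
            S[:, j], _ = scipy.optimize.nnls(A, Y[:, j])
    return S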
Example #21
0
def finish_calibration(calib):
    mask = table_calibration.make_mask(config.bg['boundpts'])

    # Construct the homography from the camera to the XZ plane
    Hi = np.dot(config.bg['Ktable'], config.bg['KK'])
    Hi = np.linalg.inv(Hi)
    Hi = Hi[(0,1,3),:][:,(0,2,3)]

    L_table = []
    L_image = []
    for i, (L, rgb, depth) in enumerate(calib):
        Li = optimize(rgb)
        L_table.append(L)
        L_image.append(Li)
    L_table, L_image = map(np.array, (L_table, L_image))

    # We need to solve for M.

    #  P_image = Hi * M * P_table
    #  L_table * P_table = 0 when P_table is on L.
    #  L_image * P_image = 0 when P_image is on L.

    # So,
    #  L_image = L_table * np.linalg.inv(Hi * M)
    #  L_table = L_image * Hi * M
    
    # At this point, we could solve by constructing an Ax = 0 and
    # computing SVD (also known as the DLT method)

    # But since we know we're only looking for rigid transformations,
    # we can solve for rotation and translation separately.

    # To solve for rotation, lets look at the vanishing points.
    normalize2 = lambda x: x / np.sqrt((x[:2]**2).sum())

    V_table = np.array([normalize2(x[:2]) for x in L_table])
    V_image = np.array([normalize2(x[:2]) for x in np.dot(L_image, Hi)])

    V = np.dot(V_table.T, V_image)
    assert V.shape == (2,2)

    u,s,v = np.linalg.svd(V)
    R = np.dot(v,u.T)
    within_eps = lambda a, b: np.all(np.abs(a-b) < 1e-2)
    assert within_eps(np.dot(R, V_table.T), V_image.T)

    R_ = np.eye(3)
    R_[:2,:2] = R

    # Now we need to solve for the two translation parameters.
    # First lets normalize the lines so that the C's are the same.

    L_i = np.array(map(normalize2, np.dot(L_image, np.dot(Hi, R_))))
    L_t = np.array(map(normalize2, np.dot(L_table, np.eye(3))))

    A = L_i[:,:2]
    b = L_t[:,2] - L_i[:,2]

    x, _, _, _ = np.linalg.lstsq(A, b)

    R_ = R_.T
    R_[:2,2] = -x.T
    R_ = np.linalg.inv(R_)

    L_final = np.array(map(normalize2, np.dot(L_image, np.dot(Hi, R_))))
    L_gold = np.array(map(normalize2, L_table))
    assert within_eps(L_final, L_gold)

    M = np.eye(4)
    M[0,(0,2,3)] = R_[0,:]
    M[2,(0,2,3)] = R_[1,:]

    # If we've made it this far, then we can patch up the config
    print M
    config.bg['Ktable'] = np.dot(np.linalg.inv(M), config.bg['Ktable']).astype('f')
    config.save('data/newest_calibration')

    return M
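The rotation step in finish_calibration is an instance of the orthogonal Procrustes problem: the best-aligning rotation between paired unit direction vectors comes from the SVD of their correlation matrix. A small self-contained sketch of that step (a generic recipe, not the project's exact code path):

import numpy as np

def best_rotation(V_src, V_dst):
    # V_src, V_dst: (n, 2) arrays of paired unit vectors.
    # Minimize ||R @ V_src.T - V_dst.T||_F over orthogonal R via SVD.
    u, s, vt = np.linalg.svd(V_src.T @ V_dst)
    R = (u @ vt).T
    # Note: this may return a reflection (det R = -1); flip the sign of the
    # last singular vector if a proper rotation is required (Kabsch).
    return R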
Example #22
0
# Fraction of vaccinated people who may transfer anyway
frac = 0.001

change_immunity_ratio = []
for repeat in range(1):
    Trace = {i: [] for i in range(z)}

    changeRate = False
    for t in range(Duration):
        print('Time:', t)

        for zone in range(z):
            z_total = opt(zone, z_total, repeat)

        A, score = optimize(z_total, score)
        z_total, Vaccines_Received = vaccinate(A, z_total, Vaccines_Received)
        # z_total, tot = recurrence(z_total, t, frac)

        # for zone in range(z):
        #     Trace[zone].append([z_total[zone][0], np.mean(rP)])

        # if t >= Duration/2 and not changeRate:
        #     changeRate = True
        #     for h in range(how_many_vac):
        #         l = rE[h]
        #         rE[h] = [0.6 for i in range(len(l))]

        # print (t, rP[0][0], rP[1][0])
        print(t, np.mean(rP[0]), np.std(rP[0]))
        print(t, np.mean(rP[1]), np.std(rP[1]))
Example #23
0
def optimize(loss,
             max_vals=[1 for _ in range(1)],
             min_vals=None,
             deviation_tol: float = 1.E-9,
             divide_range: float = 1.01,
             partitions=5,
             parameter_tol: float = float('inf'),
             depth: int = 1,
             coarse: float = 0,
             shrink_strategy: str = "divide",
             partition_strategy: str = "split",
             randomize: bool = False,
             weights=None,
             verbose: bool = True,
             validation_loss=None):
    """
    Implements a coordinate descent algorithm for optimizing the argument vector of the given loss function.
    Arguments:
        loss: The loss function. Could be an expression of the form `lambda p: f(p)` where f takes a list as an argument.
        max_vals: Optional. The maximum value for each parameter to search for. Helps determine the number of parameters.
            Default is a list of ones for one parameter.
        min_vals: Optional. The minimum value for each parameter to search for. If None (default) it becomes a list of
            zeros with the same length as max_vals.
        deviation_tol: Optional. The numerical tolerance of the loss to optimize to. Default is 1.E-9.
        divide_range: Optional. Value greater than 1 with which to divide the range at each iteration. Default is 1.01,
            which guarantees convergence even for difficult-to-optimize functions, but values such as 1.1, 1.2 or 2 may
            also be used for much faster, albeit a little coarser, convergence. If the *shrink_strategy* argument
            is set to "shrinking" instead, the range is scaled proportionally to
            *iteration<sup>divide_range</sup>/log(iteration)* per block coordinate descent.
        partitions: Optional. In how many pieces to break the search space on each iteration. Default is 5.
        parameter_tol: Optional. The numerical tolerance of parameter values to optimize to. **Both** this and
            deviation_tol need to be met. Default is infinity.
        depth: Optional. Declares the number of times to re-perform the optimization given the previous found solution.
            Default is 1, which only runs the optimization once. Larger depth values can help offset coarseness
            introduced by divide_range.
        coarse: Optional. Snaps solution to this precision. If 0 (default) then this behavior is ignored.
        shrink_strategy: Optional. The shrinking strategy towards convergence. If "divide" (default), then
            the search range is divided by the argument *divide_range*, but if "shrinking" then it is
            scaled based on block coordinate descent.
        partition_strategy: Optional. Strategy with which to traverse partitions. If "split" (default), then
            the partition is split to *partitions* parts. If "step", then the *partitions* argument is used as a fixed
            step and however many splits are needed to achieve this are performed. This last strategy helps
            force block coordinate descent traverse a finite set of values, as long as it holds that
            **coarse==partitions**.
        randomize: Optional. If True, then a random parameter is updated each time instead of moving
            through them in a cyclic order. Default is False.
        weights: Optional. An estimation of parameters to start optimization from. The algorithm tries to center
            solution search around these - hence the usefulness of *depth* as an iterative scheme. If None (default),
            the center of the search range (max_vals+min_vals)/2 is used as a starting estimation.
        verbose: Optional. If True (default), optimization outputs its intermediate steps.
    Example:
        >>> import pygrank as pg
        >>> p = pg.optimize(loss=lambda p: (1.5-p[0]+p[0]*p[1])**2+(2.25-p[0]+p[0]*p[1]**2)**2+(2.625-p[0]+p[0]*p[1]**3)**2, max_vals=[4.5, 4.5], min_vals=[-4.5, -4.5])
        >>> # desired optimization point for the Beale function of this example is [3, 0.5]
        >>> print(p)
        [3.000000052836577, 0.5000000141895036]
    """
    if min_vals is None:
        min_vals = [0 for _ in max_vals]
    #if divide_range<=1:
    #    raise Exception("Need to have a divide_range parameter greater than 1 to actually reduce the search area")
    for min_val, max_val in zip(min_vals, max_vals):
        if min_val > max_val:
            raise Exception("Empty parameter range [" + str(min_val) + "," +
                            str(max_val) + "]")
    if str(divide_range) != "shrinking" and divide_range <= 1:
        raise Exception(
            "divide_range should be greater than 1, otherwise the search space never shrinks."
        )
    #weights = [1./dims for i in range(dims)]
    if weights is None:
        weights = [(min_val + max_val) / 2
                   for min_val, max_val in zip(min_vals, max_vals)]
    range_search = [(max_val - min_val) / 2
                    for min_val, max_val in zip(min_vals, max_vals)]
    curr_variable = 0
    iter = 0
    range_deviations = [float('inf')] * len(max_vals)
    #checkpoint_weights = weights
    best_weights = weights
    best_loss = float('inf')
    evals = 0
    while True:
        if randomize:
            curr_variable = int(random() * len(weights))
        if max(range_search) == 0:
            break
        assert max(range_search) != 0, \
            "Something went wrong and took too many iterations for optimizer to run (check for nans)"
        if shrink_strategy == "shrinking":
            range_search[curr_variable] = (
                max_vals[curr_variable] - min_vals[curr_variable]) / (
                    (iter + 1)**divide_range * log(iter + 2))
        elif shrink_strategy == "divide":
            range_search[curr_variable] /= divide_range
        else:
            raise Exception(
                "Invalid shrink strategy: either shrinking or divide expected")
        if range_search[curr_variable] == 0:
            range_deviations[curr_variable] = 0
            curr_variable += 1
            if curr_variable >= len(max_vals):
                curr_variable -= len(max_vals)
            continue
        if partition_strategy == "split":
            candidate_weights = [
                __add(weights,
                      curr_variable,
                      range_search[curr_variable] * (part * 2. /
                                                     (partitions - 1) - 1),
                      max_vals[curr_variable],
                      min_vals[curr_variable],
                      coarse=coarse) for part in range(partitions)
            ]
        elif partition_strategy == "step":
            candidate_weights = [
                __add(weights,
                      curr_variable,
                      part * partitions,
                      max_vals[curr_variable],
                      min_vals[curr_variable],
                      coarse=coarse) for part in range(
                          -int(range_search[curr_variable] / partitions), 1 +
                          int(range_search[curr_variable] / partitions))
            ]
        else:
            raise Exception(
                "Invalid partition strategy: either split or step expected")
        loss_pairs = [(w, loss(w)) for w in candidate_weights if w is not None]
        evals += len(loss_pairs)
        weights, weights_loss = min(loss_pairs, key=lambda pair: pair[1])
        prev_best_loss = best_loss
        if validation_loss is not None:
            weights_loss = validation_loss(weights)
            if weights_loss < best_loss:
                best_loss = weights_loss
                best_weights = weights
        else:
            best_loss = weights_loss
            best_weights = weights
        range_deviations[curr_variable] = abs(prev_best_loss - best_loss)
        if verbose:
            utils.log(
                f"Tuning evaluations {evals} loss {best_loss:.8f} +- {max(range_deviations):.8f}"
            )

        if max(range_deviations) <= deviation_tol and max(
                range_search) <= parameter_tol:
            break
        # move to next var
        iter += 1
        curr_variable += 1
        if curr_variable >= len(max_vals):
            curr_variable -= len(max_vals)
            #if sum(abs(w1-w2) for w1, w2 in zip(weights, checkpoint_weights)) == 0:
            #    break
            #checkpoint_weights = weights
    #print("trained weights in", iter, "iterations", weights, "final loss", loss(weights))
    weights = best_weights
    if verbose:
        utils.log()
    if depth > 1:
        return optimize(loss, max_vals, min_vals, deviation_tol, divide_range,
                        partitions, parameter_tol, depth - 1, coarse,
                        shrink_strategy, partition_strategy, randomize,
                        weights, verbose, validation_loss)
    return weights
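As the docstring notes, combining partition_strategy="step" with coarse == partitions restricts block coordinate descent to a fixed grid of values. A hedged usage sketch of that mode (the loss and grid spacing are illustrative only):

import pygrank as pg

# Search a single parameter over the grid {0, 0.05, ..., 1}; with
# coarse == partitions the candidate points stay snapped to that grid.
p = pg.optimize(loss=lambda p: (p[0] - 0.3) ** 2,
                max_vals=[1], min_vals=[0],
                partitions=0.05, partition_strategy="step", coarse=0.05,
                verbose=False)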
Example #24
0
def main(options):
    if options.seed:
        random.seed(options.seed)

    routes = Routes(options.routeFiles)

    # store which routes are passing each counting location (using route index)
    countData = (parseTurnCounts(options.turnFiles, routes, options.turnAttr)
                 + parseEdgeCounts(options.edgeDataFiles, routes, options.edgeDataAttr))

    # store which counting locations are used by each route (using countData index)
    routeUsage = [set() for r in routes.unique]
    for i, cd in enumerate(countData):
        for routeIndex in cd.routeSet:
            routeUsage[routeIndex].add(i)

    if options.verbose:
        print("Loaded %s routes (%s distinct)" % (len(routes.all), routes.number))
        edgeCount = sumolib.miscutils.Statistics("route edge count", histogram=True)
        detectorCount = sumolib.miscutils.Statistics("route detector count", histogram=True)
        for i, edges in enumerate(routes.unique):
            edgeCount.add(len(edges), i)
            detectorCount.add(len(routeUsage[i]), i)
        print("input %s" % edgeCount)
        print("input %s" % detectorCount)

    # pick a random counting location and select a new route that passes it until
    # all counts are satisfied or no routes can be used anymore
    openRoutes = set(range(0, routes.number))
    openCounts = set(range(0, len(countData)))
    openRoutes = updateOpenRoutes(openRoutes, routeUsage, countData)
    openCounts = updateOpenCounts(openCounts, countData, openRoutes)

    usedRoutes = []
    if options.optimizeInput:
        usedRoutes = [routes.edges2index[e] for e in routes.all]
        resetCounts(usedRoutes, routeUsage, countData)
    else:
        while openCounts:
            cd = countData[random.sample(openCounts, 1)[0]]
            routeIndex = random.sample(cd.routeSet.intersection(openRoutes), 1)[0]
            usedRoutes.append(routeIndex)
            for dataIndex in routeUsage[routeIndex]:
                countData[dataIndex].count -= 1
            openRoutes = updateOpenRoutes(openRoutes, routeUsage, countData)
            openCounts = updateOpenCounts(openCounts, countData, openRoutes)

    hasMismatch = sum([cd.count for cd in countData]) > 0
    if hasMismatch and options.optimize is not None:
        optimize(options, countData, routes, usedRoutes, routeUsage)
        resetCounts(usedRoutes, routeUsage, countData)

    begin, end = parseTimeRange(options.turnFiles + options.edgeDataFiles)
    if usedRoutes:
        with open(options.out, 'w') as outf:
            sumolib.writeXMLHeader(outf, "$Id$", "routes")  # noqa
            period = (end - begin) / len(usedRoutes)
            depart = begin
            for i, routeIndex in enumerate(usedRoutes):
                outf.write('    <vehicle id="%s%s" depart="%s"%s>\n' % (
                    options.prefix, i, depart, options.vehattrs))
                outf.write('        <route edges="%s"/>\n' % ' '.join(routes.unique[routeIndex]))
                outf.write('    </vehicle>\n')
                depart += period
            outf.write('</routes>\n')

    underflow = sumolib.miscutils.Statistics("underflow locations")
    overflow = sumolib.miscutils.Statistics("overflow locations")
    gehStats = sumolib.miscutils.Statistics("GEH")
    numGehOK = 0.0
    hourFraction = (end - begin) / 3600.0
    totalCount = 0
    for cd in countData:
        localCount = cd.origCount - cd.count
        totalCount += localCount
        if cd.count > 0:
            underflow.add(cd.count, cd.edgeTuple)
        elif cd.count < 0:
            overflow.add(cd.count, cd.edgeTuple)
        origHourly = cd.origCount / hourFraction
        localHourly = localCount / hourFraction
        geh = sumolib.miscutils.geh(origHourly, localHourly)
        if geh < options.gehOk:
            numGehOK += 1
        gehStats.add(geh, "[%s] %s %s" % (
            ' '.join(cd.edgeTuple), int(origHourly), int(localHourly)))

    print("Wrote %s routes (%s distinct) achieving total count %s at %s locations. GEH<%s for %.2f%%" % (
        len(usedRoutes), len(set(usedRoutes)), totalCount, len(countData),
        options.gehOk, 100 * numGehOK / len(countData)))

    if options.verbose:
        edgeCount = sumolib.miscutils.Statistics("route edge count", histogram=True)
        detectorCount = sumolib.miscutils.Statistics("route detector count", histogram=True)
        for i, r in enumerate(usedRoutes):
            edgeCount.add(len(routes.unique[r]), i)
            detectorCount.add(len(routeUsage[r]), i)
        print("result %s" % edgeCount)
        print("result %s" % detectorCount)
        print(gehStats)

    if underflow.count() > 0:
        print("Warning: %s (total %s)" % (underflow, sum(underflow.values)))
    if overflow.count() > 0:
        print("Warning: %s (total %s)" % (overflow, sum(overflow.values)))

    if options.mismatchOut:
        with open(options.mismatchOut, 'w') as outf:
            sumolib.writeXMLHeader(outf, "$Id$")  # noqa
            outf.write('<data>\n')
            outf.write('    <interval id="deficit" begin="0" end="3600">\n')
            for cd in countData:
                if len(cd.edgeTuple) == 1:
                    outf.write('        <edge id="%s" measuredCount="%s" deficit="%s"/>\n' % (
                        cd.edgeTuple[0], cd.origCount, cd.count))
                elif len(cd.edgeTuple) == 2:
                    outf.write('        <edgeRelation from="%s" to="%s" measuredCount="%s" deficit="%s"/>\n' % (
                        cd.edgeTuple[0], cd.edgeTuple[1], cd.origCount, cd.count))
                else:
                    print("Warning: output for edge relations with more than 2 edges not supported (%s)" % cd.edgeTuple,
                          file=sys.stderr)
            outf.write('    </interval>\n')
            outf.write('</data>\n')
Example #25
0
def wavefunction(basis, coeffs, x, y, z):
    assert len(basis) == len(coeffs)

    f = zero_like(x)
    for i in range(len(basis)):
        f += coeffs[i] * basis[i](x, y, z)
    return f


def density(basis, coeffs, x, y, z):
    f = wavefunction(basis, coeffs, x, y, z)
    return f * f


params = optimize(numpy.random.uniform(0.5, 5.0, 5))

basis = make_basis(params)
energy, coeffs = solve(*equation(basis))
print("Energy:", list(energy))

L = 30
N = 1001
X = numpy.linspace(-L, L, N)

for k in range(3):
    state = density(basis, coeffs[:, k], X, 0, 0)
    pyplot.plot(X, state, label="E = {0:.5f}".format(energy[k]))

pyplot.grid()
pyplot.legend()
Example #26
0
		rho = estimate_rho(beta)

		# Check rho bound
		if (rho >= 1 or rho <= -1):
			raise ValueError('rho out of bound: %f' % rho)
		# Convergence
		if ((rho - rho_last)**2 <= 10e-10):
			break
		# Max iteration
		if (iteration >= 1000):
			break

	# Done optimizing; return the result
	return (beta, rho)

(beta, rho) = optimize()

#
# Summary
#

print "Solution is:"
print "rho: %f " % rho
print "Sigma^2 using rho: %e" % (sum_of_squares(rho, beta) / (y.size - X.shape[1]))
print "durbin-watson: %f" % (durbin_watson(rho, beta))
error = np.asarray(y_plain - X_plain * beta).ravel()
print "MSR: %f" % (np.sum(error**2) / (y_plain.size - X_plain.shape[1]))


print "\nCompare with:"
plain_beta = grace.ols.theta_vector(y_plain.A.ravel())
Example #27
0
def arclength(x):
    x_spline = scipy.interpolate.CubicSpline(t, x[:, 0])
    y_spline = scipy.interpolate.CubicSpline(t, x[:, 1])
    results = scipy.integrate.quad(
        lambda t: (x_spline(t, 1)**2 + y_spline(t, 1)**2)**0.5, 0, T)
    print(results)
    return results[0]


def optimize(x0):
    result = scipy.optimize.minimize(
        C,
        pack(x0),
        jac=J,
        constraints=[c_vel(alpha), c_endpoints, c_sub],
        method='SLSQP')
    print('Optimized:')
    print(result)
    x = unpack(result.x)
    return x


x0 = np.stack([np.linspace(0, 1, TIMESTEPS), np.ones(TIMESTEPS)], axis=-1)
alpha = N + 1.5 + 1
x = optimize(x0)
rect_arclength = np.sum(np.sum((x[1:] - x[:-1])**2, axis=1)**0.5) / T
print(rect_arclength)
print(arclength(x) / T)
plt.plot(x[:, 0], x[:, 1])
plt.show()
Example #28
0
    def update_W(self):
        """Using the alpha-gamma algorithm.
        """
        def fwd_pass(w, x, y, z, var_Z):
            w = np.copy(w)
            tpts = w.shape[0]
            ntaxa = w.shape[1]
            A = self.A
            A_init = self.A_init

            # normalized forward probabilities
            alpha = np.zeros(w.shape)

            np.seterr(divide="ignore")  # zeros are appropriately handled here
            p = np.concatenate((z[0] + var_Z, [0]))
            p = np.tile(p, ntaxa).reshape(ntaxa, ntaxa)
            w0 = np.tile(np.log(w[0]), ntaxa).reshape(ntaxa, ntaxa)
            np.fill_diagonal(w0, 0)
            p += w0
            p = np.exp(p - logsumexp(p, axis=1, keepdims=True))
            n = y[0].sum()
            y0 = np.tile(y[0], ntaxa).reshape(ntaxa, ntaxa)
            alpha_w1 = np.log(A_init) + multinomial(y0, n, p)
            alpha_w1[:ntaxa - 1] += norm.logpdf(loc=x[0],
                                                scale=np.sqrt(var_Z),
                                                x=z[0])

            np.fill_diagonal(p, 0)
            p /= p.sum(axis=1, keepdims=True)
            alpha_w0 = np.zeros(ntaxa)
            alpha_w0[y[0] > 0] = -np.inf
            alpha_w0[y[0] == 0] = np.log(1 - A_init)[y[0] == 0] + multinomial(
                y0, y0.sum(), p)[y[0] == 0]
            alpha[0] = np.exp(
                alpha_w1 -
                logsumexp(np.vstack([alpha_w0, alpha_w1]).T, axis=1))
            assert np.all(alpha[0] >= 0) and np.all(alpha[0] <= 1)

            for t in range(1, tpts):
                alpha_w1 = np.zeros(ntaxa)
                alpha_w0 = np.zeros(ntaxa)
                at0 = alpha[t - 1]

                p = np.concatenate((z[t] + var_Z, [0]))
                p = np.tile(p, ntaxa).reshape(ntaxa, ntaxa)
                w0 = np.tile(np.log(w[t]), ntaxa).reshape(ntaxa, ntaxa)
                np.fill_diagonal(w0, 0)
                p += w0
                p = np.exp(p - logsumexp(p, axis=1, keepdims=True))
                n = y[t].sum()
                y0 = np.tile(y[t], ntaxa).reshape(ntaxa, ntaxa)
                alpha_w1 = np.log(A[:, 1, 1] * at0 + A[:, 0, 1] *
                                  (1 - at0)) + multinomial(y0, n, p)
                alpha_w1[:ntaxa - 1] += norm.logpdf(loc=x[t],
                                                    scale=np.sqrt(var_Z),
                                                    x=z[t])

                np.fill_diagonal(p, 0)
                p /= p.sum(axis=1, keepdims=True)
                alpha_w0 = np.zeros(ntaxa)
                alpha_w0[y[t] > 0] = -np.inf
                alpha_w0[y[t] == 0] = (
                    np.log(A[:, 1, 0] * at0 + A[:, 0, 0] * (1 - at0))[y[t] == 0]
                    + multinomial(y0, n, p)[y[t] == 0])

                np.set_printoptions(threshold=np.inf)
                assert np.all(
                    np.logical_or(np.isfinite(alpha_w0), np.isfinite(alpha_w1))
                    >= 1), str(p[0]) + "\n" + str(y[t])
                alpha[t] = np.exp(
                    alpha_w1 -
                    logsumexp(np.vstack([alpha_w0, alpha_w1]).T, axis=1))
                assert np.all(alpha[t] >= 0) and np.all(
                    alpha[t] <= 1), alpha[t]

            return alpha

        def bwd_pass(y, alpha):
            # posterior probabilities
            gamma = np.zeros(alpha.shape)
            A = self.A
            tpts = w.shape[0]
            ntaxa = w.shape[1]

            gamma[-1] = alpha[-1]
            np.seterr(divide="ignore")
            for t in range(tpts - 2, -1, -1):
                gt1 = gamma[t + 1]
                gamma[t, alpha[t] == 1] = 1
                log_p_gamma_w0 = np.log(1 - alpha[t]) + np.log(
                    A[:, 0, 1] * gt1 + A[:, 0, 0] * (1 - gt1))
                log_p_gamma_w1 = np.log(alpha[t]) + np.log(
                    A[:, 1, 1] * gt1 + A[:, 1, 0] * (1 - gt1))
                gamma[t] = np.exp(log_p_gamma_w1 - logsumexp(
                    np.vstack([log_p_gamma_w1, log_p_gamma_w0]).T, axis=1))
                assert np.all(
                    np.logical_or(np.isfinite(log_p_gamma_w0),
                                  np.isfinite(log_p_gamma_w1)) >= 1)
                assert np.all(gamma[t] >= 0) and np.all(gamma[t] <= 1)
            np.seterr(divide="warn")

            return gamma

        def pairwise_pass(w, x, y, z, var_Z, alpha, gamma):
            w = np.copy(w)
            # w_{t-1}, w_t pairwise probabilities
            w0_w1 = np.zeros((w.shape[1], w.shape[0], 2, 2))
            A_init = self.A_init
            A = self.A
            p0 = gamma[0]
            ntaxa = w.shape[1]
            tpts = w.shape[0]

            # log of invalid values are dealt with automatically
            # so let's turn off the warnings here. the assertions
            # should catch any unexpected errors
            np.seterr(divide="ignore", invalid="ignore")

            for t in range(1, tpts):
                at0 = alpha[t - 1]
                at1 = alpha[t]
                gt1 = gamma[t]

                p = np.concatenate((z[t] + var_Z, [0]))
                p = np.tile(p, ntaxa).reshape(ntaxa, ntaxa)
                w0 = np.tile(np.log(w[t]), ntaxa).reshape(ntaxa, ntaxa)
                np.fill_diagonal(w0, 0)
                p += w0
                p = np.exp(p - logsumexp(p, axis=1, keepdims=True))
                n = y[t].sum()
                y0 = np.tile(y[t], ntaxa).reshape(ntaxa, ntaxa)
                obs_1 = multinomial(y0, n, p)
                obs_1[:ntaxa - 1] += norm.logpdf(loc=x[t],
                                                 scale=np.sqrt(var_Z),
                                                 x=z[t])

                np.fill_diagonal(p, 0)
                p /= p.sum(axis=1, keepdims=True)
                obs_0 = np.zeros(ntaxa)
                obs_0[y[t] > 0] = -np.inf
                obs_0[y[t] == 0] = multinomial(y0, n, p)[y[t] == 0]

                ids = np.logical_and(y[t] > 0, y[t - 1] > 0)
                if np.sum(ids) > 0:
                    w0_w1[ids, t, 0, 1] = 0
                    w0_w1[ids, t, 0, 0] = 0
                    w0_w1[ids, t, 1, 1] = 1
                    w0_w1[ids, t, 1, 0] = 0

                ids = np.logical_and(y[t] > 0, y[t - 1] == 0)
                if np.sum(ids) > 0:
                    w0_w1[ids, t, 0, 1] = (np.log(1 - at0[ids]) + obs_1[ids]
                                           + np.log(gt1[ids]) + np.log(A[ids, 0, 1])
                                           - np.log(at1[ids]))
                    w0_w1[ids, t, 0, 0] = -np.inf
                    w0_w1[ids, t, 1, 1] = (np.log(at0[ids]) + obs_1[ids]
                                           + np.log(gt1[ids]) + np.log(A[ids, 1, 1])
                                           - np.log(at1[ids]))
                    w0_w1[ids, t, 1, 0] = -np.inf
                    log_denom = logsumexp(w0_w1[ids, t],
                                          axis=(1, 2),
                                          keepdims=True)
                    w0_w1[ids, t] = np.exp(w0_w1[ids, t] - log_denom)
                    assert np.all(
                        np.abs(w0_w1[ids, t].sum(axis=(1, 2)) -
                               1) < 1e-2), w0_w1[ids, t]

                ids = np.logical_and(y[t] == 0, y[t - 1] > 0)
                if np.sum(ids) > 0:
                    w0_w1[ids, t, 0, 1] = -np.inf
                    w0_w1[ids, t, 0, 0] = -np.inf
                    w0_w1[ids, t, 1, 1] = (np.log(at0[ids]) + obs_1[ids]
                                           + np.log(gt1[ids]) + np.log(A[ids, 1, 1])
                                           - np.log(at1[ids]))
                    w0_w1[ids, t, 1, 0] = (np.log(at0[ids]) + obs_0[ids]
                                           + np.log(1 - gt1[ids]) + np.log(A[ids, 1, 0])
                                           - np.log(1 - at1[ids]))
                    w0_w1[np.logical_and(ids, gt1 == 0), t, 1, 1] = -np.inf
                    w0_w1[np.logical_and(ids, gt1 == 1), t, 1, 0] = -np.inf
                    log_denom = logsumexp(w0_w1[ids, t],
                                          axis=(1, 2),
                                          keepdims=True)
                    w0_w1[ids, t] = np.exp(w0_w1[ids, t] - log_denom)
                    assert np.all(
                        np.abs(w0_w1[ids, t].sum(axis=(1, 2)) - 1) < 1e-2
                    ), str(w0_w1[ids, t]) + "\n" + str(log_denom)

                ids = np.logical_and(y[t] == 0, y[t - 1] == 0)
                if np.sum(ids) > 0:
                    w0_w1[ids, t, 0, 1] = (np.log(1 - at0[ids]) + obs_1[ids]
                                           + np.log(gt1[ids]) + np.log(A[ids, 0, 1])
                                           - np.log(at1[ids]))
                    w0_w1[ids, t, 0, 0] = (np.log(1 - at0[ids]) + obs_0[ids]
                                           + np.log(1 - gt1[ids]) + np.log(A[ids, 0, 0])
                                           - np.log(1 - at1[ids]))
                    w0_w1[ids, t, 1, 1] = (np.log(at0[ids]) + obs_1[ids]
                                           + np.log(gt1[ids]) + np.log(A[ids, 1, 1])
                                           - np.log(at1[ids]))
                    w0_w1[ids, t, 1, 0] = (np.log(at0[ids]) + obs_0[ids]
                                           + np.log(1 - gt1[ids]) + np.log(A[ids, 1, 0])
                                           - np.log(1 - at1[ids]))

                    w0_w1[np.logical_and(ids, gt1 == 0), t, 0, 1] = -np.inf
                    w0_w1[np.logical_and(ids, gt1 == 0), t, 1, 1] = -np.inf
                    w0_w1[np.logical_and(ids, gt1 == 1), t, 0, 0] = -np.inf
                    w0_w1[np.logical_and(ids, gt1 == 1), t, 1, 0] = -np.inf

                    log_denom = logsumexp(w0_w1[ids, t],
                                          axis=(1, 2),
                                          keepdims=True)
                    w0_w1[ids, t] = np.exp(w0_w1[ids, t] - log_denom)
                    assert np.all(
                        np.abs(w0_w1[ids, t].sum(axis=(1, 2)) - 1) < 1e-2
                    ), str(w0_w1[ids, t]) + "\n" + str(log_denom)

                assert np.all(w0_w1[:, t, 0, 1] >= 0) and np.all(w0_w1[:, t, 0, 1] <= 1)
                assert np.all(w0_w1[:, t, 1, 1] >= 0) and np.all(w0_w1[:, t, 1, 1] <= 1)

            # reset error messages
            np.seterr(divide="warn", invalid="warn")
            return w0_w1

        def post(w, x, y, z, var_Z):
            w_nxt = np.copy(w)
            w0_w1_nxt = np.zeros((w.shape[1], w.shape[0], 2, 2))
            ntaxa = w.shape[1]
            log_p_zy = 0
            alpha = fwd_pass(w, x, y, z, var_Z)
            gamma = bwd_pass(y, alpha)
            w_nxt = gamma
            w0_w1_nxt = pairwise_pass(w_nxt, x, y, z, var_Z, alpha, gamma)
            return w_nxt, w0_w1_nxt

        def optimize(w, w0_w1, x, y, z, gamma_inv_AA, var_Z, verbose=False):
            prv_w = np.copy(w)
            prv_w0_w1 = np.copy(w0_w1)
            w, w0_w1 = post(w, x, y, z, var_Z)

            while not np.allclose(prv_w, w) and not np.allclose(
                    prv_w0_w1, w0_w1):
                prv_w = np.copy(w)
                prv_w0_w1 = np.copy(w0_w1)
                w, w0_w1 = post(w, x, y, z, var_Z)

            return w, w0_w1

        for i in range(len(self.X)):
            y = self.Y[i]
            x = self.X[i]
            z = self.Z[i]
            w = np.copy(self.W[i])
            w0_w1 = np.copy(self.W0_W1[i])
            gamma_inv_AA = self.gamma_inv_AA[i]
            #var_Z = self.gamma2
            var_Z = np.ones(self.gamma2.shape)

            w, w0_w1 = optimize(w,
                                w0_w1,
                                x,
                                y,
                                z,
                                gamma_inv_AA,
                                var_Z,
                                verbose=False)
            self.W[i] = w
            self.W0_W1[i] = w0_w1
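
The optimize() helper in this example is a plain fixed-point iteration: re-run the forward-backward posterior update until the marginals stop changing. A minimal self-contained sketch of that convergence pattern (update here is a stand-in for the post() call above, not the forward-backward pass itself; the iteration cap guards against non-termination):

import numpy as np

def fixed_point(update, w0, atol=1e-8, max_iter=1000):
    """Iterate w <- update(w) until successive iterates agree."""
    w = np.copy(w0)
    for _ in range(max_iter):
        w_next = update(w)
        if np.allclose(w, w_next, atol=atol):
            return w_next
        w = w_next
    return w

# toy update with fixed point at sqrt(2): a Newton step for x**2 - 2
w = fixed_point(lambda x: 0.5 * (x + 2.0 / x), np.array([1.0]))
assert abs(w[0] - 2 ** 0.5) < 1e-6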
Example #29
0
def fit_sds(freq,
            omM,
            method='mean',
            fc=None,
            n=2,
            gamma=1,
            fc_lim=None,
            n_lim=(0.5, 10),
            gamma_lim=(0.5, 10),
            fc0=10,
            n0=2,
            gamma0=1,
            fall_back=5,
            num_points=None,
            **opt_kw):
    """Fit source displacement spectrum and calculate seismic moment

    :param freq,omM: frequencies, source displacement spectrum (same length)
    :param method: 'mean' - take mean of sds of frequencies below fc,
        'fit', 'robust_fit' - fit source model to obtain M0.
        If one or more of fc, n, gamma are None, M0 and these values are
        simultaneously determined.
        Robust version uses a robust linear model (which downweights outliers).
    :param fc,n,gamma: corner frequency and coefficients for the source model
    :param fc_lim,n_lim,gamma_lim: bounds for corner frequency, n and gamma
        (used for the optimization if the respective variable is set to None)
    :param fc0,n0,gamma0: starting values of fc, n and gamma for the
        optimization (only used if the respective variable is optimized)
    :param fall_back: use robust fit only if number of data points >= fall_back
    :param num_points: determine M0 only if number of data points >= num_points
    :param \*\*opt_kw: all other kwargs are passed to the scipy optimizer,
        e.g. tol -- tolerance for the optimization
    :return: dictionary with M0 and the optimized variables fc, n and gamma
        if applicable. If M0 cannot be determined, the function returns None.
    """
    if method == 'mean':
        if fc is None:
            msg = ("Border frequency fc must be given for "
                   "seismic_moment_method 'mean'")
            raise ValueError(msg)
        M0 = [o for f, o in zip(freq, omM) if f < fc and o is not None]
        if num_points is not None and len(M0) < num_points:
            return
        if len(M0) > 0:
            mean, err = gstat(M0, unbiased=False)
            return {'M0': np.exp(mean), 'fit_error': err}
    elif method in ('fit', 'robust_fit'):
        omM = np.array(omM, dtype=float)
        freq = np.array(freq)[~np.isnan(omM)]
        omM = omM[~np.isnan(omM)]
        if len(freq) == 0 or num_points is not None and len(freq) < num_points:
            return
        if method == 'robust_fit' and len(freq) >= fall_back:
            Model = RLM
        else:
            Model = OLS

        def lstsq(fc, n, gamma, opt=False):
            # Inversion for M0
            model = source_model(freq, 1, fc, n, gamma)
            y = np.log(omM) - np.log(model)
            X = np.ones(len(y))
            res = Model(y, X).fit()
            err = np.mean(res.resid**2)
            if opt:
                return err
            return {'M0': np.exp(res.params[0]), 'fit_error': err**0.5}

        def lstsqab(fc, a, opt=False):
            # Inversion for M0 and b
            model = _source_model_ab(freq, 1, fc, a, 1)
            y = np.log(omM)
            X = np.empty((len(y), 2))
            X[:, 0] = 1
            X[:, 1] = np.log(model)
            res = Model(y, X).fit()
            err = np.mean(res.resid**2)
            if opt:
                return err
            return {
                'M0': np.exp(res.params[0]),
                'b': res.params[1],
                'err': err**0.5
            }

        unknowns = ((fc is None) * ('fc', ) + (n is None) * ('n', ) +
                    (gamma is None) * ('gamma', ))
        if n is None and gamma is None:
            unknowns = (fc is None) * ('fc', ) + ('a', )
        wrapper = {
            'fc': lambda x, opt=False: lstsq(x, n, gamma, opt=opt),
            'n': lambda x, opt=False: lstsq(fc, x, gamma, opt=opt),
            'gamma': lambda x, opt=False: lstsq(fc, n, x, opt=opt),
            'fcn': lambda x, opt=False: lstsq(x[0], x[1], gamma, opt=opt),
            'fcgamma': lambda x, opt=False: lstsq(x[0], n, x[1], opt=opt),
            'a': lambda x, opt=False: lstsqab(fc, x, opt=opt),
            'fca': lambda x, opt=False: lstsqab(x[0], x[1], opt=opt),
        }
        a_lim = None
        if n_lim and gamma_lim:
            a_lim = [n_lim[0] * gamma_lim[0], n_lim[1] * gamma_lim[1]]
        bounds = {
            'fc': fc_lim or (freq[0], freq[-1]),
            'n': n_lim,
            'gamma': gamma_lim,
            'a': a_lim
        }
        start = {'fc': fc0, 'n': n0, 'gamma': gamma0, 'a': gamma0 * n0}

        result = {}
        if len(unknowns) == 0:
            return lstsq(fc, n, gamma)
        elif len(unknowns) == 1 and len(freq) > 1:
            optimize = scipy.optimize.minimize_scalar
            x = unknowns[0]
            lstsq2 = wrapper[x]
            opt = optimize(lstsq2,
                           args=(True, ),
                           bounds=bounds[x],
                           method='bounded',
                           **opt_kw)
            result = {x: opt.x}
            result.update(lstsq2(opt.x))
        elif len(freq) > len(unknowns) >= 2:
            optimize = scipy.optimize.minimize
            lstsq2 = wrapper[''.join(unknowns)]
            bounds = [bounds[u] for u in unknowns]
            x0 = [start[u] for u in unknowns]
            opt = optimize(lstsq2, x0, args=(True, ), bounds=bounds, **opt_kw)
            result = {u: opt.x[i] for i, u in enumerate(unknowns)}
            result.update(lstsq2(opt.x))
            msg = 'Optimization for M0 and %s terminated because of %s'
            log = logging.getLogger('qopen.source')
            log.debug(msg, unknowns, opt.message.lower())
        if 'a' in result:
            a = result.pop('a')
            b = result.pop('b')
            result['gamma'] = 1 / b
            result['n'] = a * b
        return result
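
The 'fit' branch above inverts for M0 in closed form (the intercept of a log-spectrum regression) while searching the remaining unknowns with a bounded optimizer. A self-contained sketch of the same idea for fc alone, using a generic omega-square-type spectral shape (an assumption; the actual source_model() used by fit_sds may differ):

import numpy as np
import scipy.optimize

def source_model_sketch(freq, M0, fc, n=2, gamma=1):
    # assumed Abercrombie/Boatwright-type spectral shape
    return M0 * (1 + (freq / fc) ** (n * gamma)) ** (-1 / gamma)

rng = np.random.default_rng(42)
freq = np.logspace(-1, 2, 50)
omM = source_model_sketch(freq, 3e15, 5.0) * np.exp(0.1 * rng.standard_normal(50))

def misfit(fc):
    # closed-form inversion for log M0 at fixed fc: with an intercept-only
    # design (like lstsq() above with Model=OLS) the estimate is the mean
    logM0 = np.mean(np.log(omM / source_model_sketch(freq, 1, fc)))
    resid = np.log(omM / source_model_sketch(freq, 1, fc)) - logM0
    return np.mean(resid ** 2)

opt = scipy.optimize.minimize_scalar(misfit, bounds=(freq[0], freq[-1]),
                                     method='bounded')
logM0 = np.mean(np.log(omM / source_model_sketch(freq, 1, opt.x)))
print('fc ~ %.2f, M0 ~ %.2e' % (opt.x, np.exp(logM0)))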
Example #30
0

def ground_state(x):
    return 1.0 / math.pow(math.pi, 0.25) * numpy.exp(-x**2 / 2.0)


def target(basis):
    energy, coeffs = solve(*equation(basis))
    return energy[0]


def optimize(basis):
    result = scipy.optimize.minimize(target, basis)
    if not result.success:
        raise RuntimeError("Energy minimization failed")
    return result.x


basis = optimize(range(-5, 6))
energy, coeffs = solve(*equation(basis))
print("Energy:", energy)

x = numpy.linspace(-5, 5, 1001)

for k in range(3):
    state = wavefunction(basis, coeffs[:, k], x)
    pyplot.plot(x, state, label="E = {0:.5f}".format(energy[k]))

pyplot.grid()
pyplot.legend()
pyplot.show()
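
Example #30 minimizes the lowest eigenvalue over the basis positions. The same variational principle in its simplest self-contained form: a Gaussian trial wavefunction exp(-a x^2 / 2) for the harmonic oscillator has <H>(a) = a/4 + 1/(4a) (with hbar = m = omega = 1), minimized at a = 1 with the exact ground-state energy 1/2:

import scipy.optimize

def trial_energy(a):
    # <H> for psi(x) ~ exp(-a x^2 / 2): kinetic a/4 plus potential 1/(4a)
    return a / 4.0 + 1.0 / (4.0 * a)

res = scipy.optimize.minimize_scalar(trial_energy, bounds=(0.1, 10.0),
                                     method='bounded')
assert abs(res.x - 1.0) < 1e-4 and abs(res.fun - 0.5) < 1e-6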
Example #31
0
def make_new_spectrum(locus_index, plot=False):
    filters = get_filters()
    import pickle
    f = open('picklelocus_MACS', 'r')
    m = pickle.Unpickler(f)
    stars = m.load()

    import string
    spectra_complete = load_spectra()
    locus_list = locus()
    comp_list = filter(
        lambda x: (string.find(x.replace('SDSS_', ''), 'SDSS') != -1
                   and string.find(x, 'SDSS_') != -1),
        locus_list.keys())
    print comp_list

    import pylab

    gmr_all = locus_list['GSDSS_RSDSS'][:]
    rmi_all = locus_list['RSDSS_ISDSS'][:]
    umg_all = locus_list['USDSS_GSDSS'][:]
    imz_all = locus_list['ISDSS_ZSDSS'][:]

    #locus_index = 13
    print 'locus_index', locus_index
    gmr = locus_list['GSDSS_RSDSS'][locus_index]
    rmi = locus_list['RSDSS_ISDSS'][locus_index]
    umg = locus_list['USDSS_GSDSS'][locus_index]
    imz = locus_list['ISDSS_ZSDSS'][locus_index]

    print gmr, rmi

    if plot:
        pylab.clf()
        pylab.scatter(gmr_all, rmi_all, color='blue')
        pylab.scatter(gmr, rmi, color='red')
        pylab.show()

    if False:
        closest = closest_pickles(stars, locus_list, locus_index, comp_list)
        closest_index = closest[1][1]
        import pylab
        print 'plotting'
        print spectra_complete[closest_index][0][:, 0]
        print spectra_complete[closest_index][0][:, 1]
        pylab.plot(spectra_complete[closest_index][0][:, 0],
                   spectra_complete[closest_index][0][:, 1])
        pylab.xlim(3000, 11000)
        pylab.show()

    print 'plotted'

    import pickle
    f = open('picklelocus_MACS', 'r')
    m = pickle.Unpickler(f)
    stars = m.load()
    locus_list = locus()

    good = False
    gmr_off = 0
    rmi_off = 0
    trys = 0
    tol = 0.01
    while not good:
        trys += 1
        print gmr, rmi
        dicts = get_sdss_spectra(umg,
                                 imz,
                                 gmr - gmr_off,
                                 rmi - rmi_off,
                                 tol=tol)

        if len(dicts):
            print dicts
            gmr_diffs = []
            rmi_diffs = []
            for dict in dicts:
                spectrum = download_sdss_spectrum(dict, plot=False)
                mags = synth([1.], [[spectrum]], filters, show=False)
                print mags
                gmr_diffs.append(mags['GSDSS'] - mags['RSDSS'] - gmr)
                rmi_diffs.append(mags['RSDSS'] - mags['ISDSS'] - rmi)
                print mags['GSDSS'] - mags['RSDSS'], gmr
                print float(dict['mag_0']) - float(dict['mag_1'])
                print mags['RSDSS'] - mags['ISDSS'], rmi
                print float(dict['mag_1']) - float(dict['mag_2'])

            gmr_diffs.sort()
            rmi_diffs.sort()

            median_gmr = gmr_diffs[int(len(gmr_diffs) / 2)]
            median_rmi = rmi_diffs[int(len(rmi_diffs) / 2)]

            if abs(median_gmr) > tol or abs(median_rmi) > tol:
                gmr_off += median_gmr
                rmi_off += median_rmi
            else:
                good = True

            print gmr_diffs, rmi_diffs
            print median_gmr, median_rmi
            print gmr, rmi
        else:
            tol += 0.01

    print spectrum
    print comp_list

    if plot:
        max = spectrum[:, 1].max()
        pylab.plot(spectrum[:, 0], spectrum[:, 1] / max)
        #pylab.plot(spectra_complete[closest_index][0][:,0],spectra_complete[closest_index][0][:,1])
        pylab.xlim(3000, 11000)
        pylab.show()

    sdssSpec, pickleSpec, pickleName = similar(spectrum)

    info = pickleName + ' pickleName ' + str(gmr) + ' gmr ' + str(rmi) + ' rmi'
    stitchSpec = optimize(sdssSpec,
                          pickleSpec,
                          locus_index,
                          info=info,
                          plot=plot)
    print stitchSpec

    return stitchSpec, pickleSpec, info
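
The while-loop above is an iterative bias correction: query with the current offsets subtracted, then shift the offsets by the median synthetic-minus-target residual until the medians fall within tol. A stripped-down sketch of that loop (measure() is a stand-in for the download-and-synthesize step):

import numpy as np

def calibrate_offset(measure, tol=0.01, max_iter=50):
    """Shift the query by the median residual until it is within tol.

    measure(offset) returns the residuals obtained when querying with
    the given offset subtracted, mirroring the gmr/rmi loop above.
    """
    offset = 0.0
    for _ in range(max_iter):
        med = np.median(measure(offset))
        if abs(med) <= tol:
            break
        offset += med
    return offset

# toy measurement with a constant bias of 0.2 plus small noise
rng = np.random.default_rng(0)
offset = calibrate_offset(lambda off: 0.2 - off + 0.001 * rng.standard_normal(25))
assert abs(offset - 0.2) < 0.01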
Example #32
0
# (beginning of example truncated)
    'odds_book': 234.9,
    'odds_fair': 4166.667
})
selections.append({
    'name': 'Callan Rydz',
    'odds_book': 199.15,
    'odds_fair': 4166.667
})
selections.append({
    'name': 'Derk Telnekes',
    'odds_book': 192,
    'odds_fair': 7142.857
})
selections.append({
    'name': 'Darren Penhall',
    'odds_book': 296.43,
    'odds_fair': 16666.667
})

# ADD YOUR EXISTING BETS HERE
existing_bets = list()
existing_bets.append({
    'name': 'Michael van Gerwen',
    'odds_book': 2.85,
    'stake': 22
})
existing_bets.append({'name': 'Ian White', 'odds_book': 29, 'stake': 50})

# CALL THE FUNCTION WITH GIVEN BANKROLL
optimize(selections=selections, existing_bets=existing_bets, bankroll=2500.00)
Example #33
0
# (beginning of example truncated)
#     #             pair_matches[1] = np.zeros((new_matches_num, 2))
#         # Remove any pair of matched tiles that don't have matches
#         to_remove_keys = []
#         for pair_name, pair_matches in matches.items():
#             if len(pair_matches[0]) == 0:
#                 print("Removing no matches for pair: {} -> {}".format(os.path.basename(pair_name[0]), os.path.basename(pair_name[1])))
#                 to_remove_keys.append(pair_name)
#
#         for k in to_remove_keys:
#             del matches[k]

if __name__ == '__main__':
    #     in_orig_locs_fname = 'data/W05_Sec001_ROI466_mfovs_475_476_orig_locs.pkl'
    #     in_matches_fname = 'data/W05_Sec001_ROI466_mfovs_475_476.pkl'
    #     in_ts_fname = 'data/W05_Sec001_ROI466_mfovs_475_476.json'
    #     out_ts_fname = 'montaged_optimize3_W05_Sec001_ROI466_mfovs_475_476.json'
    in_orig_locs_fname = 'data/W05_Sec001_ROI466_orig_locs.pkl'
    in_matches_fname = 'data/W05_Sec001_ROI466.pkl'
    in_ts_fname = 'data/W05_Sec001_ROI466.json'
    out_ts_fname = 'montaged_optimize3_W05_Sec001_ROI466.json'

    # Read the files
    with open(in_orig_locs_fname, 'rb') as in_f:
        orig_locs = pickle.load(in_f)
    with open(in_matches_fname, 'rb') as in_f:
        matches = pickle.load(in_f)

    fix_matches(orig_locs, matches)
    solution = optimize(orig_locs, matches, pre_translate=True)
    #common.export_rigid_tilespec(in_ts_fname, out_ts_fname, solution)
Example #34
0
# (beginning of example truncated)
    nlc = NonlinearConstraint(constraint, -np.inf, bankroll)
    res = scipy.optimize.differential_evolution(func=f, bounds=bounds, constraints=(nlc))

    runtime = time.time() - start_time
    print(f"\n{datetime.now().replace(microsecond=0)} - Optimization finished. Runtime --- {round(runtime, 3)} seconds ---\n")
    print(f"Objective: {round(res.fun, 5)}")
    print(f"Certainty Equivalent: {round(math.exp(-res.fun), 3)}\n")

    # CONSOLE OUTPUT
    for index_bet, bet in enumerate(bets):
        bet_strings = list()
        for index_sel, sel in enumerate(bet):
            if sel == 1:
                bet_strings.append(selections[index_sel]['name'])

        stake = res.x[index_bet]
        if stake >= 0.50:
            print(f"{(' / ').join(bet_strings)} @{round(book_odds[index_bet], 3)} - € {int(round(stake, 0))}")

selections = list()
selections.append({'name': 'BET 1', 'odds_book': 2.05, 'odds_fair': 1.735})
selections.append({'name': 'BET 2', 'odds_book': 1.95, 'odds_fair': 1.656})
selections.append({'name': 'BET 3', 'odds_book': 1.75, 'odds_fair': 1.725})
selections.append({'name': 'BET 4', 'odds_book': 1.88, 'odds_fair': 1.757})
selections.append({'name': 'BET 5', 'odds_book': 1.99, 'odds_fair': 1.787})
selections.append({'name': 'BET 6', 'odds_book': 2.11, 'odds_fair': 1.794})

optimize(selections=selections, bankroll=2500, max_multiple=2)
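
Both betting fragments appear to maximize expected log-wealth (the printed certainty equivalent is exp of the optimum), i.e. a simultaneous Kelly criterion solved numerically. For a single independent selection the Kelly stake has a closed form; a sketch using the same selection layout, assuming 'odds_fair' encodes the true probability as its reciprocal:

def kelly_fraction(odds_book, odds_fair):
    # single-bet Kelly fraction f* = (p*b - q) / b, with p = 1 / odds_fair
    # (assumed meaning of 'odds_fair'), q = 1 - p, net odds b = odds_book - 1
    p = 1.0 / odds_fair
    b = odds_book - 1.0
    return max(0.0, (p * b - (1.0 - p)) / b)

bankroll = 2500.0
for sel in [{'name': 'BET 1', 'odds_book': 2.05, 'odds_fair': 1.735}]:
    stake = bankroll * kelly_fraction(sel['odds_book'], sel['odds_fair'])
    print("%s - EUR %.2f" % (sel['name'], stake))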


Example #35
0
def run_one(imgpath,
            orderpath,
            colorpath,
            outprefix,
            weightspath=None,
            save_every=None,
            solve_smaller_factor=None,
            too_small=None):
    '''
    Given a path `imgpath` to an image,
    a path `colorpath` to a JSON file containing an array of RGB triplets of layer colors (the 0-th color is the background color),
    a prefix `outprefix` to use for saving files,
    an optional path `weightspath` to a JSON file containing a dictionary of weight values,
    an optional positive number `save_every` which specifies how often to save progress,
    an optional positive integer `solve_smaller_factor` which, if specified,
    will first solve on a smaller image whose dimensions are `1/solve_smaller_factor` the full size image,
    and an optional positive integer `too_small` which, if specified, determines
    the limit of the `solve_smaller_factor` recursion as the minimum image size (width or height),
    runs optimize() on it and saves the output to e.g. `outprefix + "-layer01.png"`.
    '''

    import json, os
    from PIL import Image

    arr = asfarray(Image.open(imgpath).convert('RGB'))
    arr_backup = arr.copy()
    arr = arr / 255.0
    order = asarray(json.load(open(orderpath)))
    # print order
    colors = asfarray(json.load(open(colorpath))['vs'])
    colors_backup = colors.copy()
    # print colors
    colors = colors[order, :] / 255.0
    # print colors*255.0

    assert solve_smaller_factor is None or int(
        solve_smaller_factor) == solve_smaller_factor

    if save_every is None:
        save_every = 100.

    if solve_smaller_factor is None:
        solve_smaller_factor = 2

    if too_small is None:
        too_small = 5

    # arr = arr[:1,:1,:]
    # colors = colors[:3]

    kSaveEverySeconds = save_every
    ## [ number of iterations, time of last save, arr.shape ]
    last_save = [None, None, None]

    def reset_saver(arr_shape):
        last_save[0] = 0
        last_save[1] = clock()
        last_save[2] = arr_shape

    def saver(xk):
        arr_shape = last_save[2]

        last_save[0] += 1
        now = clock()
        ## Save every 10 seconds!
        if now - last_save[1] > kSaveEverySeconds:
            print('Iteration', last_save[0])
            save_results(xk, colors, arr_shape, outprefix)
            ## Get the time again instead of using 'now', because that doesn't take into
            ## account the time to actually save the images, which is a lot for large images.
            last_save[1] = clock()

    Ylen = arr.shape[0] * arr.shape[1] * (len(colors) - 1)

    # Y0 = random.random( Ylen )
    # Y0 = zeros( Ylen ) + 0.0001
    Y0 = .5 * ones(Ylen)
    # Y0 = ones( Ylen )

    static = None
    if weightspath is not None:
        weights = json.load(open(weightspath))
    else:
        weights = {
            'w_polynomial': 375,
            'w_opaque': 1.,
            'w_spatial_dynamic': 100.
        }
        # weights = { 'w_polynomial': 1., 'w_opaque': 100. }
        # weights = { 'w_opaque': 100. }
        # weights = { 'w_spatial_static': 100. }
        # static = 0.75 * ones( Ylen )
        # weights = { 'w_spatial_dynamic': 100. }
        # weights = { 'w_spatial_dynamic': 100., 'w_opaque': 100. }

    num_layers = len(colors) - 1
    ### adjust the weights:
    if 'w_polynomial' in weights:
        # weights['w_polynomial'] *= 50000.0 #### old one is 255*255
        weights['w_polynomial'] /= arr.shape[2]

    if 'w_opaque' in weights:
        weights['w_opaque'] /= num_layers

    if 'w_spatial_static' in weights:
        weights['w_spatial_static'] /= num_layers

    if 'w_spatial_dynamic' in weights:
        weights['w_spatial_dynamic'] /= num_layers

    if solve_smaller_factor != 1:
        assert solve_smaller_factor > 1

        def optimize_smaller(solve_smaller_factor, large_arr, large_Y0,
                             large_img_spatial_static_target):
            ## Terminate recursion if the image is too small.
            if (large_arr.shape[0] // solve_smaller_factor < too_small
                    or large_arr.shape[1] // solve_smaller_factor < too_small):
                return large_Y0

            ## small_arr = downsample( large_arr )
            small_arr = large_arr[::solve_smaller_factor, ::solve_smaller_factor]
            ## small_Y0 = downsample( large_Y0 )
            small_Y0 = large_Y0.reshape(
                large_arr.shape[0], large_arr.shape[1],
                -1)[::solve_smaller_factor, ::solve_smaller_factor].ravel()
            ## small_img_spatial_static_target = downsample( large_img_spatial_static_target )
            small_img_spatial_static_target = None
            if large_img_spatial_static_target is not None:
                small_img_spatial_static_target = large_img_spatial_static_target.reshape(
                    arr.shape[0], arr.shape[1],
                    -1)[::solve_smaller_factor, ::solve_smaller_factor].ravel()

            ## get an improved Y by recursively shrinking
            small_Y1 = optimize_smaller(solve_smaller_factor, small_arr,
                                        small_Y0,
                                        small_img_spatial_static_target)

            ## solve on the downsampled problem
            print('==> Optimizing on a smaller image:', small_arr.shape,
                  'instead of', large_arr.shape)
            reset_saver(small_arr.shape)
            small_Y = optimize(
                small_arr,
                colors,
                small_Y1,
                weights,
                img_spatial_static_target=small_img_spatial_static_target,
                saver=saver)

            ## save the intermediate solution.
            saver(small_Y)

            ## large_Y1 = upsample( small_Y )
            ### 1 Make a copy
            large_Y1 = array(large_Y0).reshape(large_arr.shape[0],
                                               large_arr.shape[1], -1)
            ### 2 Fill in as much as will fit using numpy.repeat()
            small_Y = small_Y.reshape(small_arr.shape[0], small_arr.shape[1],
                                      -1)
            small_Y_upsampled = repeat(
                repeat(small_Y, solve_smaller_factor, 0), solve_smaller_factor, 1)
            large_Y1[:, :] = small_Y_upsampled[:large_Y1.shape[0],
                                               :large_Y1.shape[1]]
            # large_Y1[ :small_Y.shape[0]*solve_smaller_factor, :small_Y.shape[1]*solve_smaller_factor ] = repeat( repeat( small_Y, solve_smaller_factor, 0 ), solve_smaller_factor, 1 )
            ### 3 The right and bottom edges may have been missed due to rounding
            # large_Y1[ small_Y.shape[0]*solve_smaller_factor:, : ] = large_Y1[ small_Y.shape[0]*solve_smaller_factor - 1 : small_Y.shape[0]*solve_smaller_factor, : ]
            # large_Y1[ :, small_Y.shape[1]*solve_smaller_factor: ] = large_Y1[ :, small_Y.shape[1]*solve_smaller_factor - 1 : small_Y.shape[1]*solve_smaller_factor ]

            return large_Y1.ravel()

        Y0 = optimize_smaller(solve_smaller_factor, arr, Y0, static)

    reset_saver(arr.shape)
    Y = optimize(arr,
                 colors,
                 Y0,
                 weights,
                 img_spatial_static_target=static,
                 saver=saver)

    composite_img = save_results(Y, colors, arr.shape, outprefix)
    img_diff = composite_img - arr_backup
    RMSE = sqrt(square(img_diff).sum() /
                (composite_img.shape[0] * composite_img.shape[1]))

    print('img_shape is: ', img_diff.shape)
    print('max dist: ', sqrt(square(img_diff).sum(axis=2)).max())
    print('median dist', median(sqrt(square(img_diff).sum(axis=2))))
    print('RMSE: ', RMSE)

    ##### save alphas as barycentric coordinates
    alphas = 1. - Y.reshape((arr.shape[0] * arr.shape[1], -1))
    extend_alphas = ones((alphas.shape[0], alphas.shape[1] + 1))
    extend_alphas[:, 1:] = alphas
    #### first columns of extend_alphas are all 1.0
    barycentric_weights = covnert_from_alphas_to_barycentricweights(extend_alphas)

    origin_order_barycentric_weights = ones(barycentric_weights.shape)
    #### to make the weights order is same as orignal input vertex order
    origin_order_barycentric_weights[:, order] = barycentric_weights

    # test_weights_diff1=origin_order_barycentric_weights-barycentric_weights
    # test_weights_diff2=barycentric_weights-barycentric_weights
    # print len(test_weights_diff1[test_weights_diff1==0])
    # print len(test_weights_diff2[test_weights_diff2==0])

    ####assert
    temp = sum(origin_order_barycentric_weights.reshape(
        (origin_order_barycentric_weights.shape[0],
         origin_order_barycentric_weights.shape[1], 1)) * colors_backup,
               axis=1)
    diff = temp - arr_backup.reshape((-1, 3))
    # assert(abs(diff).max()<0.5)
    print(abs(diff).max())
    print(diff.shape[0])
    print(sqrt(square(diff).sum() / diff.shape[0]))

    origin_order_barycentric_weights = origin_order_barycentric_weights.reshape(
        (arr.shape[0], arr.shape[1], -1))

    import json
    output_all_weights_filename = outprefix + "-layer_optimization_all_weights.js"
    with open(output_all_weights_filename, 'w') as myfile:
        json.dump({'weights': origin_order_barycentric_weights.tolist()},
                  myfile)

    for i in range(origin_order_barycentric_weights.shape[-1]):
        output_all_weights_map_filename = outprefix + "-layer_optimization_all_weights_map-%02d.png" % i
        weight_map = origin_order_barycentric_weights[:, :, order[i]] * 255
        weight_map = weight_map.round().clip(0, 255).astype(uint8)
        Image.fromarray(weight_map).save(output_all_weights_map_filename)
    return Y
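
optimize_smaller() above is a coarse-to-fine scheme: recursively solve on a downsampled image, then nearest-neighbour-upsample the solution as the warm start for the next finer level. The pattern, stripped of the layer-decomposition details (solve is a stand-in for optimize()):

import numpy as np

def coarse_to_fine(solve, arr, y0, factor=2, too_small=5):
    """Recursively warm-start `solve` from a downsampled problem."""
    h, w = arr.shape[:2]
    if h // factor >= too_small and w // factor >= too_small:
        small = coarse_to_fine(solve, arr[::factor, ::factor],
                               y0[::factor, ::factor], factor, too_small)
        # nearest-neighbour upsample, cropped back onto the fine grid
        y0 = np.repeat(np.repeat(small, factor, 0), factor, 1)[:h, :w]
    return solve(arr, y0)

# toy solve(): one smoothing step toward the image itself
result = coarse_to_fine(lambda a, y: 0.5 * (a + y),
                        np.random.rand(32, 32), np.zeros((32, 32)))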
Example #36
0
def run_one(Y0, arr, json_data, outprefix, color_vertices ,save_every = None, solve_smaller_factor = None, too_small = None):
    #print imgpath
    '''
    Given an initial guess `Y0`, an image array `arr`, a parameter dictionary
    `json_data` (with keys such as "w_fidelity_lsl2", "w_ridge", "w_tvl2" and
    "threshold_opacity"), a prefix `outprefix` to use for saving files, and an
    array `color_vertices` of RGB triplets of layer colors (the 0-th color is
    the background color), runs optimize() on the image and saves the output
    to e.g. `outprefix + "-layer01.png"`.
    `save_every` is an optional positive number specifying how often to save
    progress; `solve_smaller_factor` and `too_small` are accepted but the
    smaller-image recursion of the previous example is not used here.
    '''

    #from PIL import Image

#    with open(param_path) as json_file:
#        json_data = json.load(json_file)


    input_image=json_data["stack_path"]
    order = range(color_vertices.shape[0]) # no order necessary for our purposes
    w_fidelity_lsl2=json_data["w_fidelity_lsl2"]
    w_ridge=json_data["w_ridge"]
    w_tvl2=json_data["w_tvl2"]
    threshold_opacity = json_data["threshold_opacity"]
    weights = {'w_fidelity_lsl2':w_fidelity_lsl2, 'w_ridge':w_ridge, 'w_tvl2':w_tvl2}
    #order=json_data["vertex_order"]
    #colorpath = json_data["color_path"]

    #arr = asfarray(imgpath)
    arr_backup=arr.copy()
    #if is_uint8 ==1:
    #    arr = arr/255.0
    #else:
    #    arr = arr/65535.0


    #colors = asfarray(json.load(open(colorpath))['vs'])
    colors =  asfarray(color_vertices.reshape(color_vertices.shape[0],3))
    colors_backup=colors.copy()
    colors=colors[order,:]/255.0

    assert solve_smaller_factor is None or int( solve_smaller_factor ) == solve_smaller_factor

    if save_every is None:
        save_every = 100.

    if solve_smaller_factor is None:
        solve_smaller_factor = 2

    if too_small is None:
        too_small = 5

    # arr = arr[:1,:1,:]
    # colors = colors[:3]

    kSaveEverySeconds = save_every
    ## [ number of iterations, time of last save, arr.shape ]
    last_save = [ None, None, None ]
    def reset_saver( arr_shape ):
        last_save[0] = 0
        last_save[1] = time.clock()
        last_save[2] = arr_shape
    def saver( xk ):
        arr_shape = last_save[2]

        last_save[0] += 1
        now = time.clock()
        ## Save every 10 seconds!
        if now - last_save[1] > kSaveEverySeconds:
            print 'Iteration', last_save[0]
            save_results( xk, colors, arr, arr_shape, outprefix, order, threshold_opacity ) # MIGHT CAUSE TROUBLE when saving smaller image [arr is input nwo]
            ## Get the time again instead of using 'now', because that doesn't take into
            ## account the time to actually save the images, which is a lot for large images.
            last_save[1] = time.clock()

    Ylen = arr.shape[0]*arr.shape[1]*( len(colors) - 1 )

    # Y0 = random.random( Ylen )
    # Y0 = zeros( Ylen ) + 0.0001
    #Y0 = .5*ones( Ylen )
    # Y0 = ones( Ylen )

    static = None
    # if weightspath is not None:
    #     weights = json.load( open( weightspath ) )
    # else:
    #    weights = { 'w_fidelity_lsl2': 375, 'w_ridge': 1., 'w_tvl2': 100. }
        # weights = { 'w_fidelity_lsl2': 1., 'w_ridge': 100. }
        # weights = { 'w_ridge': 100. }
        # weights = { 'w_spatial_static': 100. }
        # static = 0.75 * ones( Ylen )
        # weights = { 'w_tvl2': 100. }
        # weights = { 'w_tvl2': 100., 'w_ridge': 100. }

    num_layers=len(colors)-1
    ### adjust the weights:
    if 'w_fidelity_lsl2' in weights:
        # weights['w_fidelity_lsl2'] *= 50000.0 #### old one is 255*255
        weights['w_fidelity_lsl2'] /= arr.shape[2]

    if 'w_ridge' in weights:
        weights['w_ridge'] /= num_layers

    if 'w_spatial_static' in weights:
        weights['w_spatial_static'] /= num_layers

    if 'w_tvl2' in weights:
        weights['w_tvl2'] /= num_layers

    reset_saver( arr.shape )
    Y = optimize( arr, colors, Y0, weights, img_spatial_static_target = static, saver = saver )

    composite_img=save_results( Y, colors, arr, arr.shape, outprefix, order, threshold_opacity )
    img_diff=composite_img-arr_backup
    RMSE=sqrt(square(img_diff).sum()/(composite_img.shape[0]*composite_img.shape[1]))

    print 'img_shape is: ', img_diff.shape
    #print 'max dist: ', sqrt(square(img_diff).sum(axis=2)).max()
    #print 'median dist', median(sqrt(square(img_diff).sum(axis=2)))
    #print 'RMSE: ', RMSE
    return Y
Example #37
0
def run_inverse(direct_fn, model, measurements, optimfn='leastsq'):
    available_solvers = ['leastsq', 'fmin', 'fmin_powell', 'fmin_cg',
                         'fmin_bfgs', 'raster', 'fmin_slsqp']
    if optimfn not in available_solvers:
        print("Unknown inverse method solver for 'optimfn': ", optimfn)
        print("Available solvers are: ", available_solvers)
        exit(1)

    untransform  = model._untransform
    lbounds      = model._lbounds
    ubounds      = model._ubounds
    conditions   = model._conditions
    optim_params = {}
    global last_good_res
    last_good_res = None

    global ITERATION # Python 2.7 hack (no support for nonlocal variables)
    ITERATION = 0

    def optimfn_wrapper(optimargs):
        global ITERATION # Python 2.7 hack (no support for nonlocal variables)
        global last_good_res

        print(15 * '*', ' Iteration: {:4d}'.format(ITERATION), ' ', 15 * '*')
        ITERATION += 1

        print ( "True arg", optimargs)
        untransform_dict(optim_names, optimargs, optim_par_length,
                         optim_params, untransform)

        if model.verbosity > 0: print_params(optim_params)

        penalization = penalize(optim_params, lbounds, ubounds,
                                when='out_of_bounds')
        penalization += penalize_cond(optim_params, conditions)
#        if penalization > 0:
#            print ('not ascending par', penalization)

        if penalization > 0.0:
            if penalization < 1e3: penalization += 1e3
            if model.verbosity > 0:
                print('Optimized arguments are out of bounds or not satisfying conditions ... Penalizing by ',
                      penalization)

            #error = measurements.get_penalized(penalization,
            #                scalar=(optimfn not in ['leastsq']))
            if optimfn in ['leastsq']:
                error = np.abs(last_good_res) + penalization/len(last_good_res)
            else:
                error = np.abs(last_good_res) + penalization
            #if model.verbosity > 0:
            #    print('Penalized error is', error)
            return error

        print ( "Used arg", optim_params)
        model.set_parameters(optim_params)

        # Reset any previously stored measurement values (they are also reset
        # inside simulate_direct(), but the user may supply their own direct
        # function, so we need to be sure the measurements were reset)
        measurements.reset_calc_measurements()

        flag = direct_fn(model, measurements)

        if flag:
            # direct computation went O.K.
            if model.verbosity > 0:
                measurements.display_error()

            error = measurements.get_error() # computed - measured

            total_LSQ_error = np.sum(np.power(error, 2))
            print('\nTotal LSQ error:', total_LSQ_error)

            if optimfn in ['leastsq']:
                result = error
            else:
                result = total_LSQ_error

            last_good_res = result

        else:
            # something is wrong, so penalize
            penalization = \
              min(penalize(optim_params, lbounds, ubounds, when='always'),
                  1e10)
            if model.verbosity > 1:
                print('Direct problem did not converge for given optimization '
                      'parameters... Penalizing by ', penalization)

            result = measurements.get_penalized(penalization,
                            scalar=(optimfn not in ['leastsq']))

        return result

    def optimineq_wrapper(optimargs):
        #inequality conditions

        untransform_dict(optim_names, optimargs, optim_par_length,
                         optim_params, untransform)
        ineqs = np.empty((0,), float)
        for (name, values) in optim_params.items():
            if not np.iterable(values):
                values = [values]
            for cond in conditions[name]:
                if cond == '':
                    continue
                if cond.lower() in ['ascending', 'asc']:
                    #parameters must be ascending
                    ineqs = np.append(ineqs, values[1:]-values[:-1])
                else:
                    raise Exception('Condition on parameters not recognized: %s' % cond)
        if len(ineqs) == 0:
            raise Exception('Use the inequality solver only if there are '
                            'inequality conditions! None found.')
        return ineqs

    # Further it is assumed that we are using OrderedDict so that the
    # order of values is stable
    from collections import OrderedDict
    assert type(model.init_values) is OrderedDict, \
        "The type of 'model.init_values' is not OrderedDict."

    optim_names  = model.init_values.keys()   # it's an ordered dict
    optim_par_length = np.ones((len(optim_names), ), dtype=int)

    # Determine lengths of each parameter (in case value is an array)
    for (ind, val) in enumerate(model.init_values.values()):
        if np.iterable(val):
            optim_par_length[ind] = len(val)

    transform = model._transform

    if bool(transform is None) == bool(untransform is None):
        if not transform is None:
            from .saturation_curve import default_transformation

            for name in optim_names:
                # Add missing [un]transform functions
                if bool(name in transform) == bool(name in untransform):
                    if not name in transform:
                        (transform[name], untransform[name]) = \
                          default_transformation(-np.inf, np.inf)
                else:
                    raise Exception("Name '%s' is specified in one of "
                                    "[un]transform but not in the other:\n"
                                    "Transform:   %s\nUntransform: %s"
                                    % (name, transform, untransform))
    else:
        raise Exception("One of transform/untransform is 'None' "
                        "while the other is not.")

    init_values = np.empty((np.sum(optim_par_length), ), dtype=float)
    iv_ind = 0

    for (ind, name) in enumerate(optim_names):
        # Update init_values
        iv_ind_next = iv_ind + optim_par_length[ind]
        if transform and name in transform:
            init_values[iv_ind:iv_ind_next] = \
            transform[name](np.asarray(model.init_values[name]))
        else:
            init_values[iv_ind:iv_ind_next] = np.asarray(model.init_values[name])
        iv_ind = iv_ind_next

    import scipy.optimize

    optimize = getattr(scipy.optimize, optimfn)

    # Initialize output variables that are not present for every solver
    msg = None
    cov = None
    gopt = None
    gcalls = None
    fcalls = None
    infodic = None
    ier = None
    iters = ITERATION

    # Run optimization
    for run in range(1 + (model.transform_params and model.untransformed_cov)):
        # if untransformed covariance is desired but we computed with
        # transformed parameters, we run the computation again, starting
        # in optimal value of previous optimization
        if run == 1:
            print('Running optimization to obtain the covariance of '
                  'untransformed parameters...')
            untransform = None
            init_values = list(flatten([optim_params[name] for name in optim_names]))

        if optimfn == 'leastsq':
            (opt_params, cov, infodic, msg, ier) = \
              optimize(optimfn_wrapper, init_values,
                       epsfcn=model.epsfcn, factor=model.factor,
                       xtol=model.xtol, ftol=model.ftol,
                       full_output=True)
            fcalls = infodic['nfev']
        elif optimfn == 'fmin':
            (opt_params, fopt, iters, fcalls, warnflag) = \
              optimize(optimfn_wrapper, init_values,
                       xtol=model.xtol, ftol=model.ftol, maxfun=model.max_fev,
                       maxiter=model.max_inv_iter, disp=model.disp_inv_conv,
                       full_output=True, retall=False)
        elif optimfn == 'fmin_powell':
            (opt_params, fopt, direc, iters, fcalls, warnflag) = \
              optimize(optimfn_wrapper, init_values,
                       xtol=model.xtol, ftol=model.ftol, maxfun=model.max_fev,
                       maxiter=model.max_inv_iter, disp=model.disp_inv_conv,
                       full_output=True, retall=False)
        elif optimfn == 'fmin_cg':
            (opt_params, fopt, fcalls, gcalls, warnflag) = \
              optimize(optimfn_wrapper, init_values,
                       maxiter=model.max_inv_iter, gtol=model.gtol,
                       disp=model.disp_inv_conv,
                       full_output=True, retall=False)
        elif optimfn == 'fmin_bfgs':
            (opt_params, fopt, gopt, Bopt, fcalls, gcalls, warnflag) = \
              optimize(optimfn_wrapper, init_values,
                       maxiter=model.max_inv_iter, gtol=model.gtol,
                       disp=model.disp_inv_conv,
                       full_output=True, retall=False)
        elif optimfn == 'fmin_slsqp':
            (opt_params, fopt,fcalls, warnflag, msg) = \
                optimize(optimfn_wrapper, init_values,
                         f_eqcons=optimfn_wrapper,
                         f_ieqcons=optimineq_wrapper,
                         iter=model.max_inv_iter or 100,
                         acc=model.xtol, full_output=True,
                         epsilon=model.epsfcn, disp=model.disp_inv_conv
                         )
        print ("Final param", opt_params)
        untransform_dict(optim_names, opt_params, optim_par_length,
                         optim_params, untransform)
        print ("unstransf final param", optim_params)

    # run the direct once more to obtain correct covariance
    print ('rerunning for opt error')
    opt_error = optimfn_wrapper(opt_params)
    print ('finished, error:', opt_error)

    if cov is None:
        print('Warning: singular matrix encountered for covariance '
              '(indicates very flat curvature in some direction).')
    else:
        s_sq = (np.sum(np.power(opt_error, 2))
                / (np.alen(opt_error) - np.alen(opt_params)))
        cov *= s_sq

    # Display inverse solver statistic
    print('\nInverse problem statistics:\n')
    if not msg is None:
        print('\n', msg)
    if not gopt is None:
        print('\nGradient at optimum:\n', gopt, '\n')

    results = [('iters', iters), ('fcalls', fcalls), ('gcalls', gcalls)]
    out = ''
    for (name, value) in results:
        if not value is None:
            out += ' |{:>8}'.format(name)
    out += ' |\n'
    for (name, value) in results:
        if not value is None:
            out += ' |{:8d}'.format(value)
    print(out, '|')
    print("Extra info:", opt_params, cov, infodic, msg, ier)

    return (optim_params, cov)
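
The penalization trick above keeps a leastsq-compatible residual vector: when parameters leave their bounds, it returns the last good residuals inflated by the penalty instead of running the (possibly failing) direct solver. A compact self-contained sketch of that wrapper (all names illustrative):

import numpy as np
import scipy.optimize

def make_penalized_residuals(residual_fn, lbounds, ubounds):
    last_good = {'res': None}

    def wrapped(params):
        # total distance outside the box, 0 when feasible
        viol = (np.maximum(0, lbounds - params).sum()
                + np.maximum(0, params - ubounds).sum())
        if viol > 0 and last_good['res'] is not None:
            # keep the residual vector shape leastsq expects
            return np.abs(last_good['res']) + 1e3 * (1 + viol)
        # clip as a fallback before any good residual exists
        res = residual_fn(np.clip(params, lbounds, ubounds))
        last_good['res'] = res
        return res

    return wrapped

# fit y = a*x + b with a constrained to [0, 2]
x = np.linspace(0, 1, 20)
y = 1.5 * x + 0.3
fn = make_penalized_residuals(lambda p: p[0] * x + p[1] - y,
                              np.array([0.0, -10.0]), np.array([2.0, 10.0]))
popt, ier = scipy.optimize.leastsq(fn, np.array([1.0, 0.0]))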
Example #38
0
def solveInterval(options, routes, begin, end, intervalPrefix, outf,
                  mismatchf):
    # store which routes are passing each counting location (using route index)
    countData = (parseDataIntervals(parseTurnCounts, options.turnFiles, begin,
                                    end, routes, options.turnAttr) +
                 parseDataIntervals(parseEdgeCounts, options.edgeDataFiles,
                                    begin, end, routes, options.edgeDataAttr))

    routeUsage = getRouteUsage(routes, countData)

    # pick a random counting location and select a new route that passes it until
    # all counts are satisfied or no routes can be used anymore
    openRoutes = set(range(0, routes.number))
    openCounts = set(range(0, len(countData)))
    openRoutes = updateOpenRoutes(openRoutes, routeUsage, countData)
    openCounts = updateOpenCounts(openCounts, countData, openRoutes)

    usedRoutes = []
    if options.optimizeInput:
        usedRoutes = [routes.edges2index[e] for e in routes.all]
        resetCounts(usedRoutes, routeUsage,
                    countData)  # set to original counts
    else:
        while openCounts:
            cd = countData[random.sample(openCounts, 1)[0]]
            routeIndex = random.sample(cd.routeSet.intersection(openRoutes),
                                       1)[0]
            usedRoutes.append(routeIndex)
            for dataIndex in routeUsage[routeIndex]:
                countData[dataIndex].count -= 1
            openRoutes = updateOpenRoutes(openRoutes, routeUsage, countData)
            openCounts = updateOpenCounts(openCounts, countData, openRoutes)

    totalMismatch = sum([cd.count for cd in countData])
    if abs(totalMismatch) > 0 and options.optimize is not None:  # FIXME: this does not optimize undercounts
        if options.verbose:
            print("Starting optimization for interval [%s, %s] (mismatch %s)" %
                  (begin, end, totalMismatch))
        optimize(options, countData, routes, usedRoutes, routeUsage)
        resetCounts(usedRoutes, routeUsage, countData)
    # avoid bias from sampling order / optimization
    random.shuffle(usedRoutes)

    if usedRoutes:
        outf.write('<!-- begin="%s" end="%s" -->\n' % (begin, end))
        period = (end - begin) / len(usedRoutes)
        depart = begin  # TODO: find a way to maintain original departures? with some randomization?
        routeCounts = getRouteCounts(routes, usedRoutes)
        if options.writeRouteIDs:
            for routeIndex in sorted(set(usedRoutes)):
                outf.write(
                    '    <route id="%s%s" edges="%s"/> <!-- %s -->\n' %
                    (intervalPrefix, routeIndex, ' '.join(
                        routes.unique[routeIndex]), routeCounts[routeIndex]))
            outf.write('\n')
        elif options.writeRouteDist:
            outf.write('    <routeDistribution id="%s%s"/>\n' %
                       (intervalPrefix, options.writeRouteDist))
            for routeIndex in sorted(set(usedRoutes)):
                outf.write(
                    '        <route id="%s%s" edges="%s" probability="%s"/>\n'
                    % (intervalPrefix, routeIndex, ' '.join(
                        routes.unique[routeIndex]), routeCounts[routeIndex]))
            outf.write('    </routeDistribution>\n\n')

        routeID = options.writeRouteDist
        if options.writeFlows is None:
            for i, routeIndex in enumerate(usedRoutes):
                if options.writeRouteIDs:
                    routeID = routeIndex
                vehID = options.prefix + intervalPrefix + str(i)
                if routeID is not None:
                    outf.write(
                        '    <vehicle id="%s" depart="%.2f" route="%s%s"%s/>\n'
                        % (vehID, depart, intervalPrefix, routeID,
                           options.vehattrs))  # FIXME: maintain vehicle types
                else:
                    outf.write('    <vehicle id="%s" depart="%.2f"%s>\n' %
                               (vehID, depart, options.vehattrs))
                    outf.write('        <route edges="%s"/>\n' %
                               ' '.join(routes.unique[routeIndex]))
                    outf.write('    </vehicle>\n')
                depart += period  # TODO: use actual depart times
        else:
            routeDeparts = defaultdict(list)
            for routeIndex in usedRoutes:
                routeDeparts[routeIndex].append(depart)
                depart += period
            if options.writeRouteDist:
                totalCount = sum(routeCounts)
                probability = totalCount / (end - begin)
                flowID = options.prefix + intervalPrefix + options.writeRouteDist
                if options.writeFlows == "number" or probability > 1.001:
                    repeat = 'number="%s"' % totalCount
                    if options.writeFlows == "probability":
                        sys.stderr.write(
                            "Warning: could not write flow %s with probability %.2f\n"
                            % (flowID, probability))
                else:
                    repeat = 'probability="%s"' % probability
                outf.write(
                    '    <flow id="%s" begin="%.2f" end="%.2f" %s route="%s"%s/>\n'
                    % (flowID, begin, end, repeat, options.writeRouteDist,
                       options.vehattrs))
            else:
                # ensure flows are sorted
                flows = []
                for routeIndex in sorted(set(usedRoutes)):
                    outf2 = StringIO()
                    fBegin = min(routeDeparts[routeIndex])
                    fEnd = max(routeDeparts[routeIndex] + [fBegin + 1.0])
                    probability = routeCounts[routeIndex] / (fEnd - fBegin)
                    flowID = "%s%s%s" % (options.prefix, intervalPrefix,
                                         routeIndex)
                    if options.writeFlows == "number" or probability > 1.001:
                        repeat = 'number="%s"' % routeCounts[routeIndex]
                        if options.writeFlows == "probability":
                            sys.stderr.write(
                                "Warning: could not write flow %s with probability %.2f\n"
                                % (flowID, probability))
                    else:
                        repeat = 'probability="%s"' % probability
                    if options.writeRouteIDs:
                        outf2.write(
                            '    <flow id="%s" begin="%.2f" end="%.2f" %s route="%s%s"%s/>\n'
                            % (flowID, fBegin, fEnd, repeat, intervalPrefix,
                               routeIndex, options.vehattrs))
                    else:
                        outf2.write(
                            '    <flow id="%s" begin="%.2f" end="%.2f" %s%s>\n'
                            % (flowID, fBegin, fEnd, repeat, options.vehattrs))
                        outf2.write('        <route edges="%s"/>\n' %
                                    ' '.join(routes.unique[routeIndex]))
                        outf2.write('    </flow>\n')
                    flows.append((fBegin, outf2))
                flows.sort()
                for fBegin, outf2 in flows:
                    outf.write(outf2.getvalue())

    underflow = sumolib.miscutils.Statistics("underflow locations")
    overflow = sumolib.miscutils.Statistics("overflow locations")
    gehStats = sumolib.miscutils.Statistics("GEH")
    numGehOK = 0.0
    hourFraction = (end - begin) / 3600.0
    totalCount = 0
    for cd in countData:
        localCount = cd.origCount - cd.count
        totalCount += localCount
        if cd.count > 0:
            underflow.add(cd.count, cd.edgeTuple)
        elif cd.count < 0:
            overflow.add(cd.count, cd.edgeTuple)
        origHourly = cd.origCount / hourFraction
        localHourly = localCount / hourFraction
        geh = sumolib.miscutils.geh(
            origHourly, localHourly)  # TODO: check the GEH calculation
        if geh < options.gehOk:
            numGehOK += 1
        gehStats.add(
            geh, "[%s] %s %s" %
            (' '.join(cd.edgeTuple), int(origHourly), int(localHourly)))

    outputIntervalPrefix = "" if intervalPrefix == "" else "%s: " % int(begin)
    gehOKNum = (100 * numGehOK / len(countData)) if countData else 100
    gehOK = "%.2f%%" % gehOKNum if countData else "-"
    print(
        "%sWrote %s routes (%s distinct) achieving total count %s at %s locations. GEH<%s for %s"
        % (outputIntervalPrefix, len(usedRoutes), len(set(usedRoutes)),
           totalCount, len(countData), options.gehOk, gehOK))

    if options.verboseHistogram:
        edgeCount = sumolib.miscutils.Statistics("route edge count",
                                                 histogram=True)
        detectorCount = sumolib.miscutils.Statistics("route detector count",
                                                     histogram=True)
        for i, r in enumerate(usedRoutes):
            edgeCount.add(len(routes.unique[r]), i)
            detectorCount.add(len(routeUsage[r]), i)
        print("result %s" % edgeCount)
        print("result %s" % detectorCount)
        print(gehStats)

    if underflow.count() > 0:
        print("Warning: %s (total %s)" % (underflow, sum(underflow.values)))
    if overflow.count() > 0:
        print("Warning: %s (total %s)" % (overflow, sum(overflow.values)))

    if mismatchf:
        mismatchf.write('    <interval id="deficit" begin="%s" end="%s">\n' %
                        (begin, end))
        for cd in countData:
            if len(cd.edgeTuple) == 1:
                mismatchf.write(
                    '        <edge id="%s" measuredCount="%s" deficit="%s"/>\n'
                    % (cd.edgeTuple[0], cd.origCount, cd.count))
            elif len(cd.edgeTuple) == 2:
                mismatchf.write(
                    '        <edgeRelation from="%s" to="%s" measuredCount="%s" deficit="%s"/>\n'
                    %
                    (cd.edgeTuple[0], cd.edgeTuple[1], cd.origCount, cd.count))
            else:
                print(
                    "Warning: output for edge relations with more than 2 edges not supported (%s)"
                    % cd.edgeTuple,
                    file=sys.stderr)
        mismatchf.write('    </interval>\n')

    return sum(underflow.values), sum(overflow.values), gehOKNum
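
The GEH statistic used in the summary compares modelled and counted hourly flows; GEH = sqrt(2*(m - c)^2 / (m + c)), with GEH < 5 commonly taken as an acceptable match. A sketch of the formula (sumolib.miscutils.geh presumably computes the same quantity):

import math

def geh(m, c):
    # Geoffrey E. Havers statistic for hourly flows m (modelled), c (counted)
    return math.sqrt(2 * (m - c) ** 2 / (m + c)) if m + c > 0 else 0.0

assert geh(100, 100) == 0.0
print(geh(120, 100))  # ~1.91, acceptable by the usual GEH < 5 rule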
Example #39
0
    def run(self):

        # scipy.optimize is a module and is not itself callable; curve_fit is
        # presumably the intended routine, since its (f, xdata, ydata, p0,
        # sigma) arguments and (params, covariance) return value match the
        # names used here
        self.coefficients, self.covariance = sp.optimize.curve_fit(
            self.f, self.inputs, self.outputs, self.guesses, self.sdeviation)
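
For reference, a self-contained sketch of scipy.optimize.curve_fit with the same argument shape as the call above (all data and names here are illustrative):

import numpy as np
import scipy as sp
import scipy.optimize

def f(x, a, b):
    return a * np.exp(b * x)

x = np.linspace(0, 1, 30)
y = f(x, 2.0, -1.5) + 0.01 * np.random.default_rng(1).standard_normal(30)
coefficients, covariance = sp.optimize.curve_fit(f, x, y, p0=(1.0, -1.0),
                                                 sigma=0.01 * np.ones(30))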
Example #40
0
def initialize_logging():
    """
    Initialize logging. Prints to both the console and a log file, at configurable levels
    """
    global now

    # set root logger to debug level
    logging.getLogger().setLevel(logging.DEBUG)

    #formatter = logging.Formatter('%(asctime)s %(name)s:%(process)d %(levelname)s %(message)s')
    formatter = logging.Formatter('%(levelname)s %(message)s')

    # create console handler and attach to root logger
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(ch)

    # create logfile and attach to local logger
    log_filename = 'optimize.log.'+now
    fh = logging.FileHandler(log_filename)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.info('Logging output to %s', log_filename)
 

if __name__ == "__main__":
    optimize()
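
A minimal usage sketch of the logging setup, assuming it runs in the same script as initialize_logging() above (the module-level `logger` and `now` globals the function relies on are defined first):

import logging
from datetime import datetime

logger = logging.getLogger('optimize')
now = datetime.now().strftime('%Y%m%d-%H%M%S')

initialize_logging()
logger.debug('this reaches both the console and optimize.log.%s', now)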

Example #41
0
def solveInterval(options, routes, begin, end, intervalPrefix, outf, mismatchf, rng=random):
    # store which routes are passing each counting location (using route index)
    countData = (parseDataIntervals(parseTurnCounts, options.turnFiles, begin, end, routes, options.turnAttr,
                                    options=options)
                 + parseDataIntervals(parseEdgeCounts, options.edgeDataFiles, begin, end, routes,
                                      options.edgeDataAttr,
                                      options=options)
                 + parseDataIntervals(parseTurnCounts, options.odFiles, begin, end, routes, options.turnAttr,
                                      isOD=True,
                                      options=options)
                 )

    routeUsage = getRouteUsage(routes, countData)
    unrestricted = set([r for r, usage in enumerate(routeUsage) if len(usage) == 0])
    if options.verbose and len(unrestricted) > 0:
        print("Ignored %s routes which do not pass any counting location" % len(unrestricted))

    # pick a random counting location and select a new route that passes it until
    # all counts are satisfied or no routes can be used anymore
    openRoutes = set(range(0, routes.number))
    openCounts = set(range(0, len(countData)))
    openRoutes = updateOpenRoutes(openRoutes, routeUsage, countData)
    openCounts = updateOpenCounts(openCounts, countData, openRoutes)
    openRoutes = openRoutes.difference(unrestricted)

    usedRoutes = []
    if options.optimizeInput:
        usedRoutes = [routes.edges2index[e] for e in routes.all]
        resetCounts(usedRoutes, routeUsage, countData)
    else:
        while openCounts:
            if options.weighted:
                routeIndex = _sample_skewed(openRoutes, rng, routes.probabilities)
            else:
                # sampling equally among open counting locations appears to
                # improve GEH but it would also introduce a bias in the loaded
                # route probabilities
                cd = countData[_sample(openCounts, rng)]
                routeIndex = _sample(cd.routeSet.intersection(openRoutes), rng)
            usedRoutes.append(routeIndex)
            for dataIndex in routeUsage[routeIndex]:
                countData[dataIndex].count -= 1
            openRoutes = updateOpenRoutes(openRoutes, routeUsage, countData)
            openCounts = updateOpenCounts(openCounts, countData, openRoutes)

    totalMismatch = sum([cd.count for cd in countData])
    if totalMismatch > 0 and options.optimize is not None:
        if options.verbose:
            print("Starting optimization for interval [%s, %s] (mismatch %s)" % (
                begin, end, totalMismatch))
        optimize(options, countData, routes, usedRoutes, routeUsage)
        resetCounts(usedRoutes, routeUsage, countData)
    # avoid bias from sampling order / optimization
    random.shuffle(usedRoutes, rng.random)

    if usedRoutes:
        outf.write('<!-- begin="%s" end="%s" -->\n' % (begin, end))
        period = (end - begin) / len(usedRoutes)
        depart = begin
        routeCounts = getRouteCounts(routes, usedRoutes)
        if options.writeRouteIDs:
            for routeIndex in sorted(set(usedRoutes)):
                edges = routes.unique[routeIndex]
                routeIDComment = ""
                if edges in routes.edgeIDs:
                    routeIDComment = " (%s)" % routes.edgeIDs[edges]
                outf.write('    <route id="%s%s" edges="%s"/> <!-- %s%s -->\n' % (
                    intervalPrefix, routeIndex, ' '.join(edges),
                    routeCounts[routeIndex], routeIDComment))
            outf.write('\n')
        elif options.writeRouteDist:
            outf.write('    <routeDistribution id="%s%s">\n' % (intervalPrefix, options.writeRouteDist))
            for routeIndex in sorted(set(usedRoutes)):
                outf.write('        <route id="%s%s" edges="%s" probability="%s"/>\n' % (
                    intervalPrefix, routeIndex, ' '.join(routes.unique[routeIndex]), routeCounts[routeIndex]))
            outf.write('    </routeDistribution>\n\n')

        routeID = options.writeRouteDist
        if options.writeFlows is None:
            for i, routeIndex in enumerate(usedRoutes):
                if options.writeRouteIDs:
                    routeID = routeIndex
                vehID = options.prefix + intervalPrefix + str(i)
                if routeID is not None:
                    if options.pedestrians:
                        outf.write('    <person id="%s" depart="%.2f"%s>\n' % (
                            vehID, depart, options.vehattrs))
                        outf.write('        <walk route="%s%s"/>\n' % (intervalPrefix, routeID))
                        outf.write('    </person>\n')
                    else:
                        outf.write('    <vehicle id="%s" depart="%.2f" route="%s%s"%s/>\n' % (
                            vehID, depart, intervalPrefix, routeID, options.vehattrs))
                else:
                    if options.pedestrians:
                        outf.write('    <person id="%s" depart="%.2f"%s>\n' % (
                            vehID, depart, options.vehattrs))
                        outf.write('        <walk edges="%s"/>\n' % ' '.join(routes.unique[routeIndex]))
                        outf.write('    </person>\n')
                    else:
                        outf.write('    <vehicle id="%s" depart="%.2f"%s>\n' % (
                            vehID, depart, options.vehattrs))
                        outf.write('        <route edges="%s"/>\n' % ' '.join(routes.unique[routeIndex]))
                        outf.write('    </vehicle>\n')
                depart += period
        else:
            routeDeparts = defaultdict(list)
            for routeIndex in usedRoutes:
                routeDeparts[routeIndex].append(depart)
                depart += period
            if options.writeRouteDist:
                totalCount = sum(routeCounts)
                probability = totalCount / (end - begin)
                flowID = options.prefix + intervalPrefix + options.writeRouteDist
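                # SUMO flow "probability" is an insertion probability per second
                # (at most 1); higher demand must be written as number="..."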
                if options.writeFlows == "number" or probability > 1.001:
                    repeat = 'number="%s"' % totalCount
                    if options.writeFlows == "probability":
                        sys.stderr.write("Warning: could not write flow %s with probability %.2f\n" %
                                         (flowID, probability))
                else:
                    repeat = 'probability="%s"' % probability
                outf.write('    <flow id="%s" begin="%.2f" end="%.2f" %s route="%s"%s/>\n' % (
                    flowID, begin, end, repeat,
                    options.writeRouteDist, options.vehattrs))
            else:
                # ensure flows are sorted
                flows = []
                for routeIndex in sorted(set(usedRoutes)):
                    outf2 = StringIO()
                    fBegin = min(routeDeparts[routeIndex])
                    fEnd = max(routeDeparts[routeIndex] + [fBegin + 1.0])
                    probability = routeCounts[routeIndex] / (fEnd - fBegin)
                    flowID = "%s%s%s" % (options.prefix, intervalPrefix, routeIndex)
                    if options.writeFlows == "number" or probability > 1.001:
                        repeat = 'number="%s"' % routeCounts[routeIndex]
                        if options.writeFlows == "probability":
                            sys.stderr.write("Warning: could not write flow %s with probability %.2f\n" % (
                                flowID, probability))
                    else:
                        repeat = 'probability="%s"' % probability
                    if options.writeRouteIDs:
                        if options.pedestrians:
                            outf2.write('    <personFlow id="%s" begin="%.2f" end="%.2f" %s%s>\n' % (
                                flowID, fBegin, fEnd, repeat, options.vehattrs))
                            outf2.write('        <walk route="%s%s"/>\n' % (intervalPrefix, routeIndex))
                            outf2.write('    </personFlow>\n')
                        else:
                            outf2.write('    <flow id="%s" begin="%.2f" end="%.2f" %s route="%s%s"%s/>\n' % (
                                flowID, fBegin, fEnd, repeat,
                                intervalPrefix, routeIndex, options.vehattrs))
                    else:
                        if options.pedestrians:
                            outf2.write('    <personFlow id="%s" begin="%.2f" end="%.2f" %s%s>\n' % (
                                flowID, fBegin, fEnd, repeat, options.vehattrs))
                            outf2.write('        <walk edges="%s"/>\n' % ' '.join(routes.unique[routeIndex]))
                            outf2.write('    </personFlow>\n')
                        else:
                            outf2.write('    <flow id="%s" begin="%.2f" end="%.2f" %s%s>\n' % (
                                flowID, fBegin, fEnd, repeat, options.vehattrs))
                            outf2.write('        <route edges="%s"/>\n' % ' '.join(routes.unique[routeIndex]))
                            outf2.write('    </flow>\n')
                    flows.append((fBegin, outf2))
                flows.sort()
                for fBegin, outf2 in flows:
                    outf.write(outf2.getvalue())

    underflow = sumolib.miscutils.Statistics("underflow locations")
    overflow = sumolib.miscutils.Statistics("overflow locations")
    gehStats = sumolib.miscutils.Statistics("GEH")
    numGehOK = 0.0
    hourFraction = (end - begin) / 3600.0
    totalCount = 0
    totalOrigCount = 0
    for cd in countData:
        localCount = cd.origCount - cd.count
        totalCount += localCount
        totalOrigCount += cd.origCount
        if cd.count > 0:
            underflow.add(cd.count, cd.edgeTuple)
        elif cd.count < 0:
            overflow.add(cd.count, cd.edgeTuple)
        origHourly = cd.origCount / hourFraction
        localHourly = localCount / hourFraction
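        # GEH = sqrt(2 * (m - c)**2 / (m + c)) with m, c the modeled and counted
        # hourly flows; GEH below 5 is the common acceptance threshold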
        geh = sumolib.miscutils.geh(origHourly, localHourly)
        if geh < options.gehOk:
            numGehOK += 1
        gehStats.add(geh, "[%s] %s %s" % (
            ' '.join(cd.edgeTuple), int(origHourly), int(localHourly)))

    outputIntervalPrefix = "" if intervalPrefix == "" else "%s: " % int(begin)
    countPercentage = "%.2f%%" % (100 * totalCount / float(totalOrigCount)) if totalOrigCount else "-"
    gehOKNum = 100 * numGehOK / float(len(countData)) if countData else 100
    gehOK = "%.2f%%" % gehOKNum if countData else "-"
    print("%sWrote %s routes (%s distinct) achieving total count %s (%s) at %s locations. GEH<%s for %s" % (
        outputIntervalPrefix,
        len(usedRoutes), len(set(usedRoutes)),
        totalCount, countPercentage, len(countData),
        options.gehOk, gehOK))

    if options.verboseHistogram:
        edgeCount = sumolib.miscutils.Statistics("route edge count", histogram=True)
        detectorCount = sumolib.miscutils.Statistics("route detector count", histogram=True)
        for i, r in enumerate(usedRoutes):
            edgeCount.add(len(routes.unique[r]), i)
            detectorCount.add(len(routeUsage[r]), i)
        print("result %s" % edgeCount)
        print("result %s" % detectorCount)
        print(gehStats)

    if underflow.count() > 0:
        print("Warning: %s (total %s)" % (underflow, sum(underflow.values)))
    if overflow.count() > 0:
        print("Warning: %s (total %s)" % (overflow, sum(overflow.values)))
    sys.stdout.flush()  # needed for multiprocessing

    if mismatchf:
        mismatchf.write('    <interval id="deficit" begin="%s" end="%s">\n' % (begin, end))
        for cd in countData:
            if len(cd.edgeTuple) == 1:
                mismatchf.write('        <edge id="%s" measuredCount="%s" deficit="%s"/>\n' % (
                    cd.edgeTuple[0], cd.origCount, cd.count))
            elif len(cd.edgeTuple) == 2:
                mismatchf.write('        <edgeRelation from="%s" to="%s" measuredCount="%s" deficit="%s"/>\n' % (
                    cd.edgeTuple[0], cd.edgeTuple[1], cd.origCount, cd.count))
            else:
                print("Warning: output for edge relations with more than 2 edges not supported (%s)" % cd.edgeTuple,
                      file=sys.stderr)
        mismatchf.write('    </interval>\n')

    return sum(underflow.values), sum(overflow.values), gehOKNum, outf
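
The heart of solveInterval is the greedy sampling loop: pick an open counting location at random, pick a route that passes it, decrement every count that route touches, and repeat until all counts are satisfied or no route can be added without overshooting. A stripped-down sketch of that idea, with toy dicts standing in for the real countData and routeUsage structures (all names below are hypothetical):

import random

counts = {'locA': 2, 'locB': 1}                  # remaining vehicles per location
route_locs = {0: ['locA'], 1: ['locA', 'locB']}  # locations each route passes

used = []
open_counts = {loc for loc, c in counts.items() if c > 0}
while open_counts:
    loc = random.choice(sorted(open_counts))
    # a route stays usable only while every count it passes still needs vehicles
    usable = [r for r, locs in route_locs.items()
              if loc in locs and all(counts[l] > 0 for l in locs)]
    if not usable:
        open_counts.discard(loc)                 # this count can no longer be served
        continue
    r = random.choice(usable)
    used.append(r)
    for l in route_locs[r]:
        counts[l] -= 1
    open_counts = {loc for loc, c in counts.items() if c > 0}

print(used, counts)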
Example #42
0
  def load(my):
    generatepython = 0
    generatedb = 0
    repressoutput = 0
    fname = None
    opts, args = getopt.getopt(sys.argv[1:], "dgrf:")
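    # -d rebuilds the zoning/parcel database, -g recompiles the spreadsheet
    # graph, -f <file> selects the workbook, -r leaves the Excel compiler unloaded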
    for o, a in opts:
        if o == "-d": generatedb = 1
        if o == "-g": generatepython = 1
        if o == "-f": fname = a
        if o == "-r": repressoutput = 1

    global excel
    excel = None
    if fname:
        fname = normpath(abspath(fname))
        print "Loading %s..." % fname
    if not repressoutput:
        excel = ExcelCompiler(filename=fname)
        my.excel = excel

    if generatedb:
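        # build and cache the Zoning and Parcels objects so later runs can
        # load them from disk instead of regenerating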
        z = Zoning()
        p = Parcels()
        cPickle.dump((z,p),open('databaseinfo.jar','w'))
    else:
        print "Reading db info from jar..."
        z,p = cPickle.load(open('databaseinfo.jar'))

    global sp
    if generatepython:
        print "Compiling..., starting from NPV"
        sp = excel.gen_graph('B52',sheet='Proforma')
        print "Serializing to disk..."
        sp.save_to_file(fname + ".jar")
    
        # show the graph usisng matplotlib
        #print "Plotting using matplotlib..."
        #sp.plot_graph()

        # export the graph, can be loaded by a viewer like gephi
        print "Exporting to gexf..."
        sp.export_to_gexf(fname + ".gexf")
    else:
        print "Reading formula from jar..."
        sp = Spreadsheet.load_from_file(fname+'.jar')

    if generatedb or generatepython: 
        print "Done generating, run again for proforma"
        sys.exit(0)

    #set_value(excel,sp,'Revenue Model','B97',100.0) # artificially increase retail revenue for testing

    print "Num of parcels:", len(p.get_pids())
    for pid in p.get_pids()[:100]:
        #proforma_inputs['parcel']['parcel_id'] = pid

        print "parcel_id is %d" % pid
        v = float(p.get_attr(pid,'shape_area'))*10.7639
        set_value(excel,sp,"Bldg Form","C28",v)

        try:
            zoning = z.get_zoning(pid)
        except Exception:
            print "Can't find zoning for parcel: %d, skipping\n" % pid
            continue
        btypes = z.get_building_types(pid)
        if not zoning:
            print "NO ZONING FOR PARCEL\n"
            continue
        if not btypes:
            print "NO BUILDING TYPES FOR PARCEL\n"
            continue
        print "Parcel size is %f" % v
        far = z.get_attr(zoning,'max_far', 100)
        height = int(z.get_attr(zoning,'max_height', 1000))

        if far == 100 and height == 1000: far,height = .75,10
        set_value(excel,sp,"Bldg Form","C34",far) # far
        set_value(excel,sp,"Bldg Form","C33",height) # height

        if far > 1 or height > 15:
            set_value(excel,sp,"Bldg Form","C39",1) # multi-story

        print "ZONING BTYPES:", btypes

        # right now we can't have MF-CONDO (type 5)
        devmdl_btypes = []
        if 1 in btypes or 2 in btypes: devmdl_btypes+=[1,2]
        if 3 in btypes:
            devmdl_btypes += [3,5] # MF to MF-rental and MF-condo
        if 4 in btypes: devmdl_btypes.append(7) # office to office
        #if 5 in btypes: continue # hotel
        #if 6 in btypes: continue # schools
        if 7 in btypes or 8 in btypes: # light industrial and warehouse to warehouse
            devmdl_btypes.append(13)
        if 9 in btypes: devmdl_btypes.append(12) # heavy industrial to manufacturing
        if 10 in btypes: devmdl_btypes.append(10) # strip mall to auto
        if 11 in btypes: devmdl_btypes.append(11) # big box to big box
        if 12 in btypes: devmdl_btypes+=[4,6] # residential-focused to MXD-MF and MXD-condo
        if 13 in btypes: devmdl_btypes.append(9) # retail focused to neighborhood retail
        if 14 in btypes: devmdl_btypes.append(8) # employment-focused to MXD-office

        btypes = devmdl_btypes

    print "DEVMDL BTYPES:", btypes

        for btype in btypes:
            print "building type = %s" % btype
            
            if btype in [1,2,3,4,5,6]: # RESIDENTIAL
                zone_id = p.get_attr(pid,'zone_id')
                lotsize = p.get_lotsize(zone_id)
                unitsize = p.get_unitsize(zone_id,'HS')
                unitsize2 = p.get_unitsize(zone_id,'MR')
                if not unitsize: unitsize = 1111
                if not unitsize2: unitsize2 = 888
                if not lotsize: lotsize = 11111
                print "zone:", zone_id, "lotsize:", lotsize, "HS size:", unitsize, "MF size:", unitsize2

            if btype in [4,6,8,9]:
                set_value(excel,sp,"Bldg Form","C39",1) # ground floor retail

            def unset_uses(e,s):
                for au in [58,59,60,61, 63,64,65,66, 68,69,70,71, 73,74,75,76, \
                           78,79,80,81,82,83,84]:
                    set_value(e,s,"Bldg Form","C%d" % au,0)
                    set_value(e,s,"Bldg Form","K%d" % au,0)

            def allowable_uses(e,s,aus):
                unset_uses(e,s)
                for au in aus: set_value(e,s,"Bldg Form","C%d" % au,1)

            if btype == 1: # this is single family one-off
                assert lotsize
                assert unitsize
                set_value(excel,sp,"Bldg Form","D57",lotsize) # lot size for single family
                set_value(excel,sp,"Bldg Form","E57",unitsize) # unit size for single family
                allowable_uses(excel,sp,[63,64,65,66])

            elif btype == 2: # this is single family builder
                assert lotsize
                assert unitsize
                set_value(excel,sp,"Bldg Form","D57",lotsize) # lot size for single family
                set_value(excel,sp,"Bldg Form","E57",unitsize) # unit size for single family
                allowable_uses(excel,sp,[58,59,60,61])

            elif btype == 3: # MF-rental
                assert unitsize2
                set_value(excel,sp,"Bldg Form","D67",unitsize2) # unit size for multi family
                allowable_uses(excel,sp,[68,69,70,71])

            elif btype == 4: # MXD-MF
                assert unitsize2
                set_value(excel,sp,"Bldg Form","D67",unitsize2) # unit size for multi family
                allowable_uses(excel,sp,[68,69,70,71,78])
            
            elif btype == 5: # MF-CONDO
                assert unitsize2
                set_value(excel,sp,"Bldg Form","D67",unitsize2) # unit size for multi family
                allowable_uses(excel,sp,[73,74,75,76])

            elif btype == 6: # MXD-CONDO
                assert unitsize2
                set_value(excel,sp,"Bldg Form","D67",unitsize2) # unit size for multi family
                allowable_uses(excel,sp,[73,74,75,76,78])
            
            elif btype == 8: # MXD-OFFICE
                assert unitsize2
                set_value(excel,sp,"Bldg Form","D67",unitsize2) # unit size for multi family
                allowable_uses(excel,sp,[78,82])

            elif btype == 14: # LODGING
                continue # skip for now

            elif btype in COMMERCIALTYPES_D: # COMMERCIAL TYPES
                allowable_uses(excel,sp,[COMMERCIALTYPES_D[btype]])

            else: assert 0, "unhandled building type: %s" % btype

            X, npv = optimize(sp,btype)
            if npv == -1: continue # error code

            _objfunc2(X,btype,saveexcel=1,excelprefix='%d_%s' % (pid,btype))
            print
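
The unset_uses/allowable_uses pair above is a mask-then-enable pattern: zero every use flag in the form, then switch on just the rows for the current building type. A minimal sketch of the same idea against a plain dict standing in for the spreadsheet (the set_cell helper and the row subset are hypothetical):

cells = {}  # hypothetical stand-in: cell address -> value

def set_cell(addr, value):
    cells[addr] = value

USE_ROWS = [58, 59, 60, 61, 63, 64, 65, 66]  # assumption: a subset of the rows above

def unset_uses():
    # clear every allowable-use flag first
    for row in USE_ROWS:
        set_cell('C%d' % row, 0)

def allowable_uses(rows):
    # then enable only the uses allowed for this building type
    unset_uses()
    for row in rows:
        set_cell('C%d' % row, 1)

allowable_uses([63, 64, 65, 66])  # e.g. the single-family one-off rows
print(cells)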