Example #1
import objective, stack, fourier
import numpy as np
import cv2
from matplotlib import pyplot as plt

# All distances in meters

f_objective = 0.01  # Objective focal length
specimen_distance = 0.0105  # Specimen to objective distance
NA = 0.4  # Numerical aperture
d_objective = 0  # Diameter of objective lens
field_of_view_x = 0  # Field of view in the image plane (Magnification*Actual specimen size)
field_of_view_y = 0

lens = objective.Objective(
    NA, f_objective)  # Lens object (implemented in objective.py)
array_distance, magnification = lens.distanceMagnification(
    specimen_distance
)  # Distance for the microlens_array such that there is a one to one mapping

microlens_pitch = 0  # Microlens pitch
f_microlens = 0  # Microlens focal length
pixel_ratio = 0  # Number of sensor pixels per meter
x_lenslet = 0  # Number of lenslets in x
y_lenslet = 0  # Number of lenslets in y
pixels = 10  # Pixels behind each microlens (in 1D)
''' Points to note
1. Strictly, the pixel ratio should be fixed (the sensor is a given) rather than the number of pixels behind each microlens, which depends on the microlens diameter. For convenience, however, it is easier to calibrate the other way around.
2. The microlenses are assumed to be spherical, so the pixels behind each microlens can be given as 10 in 1D (the actual pixel count would be 100 in 2D).
3. The only quantities fixed before an experiment are the specimen dimensions. One then chooses an objective, which fixes the objective focal length, diameter and NA. This in turn fixes the field_of_view, which helps determine how many lenslets to include in the array and where to place it. This would
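A minimal sketch of what distanceMagnification computes, assuming the Objective class simply applies the thin-lens equation (objective.py itself is not shown here, so treat this as an illustration rather than the actual implementation):

f_objective = 0.01          # objective focal length (m)
specimen_distance = 0.0105  # specimen-to-objective distance (m)

# Thin-lens equation: 1/f = 1/d_specimen + 1/d_image
array_distance = 1.0 / (1.0 / f_objective - 1.0 / specimen_distance)
# Magnification is the ratio of image distance to object distance
magnification = array_distance / specimen_distance

print(array_distance)  # ~0.21 m
print(magnification)   # ~20x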
Example #2
import json
import sys
import objective
lift = float(sys.argv[1])
drag = float(sys.argv[2])
area = float(sys.argv[3])

with open('penalties.json') as f:
    penalties = json.load(f)

objective_function = objective.Objective(**penalties)
print(objective_function([lift, drag, area]))
Example #3
        para[key] = temp[key]


readParameters(para)
'''Parameters dictionary
1. specimen_distance = Distance from the specimen to the objective
2. specimen_x, specimen_y = Dimensions of the specimen
3. NA, f_objective = Numerical aperture and focal length of the objective
4. x_lenslet, y_lenslet = Number of lenslets in the microlens array
5. x_sensor, y_sensor = Dimensions of the sensor
6. array_spacing = Radius of a single microlens in the microlens array
7. z_spacing = Spacing between two adjacent sections in the stack
8. x_tilt, y_tilt = Cosines of the tilt of the array plane about the origin in the x and y directions (1, 1 corresponds to no tilt)
'''

lens = objective.Objective(para['NA'], para['f_objective'])

specimen = cv2.imread("specimen.png", 0)

x_matrix = np.eye(3)  # Rotation about the x-axis (initialized to the identity)
y_matrix = np.eye(3)  # Rotation about the y-axis (initialized to the identity)

transform = np.zeros([3, 3])  # Transforms from non-tilted plane to tilted-plane coordinates
inverse_transform = np.zeros([3, 3])  # Inverse transform


# Calibration of the experimental details
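The x_tilt and y_tilt parameters above are cosines of the array tilt, and x_matrix/y_matrix start out as identity matrices. A hypothetical sketch of how a rotation matrix about the x-axis could be built from such a cosine is given below; the actual calibration code that fills these matrices is not part of this snippet, so the helper name is illustrative only:

import numpy as np

def rotation_about_x(cos_tilt):
    # Hypothetical helper: standard rotation matrix about the x-axis,
    # built from the cosine of the tilt angle (cos_tilt = 1 means no tilt).
    sin_tilt = np.sqrt(1.0 - cos_tilt**2)
    return np.array([[1.0, 0.0, 0.0],
                     [0.0, cos_tilt, -sin_tilt],
                     [0.0, sin_tilt, cos_tilt]])

print(rotation_about_x(1.0))  # identity matrix, matching the no-tilt initialization above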
Example #4
    def load(self, filename):
        # If there is a mission loaded already close it
        if self.is_loaded():
            self.close()

        self.loaded = True
        self.mouseIsDown = False
        self.mouseRect = pygame.Rect(0, 0, 0, 0)
        self.selection = []

        # Get stuff from wrapper
        self.wrapper = missionwrapper.Wrapper()
        self.wrapper.load(filename)
        terrainList = self.wrapper.GetMap()
        playerList = self.wrapper.GetPlayer()
        unitList = self.wrapper.GetUnits()
        objectiveList = self.wrapper.GetObjective()

        # Construct the list of Objectives
        for obj in objectiveList:
            self.objectiveList.append(objective.Objective(obj))

        # Initialize the terrain grid
        holder = []
        for line in terrainList:
            for char in line:
                # don't add the trailing newline
                if char == '\n':
                    continue
                # append the char to the end of the list 'holder'
                holder.append(char)
            #all chars in 'line' are in holder
            self.terrainGrid.append(holder)
            #set holder to empty
            holder = []

        # Initialize the Player objects
        index = 0
        for player in playerList:
            player = player.replace('\n', '')
            aiA = ""
            strType = ''
            res1 = 1
            res2 = 3
            aiA, strType, res1, res2 = player.split()
            self.playerIdMap[index] = Player()
            self.playerIdMap[index].ai = aiA
            self.playerIdMap[index].type = strType
            self.playerIdMap[index].resource = (res1, res2)
            index = index + 1

        # Load in and spawn units
        for units in unitList:
            units = units.replace('\n', '')
            playerObject = self.playerIdMap[0]
            #print self.playerIdMap
            #print playerObject
            #read each individual unit's line
            xA = 1
            yA = 1
            unitId = 1
            playerId = 0
            xA, yA, unitId, playerId = units.split()
            xA = float(xA)
            yA = float(yA)
            unitId = int(unitId)
            playerId = int(playerId)
            if (unitId == 3):
                unitObject = building.Building(self.playerIdMap[playerId],
                                               xA * 32, yA * 32,
                                               self.colorMap[playerId])
            else:
                unitObject = soldier.Soldier(xA * 32, yA * 32, None,
                                             self.colorMap[playerId])
            playerObject = self.playerIdMap[playerId]
            if self.unitTable.get(playerObject) is None:
                self.unitTable[playerObject] = []
            self.unitTable[playerObject].append(unitObject)
Example #5
def optimize(f, x0, ndata, gtol=1e-5, maxiter=100, callback=None, props={}):
    """
    This method can be invoked in a similar way to the lbfgs routines in SciPy,
    with the following differences:
        - f takes additional arguments 's' and 'e' that signify a range of 
          points to evaluate the objective over.
        - The callback gives additional information
        - logging is performed using the standard python logging framework
    
    :param function f:
        Objective function, taking arguments (x,s,e), where
        (s,e) is the range of datapoints over which to evaluate
        the objective.
    :param vector x0:
        Initial point
    :param int ndata: 
        Number of points in dataset. The passed function 
        will be invoked with s,e between 0 and ndata.
    
    :keyword float gtol:
        Stopping tolerance on the 2-norm of the gradient.
    :keyword int maxiter: 
        Maximum number of steps to complete. Note that this does
        not count line search iterations.
    :keyword function callback:
        Invoked with (xk, fval, gfk, pointsProcessed), 
        useful for tracking progress for later plotting.
        PlottingCallback in the convergence module can do
        this for you.
    :keyword object props:
        Map of additional parameters:
         - **parts** (*integer* default 100)
            For computing gradients and hessian vector products,
            the data is split into this many parts. Each call to your
            objective function will cover roughly ndata/parts datapoints.
            The default of 100 is suitable for most datasets;
            smaller numbers are only useful if the dataset is small
            or non-homogeneous, in which case the hessian free method
            is ineffective. Larger numbers of parts may improve
            convergence, but result in proportionally more internal overhead.
         - **subsetVariant** (*string* default 'lbfgs')
            Setting this to 'cg' gives the standard conjugate gradient method
            for solving the linear system Hp = -g, to find the search direction
            p from the gradient g and hessian H. This is computed over only one
            of the parts, so only a small amount of the data is seen.
            Setting this to 'lbfgs' uses a stochastic minibatch lbfgs method
            for solving the linear subproblem. This sees many more parts of the
            data, but is only able to make half as many steps for the same 
            computation time. For problems without extreme curvature, lbfgs
            works much better than cg. If the condition number of the hessian
            is very large however, cg is the better option. In those cases
            the solveFraction property should normally be increased as well.
         - **solveFraction** (*float* default 0.2)
            The cg or lbfgs linear solvers perform a number of iterations
            such that **solveFraction** fraction of overhead is incurred.
            For example, if set to 0.2 and 100 parts, 20 cg iterations on 1
            part will be performed if the cg subset variant is used.
            If subsetObjective is off, then essentially 20% extra computation
            is done per outer step over a standard lbfgs method (excluding line
            searches). 
         - **subsetObjective** (*boolean* default True) 
            Turn on or off the use of subsets of data for 
            computing gradients. If off, gradients are computed using 
            the full dataset, but hessian-vector products still use subsets.
            The size of the subset used for computing the gradient is adaptive
            using bounds on the approximation error.
         - **gradRelErrorBound** (*float* default 0.1)
            At a search point, the gradient is computed over enough parts
            so that the relative variance of the gradients is brought below
            this threshold. 0.1 is conservative; better results may be 
            achieved by using values up to about 0.4. Larger values may cause
            erratic convergence behavior though.
         - **lbfgsMemory** (*integer* default 10)
            The lbfgs search direction is used as the initial guess at the 
            search direction for the cg and lbfgs inner solves. This controls
            the memory used for that. The same memory is used for the inner 
            lbfgs solve. Changing this has less of an effect than it would
            on a standard lbfgs implementation.
         - **fdEps** (*float* default 1e-8)
            Unless a gaussNewtonProd method is implemented, hessian vector
            products are computed by using finite differences. Unlike 
            applying finite differences to approximate the gradient, the FD
            method allows for the computation of hessian-vector products
            at the cost of only one subset gradient evaluation.
            If convergence plots become erratic near the optimum, tuning this
            parameter can help. This normally occurs long after the test loss
            has plateaued however.
         - **innerSolveAverage** (*boolean* default False)
            Applicable only if subsetVariant is lbfgs, this turns on the 
            use of 50% sequence suffix averaging for the inner solve.
            If a large number of parts (say 1000) is being used, this
            can give better results.
         - **innerSolveStepFactor** (*float* default 0.5)
            The lbfgs subsetVariant is stochastic, however it uses the 
            fact that quadratic problems have a simple formula for exact line
            searches, in order to make better step choices than simple SGD.
            Doing an exact line search makes overconfident steps however, and
            so the step is scaled by this factor. If the lbfgs linear solve
            is diverging, decrease this.
        
    :rtype: (xk, fval)
       
    .. note::
        If your objective is non-convex, you need to explicitly provide a
        function that computes matrix vector products against the 
        Gauss-Newton approximation to the hessian. You can do this
        by making **f** an object with a __call__ method that implements the 
        objective function as above, and a gaussNewtonProd(x, v, s, e) 
        method that implements the matrix vector product against v for
        the GN approximation at x over the datapoints (s,e). This is
        illustrated in the autoencoder example code.
        
    
    """
    logger = logging.getLogger("phf")
    useSubsetObjective = props.get("subsetObjective", True)
    n = len(x0)

    if useSubsetObjective:
        f = objective.SubsetObjective(f, ndata, n, props)
    else:
        f = objective.Objective(f, ndata, n, props)

    x0 = asarray(x0).squeeze()
    if x0.ndim == 0:
        x0.shape = (1, )

    (fval, gfk) = f(x0)
    gfkp1 = None

    if callback is not None:
        callback(x0, fval, gfk, f.pointsProcessed)

    if isinf(fval):
        raise Exception("X0 fval is infinite")

    k = 0
    xk = x0

    gnorm = linalg.norm(gfk)
    logger.info("Initial fval: %1.8f, gnorm %2.2e", fval, gnorm)

    vecs = {}

    while (gnorm > gtol) and (k < maxiter):

        pk = innersolve.solve(f, xk, gfk, k, vecs, props)

        ###### Line search
        (alpha_k, fval,
         gfkp1) = linesearch.strong_wolfe(f, xk, fval, gfk, pk, props)

        previous_fval = fval
        xkp1 = xk + alpha_k * pk
        sk = xkp1 - xk
        xk = xkp1
        yk = gfkp1 - gfk

        skyk = dot(sk, yk)
        rhok = 1.0 / skyk

        if len(vecs) == 0:
            kmax = 0
        else:
            kmax = max(vecs.keys()) + 1

        if skyk <= 0:
            logger.error("BAD CURVATURE skyk=%1.1e !!!!!!!!!!", skyk)
        else:
            vecs[kmax] = (sk, yk, rhok)

        gnorm = linalg.norm(gfkp1)
        gfk = gfkp1

        logger.info(
            " Iteration %d, fval: %1.8f, gnorm %1.3e, effective iters: %1.2f",
            k, fval, gnorm, f.pointsProcessed / float(ndata))

        if callback is not None:
            callback(xk, fval, gfk, f.pointsProcessed)

        k += 1

    return xk, fval
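A minimal usage sketch for optimize, assuming the user-supplied objective returns an (objective value, gradient) pair over the datapoint range [s, e); that return convention is inferred from the wrapped call (fval, gfk) = f(x0) above rather than stated in the docstring, so treat it as an assumption:

import numpy as np

# Synthetic least-squares problem: fit x so that A x is close to b over ndata rows.
ndata = 1000
rng = np.random.RandomState(0)
A = rng.randn(ndata, 5)
b = A.dot(np.ones(5)) + 0.01 * rng.randn(ndata)

def f(x, s, e):
    # Objective value and gradient over the datapoint range [s, e)
    r = A[s:e].dot(x) - b[s:e]
    return 0.5 * r.dot(r), A[s:e].T.dot(r)

x_opt, fval = optimize(f, np.zeros(5), ndata,
                       gtol=1e-5, maxiter=50,
                       props={'parts': 100, 'subsetVariant': 'lbfgs'})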
Example #6
	def __init__(self, player, source):
		""" Create a level from an image. """

		# BLOCKSIZE = constants.SCREEN_HEIGHT / 20 # 20 blocks high

		from PIL import Image

		# Call the parent constructor
		Level.__init__(self, player)

		level = Image.open(source)

		*__, width, height = level.getbbox()

		self.level_limit = BLOCKSIZE * width

		for py in range(height):

			# The number of consecutive pixels with the same color (1 when we start)
			consecutive_pixels = 1

			for px in range(width):

				r, g, b, a = level.getpixel((px, py))

				# Skip fully transparent pixels (alpha runs 0-255, so a < 1 means a == 0)
				# WARNING/TODO: this is broken for partial transparency;
				# it will not work if anything is meant to be semi-transparent
				if a < 1:
					continue

				# If there is no "next pixel" or the pixel after this one is different
				if px + 1 >= width or level.getpixel((px + 1, py)) != (r, g, b, a):

					container = None

					if (r, g, b) == constants.OBJECTIVE:
						# Objective, don't render it
						obj = objective.Objective(px * BLOCKSIZE, py * BLOCKSIZE)
						self.objective_list.add(obj)

					elif (r, g, b) == constants.WATER:
						# Water, do render it
						container = self.water_list

					elif (r, g, b) ==  constants.LAVA:
						container = self.lava_list

					# end list
					elif (r, g, b) == constants.BLACK:
						container = self.end_list

					else:
						container = self.platform_list

					if container is not None:
						platform = Platform(BLOCKSIZE * consecutive_pixels, BLOCKSIZE, (r, g, b))
						platform.rect.x = (px - consecutive_pixels + 1) * BLOCKSIZE # x-offset -> (px should be initial pixel) * size of 1 block
						platform.rect.y = py * BLOCKSIZE # y-offset, current y-value * size of 1 block

						container.add(platform)


					# Clear the amount of consecutive pixels
					consecutive_pixels = 1

				else:
					# In this case there is a next pixel, and it's the same color as the current
					# So we increment the amount of consecutive pixels by 1
					# which causes the resulting "block" or platform thingy to be widened by BLOCKSIZE
					consecutive_pixels += 1
					continue

		# Fix the coords of the first lava object in the level
		if len(self.lava_list.sprites()) > 0:
			self.lava_x = self.lava_list.sprites()[0].rect.x
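The inner loop above is essentially run-length encoding each pixel row: consecutive pixels of the same colour are merged into a single platform that is consecutive_pixels blocks wide. A standalone sketch of that grouping idea, using itertools.groupby rather than the level loader's own counter:

from itertools import groupby

row = ['red', 'red', 'blue', 'blue', 'blue', 'red']

# Collapse consecutive pixels of the same colour into (colour, run length) pairs,
# the same merging the loop above performs before creating each Platform.
runs = [(colour, len(list(group))) for colour, group in groupby(row)]
print(runs)  # [('red', 2), ('blue', 3), ('red', 1)]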
Example #7
import matplotlib.pyplot as plt
import json
import numpy as np
import sys
import objective
import heat
import plot_info
with open('objective_parameters.json') as f:
    objective_parameters = json.load(f)

objective_function = objective.Objective(**objective_parameters)

initial_data = objective_function.initial_data

dt = 1.0 / 2048
dx = 1.0 / 2048
end_time = objective_parameters['end_time']
solution = heat.solve_heat_equation(initial_data, dt, dx, end_time)
x = np.arange(0, 1, dx)
plt.plot(x, initial_data(x), label='initial')

plt.plot(x, solution, '*', label='numerical')
plt.plot(x, initial_data.exact_solution(x, end_time), label='exact')
plt.legend()
plot_info.showAndSave("heat_exact_solution")
axes[number_of_plots].plot(iterations,
                           np.ones_like(coefficients_per_iteration[:, 0]) *
                           objective_parameters['q'],
                           '--',
                           label='q',
                           color=plot_ref[0].get_color())
axes[number_of_plots].grid(True)
axes[number_of_plots].legend()

axes[number_of_plots].set_xlabel("Iteration")
plot_info.showAndSave("coefficients")

dx = 1.0 / 2048
x = np.arange(0, 1, dx)

objective_function = objective.Objective(**objective_parameters)
initial_data = objective_function.initial_data

end_time = objective_parameters['end_time']
#plt.plot(x, initial_data.exact_solution(x, end_time), label='exact true')

objective_function_approximated = objective.Objective(
    end_time=end_time,
    coefficients=coefficients_per_iteration[-1, 1:],
    q=coefficients_per_iteration[-1, 0],
    control_points=objective_parameters['control_points'])

plt.plot(x,
         objective_function_approximated.initial_data(x),
         '--',
         label='Initial data')