from numpy import zeros, linalg

def calculate_cubic_value_not_a_knot(t, y):
    """Solve for the second derivatives z of a cubic spline through (t, y)
    with not-a-knot end conditions; also return the knot spacings h."""
    n = len(t)
    h = []
    b = []
    v = [0]
    A = zeros(shape=(n, n))
    for i in range(0, n - 1):
        h.append(t[i + 1] - t[i])
        b.append(6 * (y[i + 1] - y[i]) / h[i])
    # Not-a-knot condition at the left end: h1*z0 - (h0 + h1)*z1 + h0*z2 = 0
    A[0][0] = h[1]
    A[0][1] = -h[1] - h[0]
    A[0][2] = h[0]
    # Interior continuity conditions on the second derivatives
    for i in range(1, n - 1):
        A[i][i - 1] = h[i - 1]
        A[i][i] = 2 * (h[i - 1] + h[i])
        A[i][i + 1] = h[i]
        v.append(b[i] - b[i - 1])
    v.append(0)
    # Not-a-knot condition at the right end (mirror of the left end)
    A[n - 1][n - 3] = h[n - 2]
    A[n - 1][n - 2] = -h[n - 2] - h[n - 3]
    A[n - 1][n - 1] = h[n - 3]
    z = linalg.solve(A, v)
    return z, h
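# A minimal usage sketch (not in the original source): z holds the second
# derivatives at the knots, so on [t[i], t[i+1]] the spline takes the
# standard form
#     S(x) = z[i]/(6*h[i]) * (t[i+1]-x)**3 + z[i+1]/(6*h[i]) * (x-t[i])**3
#          + (y[i]/h[i] - z[i]*h[i]/6) * (t[i+1]-x)
#          + (y[i+1]/h[i] - z[i+1]*h[i]/6) * (x-t[i]).
# eval_cubic is a hypothetical helper name; it assumes t[0] <= x <= t[-1].
def eval_cubic(t, y, z, h, x):
    # locate the interval [t[i], t[i+1]] containing x
    i = 0
    while i < len(t) - 2 and t[i + 1] <= x:
        i = i + 1
    u = t[i + 1] - x
    w = x - t[i]
    return ((z[i] * u ** 3 + z[i + 1] * w ** 3) / (6 * h[i])
            + (y[i] / h[i] - z[i] * h[i] / 6) * u
            + (y[i + 1] / h[i] - z[i + 1] * h[i] / 6) * w)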
def deq(self, x):
    """
    Right-hand side of the GLONASS broadcast-ephemeris differential
    equations in Earth-fixed coordinates.

    :param x: array of length 6 that contains coordinates and velocities
    :return: time derivative of the state vector
    """
    r2 = np.dot(x[:3], x[:3])
    r3 = r2 * sqrt(r2)
    omg2 = self.OMGE_GLO * self.OMGE_GLO
    if r2 <= 0:
        return zeros(6)
    deq_a = 1.5 * self.J2_GLO * self.MU_GLO * self.RE_GLO**2 / r2 / r3  # 3/2 * J2 * mu * Ae^2 / r^5
    deq_b = 5.0 * x[2] * x[2] / r2                                      # 5 * z^2 / r^2
    deq_c = -self.MU_GLO / r3 - deq_a * (1.0 - deq_b)                   # -mu/r^3 - a*(1 - b)
    xdot0_2 = x[3:6]
    xdot_3 = (deq_c + omg2) * x[0] + 2.0 * self.OMGE_GLO * x[4] + self.acc[0]
    xdot_4 = (deq_c + omg2) * x[1] - 2.0 * self.OMGE_GLO * x[3] + self.acc[1]
    xdot_5 = (deq_c - 2.0 * deq_a) * x[2] + self.acc[2]
    return np.append(xdot0_2, np.array([xdot_3, xdot_4, xdot_5]))
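# A minimal sketch (not part of the original code) of how deq is typically
# driven: GLONASS state propagation normally uses classic fourth-order
# Runge-Kutta over the broadcast state.  rk4_step and ts (step in seconds)
# are assumed names, not part of the original class.
def rk4_step(self, x, ts):
    # one RK4 step of the state vector x under the dynamics in deq
    k1 = self.deq(x)
    k2 = self.deq(x + k1 * ts / 2.0)
    k3 = self.deq(x + k2 * ts / 2.0)
    k4 = self.deq(x + k3 * ts)
    return x + (k1 + 2.0 * k2 + 2.0 * k3 + k4) * ts / 6.0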
def convert(self):
    self.g = g = Gcode(safetyheight=self.safetyheight,
                       tolerance=self.tolerance,
                       spindle_speed=self.spindle_speed,
                       units=self.units)
    g.begin()
    g.continuous(self.tolerance)
    g.safety()
    if self.roughing_delta and self.roughing_offset:
        base_image = self.image
        rough = make_tool_shape(ball_tool, 2 * self.roughing_offset,
                                self.pixelsize)
        w, h = base_image.shape
        tw, th = rough.shape
        w1 = w + tw
        h1 = h + th
        # Pad the image so the roughing tool can sweep past its edges
        nim1 = numarray.zeros((w1, h1), 'Float32') + base_image.min()
        nim1[tw / 2:tw / 2 + w, th / 2:th / 2 + h] = base_image
        self.image = numarray.zeros((w, h), type="Float32")
        # Dilate the surface by the roughing tool shape
        for j in range(0, w):
            progress(j, w)
            for i in range(0, h):
                self.image[j, i] = (nim1[j:j + tw, i:i + th] - rough).max()
        self.feed = self.roughing_feed
        r = -self.roughing_delta
        m = self.image.min()
        self.ro = self.roughing_offset
        # Roughing passes, stepping down by roughing_delta each time
        while r > m:
            self.rd = r
            self.one_pass()
            r = r - self.roughing_delta
        if r < m + epsilon:
            self.rd = m
            self.one_pass()
        self.image = base_image
        self.cache.clear()
    # Finishing pass over the original image
    self.feed = self.base_feed
    self.ro = 0
    self.rd = self.image.min()
    self.one_pass()
    g.end()
def setFromRotatedBasis(self, vecX, vecY, vecZ):
    # Build a rotation matrix whose columns are the normalized basis
    # vectors, then hand it to setFromRotationMatrix.
    m = Numeric.zeros([3, 3], 'd')  # 'd' (double): an integer-typed array would truncate the entries
    normX = vecX.norm()
    normY = vecY.norm()
    normZ = vecZ.norm()
    for i in range(3):
        m[i][0] = vecX[i] / normX
        m[i][1] = vecY[i] / normY
        m[i][2] = vecZ[i] / normZ
    self.setFromRotationMatrix(m)
from numpy import zeros, linalg

def calculate_quadratic_value_not_a_knot(t, y):
    """Solve for the knot slopes z of a quadratic spline through (t, y)."""
    n = len(t)
    d = [(y[1] - y[0]) / (t[1] - t[0])]
    A = zeros(shape=(n, n))
    A[0][0] = 1
    # z[i-1] + z[i] = 2 * (slope of segment i) for each interior condition
    for i in range(1, n):
        A[i][i - 1] = 1
        A[i][i] = 1
        d.append(2 * ((y[i] - y[i - 1]) / (t[i] - t[i - 1])))
    return linalg.solve(A, d)
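# A minimal usage sketch (not in the original source): with the returned
# slopes z, the quadratic piece on [t[i], t[i+1]] is
#     S(x) = y[i] + z[i]*(x - t[i]) + (z[i+1] - z[i]) / (2*h[i]) * (x - t[i])**2,
# where h[i] = t[i+1] - t[i].  eval_quadratic is a hypothetical helper
# name; it assumes t[0] <= x <= t[-1].
def eval_quadratic(t, y, z, x):
    # locate the interval [t[i], t[i+1]] containing x
    i = 0
    while i < len(t) - 2 and t[i + 1] <= x:
        i = i + 1
    h = t[i + 1] - t[i]
    dx = x - t[i]
    return y[i] + z[i] * dx + (z[i + 1] - z[i]) / (2 * h) * dx ** 2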
def scale(data):
    """Min-max scale each feature of data (a list of (x, label) pairs) in place."""
    m = line_len(data)
    x_min, x_max = numpy.array([1e10] * m), zeros(m)
    # First pass: per-feature minima and maxima
    for (x, _) in data:
        for i in range(m):
            x_min[i] = min(x_min[i], x[i])
            x_max[i] = max(x_max[i], x[i])
    # Second pass: rescale to [0, 1]; constant features map to 1
    for (x, _) in data:
        for i in range(m):
            x[i] = (x[i] - x_min[i]) / (x_max[i] - x_min[i]) \
                if x_max[i] != x_min[i] else 1
    return data
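# A minimal usage sketch (not in the original source); line_len is assumed
# to return the feature count of one sample.
data = [([2.0, 10.0], 1), ([4.0, 30.0], -1), ([3.0, 20.0], 1)]
scale(data)  # -> [([0.0, 0.0], 1), ([1.0, 1.0], -1), ([0.5, 0.5], 1)]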
def update(self, arr):
    """Update the accumulators of the StatsCollector given a complete
    matrix; assume that all observations have the same weight.  Properly
    handle missing values.
    """
    assert self.width() == arr.shape[1]

    ## Update number of elements counters
    (length, width) = arr.shape
    initial_n = self.n.copy()          # Keep old n for argmin/argmax
    n = zeros(width) + length
    missings = isnan(arr)
    nnan = sum(missings, 0)
    self.n += n
    self.nnan += nnan
    self.nnonnan += n - nnan

    ## Create masked version of arr and update accumulators
    ma = masked_array(arr, mask=missings)                # Mask missings only
    arr_nomissings = arr[~normal_sometrue(missings, 1)]  # Strip missing rows
    self.sum = self.sum + sum(ma, 0)                     # += does not work...
    self.sum_ssq = self.sum_ssq + sum(ma * ma, 0)        # += does not work...
    self.sum_xxt = self.sum_xxt + matrixmultiply(transpose(arr_nomissings),
                                                 arr_nomissings)
    self.sum_nomi = self.sum_nomi + sum(arr_nomissings, 0)
    self.nxxt += arr_nomissings.shape[0]

    ## Update (arg)min / make sure old argmin is kept if not updated
    ma_argmin = argmin(ma, 0)
    ma_min = ma[ma_argmin, range(width)]
    min_newpos = argmin(array([self.min, ma_min]), 0).astype('Bool')
    self.min[min_newpos] = ma_min[min_newpos]
    # XXX Argmin computation needs to be revised!  Does not work, at least
    # when passing array of shape (1,1).
    self.argmin[min_newpos] = ma_argmin[min_newpos] + initial_n[min_newpos]

    ## Update (arg)max / make sure old argmax is kept if not updated
    ma_argmax = argmax(ma, 0)
    ma_max = ma[ma_argmax, range(width)]
    max_newpos = argmax(array([self.max, ma_max]), 0).astype('Bool')
    self.max[max_newpos] = ma_max[max_newpos]
    # XXX Argmax computation needs to be revised!  Does not work, at least
    # when passing array of shape (1,1).
    self.argmax[max_newpos] = ma_argmax[max_newpos] + initial_n[max_newpos]
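# A minimal sketch (not part of the original class) of how the accumulators
# would typically be turned into statistics; the method name and the
# unbiased-variance choice are assumptions.
def stats(self):
    mean = self.sum / self.nnonnan
    # E[x^2] - E[x]^2, rescaled to the unbiased estimator; assumes at
    # least two non-missing observations per column
    var = (self.sum_ssq / self.nnonnan - mean * mean) \
        * self.nnonnan / (self.nnonnan - 1.0)
    return mean, var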
def damage_state_position(capacityDisp, demandDisp):
    """Mark, per accelerogram, the damage state that the demand falls in."""
    numberAccs = len(demandDisp)
    numberLS = len(capacityDisp)
    dsPositions = []
    for i in range(numberAccs):
        dsPositionsAcc = numarray.zeros(4)
        # Start by assuming the demand exceeds every limit state
        dsPositionsAcc[3] = dsPositionsAcc[3] + 1.0
        for j in range(numberLS):
            if demandDisp[i][j] < capacityDisp[j]:
                dsPositionsAcc[j] = dsPositionsAcc[j] + 1.0
                dsPositionsAcc[3] = dsPositionsAcc[3] - 1.0
                break
        dsPositions.append(dsPositionsAcc)
    return dsPositions
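# A minimal usage sketch (not in the original source): three limit-state
# capacities and the demand of two accelerograms; each returned row has a
# single 1.0 marking the damage state (last slot = beyond the last limit
# state).
capacity = [0.05, 0.10, 0.20]
demand = [[0.03, 0.03, 0.03],   # below LS1 -> [1, 0, 0, 0]
          [0.30, 0.30, 0.30]]   # above all -> [0, 0, 0, 1]
print damage_state_position(capacity, demand)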
def scanSequence(mix, bg, seq, scoring='mix'):
    """
    Scores all positions of a sequence with the given model and background.

    @param mix: MixtureModel object
    @param bg: background MixtureModel object
    @param seq: sequence as list of nucleotides
    @param scoring: flag to determine the scoring scheme used for the
        mixtures.  'compmax' means maximum density over the components,
        'mix' means true mixture density

    @return: list of position-wise log-odd scores
    """
    # convert sequence to internal representation, alphabet of seq must be DNA
    alph = mixture.Alphabet(['A', 'C', 'G', 'T'])
    f = lambda x: alph.internal(x)
    seq = map(f, seq)

    dnr = mix.components[0].dist_nr

    # init with dummy value at first position
    s = numarray.array([[-1] + seq[0:dnr - 1]])
    score = []
    for i in range(dnr - 1, len(seq), 1):
        # shift query sequence by one position
        s[0] = numarray.concatenate([s[0][1:], numarray.array([seq[i]])], 0)
        if scoring == 'compmax':
            # score as maximum over components; the component index must not
            # shadow the position index i
            c_m_l = numarray.zeros(mix.G, numarray.Float)
            for k in range(mix.G):
                c_m_l[k] = mix.components[k].pdf(s)[0]
            m_l = c_m_l.max()
        elif scoring == 'mix':
            m_l = mix.pdf(s)[0]
        bg_l = bg.pdf(s)[0]
        score.append(m_l - bg_l)
    return score
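# A minimal usage sketch (not in the original source); mix and bg are
# assumed to be pre-trained MixtureModel objects over DNA with matching
# dimensionality.  Positive log-odds mark positions favored by mix over
# the background.
scores = scanSequence(mix, bg, list("ACGTACGTAC"), scoring='mix')
hits = [p for p, s in enumerate(scores) if s > 0.0]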
def get_data():
    """Fetch the WDBC breast-cancer data set and min-max scale the features."""
    result = []
    file = urlopen("http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data")
    for line in file.readlines():
        array = line.decode("utf-8").split(',')
        # Prepend the bias feature 1.0; label is +1 for malignant, -1 for benign
        result.append(([1.0] + [float(f) for f in array[2:]],
                       1 if array[1] == "M" else -1))
    rlen = llength(result)
    x_min, x_max = numpy.array([1e10] * rlen), zeros(rlen)
    for (x, _) in result:
        for i in range(rlen):
            x_min[i] = min(x_min[i], x[i])
            x_max[i] = max(x_max[i], x[i])
    for (x, _) in result:
        for i in range(rlen):
            x[i] = (x[i] - x_min[i]) / (x_max[i] - x_min[i]) \
                if x_max[i] != x_min[i] else 1
    return result
#data.fromFiles(["filt_DAT_134.txt","DRD1_134.txt","DRD2_134.txt","DRD3_134.txt","DRD5_134.txt","filt_WISC_WIAT_DISC_134.txt"])
#m = mixture.readMixture("NEC_ADHD_struct_7.mix")
data.internalInit(m)

c = m.classify(data, entropy_cutoff=0.50)
data.printClustering(m.G, c)

m.getClusterEntropy(data)
m.evalStructure(data.headers)
print m.groups

# Matrix marking, per feature, which components share parameters
plot = numarray.zeros((m.G, len(m.leaders)))
for i in range(len(m.leaders)):
    # check for noise variables
    if len(m.leaders[i]) == 1:
        l = m.leaders[i][0]
        for g in range(m.G):
            plot[g, i] = 1
    else:
        for l in m.leaders[i]:
            if len(m.groups[i][l]) == 0:
                plot[l, i] = 2
            else:
                plot[l, i] = l + 3
                for g in m.groups[i][l]:
                    plot[g, i] = l + 3
print
def forget(self, fieldnames):
    """Reset all accumulators to zero.
    """
    width = len(fieldnames)
    self.fieldnames = fieldnames
    self.n        = zeros(width)           # Sum of nnonnan and nnan
    self.nnonnan  = zeros(width)           # Nb of non-missing elements
    self.nnan     = zeros(width)           # Nb of missing elements
    self.nxxt     = 0                      # Nb of elements in sum_xxt
    self.sum      = zeros(width)           # Sum of elements
    self.sum_ssq  = zeros(width)           # Sum of square of elements
    self.sum_xxt  = zeros((width, width))  # Accumulator of outer products
    self.sum_nomi = zeros(width)           # Sum of elements (no missings)
    self.min      = zeros(width) + PosInf  # Minimum element
    self.argmin   = zeros(width) - 1       # Position of minimum
    self.max      = zeros(width) + NegInf  # Maximum element
    self.argmax   = zeros(width) - 1       # Position of maximum
maxPeriod = 10.0
step = 0.04
damping = 0.05
spectraPeriods = demand_calculations.compute_spectra_periods(minPeriod, maxPeriod, step)
spectraDisp = demand_calculations.compute_spectra(minPeriod, maxPeriod, step,
                                                  damping, ACCELEROGRAMS, SPECTRA)
numberAccs = len(spectraDisp)
print 'Spectra produced'

# READ THE PORTFOLIO OF BUILDINGS
lines = portfolio_builder.parse_input(EXPOSURE)
assets_count = portfolio_builder.buildings_counter(lines)
number_categories = int(assets_count[0])
number_assets = assets_count[1]

# CREATE THE VECTOR FOR THE DAMAGE STATES AND BETA FOR INFILLED FRAMES
damageStates = numarray.zeros(4)
IMTs = ['PGA', 'PGV', 'Sa03', 'Saelastic']
elasticPeriods = []
betas = [0.52, 0.46, 0.28]

# COMPUTE THE DISPLACEMENT FOR EACH BUILDING
for asset_category in range(number_categories):
    for asset in range(number_assets[asset_category]):
        data = portfolio_builder.create_asset(lines[asset_category])
        code = data[1]
        if code == "Low_Code":
            ec_ls2 = 0.0035
            # ec_ls3 = 0.0075
            es_ls2 = 0.0150
print data
for i in range(10):
    for j in range(10):
        data[i][j] = float(i * j)
print data

fitsobj = pyfits.HDUList()
# create Primary HDU with minimal header keywords
hdu = pyfits.PrimaryHDU()
# add a 10x10 array of zeros with a small cross of counts at the centre
hdu.data = numarray.zeros((10, 10), type=numarray.Float32)
hdu.data[5][5] = 10.
hdu.data[4][5] = 5.
hdu.data[6][5] = 5.
hdu.data[5][4] = 5.
hdu.data[5][6] = 5.
print hdu.data
fitsobj.append(hdu)
# save to a file; the writeto method will make sure the required
# keywords are conforming to the data
fitsobj.writeto('tmp.fits')
def compute_disp(accs, accstep, period, damping):
    """Compute the peak displacement and acceleration response (Sd, Sa) of a
    single-degree-of-freedom oscillator via Newmark-beta (average
    acceleration) time stepping over the record accs."""
    fraction = 1 / 0.02
    pNocount = 0
    lfine = 0
    np = len(accs)
    pNocount = pNocount + 1
    MaxSteps = (np + 1) * (round(int(accstep * fraction / period)) + 1) + 1
    ugh = numarray.zeros(MaxSteps)
    fine = round(int(accstep * fraction / period)) + 1
    if fine != lfine:
        L = 1
        i = 1
        while i <= 1 + (np - 1) * fine:
            i = i + fine
            L = L + 1
        dt = accstep / fine
        lfine = fine
    M = 1 + (np - 1) * fine
    pNocount = 0
    xie = damping
    maxug = max(accs)
    lfine = 0
    pNocount = 1
    fine = round(int(accstep * fraction / period)) + 1
    if fine != lfine:
        L = -1
        i = 0
        while i <= (np - 1) * fine:
            i = i + fine
            L = L + 1
            ugh[i] = accs[L]
            # linear interpolation of the record onto the refined grid
            for M in range(int(fine)):
                ugh[i - M + 1] = ugh[i - fine] + (ugh[i] - ugh[i - fine]) * (fine - M + 1) / fine
        lfine = fine
        M = 1 + (np - 1) * fine
        dt = accstep / fine
    ncf = 2.0 * 3.14159265 / period  # natural circular frequency
    fraction = 1.0 / 0.02
    Gamma_Parm = 0.5   # Newmark gamma (average acceleration method)
    Beta_Parm = 0.25   # Newmark beta
    damp = 0.05
    THdsps = []
    THaccs = []
    U0 = 0.0
    U1 = 0.0
    V0 = 0.0
    V1 = 0.0
    A0 = 0.0
    A1 = 0.0
    xie = damping
    # Newmark time stepping over the interpolated record
    for i in range(int(M)):
        U1 = (-ugh[i + 1] + U0 / Beta_Parm / (dt * dt) + V0 / Beta_Parm / dt
              + (1.0 / 2.0 / Beta_Parm - 1.0) * A0
              + (U0 * Gamma_Parm / Beta_Parm / dt
                 + (Gamma_Parm / Beta_Parm - 1.0) * V0
                 + (Gamma_Parm / 2.0 / Beta_Parm - 1.0) * dt * A0)
              * 2.0 * xie * ncf) \
             / (1.0 / Beta_Parm / (dt * dt)
                + 2.0 * xie * ncf * Gamma_Parm / Beta_Parm / dt + ncf * ncf)
        V1 = (U1 - U0) * Gamma_Parm / Beta_Parm / dt \
             + (1.0 - Gamma_Parm / Beta_Parm) * V0 \
             + (1.0 - Gamma_Parm / 2.0 / Beta_Parm) * dt * A0
        A1 = (U1 - U0) / Beta_Parm / (dt * dt) - V0 / Beta_Parm / dt \
             + (1.0 - 1.0 / 2.0 / Beta_Parm) * A0
        U0 = U1
        V0 = V1
        A0 = A1
        THdsps.append(U1)
        THaccs.append(A1 + ugh[i + 1])
    Sa = max([math.fabs(min(THaccs)), max(THaccs)])
    Sd = max([math.fabs(min(THdsps)), max(THdsps)]) * 9.81 * 100
    return Sd, Sa
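# A minimal usage sketch (not in the original source): a 1 s period
# oscillator at 5% damping driven by a toy three-sample record with a
# 0.02 s step.  Judging by the 9.81 * 100 factor above, the record is
# presumably in g and Sd comes back in cm, but that is an inference.
Sd, Sa = compute_disp([0.0, 0.1, 0.0], 0.02, 1.0, 0.05)
print 'Sd =', Sd, 'Sa =', Sa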
def parse_input(path):
    file = open(path)
    lines = file.readlines()
    file.close()
    Latitude = []
    Longitude = []
    UniqueLocations = []
    UniqueAreas = []
    NumberUniqueLocations = 1
    Area = []
    Population = []
    numberAssets = len(lines)
    for line in lines:
        Longitude.append(line.split(',')[0].strip())
        Latitude.append(line.split(',')[1].strip())
        Area.append(line.split(',')[2].strip())
        Population.append(int(line.split(',')[3].strip()))
    UniqueLocations.append(str(Longitude[0]) + ',' + str(Latitude[0]))
    UniqueAreas.append(float(Area[0]))
    # Collect the distinct locations and their areas
    addLocation = 0
    for i in range(numberAssets):
        for j in range(NumberUniqueLocations):
            if str(Longitude[i]) + ',' + str(Latitude[i]) == UniqueLocations[j]:
                addLocation = 0
        if addLocation == 1:
            UniqueLocations.append(str(Longitude[i]) + ',' + str(Latitude[i]))
            UniqueAreas.append(float(Area[i]))
            NumberUniqueLocations = NumberUniqueLocations + 1
        addLocation = 1
    # Aggregate the population per unique location
    SumPopulation = numarray.zeros(NumberUniqueLocations)
    for i in range(numberAssets):
        for j in range(NumberUniqueLocations):
            if str(Longitude[i]) + ',' + str(Latitude[i]) == UniqueLocations[j]:
                SumPopulation[j] = SumPopulation[j] + Population[i]
    Ratios = [0.1, 0.1, 0.2, 0.3, 0.3]
    VulnerabilityFunction = ['HAZUS_W1_MC', 'HAZUS_C1L_MC', 'HAZUS_C1M_MC',
                             'HAZUS_C1H_MC', 'HAZUS_S1H_MC']
    Periods = ['0.00', '0.30', '0.8', '1.50', '2.20']
    Results = []
    print SumPopulation
    print UniqueLocations
    for typologies in range(5):
        for j in range(NumberUniqueLocations):
            print typologies
            Results.append(UniqueLocations[j] + ','
                           + str(UniqueAreas[j] * float(SumPopulation[j])
                                 * Ratios[typologies] * 1000 * 1000 * 100)
                           + ',' + Periods[typologies])
    for j in range(NumberUniqueLocations * 5):
        log(str(Results[j] + '\n'))
    for typologies in range(5):
        for j in range(NumberUniqueLocations):
            log1(VulnerabilityFunction[typologies] + '\n')
    print Results
pix = fitsobj[0].data
fitsobj.close()
try:
    xs = float(sys.argv[-2])
    ys = float(sys.argv[-1])
except:
    print 'error in shift values'
    sys.exit()
# split the shift into integer and fractional parts
xf = xs - int(xs)
xs = int(xs)
yf = ys - int(ys)
ys = int(ys)
out = numarray.zeros((ny, nx), 'Float32')
for j in range(ny):
    for i in range(nx):
        out[j][i] = float('nan')
# bilinear interpolation of the shifted image
for j in range(max(0, ys), min(ny, ny + ys)):
    for i in range(max(0, xs), min(nx, nx + xs)):
        out[j][i] = (1. - xf) * (1. - yf) * pix[j - ys][i - xs]
        out[j][i] = out[j][i] + xf * (1. - yf) * pix[j - ys][i - xs - 1]
        out[j][i] = out[j][i] + (1. - xf) * yf * pix[j - ys - 1][i - xs]
        out[j][i] = out[j][i] + xf * yf * pix[j - ys - 1][i - xs - 1]
fitsobj = pyfits.HDUList()
hdu = pyfits.PrimaryHDU()
hdu.data = out
fitsobj.append(hdu)
data = mix.sampleDataSet(500)
# print mix
mix.updateStructureGlobal(data)
# print mix
# print mix.groups
# print mix.leaders
# writeMixture(mix, "test.mix")
# mix.evalStructure(data.headers)

# Matrix of parameter-sharing groups: leader index + 1 in each column
plot = numarray.zeros((mix.G, mix.dist_nr))
for i in range(mix.dist_nr):
    for l in mix.leaders[i]:
        plot[l, i] = l + 1
        for g in mix.groups[i][l]:
            plot[g, i] = l + 1

print "Generating", get_loglikelihood(mix, data.internalData)
# print "G ="
# print range(0,len(plot[0]))
for p in plot:
    print p

for k in range(5):
    m = MixtureModel(6, [0.1, 0.1, 0.1, 0.2, 0.2, 0.3],
                     [pd, pd2, pd3, pd4, pd5, pd6],
                     struct=1)
def main():
    if len(sys.argv) > 1:
        im_name = sys.argv[1]
    else:
        import tkFileDialog, Tkinter
        im_name = tkFileDialog.askopenfilename(
            defaultextension=".png",
            filetypes=((_("Depth images"), ".gif .png .jpg"),
                       (_("All files"), "*")))
        if not im_name:
            raise SystemExit
        Tkinter._default_root.destroy()
        Tkinter._default_root = None
    im = Image.open(im_name)
    size = im.size
    im = im.convert("L")  # grayscale
    w, h = im.size
    nim = numarray.fromstring(im.tostring(), "UInt8", (h, w)).astype("Float32")
    options = ui(im, nim, im_name)

    step = options["pixelstep"]
    depth = options["depth"]

    if options["normalize"]:
        a = nim.min()
        b = nim.max()
        if a != b:
            nim = (nim - a) / (b - a)
    else:
        nim = nim / 255.0

    maker = tool_makers[options["tool_type"]]
    tool_diameter = options["tool_diameter"]
    pixel_size = options["pixel_size"]
    tool = make_tool_shape(maker, tool_diameter, pixel_size)

    if options["expand"]:
        if options["expand"] == 1:
            pixel = 1
        else:
            pixel = 0
        w, h = nim.shape
        tw, th = tool.shape
        w1 = w + 2 * tw
        h1 = h + 2 * th
        nim1 = numarray.zeros((w1, h1), "Float32") + pixel
        nim1[tw : tw + w, th : th + h] = nim
        nim = nim1
        w, h = w1, h1

    nim = nim * depth

    if options["invert"]:
        nim = -nim
    else:
        nim = nim - depth

    rows = options["pattern"] != 1
    columns = options["pattern"] != 0
    columns_first = options["pattern"] == 3
    spindle_speed = options["spindle_speed"]
    if rows:
        convert_rows = convert_makers[options["converter"]]()
    else:
        convert_rows = None
    if columns:
        convert_cols = convert_makers[options["converter"]]()
    else:
        convert_cols = None

    if options["bounded"] and rows and columns:
        slope = tan(options["contact_angle"] * pi / 180)
        if columns_first:
            convert_rows = Reduce_Scan_Lace(convert_rows, slope, step + 1)
        else:
            convert_cols = Reduce_Scan_Lace(convert_cols, slope, step + 1)
        if options["bounded"] > 1:
            if columns_first:
                convert_cols = Reduce_Scan_Lace(convert_cols, slope, step + 1)
            else:
                convert_rows = Reduce_Scan_Lace(convert_rows, slope, step + 1)

    units = unitcodes[options["units"]]
    convert(nim, units, tool, pixel_size, step,
            options["safety_height"], options["tolerance"],
            options["feed_rate"], convert_rows, convert_cols,
            columns_first, ArcEntryCut(options["plunge_feed_rate"], 0.125),
            spindle_speed, options["roughing_offset"],
            options["roughing_depth"], options["feed_rate"])