def tx(clk, frequency, audio_i, audio_q, audio_stb, interpolation_factor, lut_bits, channels):
    dlo_i, dlo_q = nco(clk, frequency, lut_bits, channels)
    dlo_i = [i.label("nco_i_%s" % idx) for idx, i in enumerate(dlo_i)]
    dlo_q = [i.label("nco_q_%s" % idx) for idx, i in enumerate(dlo_q)]
    lo_i = [i[i.subtype.bits - 1] for i in dlo_i]
    lo_q = [i[i.subtype.bits - 1] for i in dlo_q]

    audio_bits = audio_i.subtype.bits
    audio_i = interpolate(clk, audio_i, audio_stb, interpolation_factor, channels)
    audio_q = interpolate(clk, audio_q, audio_stb, interpolation_factor, channels)
    audio_i = [i.label("audio_i_%s" % idx) for idx, i in enumerate(audio_i)]
    audio_q = [i.label("audio_q_%s" % idx) for idx, i in enumerate(audio_q)]

    product_bits = audio_bits + lut_bits - 1
    rf_i = [
        ((a.resize(product_bits) * l) >> (lut_bits - 1)).resize(audio_bits + 1)
        for a, l in zip(audio_i, dlo_i)
    ]
    rf_q = [
        ((a.resize(product_bits) * l) >> (lut_bits - 1)).resize(audio_bits + 1)
        for a, l in zip(audio_q, dlo_q)
    ]
    rf_i = [i.subtype.register(clk, d=i) for i in rf_i]
    rf_q = [i.subtype.register(clk, d=i) for i in rf_q]
    rf_i = [i.label("rf_i_%s" % idx) for idx, i in enumerate(rf_i)]
    rf_q = [i.label("rf_q_%s" % idx) for idx, i in enumerate(rf_q)]

    rf = [i.subtype.register(clk, d=i + q) for i, q in zip(rf_i, rf_q)]
    rf = [i.label("rf_full_%s" % idx) for idx, i in enumerate(rf)]
    rf = [dither(clk, i, True) for i in rf]
    return rf, lo_i, lo_q
def __init__(self, number_of_particles=1e5, initial_energy=0.1405, E=1e-3, W=1e-2):
    """
    number_of_particles: number of particles to simulate, default 1e5
    initial_energy: initial energy (MeV), default 0.1405
    E: minimum energy for a particle to survive, default 1e-3
    W: minimum weight for a particle to survive, default 1e-2

    Sanitizes the input and creates the interpolate and particle instances.
    The densities of water and lead are deliberately given one order of
    magnitude too low, which implicitly converts the cross sections to 1/mm.
    """
    for value in (number_of_particles, initial_energy):
        try:
            float(value)
        except (TypeError, ValueError):
            print("{0} is not a valid number.".format(value))
    self.init_E = initial_energy
    self.init_count = number_of_particles
    self.particles = particles(number_of_particles, self.init_E, E, W)
    self.water = ip.interpolate("CrossSectWasser.txt", 0.1)
    self.water.set_name(1, "scatter")
    self.water.set_name(2, "photo")
    self.lead = ip.interpolate("CrossSectBlei.txt", 1.134)
    self.lead.set_name(1, "photo")
    self.water_mask = np.ones(self.init_count, dtype=bool)
    self.update_xsect()
    self.initial_move()
def respond(self, config, context):
    headers = {}
    # Interpolate each configured header value with the current context.
    for header, value in config.get("headers", {}).items():
        headers[header] = interpolate(value, **context)
    body = interpolate(config.get("body", ""), **context)
    return config["status-code"], headers, body
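# A hedged usage sketch for respond() above.  Assuming `interpolate` expands
# ${...} placeholders from the keyword context (as the saga test later in this
# section suggests), a responder instance (hypothetical name `responder`) could
# be driven like this; the config keys mirror those read by respond().
config = {
    "status-code": 200,
    "headers": {"Content-Type": "text/plain"},
    "body": "Hello ${user}",
}
context = {"user": "alice"}
status, headers, body = responder.respond(config, context)
# expected: status == 200 and body == "Hello alice" if ${user} expands as assumed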
def iteration(self, k, t0, dt, **kw):
    """Perform one PFASST iteration."""

    rank = self.mpi.rank
    state = self.state
    levels = self.levels

    T = levels[0]   # finest/top level
    B = levels[-1]  # coarsest/bottom level

    state.set(iteration=k, cycle=0)
    T.call_hooks("pre-iteration", **kw)

    # post receive requests
    for F in levels[:-1]:
        F.post_receive((F.level + 1) * 100 + k)

    # down
    for F, G in self.fine_to_coarse:
        state.increment_cycle()
        for s in range(F.sweeps):
            F.sdc.sweep(t0, dt, F, **kw)
        F.send((F.level + 1) * 100 + k, blocking=False)
        restrict_time_space(t0, dt, F, G, **kw)

    # bottom
    state.increment_cycle()
    B.receive((B.level + 1) * 100 + k, blocking=True)
    for s in range(B.sweeps):
        B.sdc.sweep(t0, dt, B, **kw)
    B.send((B.level + 1) * 100 + k, blocking=True)

    # up
    for F, G in self.coarse_to_fine:
        state.increment_cycle()
        interpolate_time_space(t0, F, G, **kw)
        F.receive((F.level + 1) * 100 + k, **kw)
        if rank > 0:
            interpolate(F.q0, G.q0, F, G, **kw)
        if F.level != 0:
            for s in range(F.sweeps):
                F.sdc.sweep(t0, dt, F, **kw)

    # done
    state.set(cycle=0)
    T.call_hooks("post-iteration", **kw)
def startup(self, persistent):
    self.persist = persistent
    self.color = (self.persist["choice"] if "choice" in self.persist
                  else COLORS[random.randint(0, len(COLORS) - 1)])
    if "random" in self.persist:
        self.intensity = self.persist["random"]
    else:
        r = COLORS[random.randint(0, len(COLORS) - 1)][1]
        self.intensity = [r.r, r.g, r.b]
    # self.rt = interpolate(*self.intensity)
    self.rt = interpolate(self.color[1].r, self.color[1].g, self.color[1].b)
    # Estonian UI strings: "Lainepikkus" = wavelength, "valgustugevus" = light intensity.
    self.rgb_txt = [
        "Lainepikkus " + str(RGB[0][5]) + " nm, valgustugevus " +
        str(100 * self.color[1].r // 255) + "%",
        "Lainepikkus " + str(RGB[1][5]) + " nm, valgustugevus " +
        str(100 * self.color[1].g // 255) + "%",
        "Lainepikkus " + str(RGB[2][5]) + " nm, valgustugevus " +
        str(100 * self.color[1].b // 255) + "%",
    ]
    #self.rgb_txt = [
    #    "Lainepikkus " + str(RGB[0][5]) + " nm, valgustugevus [ ]",
    #    "Lainepikkus " + str(RGB[1][5]) + " nm, valgustugevus [ ]",
    #    "Lainepikkus " + str(RGB[2][5]) + " nm, valgustugevus [ ]",
    #]
    self.color_txt = ("Domineeriv lainepikkus " + str(self.color[5]) +
                      " nm, valgustugevus " + str(self.color[4]) + "%")
    self.title_with_color = self.title + " - " + self.color[0]
def test_interpolate_degree_3():
    points = [(1, 1), (2, 0), (-3, 2), (4, 4)]
    interpolating_polynomial = interpolate(points)
    evaluations = [interpolating_polynomial.evaluateAt(x) for (x, _) in points]
    for (p, y) in zip(points, evaluations):
        assert p[1] == pytest.approx(y)
def test_interpolate_degree_3():
    points = [(1, 1), (2, 0), (-3, 2), (4, 4)]
    actual_polynomial = interpolate(points)
    expected_evaluations = points
    actual_evaluations = [(x, actual_polynomial.evaluateAt(x))
                          for (x, y) in expected_evaluations]
    for (p1, p2) in zip(expected_evaluations, actual_evaluations):
        for (a, b) in zip(p1, p2):
            assert_that(a).is_close_to(b, EPSILON)
def main(argc, argv):
    # Both an input and an output filename are required.
    if argc < 3:
        print('Usage: ./interpolate.py <input filename>.csv <output filename>.csv')
        sys.exit(1)
    grid = grid_readval_csv(argv[1])
    poly = ip.interpolate(integrate(grid))
    pd.DataFrame(poly).to_csv(argv[2])
def touch_move(start, end, duration, step):
    """Move while keeping the touch pressed (drag gesture)."""
    device = DeviceInitializer.device()
    positions = interpolate(start, end, step)
    sleep_per_move = max(float(duration) / step, __EVENT_REGISTERING_TIME)
    for pos in positions[1:]:
        device.touch_move(*pos)
        time.sleep(sleep_per_move)
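# A minimal sketch (an assumption, not the actual helper used above) of an
# `interpolate(start, end, step)` that yields step + 1 evenly spaced (x, y)
# positions from `start` to `end`, which is the shape of result touch_move
# iterates over (skipping the starting point).
def interpolate(start, end, step):
    (x0, y0), (x1, y1) = start, end
    return [(x0 + (x1 - x0) * i / float(step),
             y0 + (y1 - y0) * i / float(step))
            for i in range(step + 1)]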
def read_data(file):
    accel = pd.read_csv(file)

    # Crop out invalid times
    current_time = time.time() * 1000  # Current time in ms
    accel = accel[accel['timestamp'] < current_time]

    # Get the time and L2 values
    ip = interp.interpolate(accel)
    df = pd.DataFrame()
    df['t'] = ip.index
    df['L2'] = ip.values
    return df
def smooth_metrics(quaternions, distances, metrics):
    from interpolate import interpolate

    smoothed = []
    for i in range(len(metrics)):
        # print(i)
        interpolated = interpolate(quaternions[i], distances[i],
                                   quaternions, distances, metrics,
                                   sigma_d=.02, sigma_a=(16 * np.pi / 180))
        smoothed.append(interpolated)
        # print(interpolated - metrics[one])
    return smoothed
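# A hedged sketch of what such an `interpolate` typically computes: a
# Gaussian-kernel weighted average of `metrics`, with one kernel over the
# distance difference (bandwidth sigma_d) and one over the angular difference
# between quaternions (bandwidth sigma_a).  This is an assumed form for
# illustration only, not the project's actual implementation.
import numpy as np

def kernel_smooth(q0, d0, quaternions, distances, metrics,
                  sigma_d=.02, sigma_a=(16 * np.pi / 180)):
    q0 = np.asarray(q0, dtype=float)
    q0 = q0 / np.linalg.norm(q0)
    qs = np.asarray(quaternions, dtype=float)
    qs = qs / np.linalg.norm(qs, axis=1, keepdims=True)
    # angular difference between unit quaternions
    dots = np.clip(np.abs(qs @ q0), -1.0, 1.0)
    ang = 2.0 * np.arccos(dots)
    dd = np.asarray(distances, dtype=float) - d0
    w = np.exp(-0.5 * (dd / sigma_d) ** 2) * np.exp(-0.5 * (ang / sigma_a) ** 2)
    return np.average(np.asarray(metrics, dtype=float), axis=0, weights=w)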
def generate_image():
    operation = json.loads(request.args.get('operation'))
    print operation
    if operation == 0:
        z = json.loads(request.args.get('new_z'))
        z = to_var(z)
        if z.dim() == 1:
            z = z.unsqueeze(0)
        fake_image = g(z)
        fake_image = (denorm(fake_image.squeeze().data).cpu() * 255).long()
        fake_image = fake_image.numpy().transpose(1, 2, 0).reshape(-1).tolist()
    else:
        z1 = json.loads(request.args.get('new_z1'))
        z2 = json.loads(request.args.get('new_z2'))
        z3 = json.loads(request.args.get('new_z3'))
        z4 = json.loads(request.args.get('new_z4'))
        fake_image = interpolate(z1, z2, z3, z4)
    print len(fake_image)
    return json.dumps({'generated_image': fake_image})
def get_val(list1, year, extrapol=1):
    '''
    Returns the value for a given year in a list1 of TimeParam objects.
    Interpolation is used when the year is not in list1.
    If extrapol == 1, the first or last given value is returned when the
    year is out of range.
    '''
    years = []
    vals = []
    if (len(list1) > 0):
        prev_year = float(list1[0].get_year()) - 1.
        for item in range(len(list1)):
            if (list1[item].get_val() != None):
                years.append(float(list1[item].get_year()))
                if (years[-1] < prev_year):
                    raise MyError("TIMEPARAM.get_val: list is not sorted.")
                prev_year = years[-1]
                vals.append(list1[item].get_val())
    return interpolate.interpolate(year, years, vals, extrapol=extrapol)
def photo_average(values, time_start, time_end):
    '''
    Returns the average of the daily photosynthesis over the period
    time_start - time_end.  time_start and time_end are given in years.
    The photosynthesis list holds one value per day for a single year.
    '''
    rel_time1 = time_start - int(time_start)
    rel_time2 = time_end - int(time_end)
    #print("REL TIME: ", rel_time1, rel_time2)
    if (time_end - time_start >= 1.0):
        avg_year = sum(values) / float(len(values))
        factor = float(int(time_end - time_start))
    else:
        avg_year = 0.0
        factor = 0.0
    #print("AVG_YEAR,FACTOR,TIMEDIFF: ", avg_year, factor, time_end - time_start)

    # This is part of a year.
    if (rel_time1 == rel_time2):
        return interpolate.interpolate(rel_time1, fdays, values)
    elif (rel_time1 < rel_time2):
        #print("START SINGLE: ", len(fdays), len(values))
        avg_period = general_func.avg_within_range_2dim(fdays, values,
                                                        rel_time1, rel_time2)
        #print("SINGLE: ", avg_period)
        try:
            return ((rel_time2 - rel_time1) * avg_period + factor * avg_year) / \
                   (rel_time2 - rel_time1 + factor)
        except ZeroDivisionError:
            return general_func.sum_within_range_2dim(fdays, values,
                                                      rel_time1, rel_time2) * 0.5
    else:
        # It is an average of two periods
        #print("START DOUBLE: ")
        period1 = general_func.avg_within_range_2dim(fdays, values, 0.0, rel_time2)
        period2 = general_func.avg_within_range_2dim(fdays, values, rel_time1,
                                                     fdays[-1] + 0.0001)
        #print("DOUBLE: ", period1, period2)
        return (period1 * rel_time2 + period2 * (1.0 - rel_time1) + factor * avg_year) / \
               (1.0 - rel_time1 + rel_time2 + factor)
def calc_significance():
    data = json.loads(request.form.get('data'))
    lat = np.array([i[1] for i in DATA])
    lon = np.array([i[2] for i in DATA])
    val = np.log(np.array([i[0] for i in DATA]))
    smooth = interpolate(lat, lon, val)

    x = np.array([i[1] for i in data])
    y = np.array([i[2] for i in data])
    # cpm to microsievert to nanogray per hour
    z = np.log(np.array([i[0] for i in data]))
    smooth.pick_points(x, y)

    choice = json.loads(request.form.get('choice'))
    s2_k = np.zeros(len(x))
    if choice == "Inverse Distance Weighting":
        smooth.simple_idw()
    elif choice == "Radial Basis Network":
        smooth.rbf()
    elif choice == "Ordinary Kriging":
        smooth.kriging()
        s2_k = np.sqrt(smooth.s2_k) * 1.96  # 95% CI
    z_smooth = smooth.z

    ## run cross validation
    cv_results = cv(choice, lat, lon, val)

    # with open('dump2.json', 'w') as outfile:
    #     json.dump(data, outfile)

    return jsonify(result=(z / z_smooth).tolist(),
                   cv_results=cv_results,
                   z=z.tolist(),
                   z_smooth=z_smooth.tolist(),
                   s2_k=s2_k.tolist())
def initialCor(testInstance):
    pos = []
    pos = getNeighbors(trainingSet, testInstance, 3)
    x_cor = 0.0
    y_cor = 0.0
    k = 3
    if len(pos) < 3 or pos[0][1] > 0.75:
        x_cor = pos[0][0][7]
        y_cor = pos[0][0][8]
    else:
        for i in range(k):
            x_cor += pos[i][0][7] * pos[i][1]
            y_cor += pos[i][0][8] * pos[i][1]
    print x_cor, y_cor
    location = interpolate.interpolate(x_cor, y_cor)
    f = open('db.json', 'wb')
    f.write('[{"geometry": {"type": "Point", "coordinates": [' +
            str(location[0]) + ',' + str(location[1]) +
            ']}, "type": "Feature", "properties": {}}]')
    f.close()
    return x_cor, y_cor
def doRecord(self):
    if self.config['RECORD_TYPE'] == 1:
        # we'll always record the data at elements
        self.recdata_depths = list(self.xs_e)

        # add new record grid for this timestep
        self.recdata.append(Grid.GriddedData(self.ne, self.xs_e))
        self.recdata[-1].addMetaData("time", self.time)

        # add data to the grid
        for dataname in self.recdata_sources.keys():
            if dataname[0] != "!":
                # interpolatable data
                self.recdata[-1].addData(
                    dataname,
                    ip.interpolate(self.xs_e,
                                   eval(self.recdata_sources[dataname]),
                                   self.recdata_depths))
            else:
                # non-interpolatable data
                self.recdata[-1].addData(
                    dataname, eval(self.recdata_sources[dataname]))
def test_interpolate():
    tests = (
        (((1, 3, 5), (1, 9, 25), 2), 5.0,
         'closest values: x[0] == 1 < 2.0 < x[1] == 3; y[0] == 1 and y[1] == 9; '
         'a = 4.0, b = -3.0, answer = 5.0'),
        (((1, 3, 5), (1, 9, 25), 4), 17.0,
         'closest values: x[1] == 3 < 4.0 < x[2] == 5; y[1] == 9 and y[2] == 25; '
         'a = 8.0, b = -15.0, answer = 17.0'),
        (((1, 3, 5), (1, 9, 25), 1.25), 2.0,
         'closest values: x[0] == 1 < 1.25 < x[1] == 3; y[0] == 1 and y[1] == 9; '
         'a = 4.0, b = -3.0, answer = 2.0'),
        (((1, 3, 5), (1, 9, 25), 1.5), 3.0,
         'closest values: x[0] == 1 < 1.5 < x[1] == 3; y[0] == 1 and y[1] == 9; '
         'a = 4.0, b = -3.0, answer = 3.0'),
        (((1, 3, 5), (1, 9, 25), 1.75), 4.0,
         'closest values: x[0] == 1 < 1.75 < x[1] == 3; y[0] == 1 and y[1] == 9; '
         'a = 4.0, b = -3.0, answer = 4.0'),
    )
    for (x, y, target), correct, message in tests:
        result = interpolate(x, y, target)
        assert abs(result - correct) < 1e-6, message
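# A minimal sketch (an assumption, not necessarily the implementation under
# test) of a piecewise-linear interpolate(x, y, target) consistent with the
# slopes (a) and intercepts (b) spelled out in the test messages above.
def interpolate(x, y, target):
    for i in range(len(x) - 1):
        if x[i] <= target <= x[i + 1]:
            a = (y[i + 1] - y[i]) / float(x[i + 1] - x[i])  # slope
            b = y[i] - a * x[i]                             # intercept
            return a * target + b
    raise ValueError("target outside the tabulated range")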
def translate_image(image_path, transformations_path, quality):
    img = load_image_file(image_path)
    transformations = load_trans_file(transformations_path)
    new_img, mat, inv_mat = transform.apply_trans_on_img(transformations, img)
    if quality == "N":
        interpolate(new_img, img, inv_mat, interpolate_nearest)
    elif quality == "B":
        interpolate(new_img, img, inv_mat, interpolate_bilinear)
    elif quality == "C":
        img = add_margins(img)
        interpolate(new_img, img, inv_mat, interpolate_cubic)
    else:
        print("Invalid Input")
        return
    cv2.imwrite('out_{0}.png'.format(quality), new_img)
def test_line(self):
    self.assertEqual(interpolate(1., 10., lambda x: 1. + 2. * x, 2.), 5.)
    self.assertEqual(interpolate(10., 10., lambda x: 1. + 2. * x, 10.), 21.)
def buck(L,log,log_dia): (prices,price_skew) = buckPCh.get_prices() Li = [L,0,0,0,0,0] #length iterators #Lengths-to-Check Vector (255 terminated) LCV = [36,40,38,34,32,30,28,26,24,22,20,18,16,255] p16 = prices[0] p30 = prices[1] p36 = prices[2] it = [255,255,255,255,255,255] #iteration tracker (this currently is used # to track index of LCV) p = [0,0,0,0,0,0] #price tracker p1 = [0,0,0,0,0,0] p2 = [0,0,0,0,0,0] v = [0,0,0,0,0,0] #volume tracker v1 = [0,0,0,0,0,0] v2 = [0,0,0,0,0,0] td = [0,0,0,0,0,0] #top diameter tracker td1 = [0,0,0,0,0,0] td2 = [0,0,0,0,0,0] Lf = [0,0,0,0,0,0] #lengths tracker Lf2 = [0,0,0,0,0,0] #secondary lengths tracker lognum = 5 #log number control variable min_length = 100 #minimum log length variable for entry in LCV: #find minimum length if min_length > entry: min_length = entry s=0 while s >= 0: if it[s] == 255: #eg "top" of tree it[s] = 0 # (there will never be 255 LCV elements) for entry in LCV: if (entry + 0.8333) <= Li[s]: Li[s] = entry + 0.8333 it[s] = it[s] + 1 break it[s] = it[s] + 1 it[s] = it[s] - 1 if entry == 255: print "\n Too short!\n" break else: #middle of tree Li[s] = 0 Li[s+1] = 0 it[s] = it[s] + 1 while (L - sum(Li)) < (LCV[it[s]] + 0.8333): if (LCV[it[s]] == 255): break it[s] = it[s] + 1 if (LCV[it[s]] == 255) & (s == 0): break # END! QUIT! VAMOS! NOW! if (LCV[it[s]] == 255): #clear all previous log lengths from the top of the tree if (s+1) < len(Li): Li[s+1] = 0 Li[s] = 0 p[s] = 0 v[s] = 0 td[s] = 0 it[s] = 255 # there will never be 255 LCV elements s = s - 1 sum_Li = sum(Li) continue Li[s] = LCV[it[s]] + 0.8333 # print 'log loop %i\n' %s # print 'Li[s] = %0.4f\n' %Li[s] # print 'it[s] = %i\n' %it[s] #calculate length price dia = interpolate.interpolate(sum(Li),log,log_dia) dia = int(dia) #-->FIXME: Look at this later td[s] = dia v[s] = logvolume_2.logvolume_2(Li[s],dia) p[s] = buck1p.buck1p(Li[s],v[s],p16,p30,p36,price_skew) Li[s+1] = L - sum(Li) #bump remaining length ahead sum_p = sum(p) if sum_p >= sum(p1): p2 = copy(p1) p1 = copy(p) v2 = copy(v1) v1 = copy(v) td2 = copy(td1) td1 = copy(td) Lf2 = copy(Lf) Lf = copy(Li) elif sum_p >= sum(p2): p2 = copy(p) v2 = copy(v) td2 = copy(td) Lf2 = copy(Li) if (Li[s+1] >= (min_length + 0.8333)) & (s < (lognum - 1)): s = s + 1 return (Lf,v1,td1,p1,Lf2,v2,td2,p2)
def test_flat(self):
    self.assertEqual(interpolate(1., 10., lambda x: 4., 2.), 4.)
def process_dat_adaf(adaf_obj):
    remove_bad_signals(adaf_obj)
    new_adaf_obj = interpolate(adaf_obj)
    return new_adaf_obj
def renderOpencv(ffmpeg, ffprobe, vidFile: str, args, chunks: list, speeds: list, fps, has_vfr, effects, temp, log): import cv2 if(has_vfr): cmd = ['-i', vidFile, '-map', '0:v:0', '-vf', f'fps=fps={fps}', '-r', str(fps), '-vsync', '1', '-f','matroska', '-vcodec', 'rawvideo', 'pipe:1'] fileno = ffmpeg.Popen(cmd).stdout.fileno() cap = cv2.VideoCapture('pipe:{}'.format(fileno)) else: cap = cv2.VideoCapture(vidFile) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fourcc = cv2.VideoWriter_fourcc(*'mp4v') if(args.scale != 1): width = int(width * args.scale) height = int(height * args.scale) if(width < 2 or height < 2): log.error('Resolution too small.') log.debug(f'\n Resolution {width}x{height}') out = cv2.VideoWriter(f'{temp}/spedup.mp4', fourcc, fps, (width, height)) totalFrames = chunks[len(chunks) - 1][1] cframe = 0 cap.set(cv2.CAP_PROP_POS_FRAMES, cframe) remander = 0 framesWritten = 0 videoProgress = ProgressBar(totalFrames, 'Creating new video', args.machine_readable_progress, args.no_progress) def findState(chunks, cframe) -> int: low = 0 high = len(chunks) - 1 while low <= high: mid = low + (high - low) // 2 if(cframe >= chunks[mid][0] and cframe < chunks[mid][1]): return chunks[mid][2] elif(cframe > chunks[mid][0]): low = mid + 1 else: high = mid - 1 # cframe not in chunks return 0 import numpy as np from interpolate import interpolate def values(val, log, _type, totalFrames, width, height): if(val == 'centerX'): return int(width / 2) if(val == 'centerY'): return int(height / 2) if(val == 'start'): return 0 if(val == 'end'): return totalFrames - 1 if(val == 'width'): return width if(val == 'height'): return height if(not isinstance(val, int) and not (val.replace('.', '', 1)).replace('-', '', 1).isdigit()): log.error(f'Variable {val} not implemented.') return _type(val) effect_sheet = [] for effect in effects: if(effect[0] == 'rectangle'): rectx1_sheet = np.zeros((totalFrames + 1), dtype=int) recty1_sheet = np.zeros((totalFrames + 1), dtype=int) rectx2_sheet = np.zeros((totalFrames + 1), dtype=int) recty2_sheet = np.zeros((totalFrames + 1), dtype=int) rectco_sheet = np.zeros((totalFrames + 1, 3), dtype=int) rect_t_sheet = np.zeros((totalFrames + 1), dtype=int) r = effect[1:] for i in range(6): r[i] = values(r[i], log, int, totalFrames, width, height) rectx1_sheet[r[0]:r[1]] = r[2] recty1_sheet[r[0]:r[1]] = r[3] rectx2_sheet[r[0]:r[1]] = r[4] recty2_sheet[r[0]:r[1]] = r[5] rectco_sheet[r[0]:r[1]] = r[6] rect_t_sheet[r[0]:r[1]] = r[7] effect_sheet.append( ['rectangle', rectx1_sheet, recty1_sheet, rectx2_sheet, recty2_sheet, rectco_sheet, rect_t_sheet] ) if(effect[0] == 'zoom'): zoom_sheet = np.ones((totalFrames + 1), dtype=float) zoomx_sheet = np.full((totalFrames + 1), int(width / 2), dtype=float) zoomy_sheet = np.full((totalFrames + 1), int(height / 2), dtype=float) z = effect[1:] z[0] = values(z[0], log, int, totalFrames, width, height) z[1] = values(z[1], log, int, totalFrames, width, height) if(z[7] is not None): # hold value z[7] = values(z[7], log, int, totalFrames, width, height) if(z[7] is None or z[7] > z[1]): zoom_sheet[z[0]:z[1]] = interpolate(z[2], z[3], z[1] - z[0], log, method=z[6]) else: zoom_sheet[z[0]:z[0]+z[7]] = interpolate(z[2], z[3], z[7], log, method=z[6]) zoom_sheet[z[0]+z[7]:z[1]] = z[3] zoomx_sheet[z[0]:z[1]] = values(z[4], log, float, totalFrames, width, height) zoomy_sheet[z[0]:z[1]] = values(z[5], log, float, totalFrames, width, height) effect_sheet.append( ['zoom', zoom_sheet, zoomx_sheet, zoomy_sheet] ) while 
cap.isOpened(): ret, frame = cap.read() if(not ret or cframe > totalFrames): break for effect in effect_sheet: if(effect[0] == 'rectangle'): x1 = int(effect[1][cframe]) y1 = int(effect[2][cframe]) x2 = int(effect[3][cframe]) y2 = int(effect[4][cframe]) if(x1 == y1 and y1 == x2 and x2 == y2 and y2 == 0): pass else: np_color = effect[5][cframe] color = (int(np_color[0]), int(np_color[1]), int(np_color[2])) t = int(effect[6][cframe]) frame = cv2.rectangle(frame, (x1,y1), (x2,y2), color, thickness=t) if(effect[0] == 'zoom'): zoom = effect[1][cframe] zoom_x = effect[2][cframe] zoom_y = effect[3][cframe] # Resize Frame new_size = (int(width * zoom), int(height * zoom)) if(zoom == 1 and args.scale == 1): blown = frame elif(new_size[0] < 1 or new_size[1] < 1): blown = cv2.resize(frame, (1, 1), interpolation=cv2.INTER_AREA) else: inter = cv2.INTER_CUBIC if zoom > 1 else cv2.INTER_AREA blown = cv2.resize(frame, new_size, interpolation=inter) x1 = int((zoom_x * zoom)) - int((width / 2)) x2 = int((zoom_x * zoom)) + int((width / 2)) y1 = int((zoom_y * zoom)) - int((height / 2)) y2 = int((zoom_y * zoom)) + int((height / 2)) top, bottom, left, right = 0, 0, 0, 0 if(y1 < 0): top = -y1 y1 = 0 if(x1 < 0): left = -x1 x1 = 0 frame = blown[y1:y2+1, x1:x2+1] bottom = (height + 1) - (frame.shape[0]) - top right = (width + 1) - frame.shape[1] - left frame = cv2.copyMakeBorder( frame, top = top, bottom = bottom, left = left, right = right, borderType = cv2.BORDER_CONSTANT, value = args.background ) if(frame.shape != (height+1, width+1, 3)): # Throw error so that opencv dropped frames don't go unnoticed. print(f'cframe {cframe}') log.error(f'Wrong frame shape. was {frame.shape},' \ f' should be {(height+1, width+1, 3)} ') if(effects == [] and args.scale != 1): inter = cv2.INTER_CUBIC if args.scale > 1 else cv2.INTER_AREA frame = cv2.resize(frame, (width, height), interpolation=inter) cframe = int(cap.get(cv2.CAP_PROP_POS_FRAMES)) # current frame state = findState(chunks, cframe) mySpeed = speeds[state] if(mySpeed != 99999): doIt = (1 / mySpeed) + remander for __ in range(int(doIt)): out.write(frame) framesWritten += 1 remander = doIt % 1 videoProgress.tick(cframe) log.debug(f'\n - Frames Written: {framesWritten}') log.debug(f' - Total Frames: {totalFrames}') cap.release() out.release() cv2.destroyAllWindows() cmd = properties(['-i', vidFile], args, vidFile, ffprobe) cmd.append(f'{temp}/spedup.mp4') ffmpeg.run(cmd) if(log.is_debug): log.debug('Writing the output file.') else: log.conwrite('Writing the output file.')
def test_interpolate_degree_0():
    assert interpolate([(1, 2)]).coefficients == [2]
#### plot single value fields plt.close('all') plt.figure() depthres = 60 times = np.zeros(len(stablephases)) xs = np.linspace(0,35e3,depthres) recfields = ['T', 'rho', 'Vs', 'Vp'] plotsqr = int(len(recfields)**0.5-1e-12)+1 TDW = np.zeros((len(recfields), depthres, len(cr.recdata))) for i in range(len(cr.recdata)): sys.stdout.write(" " + str(i) + "\r") sys.stdout.flush() for ifield in range(len(recfields)): fieldvals = np.array(ip.interpolate(cr.recdata[i].grid.xs, cr.recdata[i].data[recfields[ifield]], xs)) TDW[ifield,:,i] = fieldvals times[i] = cr.recdata[i].metadata["time"] XS, YS = np.meshgrid(times/(SECINYR*1e3), -xs) for i in range(len(recfields)): plt.subplot(plotsqr,plotsqr,i) CS = plt.contourf(XS, YS, TDW[i,:,:], 30) cbar = plt.colorbar(CS) plt.title(recfields[i]) #### plot phase assemblages and weight percentages plt.figure() depthres = 60 TDA = np.zeros((depthres, len(stablephases)))
def sqmcl(): p = [] N = 1000 ''' for i in range(N): x=human_pos() p.append(x) ''' output = conn.recv(2048) values = parser.firstParser(output) strength = values[0] var1, var2 = initialCor(trainingSet, values[0]) #print var1,var2 robbie = human_pos() po = [] for o in range(N): par = human_pos() circle_x = var1 circle_y = var2 circle_r = 2 # random angle alpha = 2 * math.pi * random.random() # random radius r = circle_r * random.random() # calculating cooringates x = abs(r * math.cos(alpha) + circle_x) y = abs(r * math.sin(alpha) + circle_y) orientation = random.random() * 2.0 * pi #orien = random.random() par.set(x, y, alpha) #par.set_noise(0.5, 0.5, 5.0) po.append(par) p = po #print po #robbie.motionmodel(10,2) # deg and rad while True: output = conn.recv(2048) if output.strip() == "disconnect": conn.close() sys.exit("Received disconnect message. Shutting down.") conn.send("dack") elif output: print output values = parser.firstParser(output) strength = value[0] orientation = values[1] totalSteps = values[2] p2 = [] for i in range(N): p2.append(p[i].motionmodel( orientation, 2)) # the orientation value chai normalize garna baaki cha p = p2 # define new list that calls for the fucntion that gets rssi value w = [] my_p = [] Z = strength for i in range(N): p[i].my_fun() w = measurement_prob(np.array(my_p), Z) #for i in range(N): # w.append(p[i].measurement_prob(Z)) p3 = [] # random starting particle index index = int(random.random() * N) # beta b = 0 w_max = max(w) for i in range(N): b += random.random() * 2.0 * w_max while b > w[index]: b = b - w[index] index = (index + 1) % N p3.append(p[index]) p = p3 xa = 0 ya = 0 for j in range(N): xa = xa + p[j].x ya = ya + p[j].y xa = xa / N ya = ya / N print xa, ya location = interpolate.interpolate(xa, ya) f = open('db.json', 'wb') f.write('[{"geometry": {"type": "Point", "coordinates": [' + str(location[0]) + ',' + str(location[1]) + ']}, "type": "Feature", "properties": {}}]') f.close() '''
plt.ylabel("Leave One Out Cross-Validation Score") plt.xlabel("Log(Radiation)") plt.savefig("plots/cv_vs_val.pdf") plt.close() ## plot lat and lon vs cv score plt.scatter(d[:,1], d[:,0], s = out_cv*500, alpha = .5) plt.xlabel("Longitude") plt.ylabel("Latitude") plt.savefig("plots/cv_vs_latlon.pdf") plt.close() ## correlation between methods ## performance between methods wrt to lat, lon model = interpolate(d[:,0], d[:,1], np.log(d[:,2])) ## universal kriging or regression since central mean (so see error at the highest point) ## now get citizen data m = c.execute(""" select lat, lon, val from measurements_thin where datetime = "2011-09-24" """).fetchall() m = np.array(m) ## kriging interpolate and get the average difference between the two! smooth = interpolate(d[:,0], d[:,1], np.log(d[:,2])) smooth.pick_points(m[:,0], m[:,1]) smooth.kriging() np.median(np.log(m[:,2] / 300.0 * 1000.0) / smooth.z)
def train(self, filename, output_dir): logging.basicConfig(level=LOGGING_LEVEL, format="DEBUG: %(message)s") all_filename = filename try: os.mkdir(output_dir) except OSError: # directory already exists pass train_filename = path.join(output_dir, 'train.txt') dev_filename = path.join(output_dir, 'dev.txt') dev_percent = 0.1 # sample all-data into train and dev sets sample.main( [all_filename, train_filename, dev_filename, dev_percent] ) logging.debug('Split {0} into training ({1}) and development ({2})'.format(all_filename, train_filename, dev_filename)) # train models on training set self.models = {} def add_model(ModelClass, name): self.models[name] = ModelClass() self.models[name].train(train_filename) logging.debug('Done training {0} model'.format(name)) add_model( Unigram, 'unigram' ) add_model( Bigram, 'bigram' ) add_model( Trigram, 'trigram' ) add_model( Fourgram, 'fourgram' ) add_model( Fivegram, 'fivegram' ) add_model( Sixgram, 'sixgram' ) add_model( Sevengram, 'sevengram') add_model( Eightgram, 'eightgram') add_model( Maxent, 'maxent' ) #self.models['bigram'].backoff_model = self.models['unigram'] #self.models['trigram'].backoff_model = self.models['bigram'] #self.models['fourgram'].backoff_model = self.models['trigram'] #self.models['fivegram'].backoff_model = self.models['fourgram'] #self.models['sixgram'].backoff_model = self.models['fivegram'] dev_words = [line.strip() for line in open(dev_filename, 'r')] # write predictions out to disk using dev set model_outputs = [] model_output_dir = tempfile.mkdtemp() logging.debug('Temporary Output Directory: {0}'.format(model_output_dir)) for model_name in self.model_names: model = self.models[model_name] model_outputs.append( path.join( model_output_dir, model_name + '.probs' ) ) model.write_probability_list(dev_words, model_outputs[-1]) logging.debug('Wrote dev set predictions using {0} model'.format(model_name)) # interpolate the models, get the weights weights_list = interpolate.interpolate(model_outputs) logging.debug('Weights: {0}'.format(weights_list)) self.weights = dict( zip( self.model_names, weights_list ) )
def make_diagonal_speed_interpolator(self,board_name): # I have not been able to figure out the relationship between the speeds # in the first column and the codes in the second columns below. # these codes somehow ensure the speeds on the diagonal are the same horizontal # and vertical moves. For now we will just use tables and interpolate as needed. vals = [] self.diag_linterp = None ################################################################# if board_name=="LASER-M2": vals = [ [ 0.010 , 2617140 ], [ 0.050 , 523130 ], [ 0.100 , 261193 ], [ 0.150 , 174129 ], [ 0.200 , 130224 ], [ 0.300 , 87064 ], [ 0.400 , 65112 ], [ 0.500 , 52089 ], [ 0.600 , 43160 ], [ 0.700 , 37101 ], [ 0.800 , 32184 ], [ 0.900 , 29021 ], [ 0.990 , 26112 ], [ 1.000 , 13022 ], [ 1.500 , 8185 ], [ 2.000 , 4092 ], [ 3.000 , 2046 ], [ 3.500 , 1222 ], [ 4.000 , 1079 ], [ 4.500 , 1041 ], [ 4.990 , 1012 ], [ 5.000 , 223 ], [ 6.000 , 159 ], [ 6.990 , 136 ], [ 7.000 , 5155 ], [ 8.000 , 4092 ], [ 9.000 , 3125 ], [ 10.000 , 2219 ], [ 12.000 , 2003 ], [ 12.080 , 2000 ], [ 12.090 , 1255 ], [ 12.500 , 1238 ], [ 13.000 , 1185 ], [ 15.000 , 1079 ], [ 17.000 , 1006 ], [ 17.450 , 1000 ], [ 17.460 , 255 ], [ 18.000 , 235 ], [ 19.000 , 211 ], [ 20.000 , 191 ], [ 25.000 , 123 ], [ 30.000 , 86 ], [ 40.000 , 49 ], [ 50.000 , 31 ], [ 60.000 , 21 ], [ 70.000 , 16 ], [ 80.000 , 12 ], [ 90.000 , 9 ], [ 100.000 , 7 ], [ 120.000 , 5 ], [ 150.000 , 4 ], [ 200.000 , 3 ], [ 220.000 , 2 ], [ 230.000 , 2 ], [ 240.000 , 2 ], [ 241.000 , 0 ] ] ################################################################# elif board_name=="LASER-M1": vals = [ [ 0.100 , 3141014 ], [ 0.200 , 1570135 ], [ 0.300 , 1047004 ], [ 0.400 , 785067 ], [ 0.500 , 628054 ], [ 0.600 , 523130 ], [ 0.700 , 448185 ], [ 0.800 , 392161 ], [ 0.900 , 349001 ], [ 1.000 , 157013 ], [ 2.000 , 52089 ], [ 3.000 , 26044 ], [ 4.000 , 15180 ], [ 5.000 , 10120 ], [ 6.000 , 7122 ], [ 7.000 , 5155 ], [ 8.000 , 4092 ], [ 9.000 , 3125 ], [ 10.000 , 2219 ], [ 20.000 , 191 ], [ 50.000 , 31 ], [ 70.000 , 16 ], [ 100.000 , 7 ], [ 150.000 , 4 ], [ 200.000 , 3 ] ] ################################################################# elif board_name=="LASER-M": # LASER-M does not have this type of speed code. pass ################################################################# elif board_name=="LASER-B2": vals = [ [ 0.100 , 523 ], [ 0.200 , 261 ], [ 0.300 , 174 ], [ 0.400 , 130 ], [ 0.500 , 104 ], [ 0.600 , 87 ], [ 0.700 , 74 ], [ 0.800 , 65112 ], [ 0.900 , 58043 ], [ 1.000 , 26044 ], [ 2.000 , 8185 ], [ 3.000 , 4092 ], [ 4.000 , 2158 ], [ 5.000 , 1190 ], [ 6.000 , 1063 ], [ 7.000 , 11055 ], [ 8.000 , 8185 ], [ 9.000 , 6250 ], [ 10.000 , 5182 ], [ 15.000 , 2158 ], [ 20.000 , 1126 ], [ 30.000 , 172 ], [ 50.000 , 63 ], [ 100.000 , 15 ], [ 150.000 , 8 ], [ 200.000 , 6 ] ] ################################################################# elif board_name=="LASER-B1": vals = [ [ 0.100 , 518083 ], [ 0.200 , 259041 ], [ 0.300 , 172198 ], [ 0.400 , 129148 ], [ 0.500 , 103170 ], [ 0.600 , 86099 ], [ 0.700 , 74012 ], [ 0.800 , 64202 ], [ 0.900 , 57151 ], [ 1.000 , 25234 ], [ 2.000 , 8163 ], [ 5.000 , 1186 ], [ 10.000 , 120 ], [ 20.000 , 31 ], [ 30.000 , 14 ], [ 40.000 , 8 ], [ 50.000 , 5 ], [ 70.000 , 2 ], [ 90.000 , 1 ], [ 100.000 , 1 ], [ 190.000 , 0 ], [ 199.000 , 0 ], [ 200.000 , 0 ] ] ################################################################# elif board_name=="LASER-B" or board_name=="LASER-A": # LASER-A and LASER-B do not have this type of speed code. 
        pass

    if vals != []:
        xvals = []
        yvals = []
        for i in range(len(vals)):
            xvals.append(vals[i][0])
            yvals.append(vals[i][1])
        return interpolate(xvals, yvals)
    else:
        return None
def test_interpolate_degree_1():
    assert_that(interpolate([(1, 2), (2, 3)]).coefficients).is_equal_to([1, 1])
def test_interpolate_repeated_x_values():
    with pytest.raises(ValueError):
        interpolate([(1, 2), (1, 3)])
def test_interpolate_empty():
    with pytest.raises(ValueError):
        interpolate([])
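# A minimal sketch (assumed, not the project's actual implementation) of an
# `interpolate` that would satisfy the polynomial-interpolation tests above:
# it builds the Lagrange interpolating polynomial, exposes ascending-order
# `coefficients` plus `evaluateAt`, and raises ValueError on an empty point
# list or repeated x values.  `Polynomial` is a hypothetical helper class.
class Polynomial:
    def __init__(self, coefficients):
        # coefficients[i] is the coefficient of x**i
        self.coefficients = coefficients

    def evaluateAt(self, x):
        return sum(c * x ** i for i, c in enumerate(self.coefficients))


def interpolate(points):
    if not points:
        raise ValueError("at least one point is required")
    xs = [x for x, _ in points]
    if len(set(xs)) != len(xs):
        raise ValueError("x values must be distinct")

    def poly_mul(p, q):
        out = [0] * (len(p) + len(q) - 1)
        for i, a in enumerate(p):
            for j, b in enumerate(q):
                out[i + j] += a * b
        return out

    def poly_add(p, q):
        n = max(len(p), len(q))
        p = p + [0] * (n - len(p))
        q = q + [0] * (n - len(q))
        return [a + b for a, b in zip(p, q)]

    result = [0]
    for i, (xi, yi) in enumerate(points):
        basis = [1]
        for j, (xj, _) in enumerate(points):
            if i == j:
                continue
            # multiply the basis polynomial by (x - xj) / (xi - xj)
            denom = xi - xj
            basis = poly_mul(basis, [-xj / denom, 1 / denom])
        result = poly_add(result, [yi * c for c in basis])
    return Polynomial(result)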
def test_saga_executes_with_resolved_interpolations_for_real_configuration( self): configuration = { "host": "productpage.svc", "matchRequest": { "method": "GET", "url": "http://localhost:3001", "headers": { "Start-Faking": "True" }, }, "onMatchedRequest": [ { "method": "GET", "url": "http://ratings.svc/add/${parent.headers.Product-Id}", "isSuccessIfReceives": [{ "status-code": 200, "headers": { "Content-type": "application/json" }, }], "onFailure": [{ "method": "GET", "url": "http://ratings.svc/delete/${root.headers.Product-Id}", "timeout": 3, "maxRetriesOnTimeout": 1, "isSuccessIfReceives": [{ "status-code": 200, "headers": { "Content-type": "application/json" }, }], }], "timeout": 30, "maxRetriesOnTimeout": 3, }, { "method": "GET", "url": "http://details.svc/details/add/${root.headers.Product-Id}", "isSuccessIfReceives": [{ "status-code": 200, "headers": { "Content-type": "application/json" }, }], "onFailure": [{ "method": "GET", "url": "http://details.svc/details/remove/${root.headers.Product-Id}", "timeout": 3, "maxRetriesOnTimeout": 1, "isSuccessIfReceives": [{ "status-code": 200, "headers": { "Content-type": "application/json" }, }], }], "timeout": 30, "maxRetriesOnTimeout": 3, }, ], "onAllSucceeded": { "status-code": 200, "body": "Ratings: ${transaction[0].response.body}\nDetails: ${transaction[1].response.body}\n", }, "onAnyFailed": { "status-code": 500, "body": "Ratings: ${transaction[0].response.body}\nDetails: ${transaction[1].response.body}\n", }, } start_request_headers = {"Product-Id": "12"} with requests_mock.Mocker() as m: m.get( "http://ratings.svc/add/12", status_code=200, headers={"Content-type": "application/json"}, text="bar", ) m.get( "http://details.svc/details/add/12", status_code=200, headers={"Content-type": "application/json"}, text="foo", ) coordinator = SagaCoordinator( configuration, start_request_headers=start_request_headers) success, transactions, failed_compensations = coordinator.execute_saga( ) self.assertEqual( [ "http://ratings.svc/add/12", "http://details.svc/details/add/12" ], [request.url for request in m.request_history], ) self.assertTrue(success) self.assertEqual(len(transactions), 2) self.assertEqual(len(failed_compensations), 0) context = { "parent": RequestNode(), "root": coordinator.root, "transactions": transactions, } out = interpolate(configuration["onAllSucceeded"]["body"], **context) self.assertEqual("Ratings: bar\nDetails: foo\n", out) with requests_mock.Mocker() as m: m.get("http://ratings.svc/add/12", status_code=403) coordinator = SagaCoordinator( configuration, start_request_headers=start_request_headers) success, transactions, failed_compensations = coordinator.execute_saga( ) self.assertEqual( ["http://ratings.svc/add/12"], [request.url for request in m.request_history], ) self.assertFalse(success) self.assertEqual(len(transactions), 0) self.assertEqual(len(failed_compensations), 0) with requests_mock.Mocker() as m: m.get( "http://ratings.svc/add/12", status_code=200, headers={"Content-type": "application/json"}, ) m.get( "http://ratings.svc/delete/12", status_code=200, headers={"Content-type": "application/json"}, ) m.get("http://details.svc/details/add/12", status_code=404) coordinator = SagaCoordinator( configuration, start_request_headers=start_request_headers) success, transactions, failed_compensations = coordinator.execute_saga( ) self.assertEqual( [ "http://ratings.svc/add/12", "http://details.svc/details/add/12", "http://ratings.svc/delete/12", ], [request.url for request in m.request_history], ) 
self.assertFalse(success) self.assertEqual(len(transactions), 1) self.assertEqual(len(failed_compensations), 0)
    # tail of a day-splitting loop from the surrounding script (fragment)
    start = end
    end = start + one_day
    day_index += 1


def plot(day, index):
    plt.figure(figsize=(10, 4))
    day.plot(x='t', y='L2')
    plt.savefig('./data/days/day' + str(index) + '.png')
    plt.close()
    day.to_csv('./data/days_csv/day' + str(index) + '.csv')


accel = pd.read_csv('./data/in.csv')
current_time = time.time() * 1000  # Current time in ms
accel = accel[accel['timestamp'] < current_time]

series = ip.interpolate(accel)  # avoid rebinding the `ip` module name
df = pd.DataFrame()
df['t'] = series.index
df['L2'] = series.values
#df = splitTime(df)
# NOTE: apply() returns a new Series; the result is discarded here,
# so assign it back if formatted time labels are actually wanted.
df['t'].apply(lambda t: t.strftime('%H:%M:%S'))
plotter(df)

#total = len(accel)
#day = int(total / 7)  # Approximately the number of rows of the first day.
def dardar2era(dardar, ERA, p_grid):
    """
    Interpolates ERA5 data to DARDAR locations and the pressure grid defined in p_grid.

    Parameters
    ----------
    dardar : DARDARProduct instance
    ERA : ERA5 instance
    p_grid : a pressure grid in hPa, where the values should be interpolated to

    Returns
    -------
    grid_t : temperature gridded to DARDAR locations
    grid_z : geopotential gridded to DARDAR locations
    """
    lon_d = dardar.get_data('longitude')
    lat_d = dardar.get_data('latitude')
    height_d = dardar.get_data('height')

    # convert longitude from -180-180 to 0-360
    if lon_d.min() < 0:
        lon_d = lon_d % 360

    # add extra pressure level in ERA5 data
    xlevel = 1200
    ERA.add_extra_level('temperature', xlevel)
    ERA.add_extra_level('geopotential', xlevel)

    # get ERA lat/lon/pressure grids
    lat = ERA.t.latitude.data
    lon = ERA.t.longitude.data
    level = ERA.t.level.data
    t = ERA.t.t[0].data
    z = ERA.z.z[0].data
    level = np.log(level)  # convert pressure to log

    # add two extra dimensions to longitude to wrap around during interpolation
    lon, z = expand_lon(ERA.z.longitude.data, z)
    lon, t = expand_lon(ERA.t.longitude.data, t)

    #my_interpolating_function = RegularGridInterpolator((level, lat, lon), A)

    # NOTE: this overrides the p_grid argument with a fixed 1-1150 hPa grid.
    p_grid = np.arange(1, 1150, 10)
    points = []

    # interpolate ERA5 to DARDAR lat/lon locations
    for i in range(len(p_grid)):
        p = np.log(p_grid[i])  # convert pressure to log range
        pts = [[p, lat_d[j], lon_d[j]] for j in range(len(lat_d))]
        points.append(pts)

    my_interpolating_function = interpolate(level, lat, lon, t)
    grid_t = my_interpolating_function(points)

    my_interpolating_function = interpolate(level, lat, lon, z)
    grid_z = my_interpolating_function(points)

    return grid_t, grid_z
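# A hedged sketch of the `interpolate` helper assumed above: the commented-out
# line in dardar2era suggests it wraps scipy's RegularGridInterpolator over the
# (log-pressure, latitude, longitude) grid.  This is an assumed form, not the
# project's actual module; the latitude flip handles ERA5's descending grid.
from scipy.interpolate import RegularGridInterpolator

def interpolate(level, lat, lon, field):
    # RegularGridInterpolator needs ascending grid coordinates, so flip
    # latitude (and the field along that axis) if it is stored descending.
    if lat[0] > lat[-1]:
        lat = lat[::-1]
        field = field[:, ::-1, :]
    return RegularGridInterpolator((level, lat, lon), field,
                                   bounds_error=False, fill_value=None)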
plt.ylabel("Leave One Out Cross-Validation Score") plt.xlabel("Log(Radiation)") plt.savefig("plots/cv_vs_val.pdf") plt.close() ## plot lat and lon vs cv score plt.scatter(d[:, 1], d[:, 0], s=out_cv * 500, alpha=.5) plt.xlabel("Longitude") plt.ylabel("Latitude") plt.savefig("plots/cv_vs_latlon.pdf") plt.close() ## correlation between methods ## performance between methods wrt to lat, lon model = interpolate(d[:, 0], d[:, 1], np.log(d[:, 2])) ## universal kriging or regression since central mean (so see error at the highest point) ## now get citizen data m = c.execute(""" select lat, lon, val from measurements_thin where datetime = "2011-09-24" """).fetchall() m = np.array(m) ## kriging interpolate and get the average difference between the two! smooth = interpolate(d[:, 0], d[:, 1], np.log(d[:, 2])) smooth.pick_points(m[:, 0], m[:, 1]) smooth.kriging() np.median(np.log(m[:, 2] / 300.0 * 1000.0) / smooth.z)
def run_aquaculture_model(args): ''' Run aquaculture model main routine @param listargs: list of arguments passed trough command-line or other script Must look like sys.argv so make sure is starts with scriptname. ''' # Parse command-line arguments and set parameters for script try: param = cmd_options_aquaculture.InputAgri(args) params = param.options params_var = param.options_var except SystemExit: raise MyError("Error has occured in the reading of the commandline options.") # Start timer and logging s = my_sys.SimpleTimer() log = my_logging.Log(params.outputdir,"%s_%i.log" % (params.scenarioname,params.year)) print "Log will be written to %s" % log.logFile # If no arguments are provided do a run with defaults in params if len(args) == 0: log.write_and_print("No arguments provided: starting default run...") # time start of run log.write_and_print("Starting run....") log.write("# Parameters used:",print_time=False,lcomment=False) for option in str(params_var).split(", "): log.write("--%s = %s" % (option.split(": ")[0].strip("{").strip("'"), option.split(": ")[1].strip("}").strip("'")), print_time=False,lcomment=False) log.write("All parameters used:") for option in str(params).split(", "): log.write("# %s = %s" % (option.split(": ")[0].strip("{").strip("'"), option.split(": ")[1].strip("}").strip("'")), print_time=False,lcomment=False) log.write("# End of all parameters used.",print_time=False,lcomment=False) # Check whether there are command-line arguments which are not used if (len(param.args) > 0): txt = "The following command line arguments will not be used:" log.write_and_print(txt + str(param.args)) # Write svn information of input and scripts to log file. log.write("******************************************************",print_time=False,lcomment=True) log.write("Version information:",print_time=False,lcomment=True) log.write("Version information main script:",print_time=False,lcomment=True) log.write("Revision $LastChangedDate: 2013-09-25 13:37:10 +0200 (Tue, 25 Sep 2013)",print_time=False,lcomment=True) log.write("Date $LastChangedRevision: 344 $",print_time=False,lcomment=True) #message = get_versioninfo.get_versioninfo(params.inputdir,params.outputdir) #message.extend(get_versioninfo.get_versioninfo("tools",params.outputdir)) #for item in range(len(message)): # log.write(str(message[item]),print_time=False,lcomment=True) log.write("******************************************************",print_time=False,lcomment=True) # Read mask of the input grids. We take the iso grid as mask for this. # The mask takes care to do the calculations on an efficient way. Only # the grid cells which are in the mask are calculated and stored. if (params.lmask): mask = ascraster.create_mask(params.file_mask, 0.0,'GT',numtype=float) log.write_and_print(s.interval("Reading mask")) else: mask = None log.write_and_print(s.interval("No mask is used for this simulation.")) # Read prods per province. Here no interpolation is done. So the exact year must be specified. production = general_class.read_general_file(params.fileproduction,sep=";",key=None,out_type="list") # Read N and P excretion. File must contain the proder Species;N_excretion;P_excretion N_excretion = general_class.read_general_file(params.fileN_excretion,sep=";",key="Species",out_type="dict") P_excretion = general_class.read_general_file(params.fileP_excretion,sep=";",key="Species",out_type="dict") # Make an excretion rate for this year. 
for key in N_excretion: years = [] vals = [] for name in N_excretion[key].get_attrib(): years.append(float(name)) vals.append(float(N_excretion[key].get_val(name))) N_excretion[key].add_item("N_excretion",interpolate.interpolate(params.year,years,vals,extrapol=1)) for key in P_excretion: years = [] vals = [] for name in P_excretion[key].get_attrib(): years.append(float(name)) vals.append(float(P_excretion[key].get_val(name))) P_excretion[key].add_item("P_excretion",interpolate.interpolate(params.year,years,vals,extrapol=1)) # Calculate the total N and P manure per province for each line in the production input file Nout = {} Pout = {} for item in range(len(production)): # Get the number of prods for the year specified. prod = production[item].get_val(str(params.year)) spec = production[item].get_val("Species") try: # Get the N and P excretion for this animal (kg per ton production) Nexcret = N_excretion[spec].get_val("N_excretion") Pexcret = P_excretion[spec].get_val("P_excretion") except KeyError: raise MyError("This animal " + spec + " has no excretion rate in file: " + params.fileP_excretion +\ " or in file " + params.fileN_excretion) # Multiply prods with excretion production[item].add_item("Nout",float(Nexcret)*float(prod)) production[item].add_item("Pout",float(Pexcret)*float(prod)) #BF=brackish fishponds, FF=freshwater fishponds, MF=marine water fishponds, BC=brackish cages, #FC=freshwater cages, MC=marine water cages, BP=brackish pens, #FP=freshwater pens, MP=marine water pens fresh_environments = ["BF", "FF", "BC","FC", "BP", "FP"] marine_environments = ["MF", "MC", "MP"] # Allocation of Nitrogen, fresh and brakish waters outgrid = ascraster.Asciigrid(ascii_file=params.fileiso,mask=mask) outgrid.add_values(outgrid.length*[0.0]) for environ in fresh_environments: grid = allocation_aquaculture.calculate(params,mask,production,environ=environ,substance="N") outgrid.add(grid) outgrid.write_ascii_file(params.fileNaqua) print "Total N of freshwater aquaculture in kg N: ",sum(outgrid.values) log.write_and_print(s.interval("Ready with allocation of N of freshwater aquaculture to grid cells.")) # Allocation of Phosphorus, fresh and brakish waters outgrid = ascraster.Asciigrid(ascii_file=params.fileiso,mask=mask) outgrid.add_values(outgrid.length*[0.0]) for environ in fresh_environments: grid = allocation_aquaculture.calculate(params,mask,production,environ=environ,substance="P") outgrid.add(grid) outgrid.write_ascii_file(params.filePaqua) print "Total P of freshwater aquaculture in kg P: ",sum(outgrid.values) log.write_and_print(s.interval("Ready with allocation of P of freshwater aquaculture to grid cells.")) # Allocation of Nitrogen, marine waters outgrid = ascraster.Asciigrid(ascii_file=params.fileprov_marine,mask=None) outgrid.add_values(outgrid.length*[0.0]) for environ in marine_environments: grid = allocation_aquaculture.calculate(params,mask,production,environ=environ,substance="N") outgrid.add(grid) outgrid.write_ascii_file(params.fileNmarine) print "Total N of marine aquaculture in kg N: ",sum(outgrid.values) log.write_and_print(s.interval("Ready with allocation of N of marine aquaculture to grid cells.")) # Allocation of Phosphorus, fresh and brakish waters outgrid = ascraster.Asciigrid(ascii_file=params.fileprov_marine,mask=None) outgrid.add_values(outgrid.length*[0.0]) for environ in marine_environments: grid = allocation_aquaculture.calculate(params,mask,production,environ=environ,substance="P") outgrid.add(grid) outgrid.write_ascii_file(params.filePmarine) print "Total P of 
marine aquaculture in kg P: ",sum(outgrid.values) log.write_and_print(s.interval("Ready with allocation of P of marine aquaculture to grid cells.")) fp = open(params.fileoutput_table,"w") lheader = True for item in range(len(production)): production[item].write(fp,sep=";",lheader=lheader,NoneValue="") lheader = False fp.close() log.write_and_print(s.total("Total run")) del log
def test_interpolate_degree_0():
    assert_that(interpolate([(1, 2)]).coefficients).is_equal_to([2])
def buck_1_4(L,log,log_dia,gui_mode): prices = buckPCh.get_prices() Li = [L,0,0,0,0,0] #length iterators p16 = prices[0] p30 = prices[1] p36 = prices[2] it = [0,0,0,0,0,0] #iteration tracker p = [0,0,0,0,0,0] #price tracker p1 = [0,0,0,0,0,0] v = [0,0,0,0,0,0] #volume tracker v1 = [0,0,0,0,0,0] td = [0,0,0,0,0,0] #top diameter tracker td1 = [0,0,0,0,0,0] Lf = [0,0,0,0,0,0] #lengths tracker Lf2 = [0,0,0,0,0,0] #secondary lengths tracker lognum = 2 #log number control variable s=0 while s >= 0: if Li[s] <= (16 + (0.8333)): it[s] = 0 s = s - 1 if it[s] == 0: #either load start length or if Li[s] <= (40 + (0.8333)): #if log is within 40ft long # use the total Li[s] = round(Li[s] - ((0.8333) - 0.5)) #normalize the rounding to 10inch over Li[s] = Li[s] - (1 - (0.8333)) #ensure length divisible by 2 if ((1e-5) <= (math.fmod(Li[s],2) - (0.8333))): #set start log length Li[s] = Li[s] - 1 else: Li[s] = (40 + (0.8333)) else: Li[s] = Li[s] - 2 #decrease length by one value it[s] = it[s] + 1 # print 'log loop %i\n' %s # print 'Li[s] = %0.4f\n' %Li[s] #calculate length price dia = interpolate.interpolate(sum(Li),log,log_dia) dia = int(dia) #-->FIXME: Look at this later td[s] = dia v[s] = logvolume_2.logvolume_2(Li[s],dia) p[s] = buck1p.buck1p(Li[s],v[s],p16,p30,p36) Li[s+1] = L - sum(Li) #bump remaining length ahead sum_p = sum(p) if sum_p >= sum(p1): p2 = copy(p1) p1 = copy(p) v2 = copy(v1) v1 = copy(v) td2 = copy(td1) td1 = copy(td) Lf2 = copy(Lf) Lf = copy(Li) elif sum_p > sum(p2): p2 = copy(p) v2 = copy(v) td2 = copy(td) Lf2 = copy(Li) if s <= (lognum): s = s + 1 while (((s >= 0) & (Li[s] <= 16.8333)) | (s == lognum)): Li[s] = 0 #clear all previous log lengths from the top of the tree if (s+1) < len(Li): Li[s+1] = 0 p[s] = 0 v[s] = 0 td[s] = 0 it[s] = 0 s = s - 1 if gui_mode == 1 : # make grandios graphical table of data... file = open(sys.path[0]+os.sep+"output.txt",mode='w') i = 0 for entry in v1: # clean up output to be more user-friendly (clarity) if entry == 0: Lf[i] = 0 i = i + 1 i = 0 for entry in v2: # clean up output to be more user-friendly (clarity) if entry == 0: Lf2[i] = 0 i = i + 1 print >>file print >>file, "first choice..." print >>file, "Lengths are: [%i, %i, %i, %i, %i]" %(Lf[0], Lf[1], Lf[2], Lf[3], Lf[4]), "total:", sum(Lf) print >>file, "Volumes are:", v1, "total:", sum(v1) print >>file, "Top diams are:", td1 print >>file, "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p1[0], p1[1], p1[2], p1[3], p1[4]), "total:", sum(p1) print >>file print >>file, "second choice..." print >>file, "Lengths are: [%i, %i, %i, %i, %i]" %(Lf2[0], Lf2[1], Lf2[2], Lf2[3], Lf2[4]), "total:", sum(Lf2) print >>file, "Volumes are:", v2, "total:", sum(v2) print >>file, "Top diams are:", td2 print >>file, "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p2[0], p2[1], p2[2], p2[3], p2[4]), "total:", sum(p2) print >>file file.close() os.system("kwrite "+sys.path[0]+os.sep+"output.txt &") else: print print "first choice..." print "Lengths are: [%i, %i, %i, %i, %i]" %(Lf[0], Lf[1], Lf[2], Lf[3], Lf[4]), "total:", sum(Lf) print "Volumes are:", v1, "total:", sum(v1) print "Top diams are:", td1 print "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p1[0], p1[1], p1[2], p1[3], p1[4]), "total:", sum(p1) print print "second choice..." 
print "Lengths are: [%i, %i, %i, %i, %i]" %(Lf2[0], Lf2[1], Lf2[2], Lf2[3], Lf2[4]), "total:", sum(Lf2) print "Volumes are:", v2, "total:", sum(v2) print "Top diams are:", td2 print "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p2[0], p2[1], p2[2], p2[3], p2[4]), "total:", sum(p2) print
def test_interpolate_degree_1():
    assert interpolate([(1, 2), (2, 3)]).coefficients == [1, 1]
def iteration(self, k, t0, dt, cycles, **kwargs): """Perform one PFASST iteration.""" levels = self.levels nlevels = len(levels) rank = self.mpi.rank ntime = self.mpi.ntime T = levels[0] # finest/top level B = levels[nlevels-1] # coarsest/bottom level self.state.cycle = 0 T.call_hooks('pre-iteration', **kwargs) # post receive requests if rank > 0: for iF in range(len(self.levels)-1): F = self.levels[iF] F.post_receive((F.level+1)*100+k) #### cycle for down, up in cycles: #### down for iF in down: self.state.cycle += 1 finest = iF == 0 coarsest = iF == nlevels - 1 F = levels[iF] if not coarsest: G = levels[iF+1] # get new initial value on coarsest level if coarsest: if rank > 0: F.receive((F.level+1)*100+k, blocking=coarsest) # sdc sweep F.call_hooks('pre-sweep', **kwargs) F.bSDC[0] = F.q0 for s in range(F.sweeps): F.sdc.sweep(F.bSDC, t0, dt, F.qSDC, F.fSDC, F.feval, **kwargs) F.qend[...] = F.qSDC[-1] F.call_hooks('post-sweep', **kwargs) # send new value forward if rank < ntime-1: F.send((F.level+1)*100+k, blocking=coarsest) # restrict if not coarsest: G.call_hooks('pre-restrict', **kwargs) restrict_time_space(F.qSDC, G.qSDC, F, G, **kwargs) restrict_space_sum_time(F.bSDC, G.bSDC, F, G, **kwargs) eval_at_sdc_nodes(t0, dt, G.qSDC, G.fSDC, G, **kwargs) G.bSDC[1:,:] += fas(dt, F.fSDC, G.fSDC, F, G, **kwargs) G.call_hooks('post-restrict', **kwargs) #### up for iF in up: self.state.cycle += 1 finest = iF == 0 F = levels[iF] G = levels[iF+1] # interpolate G.call_hooks('pre-interpolate', **kwargs) interpolate_time_space(F.qSDC, G.qSDC, F, G, **kwargs) eval_at_sdc_nodes(t0, dt, F.qSDC, F.fSDC, F, **kwargs) G.call_hooks('post-interpolate', **kwargs) # get new initial value if rank > 0: F.receive((F.level+1)*100+k) interpolate(F.q0, G.q0, F, G, **kwargs) # sdc sweep if not finest: F.call_hooks('pre-sweep', **kwargs) F.bSDC[0] = F.q0 for s in range(F.sweeps): F.sdc.sweep(F.bSDC, t0, dt, F.qSDC, F.fSDC, F.feval, **kwargs) F.qend[...] = F.qSDC[-1] F.call_hooks('post-sweep', **kwargs) #### done self.state.cycle = 0 T.call_hooks('post-iteration', **kwargs)
def buck2(L,log,log_dia,gui_mode): prices = buckPCh.get_prices() Li = [L,0,0,0,0,0] #length iterators #Lengths-to-Check Vector (255 terminated) LCV = [36,40,38,34,32,30,28,26,24,22,20,18,16,255] p16 = prices[0] p30 = prices[1] p36 = prices[2] it = [255,255,255,255,255,255] #iteration tracker (this currently is used # to track index of LCV) p = [0,0,0,0,0,0] #price tracker p1 = [0,0,0,0,0,0] p2 = [0,0,0,0,0,0] v = [0,0,0,0,0,0] #volume tracker v1 = [0,0,0,0,0,0] v2 = [0,0,0,0,0,0] td = [0,0,0,0,0,0] #top diameter tracker td1 = [0,0,0,0,0,0] td2 = [0,0,0,0,0,0] Lf = [0,0,0,0,0,0] #lengths tracker Lf2 = [0,0,0,0,0,0] #secondary lengths tracker lognum = 5 #log number control variable min_length = 100 #minimum log length variable for entry in LCV: #find minimum length if min_length > entry: min_length = entry s=0 while s >= 0: if it[s] == 255: #eg "top" of tree it[s] = 0 # (there will never be 255 LCV elements) for entry in LCV: if (entry + 0.8333) <= Li[s]: Li[s] = entry + 0.8333 it[s] = it[s] + 1 break it[s] = it[s] + 1 it[s] = it[s] - 1 if entry == 255: print "\n Too short!\n" break else: #middle of tree Li[s] = 0 Li[s+1] = 0 it[s] = it[s] + 1 while (L - sum(Li)) < (LCV[it[s]] + 0.8333): if (LCV[it[s]] == 255): break it[s] = it[s] + 1 if (LCV[it[s]] == 255) & (s == 0): break # END! QUIT! VAMOS! NOW! if (LCV[it[s]] == 255): #clear all previous log lengths from the top of the tree if (s+1) < len(Li): Li[s+1] = 0 Li[s] = 0 p[s] = 0 v[s] = 0 td[s] = 0 it[s] = 255 # there will never be 255 LCV elements s = s - 1 sum_Li = sum(Li) continue Li[s] = LCV[it[s]] + 0.8333 # print "s:",s,"Li:",Li,"it:",it # print 'log loop %i\n' %s # print 'Li[s] = %0.4f\n' %Li[s] # print 'it[s] = %i\n' %it[s] #calculate length price dia = interpolate.interpolate(sum(Li),log,log_dia) dia = int(dia) #-->FIXME: Look at this later td[s] = dia v[s] = logvolume_2.logvolume_2(Li[s],dia) p[s] = buck1p.buck1p(Li[s],v[s],p16,p30,p36) Li[s+1] = L - sum(Li) #bump remaining length ahead sum_p = sum(p) if sum_p >= sum(p1): p2 = copy(p1) p1 = copy(p) v2 = copy(v1) v1 = copy(v) td2 = copy(td1) td1 = copy(td) Lf2 = copy(Lf) Lf = copy(Li) elif sum_p >= sum(p2): p2 = copy(p) v2 = copy(v) td2 = copy(td) Lf2 = copy(Li) if (Li[s+1] >= (min_length + 0.8333)) & (s < (lognum - 1)): s = s + 1 if gui_mode == 1 : # make grandios graphical table of data... file = open(sys.path[0]+os.sep+"output.txt",mode='w') i = 0 for entry in v1: # clean up output to be more user-friendly (clarity) if entry == 0: Lf[i] = 0 i = i + 1 i = 0 for entry in v2: # clean up output to be more user-friendly (clarity) if entry == 0: Lf2[i] = 0 i = i + 1 print >>file print >>file, "first choice..." print >>file, "Lengths are: [%i, %i, %i, %i, %i]" %(Lf[0], Lf[1], Lf[2], Lf[3], Lf[4]), "total:", sum(Lf) print >>file, "Volumes are:", v1, "total:", sum(v1) print >>file, "Top diams are:", td1 print >>file, "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p1[0], p1[1], p1[2], p1[3], p1[4]), "total:", sum(p1) print >>file print >>file, "second choice..." 
print >>file, "Lengths are: [%i, %i, %i, %i, %i]" %(Lf2[0], Lf2[1], Lf2[2], Lf2[3], Lf2[4]), "total:", sum(Lf2) print >>file, "Volumes are:", v2, "total:", sum(v2) print >>file, "Top diams are:", td2 print >>file, "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p2[0], p2[1], p2[2], p2[3], p2[4]), "total:", sum(p2) print >>file # print >>file, "catch_loop:", catch_loop # print >>file file.close() os.system("zenity --title=\"Best Buck Lengths\" --info --no-wrap --text=\"`cat "+sys.path[0]+os.sep+"output.txt`\" &") else: print print "first choice..." print "Lengths are: [%i, %i, %i, %i, %i]" %(Lf[0], Lf[1], Lf[2], Lf[3], Lf[4]), "total:", sum(Lf) print "Volumes are:", v1, "total:", sum(v1) print "Top diams are:", td1 print "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p1[0], p1[1], p1[2], p1[3], p1[4]), "total:", sum(p1) print print "second choice..." print "Lengths are: [%i, %i, %i, %i, %i]" %(Lf2[0], Lf2[1], Lf2[2], Lf2[3], Lf2[4]), "total:", sum(Lf2) print "Volumes are:", v2, "total:", sum(v2) print "Top diams are:", td2 print "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p2[0], p2[1], p2[2], p2[3], p2[4]), "total:", sum(p2) print
##
# A script for segmenting accelerometer data for inferring occupation.
# @author: Manar Safi
##
import pandas as pd
import numpy as np

import interpolate as ip

PARALLEL = True
CORES = 4

accel = pd.read_csv('./data/in.csv.pprc')
accel = ip.interpolate(accel, PARALLEL, CORES)
accel.to_csv('./data/out.csv.pprc', index=False)