def part2(busses):
    # Sieve-style Chinese-Remainder search: lock in one bus at a time,
    # stepping by the LCM of all busses satisfied so far.
    step = 1
    time = 0
    for offset, bus in busses:
        while (time + offset) % bus != 0:
            time += step
        step = np.lcm(bus, step)
    return time
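# Usage sketch (example data assumed, not from the original source): the
# classic schedule "7,13,x,x,59,x,31,19" as (offset, bus_id) pairs should
# yield the earliest matching timestamp 1068781.
import numpy as np

print(part2([(0, 7), (1, 13), (4, 59), (6, 31), (7, 19)]))  # -> 1068781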
def compute(M, N):
    # "Multiply" matrices with mismatched inner dimensions by inflating both
    # with identity blocks (np.kron) until the inner dimensions meet at
    # s = lcm(n, p).
    m, n, p, q = len(M), len(M[0]), len(N), len(N[0])
    s = np.lcm(n, p)
    i1, i2 = s // n, s // p
    I1 = np.identity(i1)
    I2 = np.identity(i2)
    MI = np.kron(M, I1)  # (m*i1) x s
    NI = np.kron(N, I2)  # s x (q*i2)
    return np.matmul(MI, NI)
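# Usage sketch (illustrative shapes, an assumption on my part): a 2x2 times
# a 3x3 meets at inner dimension lcm(2, 3) = 6.
import numpy as np

M = np.array([[1, 2], [3, 4]])
N = np.eye(3)
print(compute(M, N).shape)  # -> (6, 6)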
def createCsv(sampleRate, filename, wave):
    # Repeat the wave so the written record length is a multiple of 128 samples.
    rpts = int(np.lcm(len(wave), 128) / len(wave))
    with open(filename, 'w') as f:
        f.write("SampleRate={}\n".format(int(sampleRate)))
        f.write("SetConfig=true\n")
        f.write("Y1\n")
        for sample in np.tile(wave, rpts):
            f.write("{}\n".format(sample))
def run2(self):
    first_bus_id = None
    bus_ids_dict = {}
    diff_from_first_bus = 0
    with open(self.path, "r") as f:
        line_number = 0
        for line in f:
            line = line.strip()
            if line_number == 0:
                my_timestamp = int(line)
            elif line_number == 1:
                for id in line.split(","):
                    if id != "x":
                        if first_bus_id is None:
                            first_bus_id = int(id)
                        bus_ids_dict[int(id)] = diff_from_first_bus
                    diff_from_first_bus += 1
            else:
                raise Exception(f"Invalid line number {line_number}")
            line_number += 1
    largest_bus_id = max(bus_ids_dict.keys())
    largest_diff_from_first_bus = bus_ids_dict[largest_bus_id]
    bus_ids_sorted = list(bus_ids_dict.keys())
    bus_ids_sorted.sort(reverse=True)
    bus_ids_sorted.remove(largest_bus_id)
    time_differences_sorted = []
    for bus_id in bus_ids_sorted:
        time_differences_sorted.append(bus_ids_dict[bus_id])
    starting_timestamp = int(my_timestamp / largest_bus_id) * largest_bus_id
    largest_factor = bus_ids_sorted[0]
    largest_factor_start = self.get_first_match(
        starting_timestamp, largest_bus_id, largest_factor,
        time_differences_sorted[0] - largest_diff_from_first_bus)
    bus_ids_sorted.remove(bus_ids_sorted[0])
    time_differences_sorted.remove(time_differences_sorted[0])
    timestamp = largest_factor_start
    while bus_ids_sorted:
        firsts_timestamp = timestamp - largest_diff_from_first_bus
        while bus_ids_sorted:
            bus_id = bus_ids_sorted[0]
            time_difference = time_differences_sorted[0]
            if (firsts_timestamp + time_difference) % bus_id != 0:
                break
            else:
                largest_factor = np.lcm(largest_factor, bus_id, dtype='int64')
                bus_ids_sorted.remove(bus_ids_sorted[0])
                time_differences_sorted.remove(time_differences_sorted[0])
        if not bus_ids_sorted:
            print(f"part2: {timestamp - largest_diff_from_first_bus}")
            break
        timestamp += largest_bus_id * largest_factor
def test_dygraph(self):
    paddle.disable_static()
    x1 = paddle.to_tensor(self.x_np)
    x2 = paddle.to_tensor(self.y_np)
    result = paddle.lcm(x1, x2)
    self.assertTrue(
        np.allclose(np.lcm(self.x_np, self.y_np), result.numpy()))
    paddle.enable_static()
def ajax():
    form = InputForm()
    if form.validate_on_submit():
        x = form.x.data
        y = form.y.data
        result = str(np.lcm(x, y))  # calculate & parse to string for jsonify
        return jsonify(result=result)
    return jsonify(result=form.errors)
def createMat(sampleRate, filename, wave):
    # Repeat the wave so the stored record length is a multiple of 128 samples.
    rpts = int(np.lcm(len(wave), 128) / len(wave))
    XDelta = 1 / sampleRate
    matparams = {'InputZoom': [[1]],
                 'XDelta': XDelta,
                 'XStart': [[0]],
                 'Y': np.tile(wave, rpts)}
    sio.savemat(filename, matparams)
def diff_resize_area(tensor, new_height_width):
    """Performs a resize op that passes gradients evenly.

    The tensor goes through a resize and pool where the resize and pool
    operations are determined by the Least Common Multiple. Since resize with
    nearest_neighbors and avg_pool distribute the gradients from the output to
    input evenly, there's less of a chance of learning artifacts. First we
    resize to the LCM then avg_pool to new_height_width. This resize operation
    is only efficient in cases where LCM is small. This is typically the case
    when upsampling or downsampling by a factor of 2 (e.g. H = 0.5 * new_H).

    Args:
      tensor: a tensor of shape [B, H, W, D]
      new_height_width: A tuple of length two which specifies new height,
        width respectively.

    Returns:
      The resize area tensor [B, H_new, W_new, D].

    Raises:
      RuntimeError: If the LCM is larger than 10 x new_height_width, then
        raise an error to prevent inefficient memory usage.
    """
    new_h, new_w = new_height_width
    unused_b, curr_h, curr_w, unused_d = tensor.shape.as_list()
    # The least common multiple used to determine the intermediate resize
    # operation.
    l_h = np.lcm(curr_h, new_h)
    l_w = np.lcm(curr_w, new_w)
    if l_h == curr_h and l_w == curr_w:
        im = tensor
    elif l_h < (10 * new_h) and l_w < (10 * new_w):
        im = tf.compat.v1.image.resize_bilinear(tensor, [l_h, l_w],
                                                half_pixel_centers=True)
    else:
        raise RuntimeError("DifferentiableResizeArea is memory inefficient "
                           "for resizing from (%d, %d) -> (%d, %d)" %
                           (curr_h, curr_w, new_h, new_w))
    lh_factor = l_h // new_h
    lw_factor = l_w // new_w
    if lh_factor == lw_factor == 1:
        return im
    return tf.nn.avg_pool2d(im, [lh_factor, lw_factor],
                            [lh_factor, lw_factor], padding="VALID")
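# Usage sketch (shapes assumed for illustration): upsampling a 4x4 feature
# map to 6x6 first resizes to lcm(4, 6) = 12, then average-pools by 2.
import tensorflow as tf

x = tf.random.normal([1, 4, 4, 3])
y = diff_resize_area(x, (6, 6))
print(y.shape)  # -> (1, 6, 6, 3)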
def simulate_moons_2(moons):
    i = 0
    initial_state = [state(moons, x) for x in range(3)]
    cycle_len = [0, 0, 0]
    while cycle_len[0] == 0 or cycle_len[1] == 0 or cycle_len[2] == 0:
        step(moons)
        i += 1
        if i % 100 == 0:
            print(i, cycle_len)
        for d in range(3):
            if cycle_len[d] == 0 and state(moons, d) == initial_state[d]:
                cycle_len[d] = i
                print(cycle_len)
    print(cycle_len)
    result = np.lcm(cycle_len[0], np.lcm(cycle_len[1], cycle_len[2]))
    return result
def calculate_greatest_common_denominator(denominators):
    # Despite the name, a common denominator is the *least common multiple*
    # of all denominators, so accumulate the LCM across the whole list
    # (the max of adjacent pairwise LCMs is not divisible by every entry).
    if len(denominators) == 1:
        return denominators[0]
    common = denominators[0]
    for d in denominators[1:]:
        common = np.lcm(common, d)
    return common
def np_LCM():
    x = 4
    y = 6
    print(np.lcm(x, y))  # 12

    arr = np.array([3, 6, 9])
    x = np.lcm.reduce(arr)  # LCM of all array elements
    print(x)  # 18

    arr = np.arange(1, 10)
    x = np.lcm.reduce(arr)  # LCM of 1..9
    print(x)  # 2520
def calculate_digit(sum_cache, i, int_signal, times=1):
    window_length = (i + 1) * 4
    remainder = len(int_signal) % window_length
    if remainder == 0:
        fft = signle_cycle_fft(sum_cache, i, int_signal, window_length)
        return (fft * times) % 10
    else:
        cycle = np.lcm(remainder, window_length) / remainder
        remaining_times = times % cycle
        effective_signal = int_signal * int(remaining_times)
        return signle_cycle_fft(sum_cache, i, effective_signal, window_length)
def cancel(arr, i, j, k):
    a = arr[j][i]
    b = arr[j][k]
    if b != 0:
        try:
            lcm = int(numpy.lcm(a, b))
        except Exception:
            return []
        # Scale column k so the pivot entries cancel against column i.
        for t in range(len(arr)):
            arr[t][k] = arr[t][k] * (lcm // b) - arr[t][i] * (lcm // a)
def adjust_block_compatibility(self, ws, bs, gs):
    """Adjusts the compatibility of widths, bottlenecks, and groups."""
    assert len(ws) == len(bs) == len(gs)
    assert all(w > 0 and b > 0 and g > 0 for w, b, g in zip(ws, bs, gs))
    vs = [int(max(1, w * b)) for w, b in zip(ws, bs)]
    gs = [int(min(g, v)) for g, v in zip(gs, vs)]
    ms = [np.lcm(g, b) if b > 1 else g for g, b in zip(gs, bs)]
    vs = [max(m, int(round(v / m) * m)) for v, m in zip(vs, ms)]
    ws = [int(v / b) for v, b in zip(vs, bs)]
    assert all(w * b % g == 0 for w, b, g in zip(ws, bs, gs))
    return ws, bs, gs
def combine(buses: List[Tuple[int, int]]) -> Tuple[int, int]:
    *buses_to_combine, (bus_time_period, wait_time) = buses
    if not buses_to_combine:
        return 0, bus_time_period
    curr_time, time_period = combine(buses_to_combine)
    while True:
        if get_wait_time(curr_time, bus_time_period) == wait_time:
            return curr_time, numpy.lcm(time_period, bus_time_period)
        curr_time += time_period
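# Usage sketch: get_wait_time is not defined in this snippet; a plausible
# implementation (an assumption on my part) is the wait until the bus's
# next departure.
def get_wait_time(curr_time: int, period: int) -> int:
    return (-curr_time) % period

buses = [(7, 0), (13, 1), (59, 4), (31, 6), (19, 7)]
print(combine(buses))  # expected: (1068781, 3162341)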
def solve(data=None):
    """
    Simulate the motion of the moons in time steps.

    Within each time step, first update the velocity of every moon by
    applying gravity. Then, once all moons' velocities have been updated,
    update the position of every moon by applying velocity. Time progresses
    by one step once all of the positions are updated.
    """
    moons = parse_input(data)
    x_moons = []
    y_moons = []
    z_moons = []
    for moon in moons:
        x_moons.append(Moon(Point(moon.position[0]), Point(moon.velocity[0])))
        y_moons.append(Moon(Point(moon.position[1]), Point(moon.velocity[1])))
        z_moons.append(Moon(Point(moon.position[2]), Point(moon.velocity[2])))
    x = find_loop(x_moons)
    y = find_loop(y_moons)
    z = find_loop(z_moons)
    return np.lcm(x, np.lcm(y, z))
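# Worked example (numbers assumed for illustration): the three axes evolve
# independently, so per-axis cycles of, say, 18, 28 and 44 steps combine to
# a full-state period of lcm(18, 28, 44).
import numpy as np

print(np.lcm(18, np.lcm(28, 44)))  # -> 2772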
def main():
    pos = np.array(lmap(parse, read_lines()), dtype=np.int32)
    vel = np.zeros_like(pos, np.int32)
    initial_pos = pos.copy()
    periods = np.zeros(pos.shape[1], np.int64)
    periods_left = list(range(pos.shape[1]))
    assert len(periods_left) == 3

    for i in range(1000):
        gravity = compute_gravity(pos)
        vel += gravity
        pos += vel
    vel = np.abs(vel)
    pos = np.abs(pos)
    energy = np.sum(vel.sum(axis=1) * pos.sum(axis=1))
    print("1.)", energy)

    pos = initial_pos.copy()
    vel = np.zeros_like(pos)
    for i in range(240000):
        gravity = compute_gravity(pos)
        vel += gravity
        pos += vel
        period_found = None
        for j in periods_left:
            if np.all(pos[:, j] == initial_pos[:, j]):
                period_found = j
                periods[j] = i + 1
                break  # hopefully no similar periods
        if period_found is not None:
            periods_left.remove(period_found)
            if not periods_left:
                break
    periods += 1  # next step starts the period
    lcm = np.lcm(periods[0], np.lcm(periods[1], periods[2]))
    print("2.)", lcm)
def execute(self, context):
    r = round(self.r, 6)
    R = round(self.R, 6)
    d = round(self.d, 6)
    Rmr = round(R - r, 6)      # R - r
    Rpr = round(R + r, 6)      # R + r
    Rpror = round(Rpr / r, 6)  # (R + r) / r
    Rmror = round(Rmr / r, 6)  # (R - r) / r
    # The curve closes after lcm(R, r) / R full turns; the radii are scaled
    # to integers so np.lcm can be applied.
    maxangle = 2 * math.pi * (
        np.lcm(round(self.R * 1000), round(self.r * 1000)) / (R * 1000))

    if self.typecurve == "hypo":
        xstring = str(Rmr) + "*cos(t)+" + str(d) + "*cos(" + str(Rmror) + "*t)"
        ystring = str(Rmr) + "*sin(t)-" + str(d) + "*sin(" + str(Rmror) + "*t)"
    else:
        xstring = str(Rpr) + "*cos(t)-" + str(d) + "*cos(" + str(Rpror) + "*t)"
        ystring = str(Rpr) + "*sin(t)-" + str(d) + "*sin(" + str(Rpror) + "*t)"
    zstring = ('(' + str(round(self.dip, 6)) + '*(sqrt(((' + xstring +
               ')**2)+((' + ystring + ')**2))))')

    print("x= " + str(xstring))
    print("y= " + str(ystring))
    print("z= " + str(zstring))
    print("maxangle " + str(maxangle))

    x = Expression(xstring, ["t"])  # make equation from string
    y = Expression(ystring, ["t"])  # make equation from string
    z = Expression(zstring, ["t"])  # make equation from string

    # build function to be passed to create_parametric_curve()
    def f(t, offset: float = 0.0):
        c = (x(t), y(t), z(t))
        return c

    iterations = int(maxangle * 10)
    if iterations > 10000:  # do not calculate more than 10000 points
        print("limiting calculations to 10000 points")
        iterations = 10000
    parametric.create_parametric_curve(f,
                                       offset=0.0,
                                       min=0,
                                       max=maxangle,
                                       use_cubic=True,
                                       iterations=iterations)
    return {'FINISHED'}
def get_output_digit(self, d):
    kernel_period = 4 * (d + 1)
    read_length = lcm(self.period, kernel_period)
    full_reads = self.data.length // read_length
    frag_reads = self.data.length % read_length
    out = 0
    if 1 <= full_reads <= 9:
        for j in range(min(read_length, self.data.length)):
            out += self.data[j] * self.kernel[(j + 1) // (d + 1)]
    for j in range(frag_reads):
        out += self.data[j] * self.kernel[(j + 1) // (d + 1)]
    return abs(out) % 10
def generate(p=None, q=None):
    # Default arguments are evaluated once at definition time, so the primes
    # are generated inside the body to get fresh primes on every call.
    if p is None:
        p = number.getPrime(randint(660, 700))
    if q is None:
        q = number.getPrime(randint(660, 700))
    n = p * q
    phi = (p - 1) * (q - 1)
    carm_func = np.lcm(p - 1, q - 1)  # Carmichael's totient lambda(n)
    while True:
        e = randint(2, carm_func)
        if np.gcd(e, carm_func) == 1:
            break
    d = int(modinv(e, phi))
    return (n, e, d)
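# Roundtrip sketch (an assumption: number.getPrime, randint and a modinv
# helper are in scope, as the snippet above implies):
n, e, d = generate()
m = 42
c = pow(m, e, n)           # encrypt
assert pow(c, d, n) == m   # decrypt with the private exponent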
def part_two(timetable: list, max_iter: int) -> int:
    num1 = timetable[0]
    # The step can get very large; np defaults to int32 on some platforms,
    # so make sure it is initialized as int64
    step = np.int64(num1)
    counter = 0
    for num2 in timetable:
        # Find the LCM with offset for the previous aggregated number and the new one
        num1 = get_lcm(num1, num2, counter, step, max_iter)
        # Calculate the step for the next LCM calculation (major speed-up)
        step = np.lcm(step, num2)
        counter = counter + 1
    return num1
def __abc_smc_plotting(fig: plt.Figure, y_obs: [[float]],
                       priors: ["stats.Distribution"],
                       fitting_model: Models.Model,
                       model_hat: Models.Model,
                       accepted_params: [[float]],
                       weights: [float]) -> plt.Figure:
    n_params = (fitting_model.n_params - 2) if (
        type(fitting_model) is Models.SIRModel) else fitting_model.n_params
    n_rows = max([1, np.lcm(n_params, fitting_model.dim_obs)])
    gs = fig.add_gridspec(n_rows, 2)

    # plot fitted model
    row_step = n_rows // fitting_model.dim_obs
    for i in range(fitting_model.dim_obs):
        ax = fig.add_subplot(gs[i * row_step:(i + 1) * row_step, -1])
        y_obs_dim = [y[i] for y in y_obs]
        Plotting.plot_accepted_observations(ax, fitting_model.x_obs,
                                            y_obs_dim, [], model_hat, dim=i)

    row_step = n_rows // n_params
    if (type(fitting_model) is Models.SIRModel):
        for i in range(2, fitting_model.n_params):
            ax = fig.add_subplot(gs[(i - 2) * row_step:(i - 1) * row_step, 0])
            name = "theta_{}".format(i)
            accepted_parameter_values = [theta[i] for theta in accepted_params]
            Plotting.plot_parameter_posterior(
                ax, name, accepted_parameter_values,
                predicted_val=model_hat.params[i], prior=priors[i], dim=i,
                weights=weights)
    else:
        for i in range(fitting_model.n_params):
            ax = fig.add_subplot(gs[i * row_step:(i + 1) * row_step, 0])
            name = "theta_{}".format(i)
            accepted_parameter_values = [theta[i] for theta in accepted_params]
            Plotting.plot_parameter_posterior(
                ax, name, accepted_parameter_values,
                predicted_val=model_hat.params[i], prior=priors[i], dim=i,
                weights=weights)
    return fig
def main():
    # Command line arguments
    process_args()

    # Load vr data
    vr_data = preprocessing.load_vr_file(vr_file)
    vr_vec = vr_data.frames[:, 1]
    vr_fps = vr_data.fps
    vr_vec = (vr_vec - np.mean(vr_vec)) / np.std(vr_vec)

    # Load mocap data
    mocap_data = preprocessing.load_mocap_file_helper(open(mocap_file, "r"))
    mocap_vec = mocap_data.frames[:, 1]
    mocap_fps = mocap_data.fps
    mocap_vec = (mocap_vec - np.mean(mocap_vec)) / np.std(mocap_vec)

    # Upsample both sequences to the least common multiple of the two frame
    # rates so they can be compared sample-by-sample
    fps = np.lcm(vr_fps, mocap_fps)
    vr_vec_upsampled = np.interp(np.arange(0, vr_vec.size, vr_fps / fps),
                                 np.arange(vr_vec.size), vr_vec)
    mocap_vec_upsampled = np.interp(
        np.arange(0, mocap_vec.size, mocap_fps / fps),
        np.arange(mocap_vec.size), mocap_vec)

    predicted_offset = get_temporal_offset(mocap_vec_upsampled,
                                           vr_vec_upsampled, fps, 0, 10)
    print("Predicted Offset:", predicted_offset)

    # Measure how much the offset drifts across successive windows
    start, end, window, gap = 0, 60, 1, 30
    offsets = []
    while end < vr_vec.size / vr_fps:
        offset = get_temporal_offset(mocap_vec_upsampled, vr_vec_upsampled,
                                     fps, predicted_offset, 10,
                                     start=start, end=end)
        offsets.append(offset)
        start, end = start + gap, end + gap
    variability = np.array(offsets) - predicted_offset
    print("Variability:", variability)
    print("Average variability: ", np.mean(variability))

    plt.plot(np.arange(mocap_vec.size) / mocap_fps + predicted_offset,
             mocap_vec, 'b')
    plt.plot(np.arange(vr_vec.size) / vr_fps, vr_vec, 'r')
    plt.show()
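# Worked example (rates assumed for illustration): 90 fps VR data and
# 120 fps mocap data share a common sample grid at np.lcm(90, 120) fps.
import numpy as np

print(np.lcm(90, 120))  # -> 360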
def get_minimum_sample_time(self):
    """
    Generate a minimum time-array length that guarantees every tooth-mesh
    combination has been considered.
    """
    # Get meshing time between two teeth
    time2tooth = (1 / self.rotational_frequency_in) / self.GearPropIn['no_teeth']
    # Get lowest common multiple of the two tooth counts
    toothmeshlcm = np.lcm(self.GearPropIn['no_teeth'],
                          self.GearPropOut['no_teeth'])
    min_time = time2tooth * toothmeshlcm
    return min_time
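# Worked example (tooth counts assumed for illustration): 20 input teeth at
# 30 Hz meshing with a 31-tooth gear needs lcm(20, 31) = 620 tooth meshes,
# i.e. (1 / 30) / 20 * 620 seconds of signal.
import numpy as np

print((1 / 30) / 20 * np.lcm(20, 31))  # -> ~1.033 s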
def part2(data):
    data = [(d, idx) for idx, d in enumerate(data) if d]
    guess, skip = data[0][0], data[0][0]
    idx = 1
    total_len = len(data)
    while idx < total_len:
        x, i = data[idx]
        while (guess + i) % x != 0:
            guess += skip
        skip = np.lcm(skip, data[idx][0])
        idx += 1
    return guess
def löse2(self):
    kgv_old = kgv = uint64(1)
    id = dt = time = uint64(0)
    for dt, id in self.ids2:
        dt = dt % id  # target wait time > bus ID == "cycle time of the bus"
        kgv_old = kgv
        # least common multiple from numpy (about 10x faster than the native *)
        kgv = lcm(kgv, uint64(id))
        for _ in range(id):
            if ((id - time) % id) == dt:
                break
            time += kgv_old
    return time
def adjust_block_compatibility(ws, bs, gs):
    """Adjusts the compatibility of widths, bottlenecks, and groups."""
    assert len(ws) == len(bs) == len(gs)
    assert all(w > 0 and b > 0 and g > 0 for w, b, g in zip(ws, bs, gs))
    vs = [int(max(1, w * b)) for w, b in zip(ws, bs)]
    # make sure widths are not smaller than groups
    gs = [int(min(g, v)) for g, v in zip(gs, vs)]
    ms = [np.lcm(g, b) if b > 1 else g for g, b in zip(gs, bs)]
    # make sure that bottleneck widths are a common multiple of bs and gs
    vs = [max(m, int(round(v / m) * m)) for v, m in zip(vs, ms)]
    ws = [int(v / b) for v, b in zip(vs, bs)]
    assert all(w * b % g == 0 for w, b, g in zip(ws, bs, gs))
    return ws, bs, gs
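# Usage sketch (values assumed for illustration): one stage with width 24,
# bottleneck multiplier 2 and group width 8 is already compatible.
ws, bs, gs = adjust_block_compatibility([24], [2], [8])
print(ws, bs, gs)  # -> [24] [2] [8]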
def __sub__(self, other):  # frac1 - frac2, frac - int
    if not isinstance(other, (Frac, int)):
        raise ValueError("Invalid value format, it must be Frac or Integer value.")
    if isinstance(other, Frac):
        if self.y == other.y:
            # build a new Frac instead of mutating self in place
            return Frac(self.x - other.x, self.y)
        lcm = np.lcm(self.y, other.y)
        return Frac(self.x * (lcm // self.y) - other.x * (lcm // other.y), lcm)
    return Frac(self.x - self.y * other, self.y)
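# Usage sketch (assumes Frac(x, y) stores numerator x over denominator y):
a = Frac(1, 6)
b = Frac(1, 4)
c = a - b  # 2/12 - 3/12, via lcm(6, 4) = 12
print(c.x, c.y)  # -> -1 12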
def generate_keys():
    global private_key, n, g
    # hard-coded toy primes; set these higher than the SSS parameters
    p = 5
    q = 7
    private_key = numpy.lcm(p - 1, q - 1)
    n = p * q
    g = n + 1
    print("n", n)
    print("g", g)
    return n, g
def AWG_Sinewave(ifreq, Ioffset, Qoffset, Iamp, Qamp, Iphase, Qphase):
    '''
    ifreq: IF frequency in MHz
    '''
    AWG.Clear_ArbMemory(awgsess)
    WAVE = []
    ifvoltag = [min(abs(Qamp), 1), min(abs(Iamp), 1)]  # contain amplitude within 1V
    iffunction = ['sin', 'cos']
    iffreq = [ifreq, ifreq]
    ifoffset = [Qoffset, Ioffset]
    ifphase = [Qphase, Iphase]

    # construct waveform:
    for ch in range(2):
        channel = str(ch + 1)
        Nperiod = lcm(round(1000 / iffreq[ch] / dt * 100), 800) // 100
        print("Waveform contains %s points per sequence" % Nperiod)
        waveform = [
            ifvoltag[ch] * eval(iffunction[ch] +
                                '(x*%s*%s/1000*2*pi + %s/180*pi)' %
                                (dt, iffreq[ch], ifphase[ch])) + ifoffset[ch]
            for x in range(Nperiod)
        ]
        stat, wave = AWG.CreateArbWaveform(awgsess, waveform)
        # print('Waveform channel %s: %s <%s>' % (channel, wave, status_code(stat)))
        WAVE.append(wave)

    # Building Sequences:
    for ch in range(2):
        channel = str(ch + 1)
        # loop count can be >1 if a longer sequence is needed in the future!
        status, seqhandl = AWG.CreateArbSequence(awgsess, [WAVE[ch]], [1])
        # print('Sequence channel %s: %s <%s>' % (channel, seqhandl, status_code(status)))
        # Channel Assignment:
        stat = AWG.arb_sequence_handle(awgsess, RepCap=channel,
                                       action=["Set", seqhandl])
        # print('Sequence channel %s embedded: %s <%s>' % (channel, stat[1], status_code(stat[0])))

    # Trigger Settings:
    for ch in range(2):
        channel = str(ch + 1)
        AWG.operation_mode(awgsess, RepCap=channel, action=["Set", 0])
        AWG.trigger_source_adv(awgsess, RepCap=channel, action=["Set", 0])
    AWG.Init_Gen(awgsess)
    AWG.Send_Pulse(awgsess, 1)
    return