def propose(self):
    if self._dist == 'RoundedNormal':
        self.parameter.value = int(
            round(rnormal(self.parameter.value, self.proposal_sig)))
    # Default to normal random-walk proposal
    else:
        self.parameter.value = rnormal(self.parameter.value, self.proposal_sig)
def propose(self):
    # Propose new values using normal distribution
    if self.proposal_distribution == "Normal":
        # New normal deviate, centred on current value
        new_val = rnormal(self.stochastic.value,
                          self.adaptive_scale_factor * self.proposal_sd)
        # Round before setting proposed value
        self.stochastic.value = round_array(new_val)
    elif self.proposal_distribution == "Poisson":
        k = shape(self.stochastic.value)
        # Add or subtract (equal probability) Poisson sample
        new_val = self.stochastic.value + rpoisson(
            self.adaptive_scale_factor * self.proposal_sd) * (-ones(k))**(random(k) > 0.5)
        if self._positive:
            # Enforce positive values
            self.stochastic.value = abs(new_val)
        else:
            self.stochastic.value = new_val
    elif self.proposal_distribution == "Prior":
        self.stochastic.random()
def hop(self):
    """Hop proposal kernel"""
    if self.verbose > 1:
        print('\t' + self._id + ' Running Hop proposal kernel')
    # Mask for values to move
    phi = self.phi
    if self._prime:
        xp, x = self.values
    else:
        x, xp = self.values
    if self.verbose > 1:
        print('\t' + 'Current value of x = ' + str(x))
    sigma = max(phi * abs(xp - x)) / 3.0
    x = (xp + sigma * rnormal()) * phi + x * (phi == False)
    if self.verbose > 1:
        print('\t' + 'Proposed value = ' + str(x))
    self.hastings_factor = self._g(x, xp, sigma) - self._g(self.stochastic.value, xp, sigma)
    self.stochastic.value = x
def blow(self):
    """Blow proposal kernel"""
    if self.verbose > 1:
        print('\t' + self._id + ' Running Blow proposal kernel')
    # Mask for values to move
    phi = self.phi
    if self._prime:
        xp, x = self.values
    else:
        x, xp = self.values
    if self.verbose > 1:
        print('\t' + 'Current value ' + str(x))
    sigma = max(phi * abs(xp - x))
    x = x + phi * sigma * rnormal()
    if self.verbose > 1:
        print('\t' + 'Proposed value = ' + str(x))
    self.hastings_factor = self._g(x, xp, sigma) - self._g(self.stochastic.value, xp, sigma)
    self.stochastic.value = x
def propose(self): """ This method is called by step() to generate proposed values if self.proposal_distribution is "Normal" (i.e. no proposal specified). """ if self.proposal_distribution == "Normal": self.stochastic.value = rnormal(self.stochastic.value, self.adaptive_scale_factor * self.proposal_sd) elif self.proposal_distribution == "Prior": self.stochastic.random()
def random(self, size=None):
    mu = self.mu
    lam = self.lam
    alpha = self.alpha
    v = rnormal(loc=0., scale=1., size=size)**2
    x = mu + (mu**2) * v / (2. * lam) - mu / (2. * lam) * sqrt(
        4. * mu * lam * v + (mu * v)**2)
    z = runiform(low=0., high=1., size=size)
    # i = z > mu / (mu + x)
    # x[i] = (mu**2) / x[i]
    i = floor(z - mu / (mu + x)) * 2 + 1
    x = (x**-i) * (mu**(i + 1))
    return x + alpha
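
The body above is the Michael-Schucany-Haas transform for a shifted inverse-Gaussian (Wald) variate. Below is a minimal standalone sketch in plain NumPy with the commented-out branch written out explicitly; the name rwald_shifted and the rng argument are illustrative, not from the original.

import numpy as np

def rwald_shifted(mu, lam, alpha=0.0, size=None, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    v = rng.normal(size=size) ** 2                      # chi-square(1) deviate
    x = mu + (mu**2) * v / (2. * lam) - mu / (2. * lam) * np.sqrt(
        4. * mu * lam * v + (mu * v)**2)
    z = rng.uniform(size=size)
    # Keep x with probability mu / (mu + x); otherwise take the other root mu**2 / x.
    x = np.where(z > mu / (mu + x), (mu**2) / x, x)
    return x + alpha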
def propose(self): """ Proposals for positive definite matrix using random walk deviations on the Cholesky factor of the current value. """ # Locally store size of matrix dims = self.stochastic.value.shape # Add normal deviate to value and symmetrize dev = rnormal(0, self.adaptive_scale_factor * self.proposal_sd, size=dims) symmetrize(dev) # Replace self.stochastic.value = dev + self.stochastic.value
def random(self, size=None):
    u = runiform(low=0., high=1., size=size)
    n = rnormal(self.mu, self.sigma, size=size)
    return n - self.nu * log(u)
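
The return value above is a normal draw plus an exponential tail with mean nu, since -log(U) for U ~ Uniform(0, 1) is a unit-mean exponential. A minimal standalone sketch, assuming plain NumPy; the name rex_gaussian is illustrative.

import numpy as np

def rex_gaussian(mu, sigma, nu, size=None, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    u = rng.uniform(size=size)            # -log(u) ~ Exponential(1)
    n = rng.normal(mu, sigma, size=size)  # Gaussian component
    return n - nu * np.log(u)             # normal plus exponential tail of mean nu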
def cross_render_sample_jitter(
        target_sample,
        source_sample,
        grain_duration=0.01,
        grain_dur_jitter=0.0,
        grain_interval_jitter=0.0,
        grain_jitter=0.0,  # proportional offset grain centers from each other
        density=4.0,
        verbose=0,
        code_size=4,
        hop_length=1024,
        frame_length=4096,
        delay_step_size=8,
        n_delays=128,
        source_anchor='left',  # or 'center', not actually implemented
        dest_anchor='center',  # or 'left'
        window='hann',
        n_workers=1,  # parallelism
        chunk_size=None,  # parallelism sync
        seamless=False,
        seed=None,
        random_flip=True,
        **kwargs):
    """
    Randomize everything, but all grains from a single fit share the same
    target location. When anchor is 'center', grain locations are centres.
    """
    rseed(seed)
    if chunk_size is None:
        chunk_size = n_workers * 4
    output_sample = sample.zero_sample(duration=target_sample.duration(),
                                       sr=target_sample.sr,
                                       stem=source_sample.stem,
                                       parent_path=target_sample.parent_path)
    output_sample.append_stem('_x_')
    output_sample.append_stem(target_sample.stem)
    kwarg_fn = timed_stream.DictStream(kwargs)
    inter_onset_period = grain_duration / density
    dest_high = target_sample.duration()
    approx_last_step = dest_high / inter_onset_period
    if verbose >= 10:
        print("gp", inter_onset_period, density, grain_duration, code_size)
        print('dest_high', dest_high)
    if verbose >= 2:
        print('approx_last_step', approx_last_step)
    param_list = [
        (step, target_t, kwarg_fn(target_t))
        for step, target_t in enumerate(
            pattern_iter.gamma_renewal_proc_scale(
                interval=inter_onset_period,
                var=inter_onset_period * grain_interval_jitter**2,
                high=dest_high,
                verbose=verbose))
    ]
    # I chunk this for intermediate results:
    # https://stackoverflow.com/a/43085080
    with Parallel(n_jobs=n_workers, verbose=verbose * 3) as pool:
        step = 0
        target_t_prev = 0.0
        try:
            for chunk in chunker(iter(param_list), chunk_size):
                for local_step, match in enumerate(
                        pool([
                            delayed(match_from_sample)(
                                target_sample=target_sample,
                                source_sample=source_sample,
                                target_t=target_t,
                                verbose=verbose,
                                hop_length=hop_length,
                                frame_length=frame_length,
                                delay_step_size=delay_step_size,
                                n_delays=n_delays,
                                anchor=source_anchor,
                                window=window,
                                code_size=code_size,
                                **kw) for step, target_t, kw in chunk
                        ])):
                    grains = match.grains()
                    ac_gains = match.gains()
                    rates = match.rates()
                    # if verbose >= 2:
                    #     elapsed = time() - start_time()
                    #     print(
                    #         "step", step+1,
                    #         "/", approx_last_step, ",",
                    #         elapsed, "/",
                    #         elapsed * (step+1)/approx_last_step)
                    target_t = match.target_t
                    target_t_inc = target_t - target_t_prev
                    target_t_prev = target_t
                    if verbose >= 3:
                        print("step", step, local_step,
                              't {:.5f}+{:.5f}'.format(target_t, target_t_inc),
                              'loss {:.5f}*{:.5f}'.format(match.loss, match.scale))
                    if verbose >= 16:
                        print("match ", match)
                    for grain, ac_gain, rate in zip(grains, ac_gains, rates):
                        if ac_gain == 0.0:
                            continue
                        gain = sqrt(abs(ac_gain * rate))
                        if random_flip:
                            gain = gain * (2 * randint(2) - 1)
                        dest_t = match.target_t + np.asscalar(
                            rnormal(loc=0.0, scale=grain_jitter, size=1))
                        if seamless:
                            base_duration = target_t_inc
                        else:
                            base_duration = inter_onset_period
                        this_grain_duration = min(
                            gamma_v(base_duration * density,
                                    grain_dur_jitter * base_duration * density),
                            5.0)
                        if verbose >= 6:
                            print("grain",
                                  'dest {:.5f}'.format(dest_t),
                                  'duration {:.5f}*{:.5f}'.format(
                                      this_grain_duration,
                                      this_grain_duration / target_t_inc))
                        if verbose >= 19:
                            print("is", grain, gain, rate)
                        sample.overdub_t(
                            dest_sample=output_sample,
                            source_sample=grain.sample,
                            dest_t=dest_t,
                            source_t=grain.t,
                            duration=this_grain_duration,
                            rate=rate,
                            mul=gain,
                            window=window,
                        )
                    step += 1
        except StopIteration as e:
            print('some iterator broke {}'.format(e))
            traceback.print_tb(e.__traceback__)
        except KeyboardInterrupt:
            print('stopping early')
    return output_sample
def cross_render_sample_adaptive(
        target_sample,
        source_sample,
        density=2.0,
        verbose=0,
        code_size=4,
        hop_length=1024,
        frame_length=4096,
        delay_step_size=8,
        grain_jitter=0.0,  # proportional offset grain centers from each other
        n_delays=128,
        source_anchor='center',  # or 'left', maybe not actually implemented?
        dest_anchor='center',  # or 'left'
        analysis_window='cosine',
        synthesis_window='hann',
        seed=None,
        random_flip=True,
        match_rtol=0.01,  # search early stopping parameter; not implemented
        progress=False,
        **kwargs):
    """
    Fit one new match at a time.
    """
    rseed(seed)
    sr = target_sample.sr
    output_sample = sample.zero_sample(duration=target_sample.duration(),
                                       sr=sr,
                                       stem=source_sample.stem,
                                       parent_path=target_sample.parent_path)
    output_sample.append_stem('_x_')
    output_sample.append_stem(target_sample.stem)
    kwarg_fn = timed_stream.DictStream(kwargs)
    grain_duration = frame_length / sr
    inter_onset_period = grain_duration / density
    dest_high = target_sample.duration()
    approx_last_step = dest_high / inter_onset_period
    if verbose >= 10:
        print("gp", inter_onset_period, density, grain_duration, code_size)
        print('dest_high', dest_high)
    if verbose >= 2:
        print('approx_last_step', approx_last_step)
    step = 0
    target_t_prev = 0.0
    target_t = 0.0
    target_i_prev = 0
    target_i = 0
    early_stop = False
    while target_i < output_sample.end:
        try:
            target_i += hop_length
            target_t = target_i / sr
            kw = kwarg_fn(target_t)
            match = match_from_sample(target_sample=target_sample,
                                      source_sample=source_sample,
                                      target_t=target_t,
                                      hop_length=hop_length,
                                      frame_length=frame_length,
                                      delay_step_size=delay_step_size,
                                      n_delays=n_delays,
                                      anchor=source_anchor,
                                      window=analysis_window,
                                      code_size=code_size,
                                      match_rtol=match_rtol,
                                      verbose=verbose,
                                      **kw)
            grains = match.grains()
            ac_gains = match.gains()
            rates = match.rates()
            # if verbose >= 2:
            #     elapsed = time() - start_time()
            #     print(
            #         "step", step+1,
            #         "/", approx_last_step, ",",
            #         elapsed, "/",
            #         elapsed * (step+1)/approx_last_step)
            target_t_inc = target_t - target_t_prev
            target_t_prev = target_t
            target_i_prev = target_i
            if verbose >= 3:
                print("step", step,
                      't {:.5f}+{:.5f}'.format(target_t, target_t_inc),
                      'loss {:.5f}*{:.5f}'.format(match.loss, match.scale))
            if verbose >= 16:
                print("match ", match)
            for g_i, grain, ac_gain, rate in zip(range(len(grains)), grains,
                                                 ac_gains, rates):
                if ac_gain == 0.0:
                    continue
                gain = sqrt(abs(ac_gain * rate))
                if random_flip:
                    gain = gain * (2 * randint(2) - 1)
                dest_t = target_t + np.asscalar(
                    rnormal(loc=0.0,
                            scale=grain_duration * grain_jitter,
                            size=1))
                this_grain_duration = target_t_inc * density
                if verbose >= 8:
                    print("subgrain", g_i,
                          'dest {:.5f}'.format(dest_t),
                          'gain {:.5f}'.format(gain),
                          'rate {:.5f}'.format(rate),
                          'duration {:.5f}*{:.5f}'.format(
                              this_grain_duration,
                              this_grain_duration / target_t_inc))
                if verbose >= 19:
                    print("is", grain, gain, rate)
                landscape = np.copy(
                    output_sample.get_audio_data_t(dest_t,
                                                   duration=this_grain_duration,
                                                   anchor="center"))
                sample.overdub_t(
                    dest_sample=output_sample,
                    source_sample=grain.sample,
                    dest_t=dest_t,
                    source_t=grain.t,
                    duration=this_grain_duration,
                    rate=rate,
                    mul=gain,
                    window=synthesis_window,
                    source_anchor=source_anchor,
                    dest_anchor=dest_anchor,
                    verbose=verbose,
                )
                if verbose >= 6:
                    y_so_far = output_sample.get_audio_data_t(
                        0, target_t + this_grain_duration)
                    print("glitch",
                          np.percentile(np.abs(np.gradient(y_so_far)),
                                        [50, 95, 99, 99.5]))
            step += 1
            if progress:
                print(step, '/', approx_last_step)
        except StopIteration as e:
            print('some iterator broke {}'.format(e))
            traceback.print_tb(e.__traceback__)
        except KeyboardInterrupt:
            print('stopping early at {} -- {}'.format(step, target_t))
            early_stop = True
            break
    return output_sample
from numpy.random import normal as rnormal
from numpy import array, arange, sin, cos, pi, identity, matmul, zeros, cov
from numpy.linalg import inv
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt

del_t = 0.1
t = arange(0, 1000, del_t)

# Noisy observations of the trajectory
x = .002 * t + rnormal(0, .5, 10000)
y = .0000003 * (t - 500)**3 + rnormal(0, .5, 10000)
z = 4 * sin(pi * t / 1000) + rnormal(0, .5, 10000)

# Noise-free ("true") trajectory
xt = .002 * t
yt = .0000003 * (t - 500)**3
zt = 4 * sin(pi * t / 1000)

# Finite-difference velocity estimates (last difference repeated at the end)
v_x = array([(x[i] - x[i - 1]) / del_t for i in range(1, len(x))] + [(x[-1] - x[-2]) / del_t])
v_y = array([(y[i] - y[i - 1]) / del_t for i in range(1, len(y))] + [(y[-1] - y[-2]) / del_t])
v_z = array([(z[i] - z[i - 1]) / del_t for i in range(1, len(z))] + [(z[-1] - z[-2]) / del_t])

# State arrays [position, velocity] per axis
X = array([x, v_x])
Y = array([y, v_y])
Z = array([z, v_z])

# Constant-velocity state transition, process-noise input, process-noise
# covariance, and initial state covariance
F = array([[1, del_t], [0, 1]])
G = array([[del_t**2 / 2], [del_t]])
Q = matmul(G, G.transpose()) * .5**2
Px = identity(2) * 5
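
The script above stops after defining the model matrices. Below is a minimal sketch of the standard Kalman predict/update recursion for the x axis, reusing F, Q, Px, and the imports already present; H, R, and the position-only measurement model are assumptions, not part of the original script.

H = array([[1, 0]])                # observe position only (assumed)
R = array([[.5**2]])               # measurement noise variance (assumed, matches the simulated 0.5 std)
x_est = array([[x[0]], [v_x[0]]])  # initial state estimate
P = Px.copy()
x_filtered = zeros(len(t))
for k in range(len(t)):
    # Predict
    x_est = matmul(F, x_est)
    P = matmul(matmul(F, P), F.transpose()) + Q
    # Update with the k-th position measurement
    S = matmul(matmul(H, P), H.transpose()) + R
    K = matmul(matmul(P, H.transpose()), inv(S))
    x_est = x_est + matmul(K, array([[x[k]]]) - matmul(H, x_est))
    P = matmul(identity(2) - matmul(K, H), P)
    x_filtered[k] = x_est[0, 0]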