from random import uniform as _rand  # assumption: _rand is a two-argument sampler; the original module may alias a different one


def random_xy_generator(count, x_min, x_max, y_min, y_max):
    """Return a list of `count` (x, y) pairs sampled from the given ranges."""
    data = []
    for i in range(count):
        x = _rand(x_min, x_max)
        y = _rand(y_min, y_max)
        data.append((x, y))
    return data
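
# Hedged usage sketch (not part of the original snippet): draw a few points and
# print them, relying on the _rand alias assumed above.
points = random_xy_generator(5, x_min=0.0, x_max=10.0, y_min=-1.0, y_max=1.0)
for x, y in points:
    print('x={:.3f}, y={:.3f}'.format(x, y))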

def func(self, t):
    # Pin the endpoints: t == 0 maps to 0 and t == 1 maps to 1;
    # any other t returns a fresh value from _rand().
    if t == 0:
        return 0
    elif t == 1:
        return 1
    else:
        return _rand()

import numpy as np


def random(x, y):
    """Give each of the m rows of x a random score in [0, 1); y is unused."""
    from random import random as _rand
    m, n = np.shape(x)
    scores = np.zeros((m, 1))
    for i in range(m):
        scores[i] = _rand()
    return scores, None
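
# Hedged usage sketch (not part of the original snippet): score a small feature matrix.
X = np.ones((4, 3))
scores, _ = random(X, y=None)
print(scores.shape)  # -> (4, 1)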

from random import random as _rand  # alias assumed by this generator


def random_values_generator(*, v_start=0.1, count=float('inf'), amp=0.2,
                            v_min=None, v_max=None):
    """Yield a random walk whose step size scales with the current value, clamped to [v_min, v_max]."""
    if 0.0 in [v_start, v_min, v_max]:
        raise ValueError('start, min, and max values cannot be zero')
    v_start = float(v_start)
    if v_min is None:
        v_min = v_start / 10
    if v_max is None:
        v_max = v_start * 10
    v_min, v_max = float(v_min), float(v_max)
    if not v_min <= v_start <= v_max:
        raise ValueError('start not between min and max values')
    i = 0
    v = v_start
    while i < count:
        i += 1
        # Step by up to +/- amp * v, then clamp into [v_min, v_max].
        v = max(v_min, min(v_max, v + amp * v * (_rand() - _rand())))
        if v == 0.0:
            # Never yield exactly zero; ZERO is presumably a small positive module-level constant.
            v = max(v_min, min(v_max, ZERO))
        yield v
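
# Hedged usage sketch (not part of the original snippet): take a finite number of
# values via the count argument. ZERO below is a hypothetical placeholder for the
# module-level constant referenced inside the generator.
ZERO = 1e-12
for v in random_values_generator(v_start=1.0, amp=0.3, count=5):
    print(round(v, 4))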

def refract(self, ray, intersect, n):
    # With probability self.__frc the ray is transmitted undisturbed;
    # otherwise defer to the parent class's refraction.
    if _rand() < self.__frc:
        # Ray just passes through
        ray.pos = intersect
    else:
        super().refract(ray, intersect, n)

def fconv(f, y, nl, nr):
    # The fragment kept only the inner accumulation loop; the head and outer loop
    # below are restored from the call in filterTS (slide an (nl + nr + 1)-point
    # window over the padded signal).
    g = []
    for i in range(nl, len(y) - nr):
        sumV = 0
        for n in range(-nl, nr + 1):
            sumV += f[n + nl] * y[i + n]
        g.append(sumV)
    return g


def filterTS(self, y):
    # Method of the Savgol class used in the __main__ block below.
    if len(self.filter) > len(y):
        raise TypeError("timeseries size must be bigger than size of filter")
    # Pad the signal at the extremes with values taken from the signal itself.
    # OLD padding:
    # firstvals = mattranslate(matabs(mattranslate(y[1:self.nright+1][::-1], -y[0])), y[0])
    # lastvals = mattranslate(matabs(mattranslate(y[-self.nleft-1:-1][::-1], -y[-1])), y[-1])
    # NEW padding: should preserve the 1st-order derivative at the extremes.
    ystart = y[0]   # sum(y[0:int(self.nright/4)])
    yend = y[-1]    # sum(y[-int(self.nleft/4):-1])
    firstvals = mattranslate(matscalarprod(mattranslate(y[1:self.nright+1][::-1], -ystart), -1), ystart)
    lastvals = mattranslate(matscalarprod(mattranslate(y[-self.nleft-1:-1][::-1], -yend), -1), yend)
    y = firstvals + y + lastvals
    # Convolve the padded signal with the filter list.
    return fconv(self.filter, y, self.nleft, self.nright)


if __name__ == '__main__':
    from math import sin as _sin
    from random import random as _rand

    # Noisy test signal whose period grows along the series.
    TS = [_rand() * 0.05 + _sin(2 * 3.14 * j / (25 + 175 * j / 1000)) for j in range(1000)]
    print(TS)
    SGfilter = Savgol(nleft=16, nright=16, order=4, deriv=0)
    print(SGfilter.filterTS(TS))
    SGfilter = Savgol(nleft=16, nright=16, order=4, deriv=1)
    print(SGfilter.filterTS(TS))

# Module-level aliases assumed by this function: _call is taken to be
# subprocess.check_output (the except clause prints exc.output) and _rand to be
# random.randint; the original module may bind them differently.
import os as _os
from random import randint as _rand
from subprocess import STDOUT as _STDOUT
from subprocess import CalledProcessError as _call_err
from subprocess import check_output as _call

import pandas as pd


def join_rsid(rsids, dbsnp_file, outfile, sort=True, as_df=False):
    """Use linux join to create a lookup table of rsids.

    Args:
        rsids (str/list): rsids as a file name (string), a list of rsids, or a Series.
        dbsnp_file (str): The dbsnp lookup file from make_lookup_tables; should be
                          the .rslookup.rs_sort.txt file (zipped ok).
        outfile (str): Name of the outfile to write to.
        sort (bool): Pre-sort the rsids.
        as_df (bool): Return a DataFrame.

    Writes:
        A space-separated table of rsid, chrom, start, end for all rsids.

    Returns:
        DataFrame: DataFrame of the written table, only returned if as_df is True.
    """
    if isinstance(rsids, pd.core.series.Series):
        rsids = rsids.tolist()
    if isinstance(rsids, (list, tuple, set)):
        rsids = sorted(list(set(rsids)))
        tmpfile = outfile + '.rsids.tmp'
        with open(tmpfile, 'w') as fout:
            fout.write('\n'.join(rsids))
        rsids = tmpfile
    else:
        tmpfile = None
    rsids = _os.path.abspath(rsids)
    outfile = _os.path.abspath(outfile)
    if sort:
        print('Sorting')
        cat = 'zcat' if rsids.endswith('gz') else 'cat'
        tmpfile = 'tmpsort_{}'.format(_rand(1000, 20000))
        script = r"""{cat} {rsids} | sort > {tmp}; mv {tmp} {rsids}"""
        _call(script.format(cat=cat, rsids=rsids, tmp=tmpfile), shell=True)
    print('Joining')
    script = r"""join {rsids} {dbsnp} > {outfile}"""
    try:
        _call(script.format(rsids=rsids, dbsnp=dbsnp_file, outfile=outfile),
              stderr=_STDOUT, shell=True, universal_newlines=True)
    except _call_err as exc:
        print("Status : FAIL", exc.returncode, exc.output)
        raise exc
    print('Done, file {} has the joined list'.format(outfile))
    if as_df:
        print('Getting DataFrame')
        try:
            df = pd.read_csv(outfile, sep=' ', header=None, index_col=0)
        except pd.io.common.EmptyDataError:
            print('Joined file empty, skipping')
            return None
        df.index.name = None
        df.columns = ['chrom', 'start', 'end']
        return df
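
# Hedged usage sketch (not from the original source). Requires the unix `join`,
# `sort`, and `zcat`/`cat` utilities on PATH; the file names below are hypothetical
# placeholders for a lookup table produced by make_lookup_tables.
# df = join_rsid(['rs12345', 'rs67890'],
#                dbsnp_file='dbsnp.rslookup.rs_sort.txt.gz',
#                outfile='rsid_positions.txt',
#                as_df=True)
# print(df)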