def __init__(self, img):
    self.img = img
    self.ch_range = np.empty(3)
    # range of RGB color values (default: max range)
    self.range = [255, 255, 255]
    self.ch_range[0] = ma.ptp(img[..., 0])  # range of red values
    self.ch_range[1] = ma.ptp(img[..., 1])  # range of green values
    self.ch_range[2] = ma.ptp(img[..., 2])  # range of blue values
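# Usage sketch (not from the original): the enclosing image class is not part
# of the snippet above, so this just exercises the same per-channel ma.ptp
# pattern on a made-up RGB array.
import numpy as np
import numpy.ma as ma

rng = np.random.default_rng(0)
img = rng.integers(0, 256, size=(4, 4, 3))                    # hypothetical 4x4 RGB image
ch_range = np.array([ma.ptp(img[..., c]) for c in range(3)])  # per-channel peak-to-peak
print(ch_range)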
def checksum_puzzle_1(spreadsheet):
    # @documentation: All or part of the documentation is missing!
    # The example had a row that was shorter than the other ones;
    # this is the workaround for that. The way of reading in the
    # input shown in the solution to the second puzzle would have
    # sufficed for the given test (puzzle) input.
    line_split = spreadsheet.split("\n")
    column_split = [line.split("\t") for line in line_split]
    max_line_length = max([len(line) for line in column_split])
    # Pad short rows with -1 so the rows form a rectangular array,
    # then mask the padding values below.
    for line in column_split:
        while len(line) < max_line_length:
            line.append(-1)
    sheet = ma.masked_equal(ma.array(column_split, dtype=int), -1)
    checksum = ma.sum(ma.ptp(sheet, axis=1))
    return checksum
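# Usage sketch (assumption): the tab-separated input below is the Advent of
# Code 2017 Day 2 sample, whose second row is shorter than the others and so
# exercises the -1 padding above; checksum_puzzle_1 and its numpy.ma import
# are assumed to be in scope.
example = "5\t1\t9\t5\n7\t5\t3\n2\t4\t6\t8"
print(checksum_puzzle_1(example))  # sum of per-row ranges: 8 + 4 + 6 = 18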
def measure(mode, x, y, x0, x1):
    """ return a measure (mean, max, min, median, or peak-to-peak) of y in the window x0 to x1 """
    xm = ma.masked_outside(x, x0, x1)
    ym = ma.array(y, mask=ma.getmask(xm))
    if mode == 'mean':
        r1 = ma.mean(ym)
        r2 = ma.std(ym)
    if mode == 'max':
        r1 = ma.max(ym)
        r2 = 0
    if mode == 'min':
        r1 = ma.min(ym)
        r2 = 0
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    return (r1, r2)
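# Usage sketch (not from the original): call the measure() helper above on
# synthetic data; assumes measure() and its numpy.ma import are in scope.
import numpy as np

x = np.linspace(0.0, 10.0, 1001)
y = np.sin(x)
mean_val, std_val = measure('mean', x, y, 2.0, 4.0)  # mean and std of y for 2 <= x <= 4
p2p_val, _ = measure('p2p', x, y, 0.0, 10.0)         # peak-to-peak of y over the full window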
def measure(mode, x, y, x0, x1, thresh=0):
    """ return a measure of y in the window x0 to x1 """
    xt = x.view(numpy.ndarray)  # strip MetaArray stuff - much faster!
    v = y.view(numpy.ndarray)
    xm = ma.masked_outside(xt, x0, x1).T
    ym = ma.array(v, mask=ma.getmask(xm))
    if mode == 'mean':
        r1 = ma.mean(ym)
        r2 = ma.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        return (0, 0)
        # NOTE: the remainder of this branch is unreachable (disabled by the
        # early return above) and references names that are not defined in
        # this function (self.dat, i, j, thisaxis).
        slope = numpy.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win) / 20)  # look over small ranges
        for k in win:  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            newa = numpy.array(self.dat[i][j, thisaxis, tb])
            ppars = numpy.polyfit(x[tb], ym[tb], 1)  # do a linear fit - smooths the slope measures
            slope = numpy.append(slope, ppars[0])  # keep track of max slope
        r1 = numpy.amax(slope)
        r2 = numpy.argmax(slope)
    return (r1, r2)
def measure(mode, x, y, x0, x1, thresh=0):
    """ return a measure of y in the window x0 to x1 """
    xm = ma.masked_outside(x, x0, x1)  # .compressed()
    ym = ma.array(y, mask=ma.getmask(xm))  # .compressed()
    if mode == 'mean':
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'minormax':  # whichever extremum has the larger magnitude
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]
        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == '1090':  # measure 10-90% time, also returns max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1 * r1
        y90 = 0.9 * r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9] - xm[sm1]
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        return (0, 0)
        # NOTE: the remainder of this branch is unreachable (disabled by the
        # early return above) and references names that are not defined in
        # this function (self.dat, i, j, thisaxis).
        slope = np.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win) / 20)  # look over small ranges
        for k in win:  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            newa = np.array(self.dat[i][j, thisaxis, tb])
            ppars = np.polyfit(x[tb], ym[tb], 1)  # do a linear fit - smooths the slope measures
            slope = np.append(slope, ppars[0])  # keep track of max slope
        r1 = np.amax(slope)
        r2 = np.argmax(slope)
    return (r1, r2)
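# Usage sketch (not from the original) for the 'charge'/'area' branch of the
# variant above: note that it divides the masked sum by the window length
# rather than integrating, so for a constant signal the result scales with
# the number of samples in the window.
import numpy as np

x = np.linspace(0.0, 1.0, 501)
y = np.ones_like(x)
charge, _ = measure('charge', x, y, 0.2, 0.8)  # sum of in-window samples / 0.6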
def inject(time0, flux0, **kw):
    """
    Inject a transit into an existing time series.

    Parameters
    ----------
    time0 : time series (required).
    flux0 : flux series (required).
    phase : Phase of ingress (phase * P = time of ingress)
        or:
    epoch : Epoch of mid transit.
    df    : Depth of transit (ppm)
    s2n   : Signal to noise (noise computed in a naive way).
    tdur  : Transit duration (units of days).  If not given, compute
            assuming a Keplerian orbit.

    Returns
    -------
    f : the modified flux time series.
    """
    t = time0.copy()
    f = flux0.copy()

    assert kw.has_key('epoch') ^ kw.has_key('phase'), \
        "Must specify epoch xor phase"
    assert kw.has_key('s2n') ^ kw.has_key('df'), \
        "Must specify s2n xor df"
    assert kw.has_key('P'), "Must specify Period"

    P = kw['P']
    if kw.has_key('tdur'):
        tdur = kw['tdur']
    else:
        tdur = a2tdur(P2a(P))

    tbase = ma.ptp(t)
    tfold = np.mod(t, P)

    if kw.has_key('s2n'):
        noise = ma.std(f)
        df = kw['s2n'] * noise / np.sqrt(ntpts(P, tdur, tbase, lc))
    else:
        df = 1e-6 * kw['df']

    if kw.has_key('epoch'):
        epoch = kw['epoch']
    else:
        epoch = kw['phase'] * P

    epoch = np.mod(epoch, P)

    tm = abs(tfold - epoch)  # Time before (or after) mid-transit.

    ie = 0.5 * (tdur - lc)
    oe = 0.5 * (tdur + lc)
    frac = (tm - ie) / lc

    idlo = np.where(tm < ie)
    idfr = np.where((tm > ie) & (tm < oe))
    f[idlo] -= df
    f[idfr] -= df * (1 - frac[idfr])

    return f
def inject(time0, flux0, **kw):
    """
    Inject a transit into an existing time series.

    Parameters
    ----------
    time0 : time series (required).
    flux0 : flux series (required).
    phase : Phase of ingress (phase * P = time of ingress)
        or:
    epoch : Epoch of mid transit.
    df    : Depth of transit (ppm)
    s2n   : Signal to noise (noise computed in a naive way).
    tdur  : Transit duration (units of days).  If not given, compute
            assuming a Keplerian orbit.

    Returns
    -------
    f : the modified flux time series.
    """
    t = time0.copy()
    f = flux0.copy()

    # NOTE: `lc` (cadence, in days) and the helpers a2tdur, P2a and ntpts are
    # referenced below but are not defined in this snippet.
    assert ('epoch' in kw) ^ ('phase' in kw), "Must specify epoch xor phase"
    assert ('s2n' in kw) ^ ('df' in kw), "Must specify s2n xor df"
    assert 'P' in kw, "Must specify Period"

    P = kw['P']
    if 'tdur' in kw:
        tdur = kw['tdur']
    else:
        tdur = a2tdur(P2a(P))

    tbase = ma.ptp(t)
    tfold = np.mod(t, P)

    if 's2n' in kw:
        noise = ma.std(f)
        df = kw['s2n'] * noise / np.sqrt(ntpts(P, tdur, tbase, lc))
    else:
        df = 1e-6 * kw['df']

    if 'epoch' in kw:
        epoch = kw['epoch']
    else:
        epoch = kw['phase'] * P

    epoch = np.mod(epoch, P)

    tm = abs(tfold - epoch)  # Time before (or after) mid-transit.

    ie = 0.5 * (tdur - lc)
    oe = 0.5 * (tdur + lc)
    frac = (tm - ie) / lc

    idlo = np.where(tm < ie)
    idfr = np.where((tm > ie) & (tm < oe))
    f[idlo] -= df
    f[idfr] -= df * (1 - frac[idfr])

    return f
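# Usage sketch (assumptions flagged): inject() relies on a module-level
# cadence `lc` and on helpers a2tdur/P2a/ntpts that are not shown; the lc
# value below is a guess (roughly the Kepler long cadence in days) and only
# resolves if this runs in the same module as inject(). Passing tdur, epoch
# and df explicitly keeps the call from needing the missing helpers.
import numpy as np

lc = 29.4 / 60.0 / 24.0               # assumed cadence in days (not from the original)
t = np.arange(0.0, 90.0, lc)          # 90 days of evenly sampled times
f = np.ones_like(t)                   # flat light curve
f_injected = inject(t, f, P=10.0, epoch=1.0, df=500.0, tdur=0.3)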
def measure(mode, x, y, x0, x1, thresh=0, slopewin=1.0):
    """ return a measure of y in the window x0 to x1 """
    xm = ma.masked_outside(x, x0, x1)  # .compressed()
    ym = ma.array(y, mask=ma.getmask(xm))  # .compressed()
    if mode == 'mean':
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'minormax':  # whichever extremum has the larger magnitude
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]
        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == '1090':  # measure 10-90% time, also returns max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1 * r1
        y90 = 0.9 * r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9] - xm[sm1]
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        slope = []
        win = ma.flatnotmasked_contiguous(ym)
        dt = x[1] - x[0]
        st = int(slopewin / dt)  # use slopewin duration window for fit.
        print('st: ', st)
        for k, w in enumerate(win):  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            ppars = np.polyfit(x[tb], ym[tb], 1)  # do a linear fit - smooths the slope measures
            slope.append(ppars[0])  # keep track of max slope
        r1 = np.max(slope)
        r2 = np.argmax(slope)
    return (r1, r2)
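# Usage sketch (not from the original) for the 'minormax' mode above: the
# synthetic trace has a negative peak of larger magnitude than the positive
# one, so the negative extremum and its x position are returned.
import numpy as np

x = np.linspace(0.0, 1.0, 1001)
y = np.exp(-((x - 0.3) / 0.05) ** 2) - 2.0 * np.exp(-((x - 0.7) / 0.05) ** 2)
peak, x_at_peak = measure('minormax', x, y, 0.0, 1.0)  # ~ -2.0 near x = 0.7
amp, _ = measure('p2p', x, y, 0.0, 1.0)                # peak-to-peak amplitude, ~3.0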