def LST(y, m, d, ut1, EL):
    '''Calculate the Local Sidereal Time.

    input:  year, month, day (do not have to be integer), ut1,
            longitude of the site (all inputs can be arrays)
    output: Local sidereal time in degrees

    example:
        LST(2012, 10, 29, 19 + 53./60 - 8, 116.4)
        # the Local sidereal time in Beijing (UTC+8) at 2012-10-29 19:53:00
        # output = 332.86374268340001  (degrees)
    '''
    def J0(year, month, day):
        '''Julian date at 0 h UT (helper for LST)'''
        j0 = 367.0*year - np.fix(7.0*(year + np.fix((month + 9)/12.0))/4.0) \
             + np.fix(275.0*month/9) + day + 1721013.5
        return j0

    y = np.float64(y)
    m = np.float64(m)
    d = np.float64(d)
    ut1 = np.float64(ut1)
    EL = np.float64(EL)

    dd = np.fix(d)
    time = (d - dd)*24.0
    ut = (ut1 + time) % 24
    j0 = J0(y, m, dd)
    j = (j0 - 2451545.0)/36525.0
    g0 = 100.4606184 + 36000.77004*j + 0.000387933*j**2 - 2.583e-8*j**3
    g0 = g0 % 360
    gst = g0 + 360.98564724*ut/24.0
    lst = gst + EL
    lst = lst - 360.0*np.fix(lst/360.0)
    return lst
def dec2Dec2(Dec):
    '''Convert decimal degrees to dec2 format.

    dec2 is np.array([XXdegree, XXminute, XXsecond]); if the angle is
    negative it is np.array([-XXdegree, -XXminute, -XXsecond]).

    examples:
        single degree input, np.array([degree]):
            dec2Dec2(np.array([45.51]))
            output = array([[ 45., 30., 36.]])
            i.e. 45 degrees 30 minutes 36 seconds
        array input, np.array([degree1, degree2, degree3, ...]):
            dec2Dec2(np.array([-256.32, -135.5, 10.3234, 256.3333333333]))
            output = array([[-256. , -19. , -12. ],
                            [-135. , -30. ,   0. ],
                            [  10. ,  19. ,  24.24 ],
                            [ 256. ,  19. ,  59.99999988]])
            i.e. 4 dec2 values
    '''
    Dec = np.float64(Dec)
    degree = np.fix(Dec)
    _point = (Dec - degree)*60.0
    minute = np.fix(_point)
    arc = (_point - minute)*60.0
    return np.array([degree, minute, arc]).T
def get_cofe_target(ut, lat, lon, target):
    # function to use ephem to find az and el of specified target for COFE
    # parameters: UT, lat, lon, target
    cofe = ephem.Observer()
    cofe.elevation = 0.0
    year = 2013
    month = 10
    day = 4
    az = []
    el = []
    for u, la, lo in zip(ut, lat, lon):
        if u > 24:
            u = u - 24
            day = 5
        hour = int(np.fix(u))
        minute = (u - hour)*60
        iminute = int(np.fix(minute))
        second = int(np.fix((minute - iminute)*60))
        datestring = (str(year) + '/' + str(month) + '/' + str(day) + ' ' +
                      str(hour) + ':' + str(iminute) + ':' + str(second))
        cofe.date = datestring
        cofe.lon = str(rtd*lo)   # rtd: radians-to-degrees factor (module global)
        cofe.lat = str(rtd*la)
        pos = ephem.__getattribute__(target)(cofe)
        az.append(pos.az)
        el.append(pos.alt)
    return np.array(az), np.array(el)
def nlfer(signal, pitch, parameters):
    #---------------------------------------------------------------
    # Set parameters.
    #---------------------------------------------------------------
    N_f0_min = int(np.around((parameters['f0_min']*2/float(signal.new_fs))*pitch.nfft))
    N_f0_max = int(np.around((parameters['f0_max']/float(signal.new_fs))*pitch.nfft))

    window = hanning(pitch.frame_size+2)[1:-1]
    data = np.zeros((signal.size))  # Needs a separate array, otherwise striding
    data[:] = signal.filtered       # and windowing would modify signal.filtered

    #---------------------------------------------------------------
    # Main routine.
    #---------------------------------------------------------------
    samples = np.arange(int(np.fix(float(pitch.frame_size)/2)),
                        signal.size-int(np.fix(float(pitch.frame_size)/2)),
                        pitch.frame_jump)

    data_matrix = np.empty((len(samples), pitch.frame_size))
    data_matrix[:, :] = stride_matrix(data, len(samples),
                                      pitch.frame_size, pitch.frame_jump)
    data_matrix *= window

    specData = np.fft.rfft(data_matrix, pitch.nfft)

    frame_energy = np.abs(specData[:, N_f0_min-1:N_f0_max]).sum(axis=1)
    pitch.set_energy(frame_energy, parameters['nlfer_thresh1'])
    pitch.set_frames_pos(samples)
def interval2semitone(interval, accidentals):
    # Need it to be int
    import numpy as np
    interval = int(interval)

    # semitone equivalents for intervals 1,2,3,4,5,6,7
    semitonetranslation = [0, 2, 4, 5, 7, 9, 11]

    semitone = 0
    success = True
    if (interval > 0) and (interval < 50):
        # semitone value is the number of semitones equivalent to the interval
        # added to the number of accidentals (sharps are positive, flats are
        # negative) and the number of octaves above the reference note to
        # account for extensions
        des_index = int(np.mod(interval, 8) + np.fix(interval/8)) - 1
        semitone = int(semitonetranslation[des_index] + accidentals
                       + 12*np.fix(interval/8))
    else:
        success = False
        print('Error in interval2semitone: out of range interval')

    return semitone, success
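As a quick sanity check of the interval-to-semitone mapping above (a minimal, hedged example; it only assumes the function is importable as written):

# A major ninth (interval 9, no accidentals) is an octave plus a major
# second, i.e. 12 + 2 = 14 semitones; a minor third (interval 3, one flat)
# is 3 semitones.
print(interval2semitone(9, 0))    # expected: (14, True)
print(interval2semitone(3, -1))   # expected: (3, True)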
def detect_face_12net(cls_prob, roi, out_side, scale, width, height, threshold):
    in_side = 2*out_side + 11
    stride = 0
    if out_side != 1:
        stride = float(in_side - 12)/(out_side - 1)
    (x, y) = np.where(cls_prob >= threshold)
    boundingbox = np.array([x, y]).T
    bb1 = np.fix((stride*boundingbox + 0)*scale)
    bb2 = np.fix((stride*boundingbox + 11)*scale)
    boundingbox = np.concatenate((bb1, bb2), axis=1)
    dx1 = roi[0][x, y]
    dx2 = roi[1][x, y]
    dx3 = roi[2][x, y]
    dx4 = roi[3][x, y]
    score = np.array([cls_prob[x, y]]).T
    offset = np.array([dx1, dx2, dx3, dx4]).T
    boundingbox = boundingbox + offset*12.0*scale
    rectangles = np.concatenate((boundingbox, score), axis=1)
    rectangles = rect2square(rectangles)
    pick = []
    for i in range(len(rectangles)):
        x1 = int(max(0, rectangles[i][0]))
        y1 = int(max(0, rectangles[i][1]))
        x2 = int(min(width, rectangles[i][2]))
        y2 = int(min(height, rectangles[i][3]))
        sc = rectangles[i][4]
        if x2 > x1 and y2 > y1:
            pick.append([x1, y1, x2, y2, sc])
    return NMS(pick, 0.5, 'iou')
def epoch_plot(t, y, period, name='none'):
    phase = (t/period)*1.0 - np.fix(t/period)
    sortidxs = np.argsort(phase)
    sorted_phase = phase[sortidxs]
    sorted_y = y[sortidxs]
    sorted_dates = t[sortidxs]
    pp = np.concatenate([sorted_phase, sorted_phase + 1.0], 1)
    yy = np.concatenate([sorted_y, sorted_y], 1)
    epochs = np.fix(sorted_dates/period)
    epoch_list = set(epochs)
    mi = min(y) - 0.15
    ma2 = heapq.nlargest(2, y)[1]
    plt.clf()
    x = 0
    for i in epoch_list:
        period_date_idxs = np.asarray(np.where(epochs == i))
        period_mags = sorted_y[period_date_idxs]
        period_dates = sorted_dates[period_date_idxs]
        period_phases = sorted_phase[period_date_idxs]
        pp = np.concatenate([period_phases, period_phases + 1.0], 1)
        yy = np.concatenate([period_mags, period_mags], 1)
        plt.scatter(pp, yy, color=cm.jet(1.*x/len(epoch_list)),
                    alpha=0.7, edgecolors='none')
        x += 1
    print(ma2, mi)
    plt.ylim(ma2, mi)
    #plt.gca().invert_yaxis()
    plt.text(-0.4, mi + 0.1, "period = {} days".format(period), fontsize=14)
    plt.xlabel("phase")
    plt.ylabel("V-band magnitude")
    if name != 'none':
        #plt.title("{}".format(name))
        plt.savefig("../output/{}_epoch.png".format(name))
    plt.show()
def intermediate_profile(x, xhinge, delta):
    """Generate an intermediate profile of some quantity (Ferron et al. 1998).

    Returns the 'top down' and 'bottom up' profiles as well as the average
    of the two.
    """
    xf = np.flipud(x)
    xtd = np.zeros_like(x)
    xbu = np.zeros_like(x)

    ntd = np.fix(x[0]/delta - xhinge/delta)
    nbu = np.fix(xf[0]/delta - xhinge/delta)

    xtd[0] = xhinge + ntd*delta
    xbu[0] = xhinge + nbu*delta

    for i in range(len(x) - 1):
        ntd = np.fix(x[i+1]/delta - xtd[i]/delta)
        nbu = np.fix(xf[i+1]/delta - xbu[i]/delta)
        xtd[i+1] = xtd[i] + ntd*delta
        xbu[i+1] = xbu[i] + nbu*delta

    xbu = np.flipud(xbu)
    xav = (xtd + xbu)/2.
    return xtd, xbu, xav
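A minimal usage sketch (the array values here are made up for illustration; only numpy and the function above are assumed):

import numpy as np

# A short, noisy "density-like" profile and a 0.01 accuracy step.
profile = np.array([1025.0, 1025.004, 1025.002, 1025.02, 1025.018, 1025.05])
xtd, xbu, xav = intermediate_profile(profile, xhinge=1025.0, delta=0.01)
# xtd (top down) and xbu (bottom up) only move in whole multiples of delta,
# giving staircase versions of the input; xav is their average.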
def plot_imfs(self, time, tstart=None, tend=None, tunit='s', savefig_name=''):
    time = time - time[0]
    dt = time[1] - time[0]
    if tstart is not None:
        tstart = int(np.fix(tstart / dt))
    if tend is not None:
        tend = int(np.fix(tend / dt))
    num_subplot = self.num_imf + 1
    fig = plt.figure()
    ax = fig.add_subplot(num_subplot, 1, 1)
    ax.plot(time[tstart:tend], self.input_signal[tstart:tend])
    ax.set_ylabel('Data')
    ax.get_yaxis().set_label_coords(-0.1, 0.5)
    for i in range(self.num_imf - 1):
        ax = fig.add_subplot(num_subplot, 1, i + 2)
        ax.plot(time[tstart:tend], self.imfs[:, i][tstart:tend])
        ax.set_ylabel(r'$C_{' + str(i + 1) + '}$')
        ax.get_yaxis().set_label_coords(-0.1, 0.5)
    ax = fig.add_subplot(num_subplot, 1, num_subplot)
    ax.plot(time[tstart:tend], self.imfs[:, -1][tstart:tend])
    ax.get_yaxis().set_label_coords(-0.1, 0.5)
    ax.set_ylabel('Trend')
    ax.set_xlabel('Time (' + tunit + ')')
    if savefig_name == '':
        plt.show()
    else:
        plt.savefig(savefig_name, format='eps', dpi=1000)
def generateBoundingBox(imap, reg, scale, t):
    # use heatmap to generate bounding boxes
    stride = 2
    cellsize = 12

    imap = np.transpose(imap)
    dx1 = np.transpose(reg[:, :, 0])
    dy1 = np.transpose(reg[:, :, 1])
    dx2 = np.transpose(reg[:, :, 2])
    dy2 = np.transpose(reg[:, :, 3])
    y, x = np.where(imap >= t)
    if y.shape[0] == 1:
        dx1 = np.flipud(dx1)
        dy1 = np.flipud(dy1)
        dx2 = np.flipud(dx2)
        dy2 = np.flipud(dy2)
    score = imap[(y, x)]
    reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)],
                                  dx2[(y, x)], dy2[(y, x)]]))
    if reg.size == 0:
        reg = np.empty((0, 3))
    bb = np.transpose(np.vstack([y, x]))
    q1 = np.fix((stride * bb + 1) / scale)
    q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
    boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
    return boundingbox, reg
def generate_bounding_box(imap, reg, scale, threshold):
    """Use heatmap to generate bounding boxes"""
    # pylint: disable=too-many-locals
    stride = 2
    cellsize = 12

    imap = np.transpose(imap)
    d_x1 = np.transpose(reg[:, :, 0])
    d_y1 = np.transpose(reg[:, :, 1])
    d_x2 = np.transpose(reg[:, :, 2])
    d_y2 = np.transpose(reg[:, :, 3])
    dim_y, dim_x = np.where(imap >= threshold)
    if dim_y.shape[0] == 1:
        d_x1 = np.flipud(d_x1)
        d_y1 = np.flipud(d_y1)
        d_x2 = np.flipud(d_x2)
        d_y2 = np.flipud(d_y2)
    score = imap[(dim_y, dim_x)]
    reg = np.transpose(np.vstack([d_x1[(dim_y, dim_x)], d_y1[(dim_y, dim_x)],
                                  d_x2[(dim_y, dim_x)], d_y2[(dim_y, dim_x)]]))
    if reg.size == 0:
        reg = np.empty((0, 3))
    bbox = np.transpose(np.vstack([dim_y, dim_x]))
    q_1 = np.fix((stride * bbox + 1) / scale)
    q_2 = np.fix((stride * bbox + cellsize - 1 + 1) / scale)
    boundingbox = np.hstack([q_1, q_2, np.expand_dims(score, 1), reg])
    return boundingbox, reg
def idftreal(A, N, M):
    '''
    Calculates multiple 1D inverse DFT from complex to real

    Input:
        A - input complex vectors in column form (samples from zero to Nyquist)
        N - number of output samples (= number of implied complex input samples)
        M - number of input vectors

    Based on idftreal.m by R.G. Pratt
    '''
    a = np.zeros((N, M))
    n = np.arange(N).reshape((N, 1))

    # Set maximum non-Nyquist frequency index (works for even or odd N)
    imax = int(np.fix((N+1)//2) - 1)
    k1 = np.arange(np.fix(N//2)+1)     # Freq indices from zero to Nyquist
    k2 = np.arange(1, imax+1)          # Freq indices except zero and Nyquist
    nk1 = n * k1.T
    nk2 = n * k2.T
    w = np.exp(-2j*np.pi / N)
    W = w**nk1
    W2 = w**nk2
    W[:, 1:imax+1] += W2               # Add two matrices properly shifted
    a = np.dot(W, A[:int(N//2)+1, :M]).real  # (leads to doubling for non-Nyquist)

    return a
def _simulate_image(self):
    """
    Generates the fake output.
    """
    with self._acquisition_init_lock:
        pos = self.align.position.value
        logging.debug("Simulating image shift by %s", pos)
        ac, bc = pos.get("a"), pos.get("b")
        ang = math.radians(135)
        # AB->XY
        xc = -(ac * math.sin(ang) + bc * math.cos(ang))
        yc = -(ac * math.cos(ang) - bc * math.sin(ang))
        pixelSize = self.fake_img.metadata[model.MD_PIXEL_SIZE]
        self.fake_img.metadata[model.MD_ACQ_DATE] = time.time()
        x_pxs = xc / pixelSize[0]
        y_pxs = yc / pixelSize[1]

        # Image shifted based on LensAligner position
        z = 1j  # imaginary unit
        self.deltar = x_pxs
        self.deltac = y_pxs
        nr, nc = self.fake_img.shape
        array_nr = numpy.arange(-numpy.fix(nr / 2), numpy.ceil(nr / 2))
        array_nc = numpy.arange(-numpy.fix(nc / 2), numpy.ceil(nc / 2))
        Nr = fft.ifftshift(array_nr)
        Nc = fft.ifftshift(array_nc)
        [Nc, Nr] = numpy.meshgrid(Nc, Nr)
        sim_img = fft.ifft2(fft.fft2(self.fake_img) * numpy.power(math.e,
                            z * 2 * math.pi * (self.deltar * Nr / nr +
                                               self.deltac * Nc / nc)))
        output = model.DataArray(abs(sim_img), self.fake_img.metadata)
        return output
def jd2gps(jd):
    """
    % JD2GPS  Converts Julian date to GPS week number (since
    %   1980.01.06) and seconds of week.
    % Usage:   [gpsweek,sow,rollover]=jd2gps(jd)
    % Input:   jd       - Julian date
    % Output:  gpsweek  - GPS week number
    %          sow      - seconds of week since 0 hr, Sun.
    %          rollover - number of GPS week rollovers (modulus 1024)

    % Copyright (c) 2011, Michael R. Craymer
    % All rights reserved.
    % Email: [email protected]
    """
    jdgps = cal2jd(1980, 1, 6)     # beginning of GPS week numbering
    nweek = int(np.fix((jd - jdgps)/7.))
    sow = (jd - (jdgps + nweek*7)) * 3600*24
    rollover = np.fix(nweek/1024)  # rollover every 1024 weeks
    gpsweek = int(nweek)

    # rollover is being returned as an array?
    # should just be an int
    return gpsweek, sow, rollover
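For orientation, a hedged usage example; it assumes the `cal2jd` helper used above is available and that JD 2451545.0 is the J2000.0 epoch (2000-01-01 12:00):

gpsweek, sow, rollover = jd2gps(2451545.0)
# J2000.0 falls on Saturday 2000-01-01 12:00, i.e. GPS week 1042,
# 6.5 days (561600 s) after the week started on Sunday 00:00, with one
# 1024-week rollover already passed:
print(gpsweek, sow, rollover)   # expected: 1042 561600.0 1.0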
def cos_pattern(AO_resolution, freq):
    # Experiment spec: sinewave on angles of stepper motor:
    # 60 degree amplitude (max), 15 Hz (max)
    # from Karin and Holger 12-apr-2011 15:00:00
    step = 360.0/5000   #...step(/microstep) angle (degree)
    amp = 60/2          #...sine wave amplitude (peak to peak degree)
    # freq = 1          #...sine wave frequency (Hz)
    Y = numpy.arange(0, amp, step)
    X = (1/(2*numpy.pi*freq))*numpy.arcsin(Y/amp)

    # AO_resolution = 2.5e-6  # (us) resolution: 400 kHz maximum, the period of which is 2.5 us
    # AO_resolution = 5e-6    # (us) resolution: 200 kHz maximum, the period of which is 5 us
    # AO_resolution = 1e-4    # (us) resolution: 10 kHz maximum, the period of which is 100 us
    Xs = numpy.fix(X / AO_resolution)

    SM_sig = numpy.zeros(int(numpy.fix(Xs[numpy.size(Xs)-1])) + 1)
    for i in range(numpy.size(Xs)):
        SM_sig[int(Xs[i])] = 1

    rev_SM_sig = numpy.zeros(int(numpy.fix(Xs[numpy.size(Xs)-1])) + 1)
    for i in range(numpy.size(SM_sig)):
        rev_SM_sig[i] = SM_sig[numpy.size(SM_sig)-1-i]

    # duration = 2 * X[numpy.size(X)-1]   # (second) duration of the signal train
    wavelet = 5 * numpy.append(rev_SM_sig, SM_sig)
    wavelet = numpy.append(wavelet, wavelet)
    wavelet = numpy.append(wavelet, numpy.array([0]))
    return wavelet
def symmetrized_dot_cloud(waveform, angle=60, top=50, lag=1):
    """
    Same as symmetrized dots but returns a 2d point cloud instead of a
    boolean image. Note the return value is a tuple (x, y) of int ndarrays,
    which is equivalent to (cols, rows).
    """
    # normalize the waveform to 0-top
    waveform = normalize(waveform, top)

    # roll the waveform by lag to get the angles we'll use
    roll_waveform = np.roll(waveform, lag)

    # now compute the angles
    first_angle = np.concatenate(
        [base_angle + roll_waveform for base_angle in np.arange(0, 360, angle)])
    second_angle = np.concatenate(
        [base_angle - roll_waveform for base_angle in np.arange(0, 360, angle)])
    all_angles = np.deg2rad(np.concatenate([first_angle, second_angle]))

    # tile the original waveform until it matches the length of the angles
    num_repeats = int(all_angles.size / waveform.size)
    waveform = np.tile(waveform, num_repeats)

    # now make the point cloud
    x_cols = waveform * np.cos(all_angles) + top
    y_rows = waveform * np.sin(all_angles) + top
    return (np.fix(x_cols).astype(int), np.fix(y_rows).astype(int))
def stim(w=360):
    global SM_step
    SM_step = 360.0/5000.0   #...step(/microstep) angle (degree)
    amp = 360.0/2            #...sine wave amplitude (peak to peak degree)
    freq = 1                 #...sine wave frequency (Hz)
    Y = numpy.arange(0, amp, SM_step)
    X = (1/(2*numpy.pi*freq))*numpy.arcsin(Y/amp)

    #w = 180.0               #... deg/sec
    #tLmt = (360.0/w)/4/(SM_step)
    #tLmt = 1.0/5000
    global tLmt
    tLmt = SM_step / w       #... sec/step

    dX = numpy.zeros(numpy.size(X))
    dX[0] = X[0]
    for i in range(1, numpy.size(X)-1):
        dX[i] = X[i] - X[i-1]
    for i in range(1, numpy.size(X)):
        if dX[i] < tLmt:
            dX[i] = tLmt

    XX = numpy.zeros(numpy.size(X))
    for i in range(1, numpy.size(XX)):
        XX[i] = XX[i-1] + dX[i]
    # pylab.plot(X, Y); pylab.show()

    AO_resolution = 5e-6     # (us) resolution: 200 kHz maximum, the period of which is 5 us
    Xs = numpy.fix(XX / AO_resolution)

    SM_sig = numpy.zeros(int(numpy.fix(Xs[numpy.size(Xs)-1])) + 1)   #... numpy.size(Xs)-1
    for i in range(numpy.size(Xs)):
        SM_sig[int(Xs[i])] = 1

    rev_SM_sig = numpy.zeros(int(numpy.fix(Xs[numpy.size(Xs)-1])) + 1)
    for i in range(numpy.size(SM_sig)):
        rev_SM_sig[i] = SM_sig[numpy.size(SM_sig)-1-i]

    # duration = 2 * X[numpy.size(X)-1]   # (second) duration of the signal train
    wavelet = 5 * numpy.append(rev_SM_sig, SM_sig)
    wavelet = numpy.append(wavelet, wavelet)
    wavelet = numpy.append(wavelet, numpy.array([0]))

    SM_dir = numpy.append(5*numpy.ones(numpy.size(rev_SM_sig) + numpy.size(SM_sig)),
                          numpy.zeros(numpy.size(rev_SM_sig) + numpy.size(SM_sig)))
    SM_dir = numpy.append(SM_dir, numpy.array([0]))

    # for easier data processing
    sig_head = 5*numpy.ones(20)
    sig_tail = numpy.append(numpy.zeros(20), 5*numpy.ones(20))
    sig_tail = numpy.append(sig_tail, numpy.zeros(1))
    SM_pulse = numpy.append(sig_head, wavelet)
    SM_pulse = numpy.append(SM_pulse, sig_tail)
    SM_dir = numpy.append(sig_head, SM_dir)
    SM_dir = numpy.append(SM_dir, sig_tail)
    stim = numpy.append(SM_pulse, SM_dir)
    #wavelet = 5 * numpy.ones(40000)
    return stim
def coherr(C, J1, J2, p=0.05, Nsp1=None, Nsp2=None):
    """
    Function to compute lower and upper confidence intervals on
    coherency (absolute value of coherence).

    C:      coherence (real or complex)
    J1, J2: tapered fourier transforms
    p:      the target P value (default 0.05)
    Nsp1:   number of spikes in J1, used for finite size correction.
    Nsp2:   number of spikes in J2, used for finite size correction.
            Default is None, for no correction

    Outputs:
    CI:      confidence interval for C, N x 2 array, (lower, upper)
    phi_std: standard deviation of phi, N array
    """
    from numpy import iscomplexobj, absolute, fix, zeros, setdiff1d, real, \
        sqrt, arctanh, tanh
    from scipy.stats import t

    J1 = _combine_trials(J1)
    J2 = _combine_trials(J2)
    N, K = J1.shape
    assert J1.shape == J2.shape, "J1 and J2 must have the same dimensions."
    assert N == C.size, "S and J lengths don't match"

    if iscomplexobj(C):
        C = absolute(C)

    pp = 1 - p/2
    dof = 2*K
    dof1 = dof if Nsp1 is None else fix(2.*Nsp1*dof/(2.*Nsp1+dof))
    dof2 = dof if Nsp2 is None else fix(2.*Nsp2*dof/(2.*Nsp2+dof))
    dof = min(dof1, dof2)

    Cerr = zeros((N, 2))
    tcrit = t(dof-1).ppf(pp).tolist()
    atanhCxyk = zeros((N, K))
    phasefactorxyk = zeros((N, K), dtype='complex128')

    for k in range(K):
        indxk = setdiff1d(range(K), [k])
        J1k = J1[:, indxk]
        J2k = J2[:, indxk]
        eJ1k = real(J1k * J1k.conj()).sum(1)
        eJ2k = real(J2k * J2k.conj()).sum(1)
        eJ12k = (J1k.conj() * J2k).sum(1)
        Cxyk = eJ12k/sqrt(eJ1k*eJ2k)
        absCxyk = absolute(Cxyk)
        atanhCxyk[:, k] = sqrt(2*K-2)*arctanh(absCxyk)
        phasefactorxyk[:, k] = Cxyk / absCxyk

    atanhC = sqrt(2*K-2)*arctanh(C)
    sigma12 = sqrt(K-1) * atanhCxyk.std(1)

    Cu = atanhC + tcrit * sigma12
    Cl = atanhC - tcrit * sigma12
    Cerr[:, 0] = tanh(Cl / sqrt(2*K-2))
    Cerr[:, 1] = tanh(Cu / sqrt(2*K-2))
    phistd = (2*K-2) * (1 - absolute(phasefactorxyk.mean(1)))
    return Cerr, phistd
def gaussian_bandpass_analytic(s, sample_rate, frequencies, bandwidths,
                               round=True):
    """
    Compute the analytic signal in a set of bandpass channels

    :param s: the raw signal
    :param sample_rate: the signal's sample_rate
    :param frequencies: a list of center frequencies for gaussian filters
    :param bandwidths: a list of bandwidths for gaussian filters
    :param round: pad with zeros to next multiple of 2. Since we are doing
        so many iffts, this can speed things up a lot.

    :return analytic_signal: an array of dimension len(frequencies) * len(s)
    """
    if isinstance(bandwidths, (int, float)):
        bandwidths = [bandwidths]
    if len(bandwidths) != len(frequencies):
        if len(bandwidths) == 1:
            bandwidths = list(bandwidths) * len(frequencies)
        else:
            raise ValueError("bandwidths should be the same length as frequencies")

    # Enforce even window_length to have a symmetric window
    window_length = np.fix(np.fix(6 * sample_rate / (min(bandwidths) * 2.0 * np.pi)) / 2) * 2
    if round:
        pow2_length = 2 ** np.ceil(np.log2(len(s) + window_length))
        window_length = (pow2_length - len(s)) / 2

    # Pad the input with zeros
    padded = np.pad(s, (int(window_length / 2), int(np.ceil(window_length / 2))),
                    'constant')
    input_length = len(padded)
    input_start = int(window_length / 2)

    # Assign space for output
    analytic_signal = np.zeros((len(frequencies), len(s)), dtype=np.complex128)

    # Digital filtering
    spectrum = fft(padded)
    fft_freqs = fftfreq(input_length, d=(1.0 / sample_rate))
    nonzero_inds = fft_freqs >= 0
    positive_inds = fft_freqs > 0
    frequency_filter = np.zeros_like(spectrum)

    for ii, (freq, bw) in enumerate(zip(frequencies, bandwidths)):
        # Create the digital filter for this band
        frequency_filter[nonzero_inds] = np.exp(-0.5 * (fft_freqs[nonzero_inds] - freq) ** 2 / float(bw) ** 2)
        # Compute the filtered spectrum
        filtered_spectrum = frequency_filter * spectrum
        # Double the values of the positive frequencies for the analytic signal
        filtered_spectrum[positive_inds] *= 2
        # Compute the analytic signal
        bfinput = ifft(filtered_spectrum)
        # Remove the padding
        analytic_signal[ii] = bfinput[input_start: input_start + len(s)]

    return analytic_signal
def note2degree(note, root):
    import numpy as np
    success = True
    degree = ''

    # interval translations on the line of fifths
    fifthtranslations = [1, 5, 2, 6, 3, 7, 4]

    # get note and root natural position and accidentals on line of fifths
    noteposition, success1 = note2fifthposition(note)
    rootposition, success2 = note2fifthposition(root)

    if success1 and success2:
        # take the difference between the two note positions for the relative
        # positions of the notes with respect to one another
        fifthsdifference = noteposition - rootposition + 1

        # natural difference on line of fifths
        fifthsinterval = np.mod((fifthsdifference - 1), 7)

        # find number of accidentals apart on line of fifths
        if fifthsdifference < 0:
            # if the final position is negative then calculate the number of
            # flats, remembering to include the extra first flat (-1)
            accidentals = np.fix(fifthsdifference/7) - 1
        else:
            # note is a natural or has a number of sharps
            accidentals = np.fix(fifthsdifference/7)

        # put the required number of sharps or flats into the output string
        if accidentals > 0:
            for i in range(int(accidentals)):
                degree = degree + '#'
        elif accidentals <= 0:
            abs_acc = int(np.abs(accidentals))
            for i in range(abs_acc):
                degree = degree + 'b'

        # find interval value from translation array
        interval = fifthtranslations[int(fifthsinterval)]
        degree = degree + str(interval)
    else:
        success = False

    return degree, success
def Find_wav_kurt(x, h, g, h1, h2, h3, nlevel, Sc, Fr, opt, Fs=1):
    # [c,Bw,fc,i] = Find_wav_kurt(x,h,g,h1,h2,h3,nlevel,Sc,Fr,opt2)
    # Sc = -log2(Bw)-1 with Bw the bandwidth of the filter
    # Fr is in [0 .5]
    #
    # -------------------
    # J. Antoni : 12/2004
    # -------------------
    level = np.fix(Sc) + ((Sc % 1) >= 0.5) * (np.log2(3) - 1)
    Bw = 2**(-level-1)
    freq_w = np.arange(0, 2**(level-1)) / 2**(level+1) + Bw/2.
    J = np.argmin(np.abs(freq_w - Fr))
    fc = freq_w[J]
    i = int(np.round(fc/Bw - 1./2))
    if level % 1 == 0:
        acoeff = binary(i, level)
        bcoeff = np.array([])
        temp_level = level
    else:
        i2 = np.fix(i/3.)
        temp_level = np.fix(level) - 1
        acoeff = binary(i2, temp_level)
        bcoeff = i - i2*3
    acoeff = acoeff[::-1]
    c = K_wpQ_filt(x, h, g, h1, h2, h3, acoeff, bcoeff, temp_level)
    print(c)
    kx = kurt(c, opt)
    print("kx", kx)

    sig = np.median(np.abs(c))/np.sqrt(np.pi/2.)
    print(sig)
    threshold = sig*raylinv(np.array([.999, ]), np.array([1, ]))
    print("threshold", threshold)

    spec = int(input(' Do you want to see the envelope spectrum (yes = 1 ; no = 0): '))

    fig = plt.figure()
    t = np.arange(len(x))/Fs
    tc = np.linspace(t[0], t[-1], len(c))
    plt.subplot(2+spec, 1, 1)
    plt.plot(t, x, 'k', label='Original Signal')
    plt.subplot(2+spec, 1, 2)
    plt.plot(tc, np.abs(c), 'k')
    plt.plot(tc, threshold*np.ones(len(c)), '--r')
    #~ plt.title('Envlp of the filtr sgl, Bw=Fs/2^{'+(level+1)+'}, fc='+(Fs*fc)+'Hz, Kurt='+(np.round(np.abs(10*kx))/10)+', \alpha=.1%']
    plt.xlabel('time [s]')
    if spec == 1:
        print(nextpow2(len(c)))
        nfft = int(nextpow2(len(c)))
        env = np.abs(c)**2
        S = np.abs(np.fft.fft(env.ravel() - np.mean(env)*np.hanning(len(env))/len(env), nfft))
        f = np.linspace(0, 0.5*Fs/2**level, nfft//2)
        plt.subplot(313)
        plt.plot(f, S[:nfft//2], 'k')
        plt.title('Fourier transform magnitude of the squared envelope')
        plt.xlabel('frequency [Hz]')
    plt.show()
    return [c, Bw, fc]
def jd2date(jd):
    """This function finds the year, month, day, hour, minute and second
    given the Julian date.

    Algorithm     : Set up starting values
                    Find the elapsed days through the year in a loop
                    Call routine to find each individual value

    Author        : Capt Dave Vallado  USAFA/DFAS  719-472-4109  26 Feb 1990
    In Ada        : Dr Ron Lisowski    USAFA/DFAS  719-472-4110  17 May 1995
    In Matlab     : LtCol Thomas Yoder USAFA/DFAS  719-333-4110  Spring 2000
    In Python     : Shankar Kulumani   GWU         630-336-6257  2017 06 15

    Inputs        :
        JD          - Julian Date                     days from 4713 B.C.

    OutPuts       :
        Yr          - Year                            1900 .. 2100
        Mon         - Month                              1 .. 12
        D           - Day                                1 .. 28,29,30,31
        H           - Hour                               0 .. 23
        M           - Minute                             0 .. 59
        S           - Second                           0.0 .. 59.999

    Locals        :
        days        - Day of year plus fraction of a day   (days)
        Tu          - Julian Centuries from 1 Jan 1900
        Temp        - Temporary Long_Float value
        LeapYrs     - Number of Leap years from 1900

    Constants     : None.

    Coupling      :
        DayofYr2MDHMS - Finds Month, day, hour, minute and second given
                        Days and Yr

    References    :
        1988 Almanac for Computers pg. B2
        Escobal       pg. 17-19
        Kaplan        pg. 329-330
    """
    temp = jd - 2415019.5
    tu = temp / 365.25
    yr = 1900 + np.fix(tu)
    leapyrs = np.fix((yr - 1900 - 1) * 0.25)
    days = temp - ((yr - 1900) * 365.0 + leapyrs)

    # check for beginning of year
    if days < 1.0:
        yr = yr - 1
        leapyrs = np.fix((yr - 1900 - 1) * 0.25)
        days = temp - ((yr - 1900) * 365.0 + leapyrs)

    mon, day, h, m, s = dayofyr2mdhms(yr, days)

    return (yr, mon, day, h, m, s)
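A hedged round-trip check (it assumes the `dayofyr2mdhms` helper referenced in the docstring is available): the J2000.0 epoch JD 2451545.0 should come back as 2000-01-01 12:00:00.

yr, mon, day, h, m, s = jd2date(2451545.0)
print(yr, mon, day, h, m, s)   # expected: 2000, 1, 1, 12, 0, 0 (yr may be returned as a float)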
def sccs_bit_sync(y, Ns):
    """
    rx_symb_d, clk, track = sccs_bit_sync(y, Ns)

    //////////////////////////////////////////////////////
     Symbol synchronization algorithm using SCCS
    //////////////////////////////////////////////////////
         y = baseband NRZ data waveform
        Ns = nominal number of samples per symbol

    Reworked from ECE 5675 Project
    Translated from m-code version
    Mark Wickert April 2014
    """
    # decimated symbol sequence for SEP
    rx_symb_d = np.zeros(int(np.fix(len(y)/Ns)))
    track = np.zeros(int(np.fix(len(y)/Ns)))
    bit_count = -1
    y_abs = np.zeros(len(y))
    clk = np.zeros(len(y))
    k = Ns + 1  # initial 1-of-Ns symbol synch clock phase
    # Sample-by-sample processing required
    for i in range(len(y)):
        #y_abs(i) = abs(round(real(y(i))))
        if i >= Ns:  # do not process first Ns samples
            # Collect timing decision unit (TDU) samples
            y_abs[i] = np.abs(np.sum(y[i-Ns+1:i+1]))
            # Update sampling instant and take a sample
            # For causality reason the early sample is 'i',
            # the on-time or prompt sample is 'i-1', and
            # the late sample is 'i-2'.
            if k == 0:
                # Load the samples into the 3x1 TDU register w_hat.
                # w_hat[1] = late, w_hat[2] = on-time; w_hat[3] = early.
                w_hat = y_abs[i-2:i+1]
                bit_count += 1
                if w_hat[1] != 0:
                    if w_hat[0] < w_hat[2]:
                        k = Ns - 1
                        clk[i-2] = 1
                        rx_symb_d[bit_count] = y[i-2-int(np.round(Ns/2))-1]
                    elif w_hat[0] > w_hat[2]:
                        k = Ns + 1
                        clk[i] = 1
                        rx_symb_d[bit_count] = y[i-int(np.round(Ns/2))-1]
                    else:
                        k = Ns
                        clk[i-1] = 1
                        rx_symb_d[bit_count] = y[i-1-int(round(Ns/2))-1]
                else:
                    k = Ns
                    clk[i-1] = 1
                    rx_symb_d[bit_count] = y[i-1-int(round(Ns/2))]
                track[bit_count] = np.mod(i, Ns)
            k -= 1
    # Trim the final output to bit_count
    rx_symb_d = rx_symb_d[:bit_count]
    return rx_symb_d, clk, track
def __init__(self, x, wind=20, over=50):
    self.vect = x
    self.wind = wind
    self.over = over
    self.nover = np.fix(self.wind*self.over/100)
    self.nadv = self.wind - self.nover
    self.nrecs = np.fix((len(x) - self.nover)/self.nadv)
    self.nfft = nextpow2(self.wind)
    self.X, self.Y, self.Z = PreP.FIR2(self.vect)
def decrease(val, dN):
    # auxiliary function to remove unnecessary Fourier frequencies
    dN = np.array(dN)
    N = np.array(val.shape[-dN.size:])
    ibeg = np.array(np.fix((N-dN+(dN % 2))/2), dtype=int)
    iend = np.array(np.fix((N+dN+(dN % 2))/2), dtype=int)
    if dN.size == 2:
        return val[:, :, ibeg[0]:iend[0], ibeg[1]:iend[1]]
    elif dN.size == 3:
        return val[:, :, ibeg[0]:iend[0], ibeg[1]:iend[1], ibeg[2]:iend[2]]
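A small illustration of the central cropping this helper performs in Fourier space (the shapes are chosen only for the example):

import numpy as np

val = np.arange(64).reshape(1, 1, 8, 8)   # two leading "component" axes, 8x8 frequencies
out = decrease(val, (4, 4))
print(out.shape)   # (1, 1, 4, 4): the central 4x4 block of frequencies is kept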
def Find_wav_kurt(x, h, g, h1, h2, h3, nlevel, Sc, Fr, Fs=1, verbose=False):
    """
    TODO flesh out this doc-string

    J. Antoni : 12/2004
    Translation to Python: T. Lecocq 02/2012

    :param x: signal
    :param h: lowpass filter
    :param g: highpass filter
    :param h1: filter parameter returned by get_h_parameters
    :param h2: filter parameter returned by get_h_parameters
    :param h3: filter parameter returned by get_h_parameters
    :param nlevel: number of decomposition levels
    :param Sc: Sc = -log2(Bw)-1 with Bw the bandwidth of the filter
    :param Fr: in the range [0, 0.5]
    :param Fs: Sampling frequency of signal x
    :param verbose: If ``True`` outputs debugging information

    :type x: numpy array
    :type h: numpy array
    :type g: numpy array
    :type h1: numpy array
    :type h2: numpy array
    :type h3: numpy array
    :type nlevel: integer
    :type Fr: float
    :type Fs: integer

    :returns: c, s, threshold, Bw, fc
    """
    level = np.fix(Sc) + ((Sc % 1) >= 0.5)*(np.log2(3)-1)
    Bw = 2**(-level-1)
    freq_w = np.arange(0, 2**level) / 2**(level+1) + Bw/2.
    J = np.argmin(np.abs(freq_w - Fr))
    fc = freq_w[J]
    i = int(np.round(fc/Bw - 1./2))
    if level % 1 == 0:
        acoeff = binary(i, int(level))
        bcoeff = []
        temp_level = level
    else:
        i2 = int(np.fix(i/3.))
        temp_level = np.fix(level) - 1
        acoeff = binary(i2, int(temp_level))
        bcoeff = i - i2*3
    acoeff = acoeff[::-1]
    c = K_wpQ_filt(x, h, g, h1, h2, h3, acoeff, bcoeff, temp_level)

    t = np.arange(len(x))/float(Fs)
    tc = np.linspace(t[0], t[-1], len(c))
    s = np.real(c*np.exp(2j*np.pi*fc*Fs*tc))

    sig = np.median(np.abs(c))/np.sqrt(np.pi/2.)
    threshold = sig*raylinv(np.array([.999, ]), np.array([1, ]))

    return c, s, threshold, Bw, fc
def get_xil(N, Y):
    """
    Produces the discrete frequencies of a Fourier series:
        xil[m][i] = ZNl[m][i] / Y[m]
    """
    xil = []
    for m in np.arange(np.size(N)):
        xil.append(np.arange(np.fix(-N[m]/2.), np.fix(N[m]/2.+0.5))/Y[m])
    return xil
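For example, for a single axis with N = 5 points over a period Y = 2.0, the frequencies run from -fix(N/2) to fix(N/2), divided by the period:

import numpy as np

print(get_xil(np.array([5]), np.array([2.0])))
# -> [array([-1. , -0.5,  0. ,  0.5,  1. ])]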
def RFSpline(self, x, u):
    """
    u is continuous, find the closest two integer values of u;
    this is for a single x, mainly for finding roots
    """
    u_lower = int(np.fix(u))
    u_upper = int(np.fix(u)) + 1
    return self.randmat[u_lower, x] + (u - u_lower) * \
        (self.randmat[u_upper, x] - self.randmat[u_lower, x])
def RFSplineArray(self, u_array):
    """
    Returns an array of random fields, one per element of u_array,
    linearly interpolated between the two nearest integer u values.
    """
    u_lowers = np.fix(u_array).astype(int)
    u_uppers = u_lowers + 1
    idx = np.arange(len(u_array))
    lower_fields = self.randmat.transpose()[idx, u_lowers]
    upper_fields = self.randmat.transpose()[idx, u_uppers]
    return lower_fields + (u_array - u_lowers) * (upper_fields - lower_fields)
def test_getitem_setitem_ellipsis():
    s = Series(np.random.randn(10))

    np.fix(s)

    result = s[...]
    assert_series_equal(result, s)

    s[...] = 5
    assert (result == 5).all()
def kurto(origin_time, info, opdict):
    """
    Finds for each Waveloc event and for each station the best filtering
    parameters for kurtosis computation.
    Writes them into the dictionary info.

    :param origin_time: origin time of the signal
    :param info: dictionary of parameters
    :param opdict: dictionary of the Waveloc parameters and options

    :type origin_time: utcdatetime
    :type info: dictionary
    :type opdict: dictionary

    :rtype: dictionary
    :returns: info
    """
    verbose = opdict['verbose']
    kwin = opdict['kwin']

    start_time = origin_time - 5.0
    end_time = origin_time + 20.0

    dt = info['dt']

    # Trace
    x = waveval(info['data_ini'], start_time, end_time, dt, info['tdeb_data'])
    if not x.any() and x.all():
        return info

    # Initial kurtosis (trace filtered between 4-10Hz)
    kurtx = waveval(info['kurt_ini'], start_time, end_time, dt,
                    info['tdeb_kurt'])
    kurtx = smooth(kurtx)

    N = len(x)
    N2 = np.log2(N) - 7
    nlevel = int(np.fix(N2))

    snr_ref = np.max(np.abs(x)) / np.mean(np.abs(x))
    snr_kurt_ref = np.max(np.abs(kurtx)) / np.mean(np.abs(kurtx))
    kmax_ref = np.max(kurtx)  # maximum of the kurtosis

    # Compute the kurtogram and keep best frequencies
    if verbose:
        import matplotlib.gridspec as gridspec
        G = gridspec.GridSpec(3, 2)
        fig = plt.figure(figsize=(15, 6))
        fig.set_facecolor('white')
        fig.add_subplot(G[:, 0])

    Kwav, Level_w, freq_w, c, f_lower, f_upper = \
        Fast_Kurtogram(np.array(x, dtype=float), nlevel, verbose, Fs=1/dt,
                       opt2=1)

    # Comparison of the kurtosis computed in the new frequency band and the
    # old one (criterion: snr, kmax)

    # 1. Read the initial data
    wf = Waveform()
    wf.read_from_file(info['data_file'], starttime=start_time - kwin,
                      endtime=end_time + kwin)

    nbpts = int(kwin * 1. / dt)

    # 2. Filter the trace with kurtogram frequencies
    wf.bp_filter(f_lower, f_upper)
    x_filt = wf.values
    x_filt = x_filt[nbpts:-nbpts]

    # 3. Compute the kurtosis
    wf.process_kurtosis(kwin, recursive=opdict['krec'])
    new_kurtx = wf.values
    if opdict['krec']:
        new_kurtx = new_kurtx[nbpts + 1:-nbpts - 1]
    else:
        new_kurtx = new_kurtx[:-nbpts - 1]

    snr = np.max(np.abs(x_filt)) / np.mean(np.abs(x_filt))
    snr_kurt = np.max(np.abs(new_kurtx)) / np.mean(np.abs(new_kurtx))
    kmax = np.max(new_kurtx)

    if snr > snr_ref and kmax >= kmax_ref:
        info['filter'].append(
            (round(f_lower * 100) / 100, round(f_upper * 100) / 100))
        if 'new_kurt_file' in info:
            info = write_file(info, start_time, end_time, new_kurtx)
    else:
        info['filter'].append((0, 50))

    if verbose and snr > 3:
        print("snr:", snr, " ; snr_ref:", snr_ref)
        print("snr new kurtosis:", snr_kurt,
              " ; snr kurtosis reference:", snr_kurt_ref)
        print("kurtosis max, kurt_ref :", kmax, kmax_ref)
        plot_trace(fig, G, x, x_filt, kurtx, new_kurtx, info, f_lower,
                   f_upper, snr, snr_ref, snr_kurt, kmax, kmax_ref,
                   origin_time)
        plt.show()

    return info
def fix(arr):
    return np.fix(arr)
# x1 = qq1
# y1 = qq2
# x2 = qq3
# y2 = qq4
# for i in range(len(total_boxes)):
#     print('lll', x1[i], y1[i], x2[i], y2[i])
#     plt.gca().add_patch(
#         plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i],
#                       edgecolor='r', facecolor='none'))
# -- debug plotting added by Wei Fang
# plt.imshow(scale_img)
# plt.show()
# exit()
total_boxes = np.transpose(
    np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = align.detect_face.rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = align.detect_face.pad(
    total_boxes.copy(), w, h)

# R-Net
numbox = total_boxes.shape[0]
if numbox > 0:
    # second stage: R-Net takes the P-Net boxes, cropped and resized to 24x24
    tempimg = np.zeros((24, 24, 3, numbox))
    for k in range(0, numbox):
        tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
        tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
        if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
            # R-Net input size is 24x24, so the crop has to be resized
def LinearRegression_KFold_Sort(Subjects_Data, Subjects_Score, Fold_Quantity,
                                ResultantFolder, Permutation_Flag):
    if not os.path.exists(ResultantFolder):
        os.mkdir(ResultantFolder)

    Subjects_Quantity = len(Subjects_Score)
    # Sort the subjects by score
    Sorted_Index = np.argsort(Subjects_Score)
    Subjects_Data = Subjects_Data[Sorted_Index, :]
    Subjects_Score = Subjects_Score[Sorted_Index]

    EachFold_Size = int(np.fix(np.divide(Subjects_Quantity, Fold_Quantity)))
    MaxSize = EachFold_Size * Fold_Quantity
    EachFold_Max = np.ones(Fold_Quantity, int) * MaxSize
    tmp = np.arange(Fold_Quantity - 1, -1, -1)
    EachFold_Max = EachFold_Max - tmp
    Remain = np.mod(Subjects_Quantity, Fold_Quantity)
    for j in np.arange(Remain):
        EachFold_Max[j] = EachFold_Max[j] + Fold_Quantity

    Fold_Corr = []
    Fold_MAE = []
    Fold_Weight = []

    for j in np.arange(Fold_Quantity):
        Fold_J_Index = np.arange(j, EachFold_Max[j], Fold_Quantity)
        Subjects_Data_test = Subjects_Data[Fold_J_Index, :]
        Subjects_Score_test = Subjects_Score[Fold_J_Index]
        Subjects_Data_train = np.delete(Subjects_Data, Fold_J_Index, axis=0)
        Subjects_Score_train = np.delete(Subjects_Score, Fold_J_Index)

        if Permutation_Flag:
            # If doing permutation, the training scores should be permuted,
            # while the testing scores remain unchanged
            Subjects_Index_Random = np.arange(len(Subjects_Score_train))
            np.random.shuffle(Subjects_Index_Random)
            Subjects_Score_train = Subjects_Score_train[Subjects_Index_Random]
            if j == 0:
                RandIndex = {'Fold_0': Subjects_Index_Random}
            else:
                RandIndex['Fold_' + str(j)] = Subjects_Index_Random

        normalize = preprocessing.MinMaxScaler()
        Subjects_Data_train = normalize.fit_transform(Subjects_Data_train)
        Subjects_Data_test = normalize.transform(Subjects_Data_test)

        clf = linear_model.LinearRegression()
        clf.fit(Subjects_Data_train, Subjects_Score_train)
        Fold_J_Score = clf.predict(Subjects_Data_test)

        Fold_J_Corr = np.corrcoef(Fold_J_Score, Subjects_Score_test)
        Fold_J_Corr = Fold_J_Corr[0, 1]
        Fold_Corr.append(Fold_J_Corr)
        Fold_J_MAE = np.mean(
            np.abs(np.subtract(Fold_J_Score, Subjects_Score_test)))
        Fold_MAE.append(Fold_J_MAE)

        Fold_J_result = {
            'Index': Fold_J_Index,
            'Test_Score': Subjects_Score_test,
            'Predict_Score': Fold_J_Score,
            'Corr': Fold_J_Corr,
            'MAE': Fold_J_MAE
        }
        Fold_J_FileName = 'Fold_' + str(j) + '_Score.mat'
        ResultantFile = os.path.join(ResultantFolder, Fold_J_FileName)
        sio.savemat(ResultantFile, Fold_J_result)

    Fold_Corr = [0 if np.isnan(x) else x for x in Fold_Corr]
    Mean_Corr = np.mean(Fold_Corr)
    Mean_MAE = np.mean(Fold_MAE)
    Res_NFold = {
        'Mean_Corr': Mean_Corr,
        'Mean_MAE': Mean_MAE
    }
    ResultantFile = os.path.join(ResultantFolder, 'Res_NFold.mat')
    sio.savemat(ResultantFile, Res_NFold)
    return (Mean_Corr, Mean_MAE)
def colifilt(X, ha, hb):
    """Filter the columns of image X using the two filters ha and hb =
    reverse(ha).

    ha operates on the odd samples of X and hb on the even samples. Both
    filters should be even length, and h should be approx linear phase with
    a quarter sample advance from its mid pt (i.e. :math:`|h(m/2)| > |h(m/2 + 1)|`).

    .. code-block:: text

                        ext        left edge        right edge        ext
        Level 2:      !        |        !        |        !
        +q filt on x    b   b   a   a   a   a   b   b
        -q filt on o      a   a   b   b   b   b   a   a
        Level 1:      !        |        !        |        !
        odd filt on .  b b b b a a a a a a a a b b b b
        odd filt on .    a a a a b b b b b b b b a a a a

    The output is interpolated by two from the input sample rate and the
    results from the two filters, Ya and Yb, are interleaved to give Y.
    Symmetric extension with repeated end samples is used on the composite X
    columns before each filter is applied.

    .. codeauthor:: Rich Wareham <*****@*****.**>, August 2013
    .. codeauthor:: Cian Shaffrey, Cambridge University, August 2000
    .. codeauthor:: Nick Kingsbury, Cambridge University, August 2000
    """
    # Make sure all inputs are arrays
    X = asfarray(X)
    ha = asfarray(ha)
    hb = asfarray(hb)

    r, c = X.shape
    if r % 2 != 0:
        raise ValueError('No. of rows in X must be a multiple of 2')

    if ha.shape != hb.shape:
        raise ValueError('Shapes of ha and hb must be the same')

    if ha.shape[0] % 2 != 0:
        raise ValueError('Lengths of ha and hb must be even')

    m = ha.shape[0]
    m2 = np.fix(m * 0.5)

    Y = np.zeros((r * 2, c), dtype=X.dtype)
    if not np.any(np.nonzero(X[:])[0]):
        return Y

    if m2 % 2 == 0:
        # m/2 is even, so set up t to start on d samples.
        # Set up vector for symmetric extension of X with repeated end samples.
        # Use 'reflect' so r < m2 works OK.
        xe = reflect(np.arange(-m2, r + m2, dtype=int), -0.5, r - 0.5)

        t = np.arange(3, r + m, 2)
        if np.sum(ha * hb) > 0:
            ta = t
            tb = t - 1
        else:
            ta = t - 1
            tb = t

        # Select odd and even samples from ha and hb. Note that due to
        # 0-indexing 'odd' and 'even' are not perhaps what you might expect
        # them to be.
        hao = as_column_vector(ha[0:m:2])
        hae = as_column_vector(ha[1:m:2])
        hbo = as_column_vector(hb[0:m:2])
        hbe = as_column_vector(hb[1:m:2])

        s = np.arange(0, r * 2, 4)

        Y[s, :] = _column_convolve(X[xe[tb - 2], :], hae)
        Y[s + 1, :] = _column_convolve(X[xe[ta - 2], :], hbe)
        Y[s + 2, :] = _column_convolve(X[xe[tb], :], hao)
        Y[s + 3, :] = _column_convolve(X[xe[ta], :], hbo)
    else:
        # m/2 is odd, so set up t to start on b samples.
        # Set up vector for symmetric extension of X with repeated end samples.
        # Use 'reflect' so r < m2 works OK.
        xe = reflect(np.arange(-m2, r + m2, dtype=int), -0.5, r - 0.5)

        t = np.arange(2, r + m - 1, 2)
        if np.sum(ha * hb) > 0:
            ta = t
            tb = t - 1
        else:
            ta = t - 1
            tb = t

        # Select odd and even samples from ha and hb. Note that due to
        # 0-indexing 'odd' and 'even' are not perhaps what you might expect
        # them to be.
        hao = as_column_vector(ha[0:m:2])
        hae = as_column_vector(ha[1:m:2])
        hbo = as_column_vector(hb[0:m:2])
        hbe = as_column_vector(hb[1:m:2])

        s = np.arange(0, r * 2, 4)

        Y[s, :] = _column_convolve(X[xe[tb], :], hao)
        Y[s + 1, :] = _column_convolve(X[xe[ta], :], hbo)
        Y[s + 2, :] = _column_convolve(X[xe[tb], :], hae)
        Y[s + 3, :] = _column_convolve(X[xe[ta], :], hbe)

    return Y
Remove the decimals, and return the float number closest to zero. Use the trunc() and fix() functions.

Example
Truncate the elements of the following array:

import numpy as np
arr = np.trunc([-3.1666, 3.6667])
print(arr)

Example
Same example, using fix():

import numpy as np
arr = np.fix([-3.1666, 3.6667])
print(arr)

Rounding
The around() function increments the preceding digit or decimal by 1 if the next digit is >= 5, and otherwise leaves it unchanged. E.g. rounded off to 1 decimal place, 3.16666 is 3.2.

Example
Round off 3.1666 to 2 decimal places:

import numpy as np
arr = np.around(3.1666, 2)
print(arr)
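To make the behaviour of fix() on negative values concrete, here is a small comparison with floor() and trunc() (the commented results are what NumPy returns for these inputs):

import numpy as np

arr = np.array([-3.1666, 3.6667])
print(np.fix(arr))    # [-3.  3.]  rounds toward zero
print(np.floor(arr))  # [-4.  3.]  rounds toward negative infinity
print(np.trunc(arr))  # [-3.  3.]  same as fix for real-valued input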
def double2time(v):
    hh = np.int32(np.fix(v / 3600.))
    mm = np.int32(np.fix((v - hh * 3600) / 60.))
    ss = v - hh * 3600. - mm * 60.
    return "%02d" % hh + ":%02d" % mm + ":%07.4f" % ss
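A quick usage example for the formatter above (it only assumes numpy is imported as np): 3723.5 seconds is 1 h, 2 min and 3.5 s.

print(double2time(3723.5))   # expected: 01:02:03.5000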
def detect_face_base_pr_net(input_image, p_predict_func, r_predict_func):
    img = misc.imread(input_image, mode='RGB')
    h, w, _ = img.shape
    minsize = np.min([h, w])
    m = 12 / math.floor(minsize * 0.1)
    minsize = minsize * m
    factor_count = 0
    factor = 0.709  # scale factor
    scales = []
    while minsize >= 12:
        scales.append(m * np.power(factor, factor_count))
        minsize = minsize * factor
        factor_count += 1

    total_boxes = np.empty((0, 9))
    # print("scale num" + str(len(scales)))
    for i in range(len(scales)):
        hs = int(np.ceil(h * scales[i]))
        ws = int(np.ceil(w * scales[i]))
        new_img = cv2.resize(img, (ws, hs), interpolation=cv2.INTER_AREA)
        image = np.expand_dims(new_img, axis=0)

        predict_result = p_predict_func([image])
        pre_label = predict_result[0]
        pre_box = predict_result[1]
        boxes, _ = generateBoundingBox(predict_result[0][0, :, :, 1],
                                       predict_result[1][0, :, :, ],
                                       scales[i], 0.6)

        pick = nms(boxes.copy(), 0.5, 'Union')
        if boxes.size > 0 and pick.size > 0:
            boxes = boxes[pick, :]
            total_boxes = np.append(total_boxes, boxes, axis=0)

    numbox = total_boxes.shape[0]  # 239
    if numbox > 0:
        pick = nms(total_boxes.copy(), 0.7, 'Union')  # return (239,)
        total_boxes = total_boxes[pick, :]
        bbw = total_boxes[:, 2] - total_boxes[:, 0]
        bbh = total_boxes[:, 3] - total_boxes[:, 1]
        x1 = total_boxes[:, 0] + total_boxes[:, 5] * bbw
        y1 = total_boxes[:, 1] + total_boxes[:, 6] * bbh
        x2 = total_boxes[:, 2] + total_boxes[:, 7] * bbw
        y2 = total_boxes[:, 3] + total_boxes[:, 8] * bbh
        total_boxes = np.transpose(
            np.vstack([x1, y1, x2, y2, total_boxes[:, 4]]))
        # (239, 5)  4: xmin, ymin, xmax, ymax, prob
        total_boxes = rerec(total_boxes.copy())
        total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(
            total_boxes.copy(), w, h)
    # pdb.set_trace()

    numbox = total_boxes.shape[0]  # 239
    if numbox > 0:
        # second stage
        tempimg = np.zeros((numbox, 24, 24, 3))
        for k in range(0, numbox):
            tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
            tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
            if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
                tempimg[k, :, :, :] = cv2.resize(tmp, (24, 24),
                                                 interpolation=cv2.INTER_AREA)
            else:
                return np.empty(0)

        out = r_predict_func([tempimg])
        out0 = np.transpose(out[0])  # classification
        out1 = np.transpose(out[1])  # regression
        score = out0[1, :]
        ipass = np.where(score > 0.7)
        total_boxes = np.hstack([
            total_boxes[ipass[0], 0:4].copy(),
            np.expand_dims(score[ipass].copy(), 1)
        ])
        mv = out1[:, ipass[0]]
        if total_boxes.shape[0] > 0:
            pick = nms(total_boxes, 0.7, 'Union')
            total_boxes = total_boxes[pick, :]
            total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
            total_boxes = rerec(total_boxes.copy())

    # numbox = total_boxes.shape[0]
    # print(numbox)
    # for i in range(numbox):
    #     boxes = total_boxes[i, 0:-1]
    #     cv2.rectangle(img, (int(boxes[0]), int(boxes[1])),
    #                   (int(boxes[2]), int(boxes[3])), (0, 255, 0))
    # misc.imsave(str(uuid.uuid4()) + '.jpg', img)
    # print("over=====")
    return total_boxes
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
    """Detects faces in an image, and returns bounding boxes and points for them.
    img: input image
    minsize: minimum faces' size
    pnet, rnet, onet: caffemodel
    threshold: threshold=[th1, th2, th3], th1-3 are three steps's threshold
    factor: the factor used to create a scaling pyramid of face sizes to
            detect in the image.
    """
    # Commentary on this code (in Chinese): https://www.cnblogs.com/the-home-of-123/p/9857056.html
    factor_count = 0
    total_boxes = np.empty((0, 9))  # (2: q1 + 2: q2 + 1: score + 4: reg) -> 9 columns
    points = np.empty(0)
    h = img.shape[0]
    w = img.shape[1]
    minl = np.amin([h, w])
    # Faces come in different sizes depending on the application. With the
    # usual 12x12 receptive field per output cell, detecting faces of size
    # minsize (20) requires first shrinking the image by 12/minsize = 0.6,
    # which becomes the effective input to the network.
    m = 12.0 / minsize
    minl = minl * m
    # create scale pyramid: level 1 is 12/minsize (0.6) of the original, and
    # each further level multiplies by factor (0.709); keep shrinking until
    # the short side would drop below the 12-pixel receptive field
    scales = []  # scale ratio of each level (aspect ratio stays fixed)
    while minl >= 12:
        scales += [m * np.power(factor, factor_count)]
        minl = minl * factor
        factor_count += 1

    # first stage
    for scale in scales:
        hs = int(np.ceil(h * scale))
        ws = int(np.ceil(w * scale))
        im_data = imresample(img, (hs, ws))   # downsample the image
        im_data = (im_data - 127.5) / 128.0   # normalize to [-1, 1] (255/2) for faster convergence
        img_x = np.expand_dims(im_data, 0)    # add a batch dimension
        img_y = np.transpose(img_x, (0, 2, 1, 3))
        out = pnet(img_y)                     # P-Net output
        out0 = np.transpose(out[0], (0, 2, 1, 3))
        out1 = np.transpose(out[1], (0, 2, 1, 3))
        # out0: box regression offsets (tx, ty, tw, th); out1: face probability
        boxes, _ = generateBoundingBox(out1[0, :, :, 1].copy(),
                                       out0[0, :, :, :].copy(), scale, threshold[0])

        # inter-scale nms
        pick = nms(boxes.copy(), 0.5, 'Union')
        if boxes.size > 0 and pick.size > 0:
            boxes = boxes[pick, :]
            # candidate boxes from every scale that pass the threshold are
            # collected together, shape [n_boxes, 9]
            total_boxes = np.append(total_boxes, boxes, axis=0)

    numbox = total_boxes.shape[0]
    if numbox > 0:
        pick = nms(total_boxes.copy(), 0.7, 'Union')  # stricter NMS across all scales
        total_boxes = total_boxes[pick, :]
        # q1, q2 are the top-left / bottom-right corners in the original image:
        # [x1, y1, x2, y2, score, tx1, ty1, tx2, ty2]
        regw = total_boxes[:, 2] - total_boxes[:, 0]
        regh = total_boxes[:, 3] - total_boxes[:, 1]
        # apply the regression offsets: shift each corner by offset * box width / height
        qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
        qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
        qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
        qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
        # refined top-left / bottom-right corners plus score, shape [n_boxes, 5]
        total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
        total_boxes = rerec(total_boxes.copy())  # expand the refined boxes into squares
        total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)  # truncate coordinates to integers
        # clip the boxes so they stay inside the image; pad() returns the target
        # indices inside the crop, the clipped source indices and the box sizes
        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)

    numbox = total_boxes.shape[0]
    if numbox > 0:
        # second stage: crop the P-Net boxes, resample them to 24x24 and feed them to R-Net
        tempimg = np.zeros((24, 24, 3, numbox))
        for k in range(0, numbox):
            tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))  # buffer for the k-th candidate crop
            tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]  # crop the candidate region
            if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
                tempimg[:, :, :, k] = imresample(tmp, (24, 24))  # resample to 24x24
            else:
                return np.empty(0)
        tempimg = (tempimg - 127.5) / 128.0
        tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
        out = rnet(tempimg1)
        out0 = np.transpose(out[0])  # box regression offsets
        out1 = np.transpose(out[1])  # face scores
        score = out1[1, :]
        ipass = np.where(score > threshold[1])  # keep candidates with a high face probability
        total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(),
                                 np.expand_dims(score[ipass].copy(), 1)])  # surviving boxes
        mv = out0[:, ipass[0]]  # their regression offsets
        if total_boxes.shape[0] > 0:
            pick = nms(total_boxes, 0.7, 'Union')  # NMS on the R-Net results
            total_boxes = total_boxes[pick, :]
            total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))  # refine boxes with the offsets
            total_boxes = rerec(total_boxes.copy())

    numbox = total_boxes.shape[0]
    if numbox > 0:
        # third stage
        total_boxes = np.fix(total_boxes).astype(np.int32)  # truncate the R-Net boxes to integers: [x1, y1, x2, y2, score]
        # clip again; a box may extend past the image border (e.g. x1 = -1)
        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
        tempimg = np.zeros((48, 48, 3, numbox))
        for k in range(0, numbox):
            tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
            tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
            if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
                tempimg[:, :, :, k] = imresample(tmp, (48, 48))
            else:
                return np.empty(0)
        tempimg = (tempimg - 127.5) / 128.0
        tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
        out = onet(tempimg1)
        out0 = np.transpose(out[0])  # box regression offsets
        out1 = np.transpose(out[1])  # 5 landmarks (2 eyes, nose, 2 mouth corners) x 2 coords = 10 values
        out2 = np.transpose(out[2])  # face probability
        score = out2[1, :]
        points = out1
        ipass = np.where(score > threshold[2])
        # landmark offsets, ordered [x_left_eye, x_right_eye, x_nose, x_left_mouth,
        # x_right_mouth; y_left_eye, ...]; x offsets scale with the box width and
        # y offsets with the box height
        points = points[:, ipass[0]]
        total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(),
                                 np.expand_dims(score[ipass].copy(), 1)])  # final boxes in original-image coordinates
        mv = out0[:, ipass[0]]  # their regression offsets

        w = total_boxes[:, 2] - total_boxes[:, 0] + 1
        h = total_boxes[:, 3] - total_boxes[:, 1] + 1
        # convert landmark offsets to original-image coordinates:
        # x = offset * box width + box x1, y = offset * box height + box y1
        points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
        points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
        if total_boxes.shape[0] > 0:
            total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))  # refine boxes with the O-Net offsets
            pick = nms(total_boxes.copy(), 0.7, 'Min')
            total_boxes = total_boxes[pick, :]
            points = points[:, pick]

    # boxes [x1, y1, x2, y2, score] and landmarks, all in original-image coordinates
    return total_boxes, points
def detect_face(img, gts, imgpath, p_model):
    image_copy = cv2.imread(imgpath)
    factor = 0.709
    minsize = 20
    factor_count = 0
    total_boxes = np.zeros((0, 9), float)
    h = img.shape[0]
    w = img.shape[1]
    minl = min(h, w)
    img = img.astype(float)
    m = 12.0 / minsize
    minl = minl * m  # scale by m (0.6); keep shrinking until the short side reaches 12

    # create scale pyramid
    scales = [0.6]
    while minl >= 12:
        scales.append(m * pow(factor, factor_count))
        minl *= factor
        factor_count += 1

    for scale in scales:
        hs = int(np.ceil(h * scale))
        ws = int(np.ceil(w * scale))
        img_x = transform.resize(img, (hs, ws))
        img_data = image_generalize(img_x)
        img_data = np.expand_dims(img_data, axis=0)
        img_data1 = img_data
        out = p_model.predict(img_data)
        cla = out[0][0, :, :, 0]
        bbox = out[1][0]
        #print(cla.shape)
        boxes = generateBoundingBox(cla, bbox, scale, 0.6)
        if boxes.shape[0] != 0:
            pick = nms(boxes, 0.5, 'Union')
            #print('pick:', pick)
            if len(pick) > 0:
                boxes = boxes[pick, :]
        if boxes.shape[0] != 0:
            total_boxes = np.concatenate((total_boxes, boxes), axis=0)

    numbox = total_boxes.shape[0]
    if numbox > 0:
        # nms
        pick = nms(total_boxes, 0.7, 'Union')
        if len(pick) > 0:
            total_boxes = total_boxes[pick, :]
            # revise and convert to square
            regh = total_boxes[:, 3] - total_boxes[:, 1]
            regw = total_boxes[:, 2] - total_boxes[:, 0]
            t1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
            t2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
            t3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
            t4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
            total_boxes = np.array([t1, t2, t3, t4]).T
            total_boxes = rerec(total_boxes)
            #print("[4]:", total_boxes.shape[0])
            total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4])
            #print("[4.5]:", total_boxes.shape[0])
            [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h)
            #print(tmpw[11], tmph[11])

    numbox = total_boxes.shape[0]
    if numbox > 0:
        # second stage
        # construct input for RNet
        tempimg = np.zeros((numbox, 24, 24, 3))  # (24, 24, 3, numbox)
        for k in range(numbox):
            if tmph[k] < 0 or tmpw[k] < 0 or int(x[k]) == int(ex[k]) + 1 or int(y[k]) == int(ey[k]) + 1:
                continue
            tmp = np.zeros((int(tmph[k]) + 1, int(tmpw[k]) + 1, 3))
            try:
                tmp[int(dy[k]):int(edy[k]) + 1, int(dx[k]):int(edx[k]) + 1] = \
                    image_copy[int(y[k]):int(ey[k]) + 1, int(x[k]):int(ex[k]) + 1]
                resized_img = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_LINEAR)
                crop = [int(x[k]), int(y[k]), int(ex[k] + 1), int(ey[k] + 1)]
                size = int(ey[k]) + 1 - int(y[k]) + 1
                Iou, index = IoU2(crop, gts)
                if Iou >= 0.65:
                    global p_idx
                    offset_x1 = (gts[index][0] - crop[0]) / float(size)
                    offset_y1 = (gts[index][1] - crop[1]) / float(size)
                    offset_x2 = (gts[index][2] - crop[2]) / float(size)
                    offset_y2 = (gts[index][3] - crop[3]) / float(size)
                    save_file = os.path.join(pos_save_dir, "%s.jpg" % p_idx)
                    f1.write(pos_save_dir + '/' + "%s" % p_idx + '.jpg' +
                             ' 1 %f %f %f %f\n' % (offset_x1, offset_y1, offset_x2, offset_y2))
                    cv2.imwrite(save_file, resized_img)
                    p_idx += 1
                elif Iou >= 0.4:
                    global d_idx
                    offset_x1 = (gts[index][0] - crop[0]) / float(size)
                    offset_y1 = (gts[index][1] - crop[1]) / float(size)
                    offset_x2 = (gts[index][2] - crop[2]) / float(size)
                    offset_y2 = (gts[index][3] - crop[3]) / float(size)
                    save_file = os.path.join(part_save_dir, "%s.jpg" % d_idx)
                    f3.write(part_save_dir + '/' + "%s" % d_idx + '.jpg' +
                             ' -1 %f %f %f %f\n' % (offset_x1, offset_y1, offset_x2, offset_y2))
                    cv2.imwrite(save_file, resized_img)
                    d_idx += 1
                elif Iou < 0.3:
                    global n_idx
                    save_file = os.path.join(neg_save_dir, "%s.jpg" % n_idx)
                    f2.write(neg_save_dir + '/' + "%s" % n_idx + '.jpg' + ' 0 -1 -1 -1 -1\n')
                    cv2.imwrite(save_file, resized_img)
                    n_idx += 1
            except:
                continue
            global total_idx
            total_idx += 1
            if total_idx % 50 == 0:
                print('total:', total_idx)
                print('positive:', p_idx)
                print('negative:', n_idx)
                print('part:', d_idx)
def fixp(self, y, scaling='mult'): """ Return fixed-point integer or fractional representation for `y` (scalar or array-like) with the same shape as `y`. Saturation / two's complement wrapping happens outside the range +/- MSB, requantization (round, floor, fix, ...) is applied on the ratio `y / LSB`. Parameters ---------- y: scalar or array-like object input value (floating point format) to be quantized scaling: String Determine the scaling before and after quantizing / saturation *'mult'* float in, int out: `y` is multiplied by `self.scale` *before* quantizing / saturating **'div'**: int in, float out: `y` is divided by `self.scale` *after* quantizing / saturating. **'multdiv'**: float in, float out (default): both of the above For all other settings, `y` is transformed unscaled. Returns ------- float scalar or ndarray with the same shape as `y`, in the range `-2*self.MSB` ... `2*self.MSB-self.LSB` Examples: --------- >>> q_obj_a = {'WI':1, 'WF':6, 'ovfl':'sat', 'quant':'round'} >>> myQa = Fixed(q_obj_a) # instantiate fixed-point object myQa >>> myQa.resetN() # reset overflow counter >>> a = np.arange(0,5, 0.05) # create input signal >>> aq = myQa.fixed(a) # quantize input signal >>> plt.plot(a, aq) # plot quantized vs. original signal >>> print(myQa.N_over, "overflows!") # print number of overflows >>> # Convert output to same format as input: >>> b = np.arange(200, dtype = np.int16) >>> btype = np.result_type(b) >>> # MSB = 2**7, LSB = 2**2: >>> q_obj_b = {'WI':7, 'WF':-2, 'ovfl':'wrap', 'quant':'round'} >>> myQb = Fixed(q_obj_b) # instantiate fixed-point object myQb >>> bq = myQb.fixed(b) >>> bq = bq.astype(btype) # restore original variable type """ #====================================================================== # (1) : Convert input argument into proper floating point scalars / # arrays and initialize flags #====================================================================== if np.shape(y): # create empty arrays for result and overflows with same shape as y # for speedup, test for invalid types SCALAR = False y = np.asarray(y) # convert lists / tuples / ... 
to numpy arrays yq = np.zeros(y.shape) over_pos = over_neg = np.zeros(y.shape, dtype=bool) self.ovr_flag = np.zeros(y.shape, dtype=int) if np.issubdtype(y.dtype, np.number): pass elif y.dtype.kind in {'U', 'S'}: # string or unicode try: y = y.astype(np.float64) # try to convert to float except (TypeError, ValueError): try: np.char.replace(y, ' ', '') # remove all whitespace y = y.astype(complex) # try to convert to complex except (TypeError, ValueError ) as e: # try converting elements recursively y = list( map( lambda y_scalar: self.fixp(y_scalar, scaling=scaling), y)) else: logger.error("Argument '{0}' is of type '{1}',\n" "cannot convert to float.".format(y, y.dtype)) y = np.zeros(y.shape) else: SCALAR = True # get rid of errors that have occurred upstream if y is None or str(y) == "": y = 0 # If y is not a number, convert to string, remove whitespace and convert # to complex format: elif not np.issubdtype(type(y), np.number): y = qstr(y) y = y.replace(' ', '') # remove all whitespace try: y = float(y) except (TypeError, ValueError): try: y = complex(y) except (TypeError, ValueError) as e: logger.error("Argument '{0}' yields \n {1}".format( y, e)) y = 0.0 over_pos = over_neg = yq = 0 self.ovr_flag = 0 # convert pseudo-complex (imag = 0) and complex values to real y = np.real_if_close(y) if np.iscomplexobj(y): logger.warning( "Casting complex values to real before quantization!") # quantizing complex objects is not supported yet y = y.real scaling = scaling.lower() y_in = y # y before scaling / quantizing #====================================================================== # (2) : Multiply by `scale` factor before requantization and saturation # when `scaling=='mult'`or 'multdiv' #====================================================================== y = y / self.LSB if scaling in {'mult', 'multdiv'}: y = y * self.scale #====================================================================== # (3) : Divide by LSB and apply selected quantization method to convert # floating point inputs to "fixpoint integers" arrays # Next, multiply by LSB to restore original scale #===================================================================== if self.quant == 'floor': yq = np.floor(y) # largest integer i, such that i <= x (= binary truncation) elif self.quant == 'round': yq = np.round(y) # rounding, also = binary rounding elif self.quant == 'fix': yq = np.fix(y) # round to nearest integer towards zero ("Betragsschneiden") elif self.quant == 'ceil': yq = np.ceil(y) # smallest integer i, such that i >= x elif self.quant == 'rint': yq = np.rint(y) # round towards nearest int elif self.quant == 'none': yq = y # return unquantized value else: raise Exception('Unknown Requantization type "%s"!' % (self.quant)) yq = yq * self.LSB logger.debug("y_in={0} | y={1} | yq={2}".format(y_in, y, yq)) #====================================================================== # (4) : Handle Overflow / saturation in relation to MSB #===================================================================== if self.ovfl == 'none': pass else: # Bool. vectors with '1' for every neg./pos overflow: over_neg = (yq < self.MIN) over_pos = (yq > self.MAX) # create flag / array of flags for pos. / neg. overflows self.ovr_flag = over_pos.astype(int) - over_neg.astype(int) # No. of pos. / neg. 
/ all overflows occurred since last reset: self.N_over_neg += np.sum(over_neg) self.N_over_pos += np.sum(over_pos) self.N_over = self.N_over_neg + self.N_over_pos # Replace overflows with Min/Max-Values (saturation): if self.ovfl == 'sat': yq = np.where(over_pos, self.MAX, yq) # (cond, true, false) yq = np.where(over_neg, self.MIN, yq) # Replace overflows by two's complement wraparound (wrap) elif self.ovfl == 'wrap': yq = np.where( over_pos | over_neg, yq - 4. * self.MSB * np.fix( (np.sign(yq) * 2 * self.MSB + yq) / (4 * self.MSB)), yq) else: raise Exception('Unknown overflow type "%s"!' % (self.ovfl)) #====================================================================== # (5) : Divide result by `scale` factor when `scaling=='div'` or 'multdiv' # - frmt2float() # - input_coeffs when quantizing the coefficients # float2frmt passes on the scaling argument #====================================================================== if scaling in {'div', 'multdiv'}: yq = yq / self.scale if SCALAR and isinstance(yq, np.ndarray): yq = yq.item() # convert singleton array to scalar return yq
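# The snippet below is a minimal, standalone numpy sketch (not part of the Fixed
# class) of the two's-complement wrap formula used in step (4) above. It assumes
# the range stated in the docstring, i.e. MIN = -2*MSB and MAX = 2*MSB - LSB,
# with example values MSB = 1 and LSB = 2**-3; the input values are made up.
import numpy as np

MSB, LSB = 1.0, 2.0 ** -3
yq = np.array([-2.5, -2.0, 1.875, 2.0, 2.5])          # already requantized values
over = (yq < -2 * MSB) | (yq > 2 * MSB - LSB)         # overflow mask
wrapped = np.where(over,
                   yq - 4. * MSB * np.fix((np.sign(yq) * 2 * MSB + yq) / (4 * MSB)),
                   yq)
print(wrapped)                                        # [ 1.5   -2.     1.875 -2.    -1.5  ]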
def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12, anneal_deg=60., anneal_step=0.9, extended=True, n_subgauss=1, kurt_size=6000, ext_blocks=1, max_iter=200, random_state=None, blowup=1e4, blowup_fac=0.5, n_small_angle=20, use_bias=True, verbose=None): """Run (extended) Infomax ICA decomposition on raw data. Parameters ---------- data : np.ndarray, shape (n_samples, n_features) The whitened data to unmix. weights : np.ndarray, shape (n_features, n_features) The initialized unmixing matrix. Defaults to None, which means the identity matrix is used. l_rate : float This quantity indicates the relative size of the change in weights. Defaults to ``0.01 / log(n_features ** 2)``. .. note:: Smaller learning rates will slow down the ICA procedure. block : int The block size of randomly chosen data segments. Defaults to floor(sqrt(n_times / 3.)). w_change : float The change at which to stop iteration. Defaults to 1e-12. anneal_deg : float The angle (in degrees) at which the learning rate will be reduced. Defaults to 60.0. anneal_step : float The factor by which the learning rate will be reduced once ``anneal_deg`` is exceeded: ``l_rate *= anneal_step.`` Defaults to 0.9. extended : bool Whether to use the extended Infomax algorithm or not. Defaults to True. n_subgauss : int The number of subgaussian components. Only considered for extended Infomax. Defaults to 1. kurt_size : int The window size for kurtosis estimation. Only considered for extended Infomax. Defaults to 6000. ext_blocks : int Only considered for extended Infomax. If positive, denotes the number of blocks after which to recompute the kurtosis, which is used to estimate the signs of the sources. In this case, the number of sub-gaussian sources is automatically determined. If negative, the number of sub-gaussian sources to be used is fixed and equal to n_subgauss. In this case, the kurtosis is not estimated. Defaults to 1. max_iter : int The maximum number of iterations. Defaults to 200. %(random_state)s blowup : float The maximum difference allowed between two successive estimations of the unmixing matrix. Defaults to 10000. blowup_fac : float The factor by which the learning rate will be reduced if the difference between two successive estimations of the unmixing matrix exceededs ``blowup``: ``l_rate *= blowup_fac``. Defaults to 0.5. n_small_angle : int | None The maximum number of allowed steps in which the angle between two successive estimations of the unmixing matrix is less than ``anneal_deg``. If None, this parameter is not taken into account to stop the iterations. Defaults to 20. use_bias : bool This quantity indicates if the bias should be computed. Defaults to True. %(verbose)s Returns ------- unmixing_matrix : np.ndarray, shape (n_features, n_features) The linear unmixing operator. References ---------- .. [1] A. J. Bell, T. J. Sejnowski. An information-maximization approach to blind separation and blind deconvolution. Neural Computation, 7(6), 1129-1159, 1995. .. [2] T. W. Lee, M. Girolami, T. J. Sejnowski. Independent component analysis using an extended infomax algorithm for mixed subgaussian and supergaussian sources. Neural Computation, 11(2), 417-441, 1999. 
""" from scipy.stats import kurtosis rng = check_random_state(random_state) # define some default parameters max_weight = 1e8 restart_fac = 0.9 min_l_rate = 1e-10 degconst = 180.0 / np.pi # for extended Infomax extmomentum = 0.5 signsbias = 0.02 signcount_threshold = 25 signcount_step = 2 # check data shape n_samples, n_features = data.shape n_features_square = n_features**2 # check input parameters # heuristic default - may need adjustment for large or tiny data sets if l_rate is None: l_rate = 0.01 / math.log(n_features**2.0) if block is None: block = int(math.floor(math.sqrt(n_samples / 3.0))) logger.info('Computing%sInfomax ICA' % ' Extended ' if extended else ' ') # collect parameters nblock = n_samples // block lastt = (nblock - 1) * block + 1 # initialize training if weights is None: weights = np.identity(n_features, dtype=np.float64) else: weights = weights.T BI = block * np.identity(n_features, dtype=np.float64) bias = np.zeros((n_features, 1), dtype=np.float64) onesrow = np.ones((1, block), dtype=np.float64) startweights = weights.copy() oldweights = startweights.copy() step = 0 count_small_angle = 0 wts_blowup = False blockno = 0 signcount = 0 initial_ext_blocks = ext_blocks # save the initial value in case of reset # for extended Infomax if extended: signs = np.ones(n_features) for k in range(n_subgauss): signs[k] = -1 kurt_size = min(kurt_size, n_samples) old_kurt = np.zeros(n_features, dtype=np.float64) oldsigns = np.zeros(n_features) # trainings loop olddelta, oldchange = 1., 0. while step < max_iter: # shuffle data at each step permute = random_permutation(n_samples, rng) # ICA training block # loop across block samples for t in range(0, lastt, block): u = np.dot(data[permute[t:t + block], :], weights) u += np.dot(bias, onesrow).T if extended: # extended ICA update y = np.tanh(u) weights += l_rate * np.dot( weights, BI - signs[None, :] * np.dot(u.T, y) - np.dot(u.T, u)) if use_bias: bias += l_rate * np.reshape( np.sum(y, axis=0, dtype=np.float64) * -2.0, (n_features, 1)) else: # logistic ICA weights update y = 1.0 / (1.0 + np.exp(-u)) weights += l_rate * np.dot(weights, BI + np.dot(u.T, (1.0 - 2.0 * y))) if use_bias: bias += l_rate * np.reshape( np.sum((1.0 - 2.0 * y), axis=0, dtype=np.float64), (n_features, 1)) # check change limit max_weight_val = np.max(np.abs(weights)) if max_weight_val > max_weight: wts_blowup = True blockno += 1 if wts_blowup: break # ICA kurtosis estimation if extended: if ext_blocks > 0 and blockno % ext_blocks == 0: if kurt_size < n_samples: rp = np.floor( rng.uniform(0, 1, kurt_size) * (n_samples - 1)) tpartact = np.dot(data[rp.astype(int), :], weights).T else: tpartact = np.dot(data, weights).T # estimate kurtosis kurt = kurtosis(tpartact, axis=1, fisher=True) if extmomentum != 0: kurt = (extmomentum * old_kurt + (1.0 - extmomentum) * kurt) old_kurt = kurt # estimate weighted signs signs = np.sign(kurt + signsbias) ndiff = (signs - oldsigns != 0).sum() if ndiff == 0: signcount += 1 else: signcount = 0 oldsigns = signs if signcount >= signcount_threshold: ext_blocks = np.fix(ext_blocks * signcount_step) signcount = 0 # here we continue after the for loop over the ICA training blocks # if weights in bounds: if not wts_blowup: oldwtchange = weights - oldweights step += 1 angledelta = 0.0 delta = oldwtchange.reshape(1, n_features_square) change = np.sum(delta * delta, dtype=np.float64) if step > 2: angledelta = math.acos( np.sum(delta * olddelta) / math.sqrt(change * oldchange)) angledelta *= degconst if verbose: logger.info( 'step %d - lrate %5f, 
wchange %8.8f, angledelta %4.1f deg' % (step, l_rate, change, angledelta)) # anneal learning rate oldweights = weights.copy() if angledelta > anneal_deg: l_rate *= anneal_step # anneal learning rate # accumulate angledelta until anneal_deg reaches l_rate olddelta = delta oldchange = change count_small_angle = 0 # reset count when angledelta is large else: if step == 1: # on first step only olddelta = delta # initialize oldchange = change if n_small_angle is not None: count_small_angle += 1 if count_small_angle > n_small_angle: max_iter = step # apply stopping rule if step > 2 and change < w_change: step = max_iter elif change > blowup: l_rate *= blowup_fac # restart if weights blow up (for lowering l_rate) else: step = 0 # start again wts_blowup = 0 # re-initialize variables blockno = 1 l_rate *= restart_fac # with lower learning rate weights = startweights.copy() oldweights = startweights.copy() olddelta = np.zeros((1, n_features_square), dtype=np.float64) bias = np.zeros((n_features, 1), dtype=np.float64) ext_blocks = initial_ext_blocks # for extended Infomax if extended: signs = np.ones(n_features) for k in range(n_subgauss): signs[k] = -1 oldsigns = np.zeros(n_features) if l_rate > min_l_rate: if verbose: logger.info('... lowering learning rate to %g' '\n... re-starting...' % l_rate) else: raise ValueError('Error in Infomax ICA: unmixing_matrix matrix' 'might not be invertible!') # prepare return values return weights.T
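# A self-contained numpy sketch (illustrative sizes, random data) of the
# non-extended (logistic) weight update performed inside the training loop of
# infomax() above; this is not MNE's API, just the inner arithmetic on one block.
import numpy as np

rng = np.random.RandomState(0)
n_features, block, l_rate = 4, 64, 0.01
data_block = rng.randn(block, n_features)              # stands in for one whitened data block
weights = np.identity(n_features)
bias = np.zeros((n_features, 1))
onesrow = np.ones((1, block))
BI = block * np.identity(n_features)

u = data_block @ weights + (bias @ onesrow).T
y = 1.0 / (1.0 + np.exp(-u))                           # logistic nonlinearity
weights += l_rate * (weights @ (BI + u.T @ (1.0 - 2.0 * y)))
bias += l_rate * np.sum(1.0 - 2.0 * y, axis=0).reshape(n_features, 1)
print(np.max(np.abs(weights - np.identity(n_features))))   # small update away from identity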
def clean_iter(tim, freq, vis, imageprefix, ncpu, twidth, doreg, ephemfile, ephem, msinfofile, outlierfile, field, spw, selectdata, uvrange, antenna, scan, observation, intent, mode, resmooth,gridmode, wprojplanes, facets, cfcache, rotpainc, painc, aterm, psterm, mterm, wbawp, conjbeams, epjtable, interpolation, niter, gain, threshold, psfmode, imagermode, ftmachine, mosweight, scaletype, multiscale, negcomponent, smallscalebias, interactive, mask, nchan, start, width, outframe, veltype, imsize, cell, phasecenter, restfreq, stokes, weighting, robust, uvtaper, outertaper, innertaper, modelimage, restoringbeam, pbcor, minpb, usescratch, noise, npixels, npercycle, cyclefactor, cyclespeedup, nterms, reffreq, chaniter, flatnoise, allowchunk, btidx): from taskinit import ms from taskinit import qa #from __casac__.quanta import quanta as qa from __main__ import default, inp from clean import clean bt = btidx #0 if bt+twidth < len(tim)-1: et = btidx+twidth-1 else: et = len(tim)-1 tim_d = tim/3600./24.-np.fix(tim/3600./24.) if bt == 0: bt_d=tim_d[bt]-((tim_d[bt+1]-tim_d[bt])/2) else: bt_d=tim_d[bt]-((tim_d[bt]-tim_d[bt-1])/2) if et == (len(tim)-1) or et == -1: et_d=tim_d[et]+((tim_d[et]-tim_d[et-1])/2) else: et_d=tim_d[et]+((tim_d[et+1]-tim_d[et])/2) timerange = qa.time(qa.quantity(bt_d,'d'),prec=9)[0] + '~' + \ qa.time(qa.quantity(et_d,'d'),prec=9)[0] tmid = (bt_d + et_d)/2. btstr=qa.time(qa.quantity(bt_d,'d'),prec=9,form='fits')[0] print 'cleaning timerange: ' + timerange image0=btstr.replace(':','').replace('-','') imname=imageprefix+image0 if not os.path.exists(imname): #inp(taskname = 'clean') clean(vis=vis,imagename=imname,outlierfile=outlierfile,field=field, spw=spw,selectdata=selectdata,timerange=timerange,uvrange=uvrange, antenna=antenna,scan=scan, observation=str(observation),intent=intent, mode=mode, resmooth=resmooth, gridmode=gridmode, wprojplanes=wprojplanes,facets=facets,cfcache=cfcache,rotpainc=rotpainc, painc=painc, psterm=psterm,aterm=aterm,mterm=mterm,wbawp=wbawp,conjbeams=conjbeams, epjtable=epjtable,interpolation=interpolation,niter=niter, gain=gain, threshold=threshold,psfmode=psfmode,imagermode=imagermode, ftmachine=ftmachine,mosweight=mosweight,scaletype=scaletype, multiscale=multiscale,negcomponent=negcomponent, smallscalebias=smallscalebias,interactive=interactive, mask=mask,nchan=nchan,start=start,width=width,outframe=outframe, veltype=veltype,imsize=imsize,cell=cell,phasecenter=phasecenter, restfreq=restfreq,stokes=stokes,weighting=weighting, robust=robust,uvtaper=uvtaper,outertaper=outertaper, innertaper=innertaper,modelimage=modelimage, restoringbeam=restoringbeam,pbcor=pbcor,minpb=minpb, usescratch=usescratch,noise=noise,npixels=npixels,npercycle=npercycle, cyclefactor=cyclefactor,cyclespeedup=cyclespeedup,nterms=nterms, reffreq=reffreq,chaniter=chaniter,flatnoise=flatnoise, allowchunk=False) clnjunks=['.flux','.mask','.model','.psf','.residual'] for clnjunk in clnjunks: if os.path.exists(imname+clnjunk): shutil.rmtree(imname+clnjunk) if doreg: # check if ephemfile and msinfofile exist if not ephem: print("ephemeris info does not exist!") return reftime = [timerange] helio=vla_prep.ephem_to_helio(msinfo = msinfofile, ephem = ephem, reftime = reftime) imagefile=[imname+'.image'] fitsfile=[imname+'.fits'] vla_prep.imreg(imagefile = imagefile, fitsfile = fitsfile, helio = helio, toTb = False, scl100 = True) return
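# A numpy-only sketch (made-up timestamps) of the time-window arithmetic at the
# top of clean_iter: timestamps in seconds are reduced to a fraction of a day
# with np.fix, and the cleaning window is padded by half a sample interval on
# each side before being formatted as a CASA timerange.
import numpy as np

tim = 4.87e9 + 10.0 * np.arange(6)                    # hypothetical timestamps [s], 10 s cadence
tim_d = tim / 3600. / 24. - np.fix(tim / 3600. / 24.) # fraction of day
bt, et = 1, 3                                         # first/last time index of this chunk
bt_d = tim_d[bt] - (tim_d[bt] - tim_d[bt - 1]) / 2
et_d = tim_d[et] + (tim_d[et + 1] - tim_d[et]) / 2
print(bt_d, et_d, (bt_d + et_d) / 2.)                 # window edges and mid-point in days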
def Find_wav_kurt(x, h, g, h1, h2, h3, nlevel, Sc, Fr, Fs=1, verbose=False): """ Filter signal x in the wavelet-packet band selected by Sc and Fr and return the band coefficients, the corresponding band-pass signal, a detection threshold, the bandwidth and the centre frequency. J. Antoni : 12/2004 Translation to Python: T. Lecocq 02/2012 :param x: signal :param h: lowpass filter :param g: highpass filter :param h1: filter parameter returned by get_h_parameters :param h2: filter parameter returned by get_h_parameters :param h3: filter parameter returned by get_h_parameters :param nlevel: number of decomposition levels :param Sc: Sc = -log2(Bw)-1 with Bw the bandwidth of the filter :param Fr: in the range [0, 0.5] :param Fs: Sampling frequency of signal x :param verbose: If ``True`` outputs debugging information :type x: numpy array :type h: numpy array :type g: numpy array :type h1: numpy array :type h2: numpy array :type h3: numpy array :type nlevel: integer :type Fr: float :type Fs: integer :returns: c, s, threshold, Bw, fc """ level = np.fix((Sc)) + ((Sc % 1) >= 0.5) * (np.log2(3) - 1) Bw = 2**(-level - 1) freq_w = np.arange(0, 2**level) / 2**(level + 1) + Bw / 2. J = np.argmin(np.abs(freq_w - Fr)) fc = freq_w[J] i = int(np.round(fc / Bw - 1. / 2)) if level % 1 == 0: acoeff = binary(i, int(level)) bcoeff = [] temp_level = level else: i2 = int(np.fix((i / 3.))) temp_level = np.fix((level)) - 1 acoeff = binary(i2, int(temp_level)) bcoeff = i - i2 * 3 acoeff = acoeff[::-1] c = K_wpQ_filt(x, h, g, h1, h2, h3, acoeff, bcoeff, temp_level) t = np.arange(len(x)) / float(Fs) tc = np.linspace(t[0], t[-1], len(c)) s = np.real(c * np.exp(2j * np.pi * fc * Fs * tc)) sig = np.median(np.abs(c)) / np.sqrt(np.pi / 2.) threshold = sig * raylinv(np.array([ .999, ]), np.array([ 1, ])) return c, s, threshold, Bw, fc
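# A standalone sketch of the band-selection bookkeeping at the top of
# Find_wav_kurt; Sc and Fr are example values, not taken from a real kurtogram.
import numpy as np

Sc, Fr = 4.3, 0.18
level = np.fix(Sc) + ((Sc % 1) >= 0.5) * (np.log2(3) - 1)  # 4.0 here (fractional part < 0.5)
Bw = 2 ** (-level - 1)                                     # bandwidth of the selected band
freq_w = np.arange(0, 2 ** level) / 2 ** (level + 1) + Bw / 2.
J = np.argmin(np.abs(freq_w - Fr))
fc = freq_w[J]                                             # centre frequency closest to Fr
i = int(np.round(fc / Bw - 1. / 2))                        # index of the band on that level
print(level, Bw, fc, i)                                    # 4.0 0.03125 0.171875 5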
def LinearRegression_KFold(Subjects_Data, Subjects_Score, Fold_Quantity, ResultantFolder): if not os.path.exists(ResultantFolder): os.mkdir(ResultantFolder) Subjects_Quantity = len(Subjects_Score) # Sort the subjects score Sorted_Index = np.argsort(Subjects_Score) Subjects_Data = Subjects_Data[Sorted_Index, :] Subjects_Score = Subjects_Score[Sorted_Index] EachFold_Size = np.int(np.fix(np.divide(Subjects_Quantity, Fold_Quantity))) MaxSize = EachFold_Size * Fold_Quantity EachFold_Max = np.ones(Fold_Quantity, np.int) * MaxSize tmp = np.arange(Fold_Quantity - 1, -1, -1) EachFold_Max = EachFold_Max - tmp Remain = np.mod(Subjects_Quantity, Fold_Quantity) for j in np.arange(Remain): EachFold_Max[j] = EachFold_Max[j] + Fold_Quantity Fold_Corr = [] Fold_MAE = [] Fold_Weight = [] for j in np.arange(Fold_Quantity): Fold_J_Index = np.arange(j, EachFold_Max[j], Fold_Quantity) Subjects_Data_test = Subjects_Data[Fold_J_Index, :] Subjects_Score_test = Subjects_Score[Fold_J_Index] Subjects_Data_train = np.delete(Subjects_Data, Fold_J_Index, axis=0) Subjects_Score_train = np.delete(Subjects_Score, Fold_J_Index) normalize = preprocessing.MinMaxScaler() Subjects_Data_train = normalize.fit_transform(Subjects_Data_train) Subjects_Data_test = normalize.transform(Subjects_Data_test) clf = linear_model.LinearRegression() clf.fit(Subjects_Data_train, Subjects_Score_train) Fold_J_Score = clf.predict(Subjects_Data_test) Fold_J_Corr = np.corrcoef(Fold_J_Score, Subjects_Score_test) Fold_J_Corr = Fold_J_Corr[0, 1] Fold_Corr.append(Fold_J_Corr) Fold_J_MAE = np.mean( np.abs(np.subtract(Fold_J_Score, Subjects_Score_test))) Fold_MAE.append(Fold_J_MAE) Fold_Weight.append(clf.coef_) Fold_J_result = { 'Index': Fold_J_Index, 'Test_Score': Subjects_Score_test, 'Predict_Score': Fold_J_Score, 'Weight': clf.coef_, 'Corr': Fold_J_Corr, 'MAE': Fold_J_MAE } Fold_J_FileName = 'Fold_' + str(j) + '_Score.mat' ResultantFile = os.path.join(ResultantFolder, Fold_J_FileName) sio.savemat(ResultantFile, Fold_J_result) Fold_Corr = [0 if np.isnan(x) else x for x in Fold_Corr] Mean_Corr = np.mean(Fold_Corr) Mean_MAE = np.mean(Fold_MAE) Weight_Sum = np.transpose([0] * len(clf.coef_)) Frequency = np.transpose([0] * len(clf.coef_)) for j in np.arange(Fold_Quantity): mask = np.transpose([int(tmp > 0) for tmp in Fold_Weight[j]]) Frequency = Frequency + mask Weight_Sum = Weight_Sum + Fold_Weight[j] Weight_Average = np.divide(Weight_Sum, Frequency) Weight_Average = np.nan_to_num(Weight_Average) Res_NFold = { 'Mean_Corr': Mean_Corr, 'Mean_MAE': Mean_MAE, 'Weight_Avg': Weight_Average, 'Frequency': Frequency } ResultantFile = os.path.join(ResultantFolder, 'Res_NFold.mat') sio.savemat(ResultantFile, Res_NFold) return
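# A tiny sketch of the fold assignment used in LinearRegression_KFold: after
# sorting by score, subjects are dealt out round-robin so every fold spans the
# whole score range (10 subjects and 3 folds here, purely for illustration).
import numpy as np

Subjects_Quantity, Fold_Quantity = 10, 3
EachFold_Size = int(np.fix(Subjects_Quantity / Fold_Quantity))
EachFold_Max = np.ones(Fold_Quantity, int) * EachFold_Size * Fold_Quantity
EachFold_Max = EachFold_Max - np.arange(Fold_Quantity - 1, -1, -1)
Remain = np.mod(Subjects_Quantity, Fold_Quantity)
for j in np.arange(Remain):
    EachFold_Max[j] = EachFold_Max[j] + Fold_Quantity
for j in np.arange(Fold_Quantity):
    print(j, np.arange(j, EachFold_Max[j], Fold_Quantity))
# fold 0 -> [0 3 6 9], fold 1 -> [1 4 7], fold 2 -> [2 5 8]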
def coldfilt(X, ha, hb): """Filter the columns of image X using the two filters ha and hb = reverse(ha). ha operates on the odd samples of X and hb on the even samples. Both filters should be even length, and h should be approx linear phase with a quarter sample advance from its mid pt (i.e. :math:`|h(m/2)| > |h(m/2 + 1)|`). .. code-block:: text ext top edge bottom edge ext Level 1: ! | ! | ! odd filt on . b b b b a a a a a a a a b b b b odd filt on . a a a a b b b b b b b b a a a a Level 2: ! | ! | ! +q filt on x b b a a a a b b -q filt on o a a b b b b a a The output is decimated by two from the input sample rate and the results from the two filters, Ya and Yb, are interleaved to give Y. Symmetric extension with repeated end samples is used on the composite X columns before each filter is applied. Raises ValueError if the number of rows in X is not a multiple of 4, the length of ha does not match hb or the lengths of ha or hb are non-even. .. codeauthor:: Rich Wareham <*****@*****.**>, August 2013 .. codeauthor:: Cian Shaffrey, Cambridge University, August 2000 .. codeauthor:: Nick Kingsbury, Cambridge University, August 2000 """ # Make sure all inputs are arrays X = asfarray(X) ha = asfarray(ha) hb = asfarray(hb) r, c = X.shape if r % 4 != 0: raise ValueError('No. of rows in X must be a multiple of 4') if ha.shape != hb.shape: raise ValueError('Shapes of ha and hb must be the same') if ha.shape[0] % 2 != 0: raise ValueError('Lengths of ha and hb must be even') m = ha.shape[0] m2 = np.fix(m * 0.5) # Set up vector for symmetric extension of X with repeated end samples. xe = reflect(np.arange(-m, r + m), -0.5, r - 0.5) # Select odd and even samples from ha and hb. Note that due to 0-indexing # 'odd' and 'even' are not perhaps what you might expect them to be. hao = as_column_vector(ha[0:m:2]) hae = as_column_vector(ha[1:m:2]) hbo = as_column_vector(hb[0:m:2]) hbe = as_column_vector(hb[1:m:2]) t = np.arange(5, r + 2 * m - 2, 4) r2 = r // 2 Y = np.zeros((r2, c), dtype=X.dtype) if np.sum(ha * hb) > 0: s1 = slice(0, r2, 2) s2 = slice(1, r2, 2) else: s2 = slice(0, r2, 2) s1 = slice(1, r2, 2) # Perform filtering on columns of extended matrix X(xe,:) in 4 ways. Y[s1, :] = _column_convolve(X[xe[t - 1], :], hao) + _column_convolve( X[xe[t - 3], :], hae) Y[s2, :] = _column_convolve(X[xe[t], :], hbo) + _column_convolve( X[xe[t - 2], :], hbe) return Y
def detect_face(img, minsize, PNet, RNet, ONet, threshold, fastresize, factor): img2 = img.copy() factor_count = 0 total_boxes = np.zeros((0, 9), np.float) points = [] h = img.shape[0] w = img.shape[1] minl = min(h, w) img = img.astype(float) m = 12.0 / minsize minl = minl * m #total_boxes = np.load('total_boxes.npy') #total_boxes = np.load('total_boxes_242.npy') #total_boxes = np.load('total_boxes_101.npy') # create scale pyramid scales = [] while minl >= 12: scales.append(m * pow(factor, factor_count)) minl *= factor factor_count += 1 # first stage for scale in scales: hs = int(np.ceil(h * scale)) ws = int(np.ceil(w * scale)) if fastresize: im_data = (img - 127.5) * 0.0078125 # [0,255] -> [-1,1] im_data = cv2.resize(im_data, (ws, hs)) # default is bilinear else: im_data = cv2.resize(img, (ws, hs)) # default is bilinear im_data = (im_data - 127.5) * 0.0078125 # [0,255] -> [-1,1] #im_data = imResample(img, hs, ws); print "scale:", scale im_data = np.swapaxes(im_data, 0, 2) im_data = np.array([im_data], dtype=np.float) PNet.blobs['data'].reshape(1, 3, ws, hs) PNet.blobs['data'].data[...] = im_data out = PNet.forward() boxes = generateBoundingBox(out['prob1'][0, 1, :, :], out['conv4-2'][0], scale, threshold[0]) if boxes.shape[0] != 0: #print boxes[4:9] #print 'im_data', im_data[0:5, 0:5, 0], '\n' #print 'prob1', out['prob1'][0,0,0:3,0:3] pick = nms(boxes, 0.5, 'Union') if len(pick) > 0: boxes = boxes[pick, :] if boxes.shape[0] != 0: total_boxes = np.concatenate((total_boxes, boxes), axis=0) #np.save('total_boxes_101.npy', total_boxes) ##### # 1 # ##### print("Pnet boxes:", total_boxes.shape[0]) #print total_boxes #return total_boxes, [] numbox = total_boxes.shape[0] if numbox > 0: # nms pick = nms(total_boxes, 0.7, 'Union') total_boxes = total_boxes[pick, :] #print("[2]:",total_boxes.shape[0]) # revise and convert to square regh = total_boxes[:, 3] - total_boxes[:, 1] regw = total_boxes[:, 2] - total_boxes[:, 0] t1 = total_boxes[:, 0] + total_boxes[:, 5] * regw t2 = total_boxes[:, 1] + total_boxes[:, 6] * regh t3 = total_boxes[:, 2] + total_boxes[:, 7] * regw t4 = total_boxes[:, 3] + total_boxes[:, 8] * regh t5 = total_boxes[:, 4] total_boxes = np.array([t1, t2, t3, t4, t5]).T #print "[3]:",total_boxes.shape[0] #print regh #print regw #print 't1',t1 #print total_boxes total_boxes = rerec(total_boxes) # convert box to square #print("[4]:",total_boxes.shape[0]) total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]) #print("[4.5]:",total_boxes.shape[0]) #print total_boxes [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h) #print total_boxes.shape #print total_boxes numbox = total_boxes.shape[0] if numbox > 0: # second stage #print 'tmph', tmph #print 'tmpw', tmpw #print "y,ey,x,ex", y, ey, x, ex, #print "edy", edy #tempimg = np.load('tempimg.npy') # construct input for RNet tempimg = np.zeros((numbox, 24, 24, 3)) # (24, 24, 3, numbox) for k in range(numbox): tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) #print "dx[k], edx[k]:", dx[k], edx[k] #print "dy[k], edy[k]:", dy[k], edy[k] #print "img.shape", img[y[k]:ey[k]+1, x[k]:ex[k]+1].shape #print "tmp.shape", tmp[dy[k]:edy[k]+1, dx[k]:edx[k]+1].shape tmp[int(dy[k]):int(edy[k] + 1), int(dx[k]):int(edx[k] + 1)] = img[int(y[k]):int(ey[k] + 1), int(x[k]):int(ex[k] + 1)] #print "y,ey,x,ex", y[k], ey[k], x[k], ex[k] #print "tmp", tmp.shape tempimg[k, :, :, :] = cv2.resize(tmp, (24, 24)) #tempimg[k,:,:,:] = imResample(tmp, 24, 24) #print 'tempimg', tempimg[k,:,:,:].shape #print tempimg[k,0:5,0:5,0] #print tempimg[k,0:5,0:5,1] #print 
tempimg[k,0:5,0:5,2] #print k #print tempimg.shape #print tempimg[0,0,0,:] tempimg = ( tempimg - 127.5) * 0.0078125 # done in imResample function wrapped by python #np.save('tempimg.npy', tempimg) # RNet tempimg = np.swapaxes(tempimg, 1, 3) #print tempimg[0,:,0,0] RNet.blobs['data'].reshape(numbox, 3, 24, 24) RNet.blobs['data'].data[...] = tempimg out = RNet.forward() #print out['conv5-2'].shape #print out['prob1'].shape score = out['prob1'][:, 1] #print 'score', score pass_t = np.where(score > threshold[1])[0] #print 'pass_t', pass_t score = np.array([score[pass_t]]).T total_boxes = np.concatenate((total_boxes[pass_t, 0:4], score), axis=1) #print("[5]:",total_boxes.shape[0]) #print total_boxes #print "1.5:",total_boxes.shape mv = out['conv5-2'][pass_t, :].T #print "mv", mv if total_boxes.shape[0] > 0: pick = nms(total_boxes, 0.7, 'Union') #print 'pick', pick if len(pick) > 0: total_boxes = total_boxes[pick, :] #print("[6]:",total_boxes.shape[0]) total_boxes = bbreg(total_boxes, mv[:, pick]) #print("[7]:",total_boxes.shape[0]) total_boxes = rerec(total_boxes) #print("[8]:",total_boxes.shape[0]) ##### # 2 # ##### #print("2:",total_boxes.shape) numbox = total_boxes.shape[0] if numbox > 0: # third stage total_boxes = np.fix(total_boxes) [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h) #print 'tmpw', tmpw #print 'tmph', tmph #print 'y ', y #print 'ey', ey #print 'x ', x #print 'ex', ex tempimg = np.zeros((numbox, 48, 48, 3)) for k in range(numbox): tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) tmp[int(dy[k]):int(edy[k] + 1), int(dx[k]):int(edx[k] + 1)] = img[int(y[k]):int(ey[k] + 1), int(x[k]):int(ex[k] + 1)] tempimg[k, :, :, :] = cv2.resize(tmp, (48, 48)) tempimg = (tempimg - 127.5) * 0.0078125 # [0,255] -> [-1,1] # ONet tempimg = np.swapaxes(tempimg, 1, 3) ONet.blobs['data'].reshape(numbox, 3, 48, 48) ONet.blobs['data'].data[...] = tempimg out = ONet.forward() score = out['prob1'][:, 1] points = out['conv6-3'] pass_t = np.where(score > threshold[2])[0] points = points[pass_t, :] score = np.array([score[pass_t]]).T total_boxes = np.concatenate((total_boxes[pass_t, 0:4], score), axis=1) #print("[9]:",total_boxes.shape[0]) mv = out['conv6-2'][pass_t, :].T w = total_boxes[:, 3] - total_boxes[:, 1] + 1 h = total_boxes[:, 2] - total_boxes[:, 0] + 1 points[:, 0:5] = np.tile(w, (5, 1)).T * points[:, 0:5] + np.tile( total_boxes[:, 0], (5, 1)).T - 1 points[:, 5:10] = np.tile(h, (5, 1)).T * points[:, 5:10] + np.tile( total_boxes[:, 1], (5, 1)).T - 1 if total_boxes.shape[0] > 0: total_boxes = bbreg(total_boxes, mv[:, :]) #print("[10]:",total_boxes.shape[0]) pick = nms(total_boxes, 0.7, 'Min') #print pick if len(pick) > 0: total_boxes = total_boxes[pick, :] #print("[11]:",total_boxes.shape[0]) points = points[pick, :] ##### # 3 # ##### #print("3:",total_boxes.shape) return total_boxes, points
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor): # im: input image # minsize: minimum of faces' size # pnet, rnet, onet: caffemodel # threshold: threshold=[th1 th2 th3], th1-3 are three steps's threshold # fastresize: resize img from last scale (using in high-resolution images) if fastresize==true factor_count = 0 total_boxes = np.empty((0, 9)) points = np.empty(0) h = img.shape[0] w = img.shape[1] minl = np.amin([h, w]) m = 12.0 / minsize minl = minl * m #print('... factor_count ',factor_count,' | total_boxes ',total_boxes,' | points ',points,' \n... h ',h,' w ',w,' | minl ',minl,' m ',m) # creat scale pyramid scales = [] while minl >= 12: scales += [m * np.power(factor, factor_count)] minl = minl * factor factor_count += 1 #print('minl ',minl,' | factor_count ',factor_count) # print('scales : ',scales, ' | factor_count : ',factor_count) # first stage for j in range(len(scales)): scale = scales[j] hs = int(np.ceil(h * scale)) ws = int(np.ceil(w * scale)) im_data = imresample(img, (hs, ws)) im_data = (im_data - 127.5) * 0.0078125 img_x = np.expand_dims(im_data, 0) img_y = np.transpose(img_x, (0, 2, 1, 3)) out = pnet(img_y) out0 = np.transpose(out[0], (0, 2, 1, 3)) out1 = np.transpose(out[1], (0, 2, 1, 3)) boxes, _ = generateBoundingBox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0]) #print('scale ',scale, ' | out0 ',out1,' | out1',out1,' | boxes ',boxes) #print('scale ',"%1.6f" %scale,' | boxes ',len(boxes),end='') # inter-scale nms pick = nms(boxes.copy(), 0.5, 'Union') #print(pick) if boxes.size > 0 and pick.size > 0: boxes = boxes[pick, :] total_boxes = np.append(total_boxes, boxes, axis=0) #print(' | boxes after nms : ',len(boxes), end='') # print() numbox = total_boxes.shape[0] # print('after pnet numbox ',numbox) if numbox > 0: pick = nms(total_boxes.copy(), 0.7, 'Union') total_boxes = total_boxes[pick, :] regw = total_boxes[:, 2] - total_boxes[:, 0] regh = total_boxes[:, 3] - total_boxes[:, 1] qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh total_boxes = np.transpose( np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) total_boxes = rerec(total_boxes.copy()) total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad( total_boxes.copy(), w, h) if numbox > 0: # second stage tempimg = np.zeros((24, 24, 3, numbox)) for k in range(0, numbox): tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[ 0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = imresample(tmp, (24, 24)) else: return np.empty() tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = rnet(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) score = out1[1, :] ipass = np.where(score > threshold[1]) total_boxes = np.hstack([ total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1) ]) mv = out0[:, ipass[0]] if total_boxes.shape[0] > 0: pick = nms(total_boxes, 0.7, 'Union') total_boxes = total_boxes[pick, :] total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) total_boxes = rerec(total_boxes.copy()) numbox = total_boxes.shape[0] # print('after rnet numbox ',numbox) if numbox > 0: # third stage total_boxes = np.fix(total_boxes).astype(np.int32) dy, edy, dx, edx, y, 
ey, x, ex, tmpw, tmph = pad( total_boxes.copy(), w, h) tempimg = np.zeros((48, 48, 3, numbox)) for k in range(0, numbox): tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[ 0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = imresample(tmp, (48, 48)) else: return np.empty() tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = onet(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) out2 = np.transpose(out[2]) score = out2[1, :] points = out1 ipass = np.where(score > threshold[2]) points = points[:, ipass[0]] total_boxes = np.hstack([ total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1) ]) mv = out0[:, ipass[0]] w = total_boxes[:, 2] - total_boxes[:, 0] + 1 h = total_boxes[:, 3] - total_boxes[:, 1] + 1 points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile( total_boxes[:, 0], (5, 1)) - 1 points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile( total_boxes[:, 1], (5, 1)) - 1 if total_boxes.shape[0] > 0: total_boxes = bbreg(total_boxes.copy(), np.transpose(mv)) pick = nms(total_boxes.copy(), 0.7, 'Min') total_boxes = total_boxes[pick, :] points = points[:, pick] numbox = total_boxes.shape[0] # print('after pnet numbox ',numbox) return total_boxes, points
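# A numpy-only sketch of the stage-1 box refinement done above: the PNet
# regression offsets stored in columns 5..8 are applied relative to the box
# size, the box is made square (a stand-in for what rerec presumably does) and
# then snapped to integer pixels with np.fix. The box values are invented.
import numpy as np

total_boxes = np.array([[10., 20., 50., 100., 0.9, 0.05, -0.02, 0.01, 0.03]])
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
# assumed rerec-like squaring: grow the shorter side about the box centre
side = np.maximum(boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1])
boxes[:, 0] += (boxes[:, 2] - boxes[:, 0]) * 0.5 - side * 0.5
boxes[:, 1] += (boxes[:, 3] - boxes[:, 1]) * 0.5 - side * 0.5
boxes[:, 2:4] = boxes[:, 0:2] + np.tile(side, (2, 1)).T
boxes[:, 0:4] = np.fix(boxes[:, 0:4])
print(boxes)      # [[-10.  18.  73. 102.   0.9]]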
def _downsample( image: np.ndarray, ds_matrix: np.ndarray, ) -> np.ndarray: """Downsamples an image based on a given downsampling matrix. **This function should seldom ever be used. Instead, one should often use :func:`resample`. The :func:`resample` function can do everything that this function can do and more. This function is faster than the :func:`resample` function for downsampling without interpolation.** Downsamples an image by the given downsampling matrix argument, ds_matrix. For an input domain vector **mn** = [**m**, **n**] and an input image **f** (**mn**), the output image **g** (**.**) is defined by **g** (**mn**) = **f** (**M** @ **mn**) where **M** is the downsampling matrix ds_matrix. :type image: ``numpy.ndarray`` :param image: An image to be downsampled. :type ds_matrix: ``numpy.ndarray`` (dtype must be integer) :param ds_matrix: The downsampling matrix **M** to be used. :rtype: ``numpy.ndarray`` :return: The downsampled image Examples: >>> image = np.array([[ 0, 1, 2, 3, 4, 5, 6, 7], ... [10, 11, 12, 13, 14, 15, 16, 17], ... [20, 21, 22, 23, 24, 25, 26, 27], ... [30, 31, 32, 33, 34, 35, 36, 37], ... [40, 41, 42, 43, 44, 45, 46, 47], ... [50, 51, 52, 53, 54, 55, 56, 57], ... [60, 61, 62, 63, 64, 65, 66, 67], ... [70, 71, 72, 73, 74, 75, 76, 77]]) >>> M = np.array([[2, 0], [0, 2]]) >>> downsampled_image = downsample(image, M) >>> downsampled_image array([[ 0., 2., 4., 6.], [20., 22., 24., 26.], [40., 42., 44., 46.], [60., 62., 64., 66.]]) >>> M2 = np.array([[1, 2], [2, 1]]) >>> downsample(downsampled_image, M2) array([[ 0., 0., 0., 60.], [ 0., 0., 42., 0.], [ 0., 24., 66., 0.], [ 6., 0., 0., 0.]]) """ assert (2, 2) == ds_matrix.shape, "Argument 'ds_matrix' must be an " \ "ndarray with shape (2, 2)" assert np.issubdtype(ds_matrix.dtype, np.integer), "Argument " \ "'ds_matrix' must be an ndarray with an integer dtype" ds_matrix_det = ds_matrix[0, 0] * ds_matrix[1, 1] \ - ds_matrix[0, 1] * ds_matrix[1, 0] assert 0 != ds_matrix_det, "Argument 'ds_matrix' must be nonsingular" height = image.shape[0] width = image.shape[1] ds_matrix_inv_scaled = np.array([[ds_matrix[1, 1], -ds_matrix[0, 1]], [-ds_matrix[1, 0], ds_matrix[0, 0]]]) kl_extrema = np.array(np.meshgrid([0, height - 1], [0, width - 1])).reshape(2, -1) mn_extrema = (1 / ds_matrix_det) * (ds_matrix_inv_scaled @ kl_extrema) m_min = np.min(mn_extrema[0]) m_max = np.max(mn_extrema[0]) n_min = np.min(mn_extrema[1]) n_max = np.max(mn_extrema[1]) kl = np.array(np.meshgrid(np.arange(0, height), np.arange(0, width))).reshape(2, -1) mn = ds_matrix_inv_scaled @ kl mn_lattice_indices = np.all(0 == mn % ds_matrix_det, axis=0) mn = ((1 / ds_matrix_det) * mn[:, mn_lattice_indices]).astype(int) kl = kl[:, mn_lattice_indices] ds_image = np.zeros((np.floor(m_max - m_min + 1).astype(int), np.floor(n_max - n_min + 1).astype(int))) mn_offset = np.fix(np.array([[m_min], [n_min]])).astype(int) ds_image[tuple(mn - mn_offset)] = image[tuple(kl)] return ds_image
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor): # return : bounding boxes and landmarks # img: input image # minsize: minimum of faces' size # pnet, rnet, onet: caffemodel # threshold: threshold=[th1 th2 th3], th1-3 are three stages' thresholds # fastresize: resize img from last scale (use for high-resolution images) if fastresize==true factor_count = 0 total_boxes = np.empty((0, 9)) points = [] h = img.shape[0] w = img.shape[1] minl = np.amin([h, w]) m = 12.0 / minsize minl = minl * m # create scale pyramid scales = [] while minl >= 12: scales += [m * np.power(factor, factor_count)] minl = minl * factor factor_count += 1 # first stage for j in range(len(scales)): scale = scales[j] hs = int(np.ceil(h * scale)) ws = int(np.ceil(w * scale)) im_data = imresample(img, (hs, ws)) im_data = (im_data - 127.5) * 0.0078125 # normalize #print("im_data.shape = ", im_data.shape) img_x = np.expand_dims(im_data, 0) #print("img_x.shape = ", img_x.shape) img_y = np.transpose(img_x, (0, 2, 1, 3)) #print("img_y.shape = ", img_y.shape) out = pnet(img_y) #print("out[0].shape = ", out[0].shape) #print("out[1].shape = ", out[1].shape) out0 = np.transpose(out[0], (0, 2, 1, 3)) #out[0].shape = (1, 40, 24, 4) out1 = np.transpose(out[1], (0, 2, 1, 3)) #out[1].shape = (1, 40, 24, 2) # def generateBoundingBox(imap, reg, scale, t): boxes, _ = generateBoundingBox(out1[0,:,:,1].copy(), out0[0,:,:,:].copy(), scale, \ threshold[0]) # inter-scale nms pick = nms(boxes.copy(), 0.5, 'Union') if boxes.size > 0 and pick.size > 0: boxes = boxes[pick, :] total_boxes = np.append(total_boxes, boxes, axis=0) numbox = total_boxes.shape[0] if numbox > 0: pick = nms(total_boxes.copy(), 0.7, 'Union') total_boxes = total_boxes[pick, :] regw = total_boxes[:, 2] - total_boxes[:, 0] regh = total_boxes[:, 3] - total_boxes[:, 1] qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh total_boxes = np.transpose( np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) total_boxes = rerec(total_boxes.copy()) total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) # total_boxes are now regression-corrected and square dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad( total_boxes.copy(), w, h) # (y, x), (ey, ex): top-left and bottom-right coordinates of the box in the original image # (dy, dx), (edy, edx): from (1, 1) to (side length, side length) # tmpw == tmph is the side length of the box in the original image numbox = total_boxes.shape[0] if numbox > 0: # second stage tempimg = np.zeros((24, 24, 3, numbox)) for k in range(0, numbox): tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) # copy img to temp pixel-wise tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[ 0] == 0 and tmp.shape[1] == 0: #print("tmp.shape = ", tmp.shape) tempimg[:, :, :, k] = imresample(tmp, (24, 24)) #print("tempimg.shape : ",tempimg.shape) else: return np.empty() tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = rnet(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) score = out1[1, :] ipass = np.where(score > threshold[1]) total_boxes = np.hstack([ total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1) ]) mv = out0[:, ipass[0]] if total_boxes.shape[0] > 0: pick = nms(total_boxes, 0.7, 'Union') total_boxes = total_boxes[pick, :] total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) total_boxes = rerec(total_boxes.copy()) numbox = total_boxes.shape[0] if numbox > 0: # third stage
total_boxes = np.fix(total_boxes).astype(np.int32) dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad( total_boxes.copy(), w, h) tempimg = np.zeros((48, 48, 3, numbox)) for k in range(0, numbox): tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[ 0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = imresample(tmp, (48, 48)) else: return np.empty() tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = onet(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) out2 = np.transpose(out[2]) # print("out.shape : ") # print(out0.shape)#(4, 4) # print(out1.shape)#(10, 4) # print(out2.shape)#(2, 4) score = out2[1, :] #(1, 4) # print(score) points = out1 ipass = np.where(score > threshold[2]) points = points[:, ipass[0]] # select confident points (landmarks) #print("total_boxes.shape : ",total_boxes.shape) total_boxes = np.hstack([ total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1) ]) #print("total_boxes.shape : ",total_boxes.shape) # now total_boxes.shape is (4, 5), absolute positions mv = out0[:, ipass[0]] # relative offsets w = total_boxes[:, 2] - total_boxes[:, 0] + 1 h = total_boxes[:, 3] - total_boxes[:, 1] + 1 # print("w.shape = ", w.shape) # print(total_boxes[:,2]) # print(w) # print("np.tile(w,(5, 1)) = ", np.tile(w,(5, 1)).shape) points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile( total_boxes[:, 0], (5, 1)) - 1 points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile( total_boxes[:, 1], (5, 1)) - 1 if total_boxes.shape[0] > 0: total_boxes = bbreg(total_boxes.copy(), np.transpose(mv)) pick = nms(total_boxes.copy(), 0.7, 'Min') total_boxes = total_boxes[pick, :] points = points[:, pick] return total_boxes, points
def generateBoundingBox(imap, reg, scale, t): """Use heatmap to generate bounding boxes""" stride = 2 cellsize = 12 # get the x1, y1, x2, y2 coordinates print("entering generate") #print(imap.shape) imap = np.transpose(imap) print(imap.shape) #print(type(imap)) dx1 = np.transpose(reg[:, :, 0]) dy1 = np.transpose(reg[:, :, 1]) dx2 = np.transpose(reg[:, :, 2]) dy2 = np.transpose(reg[:, :, 3]) print("entering reg") #print(reg[:, :, 0].shape) print(dx1) print(dy1) print(dx2) print(dy2) # get coordinates of face boxes whose confidence exceeds the threshold print(imap) y, x = np.where(imap >= t) print(y) print(x) #print(type(y)) #print(y.shape) #print(y.shape[0]) # case where only one cell passes if y.shape[0] == 1: #print("entering if branch") dx1 = np.flipud(dx1) # flip the matrix dy1 = np.flipud(dy1) dx2 = np.flipud(dx2) dy2 = np.flipud(dy2) # keep only the boxes that pass the threshold print("_____________") # a= imap[(y,x)] # print(a) score = imap[(y, x)] print(score) print("_____________") #print(dx1[(y, x)].shape) print([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]) print((np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])).shape) print("_____________") reg = np.transpose( np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])) print(reg.shape) if reg.size == 0: #print("entering if") reg = np.empty((0, 3)) # map back to the original image scale print("_____________") #print(np.vstack([y,x])) bb = np.transpose(np.vstack([y, x])) print(bb) print('entering the calculation part') #print(stride * bb) print(scale) # #print((stride * bb + 1)) #print((stride * bb + 1) / scale) q1 = np.fix((stride * bb + 1) / scale) q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale) print(q1) print(q2) # shape(None, 9) #print(np.expand_dims(score, 0)) #print(np.expand_dims(score, 1)) boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg]) #print(boundingbox) return boundingbox, reg
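# A compact sketch of the coordinate mapping in generateBoundingBox: heatmap
# cells whose score exceeds t are mapped back to image coordinates via the
# stride and the 12-pixel PNet receptive field; imap and reg are random
# stand-ins, not real network outputs.
import numpy as np

stride, cellsize, scale, t = 2, 12, 0.6, 0.8
rng = np.random.RandomState(1)
imap = rng.rand(5, 7)                                   # fake score map (already transposed)
reg = rng.rand(5, 7, 4) * 0.1                           # fake per-cell box regression
y, x = np.where(imap >= t)
score = imap[(y, x)]
offsets = reg[y, x, :]                                  # the four regression values per hit
bb = np.transpose(np.vstack([y, x]))
q1 = np.fix((stride * bb + 1) / scale)                  # top-left corner in the scaled image
q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)   # bottom-right corner
boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), offsets])
print(boundingbox.shape)                                # (number of hits, 9)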
Output basename = %s\n\ Start time = %s\n\ End time = %s\n' % (options.data_file, options.output_file, options.starttime, options.endtime)) # read waveform between time limits wf = Waveform() wf.read_from_file(options.data_file, starttime=tdeb, endtime=tfin) dt = wf.delta x = wf.values print(wf.stream) # set up parameters for kurtogram analysis N = len(x) N2 = np.log2(N) - 7 nlevel = int(np.fix(N2)) c, flower, fupper = Fast_Kurtogram(x, nlevel, options.verbose, Fs=1 / dt, opt2=1) logging.info("Frequency band for best kurtogram : %.2f Hz - %.2f Hz" % (flower, fupper)) filt_name = "filt_%s" % options.output_file kurt_name = "kurt_%s" % options.output_file wf.bp_filter(flower, fupper, rmean=True, taper=True) wf.write_to_file_filled(filt_name, format='MSEED') wf.process_kurtosis(100 * dt, recursive=True, post_taper=True) wf.write_to_file_filled(kurt_name, format='MSEED')
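# Quick illustration (made-up trace lengths) of the nlevel choice above: the
# number of kurtogram decomposition levels grows with log2 of the trace length.
import numpy as np
for N in (2 ** 12, 2 ** 16, 100000):
    print(N, int(np.fix(np.log2(N) - 7)))               # 4096 -> 5, 65536 -> 9, 100000 -> 9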
def ptclean(vis, imageprefix, ncpu, twidth, doreg, ephemfile, msinfofile, outlierfile, field, spw, selectdata, timerange, uvrange, antenna, scan, observation, intent, mode, resmooth,gridmode, wprojplanes, facets, cfcache, rotpainc, painc, aterm, psterm, mterm, wbawp, conjbeams, epjtable, interpolation, niter, gain, threshold, psfmode, imagermode, ftmachine, mosweight, scaletype, multiscale, negcomponent, smallscalebias, interactive, mask, nchan, start, width, outframe, veltype, imsize, cell, phasecenter, restfreq, stokes, weighting, robust, uvtaper, outertaper, innertaper, modelimage, restoringbeam, pbcor, minpb, usescratch, noise, npixels, npercycle, cyclefactor, cyclespeedup, nterms, reffreq, chaniter, flatnoise, allowchunk): if not (type(ncpu) is int): casalog.post('ncpu should be an integer') ncpu = 8 if doreg: # check if ephemfile and msinfofile exist try: ephem=vla_prep.read_horizons(ephemfile = ephemfile) except ValueError: print("error in reading ephemeris file") if not os.path.isfile(msinfofile): print("msinfofile does not exist!") else: ephem=None # get number of time pixels ms.open(vis) ms.selectinit() timfreq = ms.getdata(['time', 'axis_info'], ifraxis = True) tim = timfreq['time'] dt = tim[1]-tim[0] #need to change to median of all time intervals freq = timfreq['axis_info']['freq_axis']['chan_freq'].flatten() ms.close() if twidth < 1 or twidth > len(tim): casalog.post('twidth not between 1 and # of time pixels in the dataset. Change to 1') twidth = 1 # find out the start and end time index according to the parameter timerange # if not defined (empty string), use start and end from the entire time of the ms if not timerange: btidx = 0 etidx = len(tim) else: try: (tstart, tend)=timerange.split('~') bt_s = qa.convert(qa.quantity(tstart,'s'),'s')['value'] et_s = qa.convert(qa.quantity(tend,'s'),'s')['value'] # only time is given but not date, add the date (at 0 UT) from the first record if bt_s < 86400. or et_s < 86400.: bt_s += np.fix(qa.convert(qa.quantity(tim[0],'s'),'d')['value'])*86400. et_s += np.fix(qa.convert(qa.quantity(tim[0],'s'),'d')['value'])*86400. btidx = np.argmin(np.abs(tim-bt_s)) etidx = np.argmin(np.abs(tim-et_s)) # make the indice back to those bracket by the timerange if tim[btidx] < bt_s: btidx += 1 if tim[etidx] > et_s: etidx -= 1 if etidx <= btidx: print "ending time must be greater than starting time" print "reinitiating to the entire time range" btidx = 0 etidx = len(tim) except ValueError: print "keyword 'timerange' has a wrong format" btstr=qa.time(qa.quantity(tim[btidx],'s'),prec=9)[0] etstr=qa.time(qa.quantity(tim[etidx],'s'),prec=9)[0] iterable = range(btidx, etidx+1, twidth) print 'First time pixel: '+btstr print 'Last time pixel: '+etstr print str(len(iterable))+' images to clean...' 
# partition clnpart = partial(clean_iter, tim, freq, vis, imageprefix, ncpu, twidth, doreg, ephemfile, ephem, msinfofile, outlierfile, field, spw, selectdata, uvrange, antenna, scan, observation, intent, mode, resmooth,gridmode, wprojplanes, facets, cfcache, rotpainc, painc, aterm, psterm, mterm, wbawp, conjbeams, epjtable, interpolation, niter, gain, threshold, psfmode, imagermode, ftmachine, mosweight, scaletype, multiscale, negcomponent, smallscalebias, interactive, mask, nchan, start, width, outframe, veltype, imsize, cell, phasecenter, restfreq, stokes, weighting, robust, uvtaper, outertaper, innertaper, modelimage, restoringbeam, pbcor, minpb, usescratch, noise, npixels, npercycle, cyclefactor, cyclespeedup, nterms, reffreq, chaniter, flatnoise, allowchunk) timelapse = 0 casalog.post('Perform clean in parallel ...') t0 = time() pool = mp.Pool(ncpu) dummy = pool.map_async(clnpart, iterable) pool.close() pool.join() t1 = time() timelapse = t1 - t0 print 'It took %f secs to complete' % timelapse
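# A numpy-only sketch of the timerange handling above: when the user gives a
# time of day only (< 86400 s), the integer day of the first sample is added
# back with np.fix before searching for the nearest time index. All numbers
# here are invented.
import numpy as np

day0 = 56712.0                                          # hypothetical integer day number
tim = day0 * 86400. + 66000. + 60. * np.arange(10)      # samples starting at 18:20:00, 1-min cadence
bt_s = 18 * 3600. + 25 * 60.                            # user supplied "18:25:00" with no date
if bt_s < 86400.:
    bt_s += np.fix(tim[0] / 86400.) * 86400.            # attach the date of the first record
btidx = np.argmin(np.abs(tim - bt_s))
print(btidx)                                            # -> 5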
def bulk_detect_face(images, detection_window_size_ratio, pnet, rnet, onet, threshold, factor): """Detects faces in a list of images images: list containing input images detection_window_size_ratio: ratio of minimum face size to smallest image dimension pnet, rnet, onet: caffemodel threshold: threshold=[th1 th2 th3], th1-3 are three steps's threshold [0-1] factor: the factor used to create a scaling pyramid of face sizes to detect in the image. """ all_scales = [None] * len(images) images_with_boxes = [None] * len(images) for i in range(len(images)): images_with_boxes[i] = {'total_boxes': np.empty((0, 9))} # create scale pyramid for index, img in enumerate(images): all_scales[index] = [] h = img.shape[0] w = img.shape[1] minsize = int(detection_window_size_ratio * np.minimum(w, h)) factor_count = 0 minl = np.amin([h, w]) if minsize <= 12: minsize = 12 m = 12.0 / minsize minl = minl * m while minl >= 12: all_scales[index].append(m * np.power(factor, factor_count)) minl = minl * factor factor_count += 1 # # # # # # # # # # # # # # first stage - fast proposal network (pnet) to obtain face candidates # # # # # # # # # # # # # images_obj_per_resolution = {} # TODO: use some type of rounding to number module 8 to increase probability that pyramid images will have the same resolution across input images for index, scales in enumerate(all_scales): h = images[index].shape[0] w = images[index].shape[1] for scale in scales: hs = int(np.ceil(h * scale)) ws = int(np.ceil(w * scale)) if (ws, hs) not in images_obj_per_resolution: images_obj_per_resolution[(ws, hs)] = [] im_data = imresample(images[index], (hs, ws)) im_data = (im_data - 127.5) * 0.0078125 img_y = np.transpose( im_data, (1, 0, 2)) # caffe uses different dimensions ordering images_obj_per_resolution[(ws, hs)].append({ 'scale': scale, 'image': img_y, 'index': index }) for resolution in images_obj_per_resolution: images_per_resolution = [ i['image'] for i in images_obj_per_resolution[resolution] ] outs = pnet(images_per_resolution) for index in range(len(outs[0])): scale = images_obj_per_resolution[resolution][index]['scale'] image_index = images_obj_per_resolution[resolution][index]['index'] out0 = np.transpose(outs[0][index], (1, 0, 2)) out1 = np.transpose(outs[1][index], (1, 0, 2)) boxes, _ = generateBoundingBox(out1[:, :, 1].copy(), out0[:, :, :].copy(), scale, threshold[0]) # inter-scale nms pick = nms(boxes.copy(), 0.5, 'Union') if boxes.size > 0 and pick.size > 0: boxes = boxes[pick, :] images_with_boxes[image_index]['total_boxes'] = np.append( images_with_boxes[image_index]['total_boxes'], boxes, axis=0) for index, image_obj in enumerate(images_with_boxes): numbox = image_obj['total_boxes'].shape[0] if numbox > 0: h = images[index].shape[0] w = images[index].shape[1] pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Union') image_obj['total_boxes'] = image_obj['total_boxes'][pick, :] regw = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0] regh = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1] qq1 = image_obj[ 'total_boxes'][:, 0] + image_obj['total_boxes'][:, 5] * regw qq2 = image_obj[ 'total_boxes'][:, 1] + image_obj['total_boxes'][:, 6] * regh qq3 = image_obj[ 'total_boxes'][:, 2] + image_obj['total_boxes'][:, 7] * regw qq4 = image_obj[ 'total_boxes'][:, 3] + image_obj['total_boxes'][:, 8] * regh image_obj['total_boxes'] = np.transpose( np.vstack([qq1, qq2, qq3, qq4, image_obj['total_boxes'][:, 4]])) image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy()) image_obj['total_boxes'][:, 0:4] = 
np.fix( image_obj['total_boxes'][:, 0:4]).astype(np.int32) dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad( image_obj['total_boxes'].copy(), w, h) numbox = image_obj['total_boxes'].shape[0] tempimg = np.zeros((24, 24, 3, numbox)) if numbox > 0: for k in range(0, numbox): tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[ 0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = imresample(tmp, (24, 24)) else: return np.empty() tempimg = (tempimg - 127.5) * 0.0078125 image_obj['rnet_input'] = np.transpose(tempimg, (3, 1, 0, 2)) # # # # # # # # # # # # # # second stage - refinement of face candidates with rnet # # # # # # # # # # # # # bulk_rnet_input = np.empty((0, 24, 24, 3)) for index, image_obj in enumerate(images_with_boxes): if 'rnet_input' in image_obj: bulk_rnet_input = np.append(bulk_rnet_input, image_obj['rnet_input'], axis=0) out = rnet(bulk_rnet_input) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) score = out1[1, :] i = 0 for index, image_obj in enumerate(images_with_boxes): if 'rnet_input' not in image_obj: continue rnet_input_count = image_obj['rnet_input'].shape[0] score_per_image = score[i:i + rnet_input_count] out0_per_image = out0[:, i:i + rnet_input_count] ipass = np.where(score_per_image > threshold[1]) image_obj['total_boxes'] = np.hstack([ image_obj['total_boxes'][ipass[0], 0:4].copy(), np.expand_dims(score_per_image[ipass].copy(), 1) ]) mv = out0_per_image[:, ipass[0]] if image_obj['total_boxes'].shape[0] > 0: h = images[index].shape[0] w = images[index].shape[1] pick = nms(image_obj['total_boxes'], 0.7, 'Union') image_obj['total_boxes'] = image_obj['total_boxes'][pick, :] image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv[:, pick])) image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy()) numbox = image_obj['total_boxes'].shape[0] if numbox > 0: tempimg = np.zeros((48, 48, 3, numbox)) image_obj['total_boxes'] = np.fix( image_obj['total_boxes']).astype(np.int32) dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad( image_obj['total_boxes'].copy(), w, h) for k in range(0, numbox): tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = images[index][y[k] - 1:ey[k], x[k] - 1:ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[ 0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = imresample(tmp, (48, 48)) else: return np.empty() tempimg = (tempimg - 127.5) * 0.0078125 image_obj['onet_input'] = np.transpose(tempimg, (3, 1, 0, 2)) i += rnet_input_count # # # # # # # # # # # # # # third stage - further refinement and facial landmarks positions with onet # # # # # # # # # # # # # bulk_onet_input = np.empty((0, 48, 48, 3)) for index, image_obj in enumerate(images_with_boxes): if 'onet_input' in image_obj: bulk_onet_input = np.append(bulk_onet_input, image_obj['onet_input'], axis=0) out = onet(bulk_onet_input) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) out2 = np.transpose(out[2]) score = out2[1, :] points = out1 i = 0 ret = [] for index, image_obj in enumerate(images_with_boxes): if 'onet_input' not in image_obj: ret.append(None) continue onet_input_count = image_obj['onet_input'].shape[0] out0_per_image = out0[:, i:i + onet_input_count] score_per_image = score[i:i + onet_input_count] points_per_image = points[:, i:i + onet_input_count] ipass = np.where(score_per_image > threshold[2]) points_per_image = points_per_image[:, ipass[0]] 
image_obj['total_boxes'] = np.hstack([ image_obj['total_boxes'][ipass[0], 0:4].copy(), np.expand_dims(score_per_image[ipass].copy(), 1) ]) mv = out0_per_image[:, ipass[0]] w = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0] + 1 h = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1] + 1 points_per_image[ 0:5, :] = np.tile(w, (5, 1)) * points_per_image[0:5, :] + np.tile( image_obj['total_boxes'][:, 0], (5, 1)) - 1 points_per_image[ 5:10, :] = np.tile(h, (5, 1)) * points_per_image[5:10, :] + np.tile( image_obj['total_boxes'][:, 1], (5, 1)) - 1 if image_obj['total_boxes'].shape[0] > 0: image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv)) pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Min') image_obj['total_boxes'] = image_obj['total_boxes'][pick, :] points_per_image = points_per_image[:, pick] ret.append((image_obj['total_boxes'], points_per_image)) else: ret.append(None) i += onet_input_count return ret
def continous_wavelet(X, dt, pad=False, wavelet=morlet, **kwargs): """ Computes the wavelet transform of the vector X, with sampling rate dt. inputs: X - the time series, numpy array dt - sampling time of dt pad - if True, pad time series with 0 to get len(X) up to the next higher power of 2. It speeds up the FFT. wavelet - which mother wavelet should be used. (morlet, paul, DOG) --- kwargs --- dj - the spacing between discrete scales. s0 - the smallest scale of the wavelet j1 - the number of scales minus one. Scales range from s0 up to s0 * 2^(j1+dj) to give a total of j1+1 scales. k0 - parameter of Mother wavelet: Morlet - wavenumber, Paul - order, DOG - derivative outputs: wave - wavelet transform of the X. It is a complex numpy array of dim (n, j1+1) period - the vector of Fourier periods in time units scale - the vector of scale indices, given by s0 * 2^(j*dj) coi - Cone-of-Influence, vector that contains a maximum period of useful information at particular time """ # map arguments if 'dj' in kwargs: dj = kwargs['dj'] else: dj = 0.25 if 's0' in kwargs: s0 = kwargs['s0'] else: s0 = 2 * dt if 'j1' in kwargs: j1 = np.int(kwargs['j1']) else: j1 = np.fix(np.log(len(X) * dt / s0) / np.log(2)) / dj if 'k0' in kwargs: k0 = kwargs['k0'] else: k0 = 6. n1 = len(X) Y = X - np.mean(X) #Y = X # padding, if needed if pad: base2 = int(np.fix(np.log(n1) / np.log(2) + 0.4999999)) # power of 2 nearest to len(X) Y = np.concatenate((Y, np.zeros((np.power(2, (base2 + 1)) - n1)))) n = len(Y) # wavenumber array k = np.arange(1, np.fix(n / 2) + 1) k *= (2. * np.pi) / (n * dt) k_minus = -k[int(np.fix(n - 1)) / 2 - 1::-1] k = np.concatenate((np.array([0.]), k, k_minus)) # compute FFT of the (padded) time series f = fft(Y) # construct scale array and empty period & wave arrays scale = np.array([s0 * np.power(2, x * dj) for x in range(0, j1 + 1)]) period = scale wave = np.zeros((j1 + 1, n), dtype=np.complex) # loop through scales and compute tranform for i in range(j1 + 1): daughter, fourier_factor, coi = wavelet(k, scale[i], k0) wave[i, :] = ifft(f * daughter) period = fourier_factor * scale coi *= dt * np.concatenate((np.array([1e-5]), np.arange( 1, (n1 + 1) / 2), np.arange((n1 / 2 - 1), 0, -1), np.array([1e-5]))) wave = wave[:, :n1] return wave, period, scale, coi
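# A short sketch of the scale bookkeeping in continous_wavelet: with the default
# dj = 0.25 and s0 = 2*dt, the scales are spaced by quarter octaves from s0 up
# to roughly the record length (example numbers only).
import numpy as np

n1, dt, dj = 1024, 1.0, 0.25
s0 = 2 * dt
j1 = np.fix(np.log(n1 * dt / s0) / np.log(2)) / dj      # number of scales minus one
scale = np.array([s0 * np.power(2, x * dj) for x in range(0, int(j1) + 1)])
print(int(j1) + 1, scale[0], scale[-1])                 # 37 scales from 2.0 to 1024.0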
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor): """Detects faces in image, and returns bounding boxes and points for them. img: input image minsize: minimum faces' size pnet, rnet, onet: caffemodel threshold: threshold=[th1, th2, th3], th1-3 are three steps's threshold factor: the factor used to create a scaling pyramid of face sizes to detect in the image. """ factor_count = 0 total_boxes = np.empty((0, 9)) points = np.empty(0) h = img.shape[0] w = img.shape[1] minl = np.amin([h, w]) m = 12.0 / minsize minl = minl * m # create scale pyramid scales = [] while minl >= 12: scales += [m * np.power(factor, factor_count)] minl = minl * factor factor_count += 1 # first stage for scale in scales: hs = int(np.ceil(h * scale)) ws = int(np.ceil(w * scale)) im_data = imresample(img, (hs, ws)) im_data = (im_data - 127.5) * 0.0078125 img_x = np.expand_dims(im_data, 0) img_y = np.transpose(img_x, (0, 2, 1, 3)) out = pnet(img_y) out0 = np.transpose(out[0], (0, 2, 1, 3)) out1 = np.transpose(out[1], (0, 2, 1, 3)) boxes, _ = generateboundingbox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0]) # inter-scale nms pick = nms(boxes.copy(), 0.5, 'Union') if boxes.size > 0 and pick.size > 0: boxes = boxes[pick, :] total_boxes = np.append(total_boxes, boxes, axis=0) numbox = total_boxes.shape[0] if numbox > 0: pick = nms(total_boxes.copy(), 0.7, 'Union') total_boxes = total_boxes[pick, :] regw = total_boxes[:, 2] - total_boxes[:, 0] regh = total_boxes[:, 3] - total_boxes[:, 1] qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh total_boxes = np.transpose( np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) total_boxes = rerec(total_boxes.copy()) total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad( total_boxes.copy(), w, h) numbox = total_boxes.shape[0] if numbox > 0: # second stage tempimg = np.zeros((24, 24, 3, numbox)) for k in range(0, numbox): tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[ 0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = imresample(tmp, (24, 24)) else: return np.empty() tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = rnet(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) score = out1[1, :] ipass = np.where(score > threshold[1]) total_boxes = np.hstack([ total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1) ]) mv = out0[:, ipass[0]] if total_boxes.shape[0] > 0: pick = nms(total_boxes, 0.7, 'Union') total_boxes = total_boxes[pick, :] total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) total_boxes = rerec(total_boxes.copy()) numbox = total_boxes.shape[0] if numbox > 0: # third stage total_boxes = np.fix(total_boxes).astype(np.int32) dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad( total_boxes.copy(), w, h) tempimg = np.zeros((48, 48, 3, numbox)) for k in range(0, numbox): tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3)) tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[ 0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = imresample(tmp, (48, 48)) else: return np.empty() tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = 
np.transpose(tempimg, (3, 1, 0, 2)) out = onet(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) out2 = np.transpose(out[2]) score = out2[1, :] points = out1 ipass = np.where(score > threshold[2]) points = points[:, ipass[0]] total_boxes = np.hstack([ total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1) ]) mv = out0[:, ipass[0]] w = total_boxes[:, 2] - total_boxes[:, 0] + 1 h = total_boxes[:, 3] - total_boxes[:, 1] + 1 points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile( total_boxes[:, 0], (5, 1)) - 1 points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile( total_boxes[:, 1], (5, 1)) - 1 if total_boxes.shape[0] > 0: total_boxes = bbreg(total_boxes.copy(), np.transpose(mv)) pick = nms(total_boxes.copy(), 0.7, 'Min') total_boxes = total_boxes[pick, :] points = points[:, pick] return total_boxes, points
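# A self-contained sketch of the landmark rescaling at the end of the third
# stage above: ONet landmark outputs are fractions of the box width/height, so
# they are stretched by (w, h) and shifted by the box corner. Toy values only.
import numpy as np

total_boxes = np.array([[10., 20., 60., 120., 0.99]])   # one box: x1, y1, x2, y2, score
points = np.vstack([np.full((5, 1), 0.5),               # five x-fractions
                    np.linspace(0.2, 0.8, 5)[:, None]]) # five y-fractions
w = total_boxes[:, 2] - total_boxes[:, 0] + 1
h = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
print(points.T)                                         # x ~ 34.5, y from ~39 to ~100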
def nmea2deg(nmea): deg = (np.fix(nmea / 100) + np.sign(nmea) * np.remainder(np.abs(nmea), 100) / 60) return deg
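# Usage example for nmea2deg (assuming numpy is imported as np, as elsewhere in
# this file): NMEA positions come as (d)ddmm.mmmm, so 4807.038 means
# 48 deg 07.038 min, i.e. about 48.1173 deg.
print(nmea2deg(np.array([4807.038, -12311.12])))        # ~[  48.1173  -123.18533]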