def getRadonVariance(self, transpose=0, log_step=None):
    """
    Get the correct Radon variance map for a specific step and transposition.

    If the size of _input_var has changed, the maps are recalculated
    using the lazy-loading getters defined in __init__.
    """
    if log_step is None:  # default variance map is the fully transformed one
        if transpose:
            log_step = math.ceil(math.log(self.im_size[1], 2))  # transpose: horizontal axis is active
        else:
            log_step = math.ceil(math.log(self.im_size[0], 2))  # no transpose: vertical axis is active

    if not empty(self._var_size) and (
            not compare_size(self.im_size, self._var_size)
            or self.useExpand() != self._var_was_expanded):
        self._radon_var_uni = []  # clear this to be lazy loaded with the right size
        self._radon_var_map = []  # clear this to be lazy loaded with the right size

    log_step = int(log_step)

    if empty(self._input_var) or scalar(self._input_var):
        # noise_var resolves to the scalar variance, or the default value when none was given
        return self.noise_var * self.radon_var_uni[transpose][log_step - 1]
    else:
        return self.radon_var_map[transpose][log_step - 1]
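# Note: getRadonVariance() and most snippets below lean on empty() and scalar()
# helpers that are never defined in this collection. A minimal sketch of what
# they plausibly look like, judging only from how they are called here
# (None, [], '' and zero-size arrays all count as empty); the upstream
# definitions may differ:
import numpy as np

def empty(x):
    """True for None, empty sequences/strings, and zero-size numpy arrays."""
    if x is None:
        return True
    if isinstance(x, np.ndarray):
        return x.size == 0
    try:
        return len(x) == 0
    except TypeError:
        return False  # numbers and other non-sized objects are not "empty"

def scalar(x):
    """True for plain numbers and zero-dimensional numpy arrays."""
    return np.isscalar(x) or (isinstance(x, np.ndarray) and x.ndim == 0)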
def subtractStreak(self, M_in, width=None):
    if empty(width):
        width = self.subtract_psf_widths

    # These are really rough estimates. This can be improved by looking at the
    # error ellipse and subtracting y and dy values inside that range only.
    shift_array = np.arange(-self.psf_sigma * width, self.psf_sigma * width + 1)

    M_sub = np.array(M_in)

    for shift in shift_array:
        if self.transposed:
            x1 = self.x1
            x2 = self.x2
            y1 = self.y1 + shift
            y2 = self.y2 + shift
        else:
            x1 = self.x1 + shift
            x2 = self.x2 + shift
            y1 = self.y1
            y2 = self.y2

        (xlist, ylist, n) = listPixels(x1, x2, y1, y2, M_in.shape)

        if not empty(xlist):
            M_sub[xlist[0], ylist[0]] = 0

    return M_sub
def link_facebook_profile(self, facebook_id, access_token):
    Logger.Info('%s - UserController.link_facebook_profile - started' % __name__)
    Logger.Debug('%s - UserController.link_facebook_profile - started with facebook_id:%s and access_token:%s' % (__name__, facebook_id, access_token))
    errors = []
    if not self.user:
        errors.append(constants.USER_DOES_NOT_EXIST)
    if empty(facebook_id):
        errors.append(constants.FACEBOOK_ID_MISSING)
    if empty(access_token):
        errors.append(constants.FACEBOOK_ACCESS_TOKEN_MISSING)
    if len(errors) > 0:
        return False, errors

    # save Facebook metadata to user profile
    profile = self.user.profile
    if 'facebook' not in profile.linked_accounts:
        profile.linked_accounts['facebook'] = {}
    profile.linked_accounts['facebook'] = {
        'facebook_id': facebook_id,
        'access_token': access_token
    }
    profile.save()

    Logger.Info('%s - UserController.link_facebook_profile - finished' % __name__)
    return True, []
def __init__(self, rho: float, vae_layers, vgg_weights: str = 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'):
    """Build the main model containing both the feature extractor and the variational autoencoder.

    Args:
        rho (float): rho value.
        vae_layers: layer configuration passed on to the Vae module.
        vgg_weights (str, optional): vgg weights. Defaults to 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'.
    """
    super(Vicl, self).__init__()

    self.rho = rho
    self.extractor = Vgg19()
    self.vae = Vae(layers=vae_layers)
    self.reg_params = {}
    self.class_idents = {}

    # Load pretrained weights for the VGG
    halo = Halo(text='Downloading VGG19 saved state', spinner='dots')
    vgg19_state_dict = load_state_dict_from_url(vgg_weights, progress=False)
    missing, unexpected = self.extractor.load_state_dict(vgg19_state_dict, strict=False)

    if not empty(missing):
        halo.warn(f'There are missing keys in the VGG model ({missing})')
    if not empty(unexpected):
        halo.warn(f'There are unexpected keys in the VGG model ({unexpected})')
def finalizeFRT(self, M_in, transpose, radon_image):
    # this is called at the end of "frt()" if it is given a Finder object

    self.subtracted_image = M_in  # even if we don't subtract anything, this must be filled

    if not empty(self.last_streak) and (self.last_streak.radon_dy < self.min_length):
        # we found a very short streak, so drop it
        self.last_streak = []
        self.last_snr = 0

    if not empty(self.last_streak):
        self.last_streak.input_image = M_in
        self.last_streak.radon_image = radon_image
        self.streaks.append(self.last_streak)

        # first, subtract the found streak
        self.subtracted_image = self.streaks[-1].subtractStreak(M_in, self.subtract_psf_widths)

        if self.use_recursive and len(self.streaks) < self.recursion_depth:
            if self.debug_bit > 9:
                plt.imshow(self.subtracted_image)
                plt.title("num streaks found %d | transpose: %d" % (len(self.streaks), transpose))
                time.sleep(1)

            pyradon.frt.frt(self.subtracted_image,
                            transpose=transpose,
                            expand=self.useExpand(),
                            finder=self)
@property
def radon_var_uni(self):  # lazy load this list (accessed as an attribute, e.g. in getRadonVariance)
    if empty(self.im_size):
        raise Exception("You are asking for a uniform var-map without giving an image size!")

    if empty(self._radon_var_uni) and (empty(self._input_var) or scalar(self._input_var)):
        self._var_size = self.im_size
        self._var_was_expanded = self.useExpand()
        self._radon_var_uni.append(
            pyradon.frt.frt(np.ones(self.im_size),
                            expand=self.useExpand(),
                            partial=True))

        if self.im_size[0] == self.im_size[1]:
            # image is square, just make a copy of the var-map
            self._radon_var_uni.append(self._radon_var_uni[0])
        else:
            print("making transposed uni var map")
            self._radon_var_uni.append(
                pyradon.frt.frt(np.ones(self.im_size),
                                expand=self.useExpand(),
                                partial=True,
                                transpose=True))

    return self._radon_var_uni
def precompute_minimal(data, settings):
    param = empty()
    cache = {}
    if settings.optype == 'class':
        param.alpha = settings.alpha
        param.alpha_per_class = float(param.alpha) / data['n_class']
        cache['y_train_counts'] = hist_count(data['y_train'], range(data['n_class']))
        cache['range_n_class'] = range(data['n_class'])
        param.base_measure = (np.ones(data['n_class']) + 0.) / data['n_class']
        param.alpha_vec = param.base_measure * param.alpha
    else:
        cache['sum_y'] = float(np.sum(data['y_train']))
        cache['sum_y2'] = float(np.sum(data['y_train'] ** 2))
        cache['n_points'] = len(data['y_train'])
        warn('initializing prior mean and precision to their true values')
        # FIXME: many of the following are relevant only for mondrian forests
        param.prior_mean = np.mean(data['y_train'])
        param.prior_variance = np.var(data['y_train'])
        param.prior_precision = 1.0 / param.prior_variance
        if not settings.smooth_hierarchically:
            param.noise_variance = 0.01  # FIXME: hacky
        else:
            K = min(1000, data['n_train'])
            # FIXME: measurement noise set to fraction of unconditional variance
            param.noise_variance = param.prior_variance / (1. + K)  # assume noise variance = prior_variance / (2K)
        # NOTE: max_split_cost scales inversely with the number of dimensions
        param.variance_coef = 2.0 * param.prior_variance
        param.sigmoid_coef = data['n_dim'] / (2.0 * np.log2(data['n_train']))
        param.noise_precision = 1.0 / param.noise_variance
    return (param, cache)
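# In precompute_minimal() above, empty() is clearly not an emptiness test: it is
# called with no arguments and arbitrary attributes are assigned to the result
# (param.alpha = ...). In Mondrian-forest style codebases this is usually just a
# bare attribute container; a minimal definition consistent with that usage:
class empty(object):
    pass

# param = empty(); param.alpha = 0.5  # an attribute bag, nothing more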
@classmethod
def RegisterUser(cls, request, username, password1, password2, registration_code=None):
    Logger.Info('%s - UserController.RegisterUser - started' % __name__)
    Logger.Debug('%s - UserController.RegisterUser - started with request:%s and username:%s and password1:%s and password2:%s' % (__name__, request, username, password1, password2))
    errors = []
    if empty(username) or not bool(email_re.search(username)):
        errors.append('You have not entered a valid email address')
    try:
        User.objects.get(username=username)
        errors.append('Sorry, that email address is already being used')
    except User.DoesNotExist:
        pass
    if errors:
        Logger.Info('%s - UserController.RegisterUser - finished' % __name__)
        return False, errors

    passed, rules_errors = _check_password_rules(password1)
    if not passed:
        errors += rules_errors
    if password1 != password2:
        errors.append('The passwords you entered don\'t match')
    if errors:
        Logger.Info('%s - UserController.RegisterUser - finished' % __name__)
        return False, errors

    User.objects.create_user(username, username, password1)
    UserSubscriptions.InitForUsername(username)
    UserController.LoginUser(request, username, password1)
    user = UserController.GetUserByUserName(username)
    if registration_code:
        profile = user.profile
        profile.registration_code = registration_code
        profile.save()

    Logger.Info('%s - UserController.RegisterUser - finished' % __name__)
    return True, []
@property
def noise_var(self):
    if empty(self._input_var):
        return self._default_var_value
    elif scalar(self._input_var):
        return self._input_var
    else:
        return np.median(self._input_var)
def run(self):
    if self.debug_bit > 1:
        print("run()")

    self.clear()
    self.makeImage()
    self.find()

    if empty(self.finder.streaks):
        print("No streaks found. Maximal S/N= %f" % max(np.max(self.finder.radon_image), np.max(self.finder.radon_image_trans)))
    else:
        s = self.finder.streaks[0]
        if self.debug_bit:
            print("SIMULATED : S/N= %4.2f | I= %5.2f | L= %6.1f | th= %4.2f | x0= %4.1f" % (self.calcSNR(), self.intensity, self.L * self.im_size, self.th, self.x0 * self.im_size))
            print("CALCULATED: S/N= %4.2f | I= %5.2f | L= %6.1f | th= %4.2f | x0= %4.1f" % (s.snr, s.I, s.L, s.th, s.x0))
        if self.debug_bit > 1:
            input_xy = (self.x1, self.x2, self.y1, self.y2)
            input_xy = tuple(int(round(x * self.im_size)) for x in input_xy)
            print("INPUT: x1= % 4d | x2= % 4d | y1= % 4d | y2= % 4d" % input_xy)
            print("FOUND: x1= % 4d | x2= % 4d | y1= % 4d | y2= % 4d" % (s.x1, s.x2, s.y1, s.y2))
def link_twitter_profile(self, screen_name):
    Logger.Info('%s - UserController.link_twitter_profile - started' % __name__)
    Logger.Debug('%s - UserController.link_twitter_profile - started with screen_name:%s' % (__name__, screen_name))
    errors = []
    if not self.user:
        errors.append(constants.USER_DOES_NOT_EXIST)
    if empty(screen_name):
        errors.append(constants.TWITTER_SCREEN_NAME_MISSING)
    if len(errors) > 0:
        return False, errors

    # save Twitter metadata to user profile
    profile = self.user.profile
    if 'twitter' not in profile.linked_accounts:
        profile.linked_accounts['twitter'] = {}
    profile.linked_accounts['twitter'] = {
        'screen_name': screen_name
    }
    profile.save()

    Logger.Info('%s - UserController.link_twitter_profile - finished' % __name__)
    return True, []
def grib_invdist(gid, target_lats, target_lons, mv):
    num_cells = target_lons.size
    indices = np.indices(target_lons.shape)
    valid_target_coords = (target_lons > -1.0e+10) & (target_lons != mv)
    xs = np.where(valid_target_coords, indices[0], int_fill_value).ravel()
    ys = np.where(valid_target_coords, indices[1], int_fill_value).ravel()

    idxs1 = empty(num_cells, fill_value=int_fill_value, dtype=int)
    idxs2 = empty(num_cells, fill_value=int_fill_value, dtype=int)
    idxs3 = empty(num_cells, fill_value=int_fill_value, dtype=int)
    idxs4 = empty(num_cells, fill_value=int_fill_value, dtype=int)
    invs1 = empty(num_cells)
    invs2 = empty(num_cells)
    invs3 = empty(num_cells)
    invs4 = empty(num_cells)

    format_progress = '{}Inverse distance interpolation: {}/{} [outs: {}] ({}%)'.format
    i = 0
    outs = 0
    back_char, progress_step = progress_step_and_backchar(num_cells)
    stdout.write('Start interpolation: {}\n'.format(now_string()))
    stdout.write(format_progress(back_char, 0, num_cells, outs, 0))
    stdout.flush()

    for lat, lon in itertools.izip(target_lats.flat, target_lons.flat):
        if i % progress_step == 0:
            stdout.write(format_progress(back_char, i, num_cells, outs, i * 100. / num_cells))
            stdout.flush()
        if not (lon < -1.0e+10 or lon == mv):
            try:
                # TODO CHECK IF asscalar is really needed here
                n_nearest = gribapi.grib_find_nearest(gid, np.asscalar(lat), np.asscalar(lon), npoints=4)
            except gribapi.GribInternalError:
                # typically an "out of grid" error
                outs += 1
                xs[i] = int_fill_value
                ys[i] = int_fill_value
            else:
                invs1[i], invs2[i], invs3[i], invs4[i], idxs1[i], idxs2[i], idxs3[i], idxs4[i] = _compute_coeffs_and_idxs(n_nearest)
        i += 1

    invs1 = invs1[~np.isnan(invs1)]
    invs2 = invs2[~np.isnan(invs2)]
    invs3 = invs3[~np.isnan(invs3)]
    invs4 = invs4[~np.isnan(invs4)]
    sums = ne.evaluate('invs1 + invs2 + invs3 + invs4')
    coeffs1 = ne.evaluate('invs1 / sums')
    coeffs2 = ne.evaluate('invs2 / sums')
    coeffs3 = ne.evaluate('invs3 / sums')
    coeffs4 = ne.evaluate('invs4 / sums')

    stdout.write('{}{:>100}'.format(back_char, ' '))
    stdout.write(format_progress(back_char, i, num_cells, outs, 100))
    stdout.write('End interpolation: {}\n\n'.format(now_string()))
    stdout.flush()

    return xs[xs != int_fill_value], ys[ys != int_fill_value], \
        idxs1[idxs1 != int_fill_value], idxs2[idxs2 != int_fill_value], idxs3[idxs3 != int_fill_value], idxs4[idxs4 != int_fill_value], \
        coeffs1, coeffs2, coeffs3, coeffs4
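# The grib_* snippets use yet another empty(): an array constructor taking
# fill_value and dtype keywords. That matches numpy.full semantics, with a NaN
# default that explains the ~np.isnan() filtering above. A sketch consistent
# with these call sites (the real helper may differ):
import numpy as np

def empty(shape, fill_value=np.nan, dtype=float):
    """Allocate an array pre-filled with fill_value (like np.full)."""
    return np.full(shape, fill_value, dtype=dtype)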
def run(self):
    # options were parsed in main
    value = getattr(self.options, self.paramname)
    if empty(value):
        self.debug("Expected parameter '%s' is missing from command-line, using default." % self.paramname)
        value = self.default.value
    self.info(_("CLI Parameter '%s'='%s'") % (self.paramname, value))
    self.value.value = value
@property
def psf_sigma(self):
    if empty(self._input_psf):
        return self._default_psf_width
    elif scalar(self._input_psf):
        return self._input_psf
    else:
        a = fit_gaussian(self._input_psf)
        return (a.x[1] + a.x[2]) / 2  # average the x and y sigma
@property
def best(self):  # the highest-S/N streak found (accessed as an attribute, e.g. self.best.snr)
    if empty(self.streaks):
        return []
    else:
        snr = [s.snr for s in self.streaks]
        ind = snr.index(max(snr))
        return self.streaks[ind]
def exportXml(self):
    """
    @rtype: string
    """
    # Document root
    grxml = Document()
    grxmlr = grxml.createElement('flow')
    grxml.appendChild(grxmlr)

    # Each node...
    for node in self.nodes:
        xmlnode = grxml.createElement('node')
        xmlnode.setAttribute('id', unicode(node.id))
        xmlnode.setAttribute('type', unicode(node.fullname()))
        grxmlr.appendChild(xmlnode)

        # Graphical properties
        if not empty(node.graphicalprops):
            for graphprop in node.graphicalprops:
                prop = grxml.createElement('graphproperty')
                prop.setAttribute('name', graphprop)
                prop.setAttribute('value', "%s" % node.graphicalprops[graphprop])
                xmlnode.appendChild(prop)

        # Interfaces and successors
        for interface in node.interfaces:
            xmlinterface = grxml.createElement('interface')
            xmlinterface.setAttribute('name', interface.name)
            if interface.isInput() and interface.isValue():
                xmlinterface.setAttribute('slot', "%s" % interface.slot)
                if not interface.slot:
                    val = ''
                    if interface.value is not None:
                        val = interface.value
                    xmlinterface.setAttribute('value', "%s" % val)
            if not empty(interface.successors):
                for successor in interface.successors:
                    xmlsuccessor = grxml.createElement('successor')
                    xmlsuccessor.setAttribute('node', successor.node.id)
                    xmlsuccessor.setAttribute('interface', successor.name)
                    xmlinterface.appendChild(xmlsuccessor)
            xmlnode.appendChild(xmlinterface)

    return grxml.toprettyxml()
@property
def psf(self):  # accessed as an attribute elsewhere (e.g. self.psf.ndim in input())
    if empty(self._input_psf):
        # fall back to a Gaussian at the default width
        # (mirrors the default dispatch in noise_var and psf_sigma)
        p = gaussian2D(self._default_psf_width)
    elif scalar(self._input_psf):
        p = gaussian2D(self._input_psf)
    else:
        p = self._input_psf
    return p / np.sqrt(np.sum(p ** 2))  # normalized PSF
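# gaussian2D() is assumed to return a 2D Gaussian kernel given a sigma; it is
# not defined in this collection. A hypothetical stand-in, good enough to
# exercise the normalization used by the psf property (unit L2 norm, so that
# np.sum(psf**2) == 1):
import numpy as np

def gaussian2D(sigma, size=None):
    size = size if size is not None else int(6 * sigma) | 1  # odd width, ~3 sigma each side
    ax = np.arange(size) - size // 2
    xx, yy = np.meshgrid(ax, ax)
    return np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))

p = gaussian2D(2.0)
p /= np.sqrt(np.sum(p ** 2))
assert np.isclose(np.sum(p ** 2), 1.0)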
def run(self):
    # Read file content and pass it to the output interface
    if empty(self.filepath.value):
        raise FlowError(_("Filepath empty, cannot read file."))
    self.info(_("Read content of file '%s'") % self.filepath.value)
    with open(self.filepath.value, 'rb') as f:
        for line in f:
            self.output.write(line)
            self.output.flush()
@property
def radon_var_map(self):  # lazy load this list
    # no var-map was calculated yet; also make sure _input_var is given as a matrix
    if empty(self._radon_var_map) and not empty(self._input_var) and not scalar(self._input_var):
        self._var_size = imsize(self._input_var)
        self._var_was_expanded = self.useExpand()
        self._radon_var_map.append(
            pyradon.frt.frt(self._input_var,
                            expand=self.useExpand(),
                            partial=True,
                            transpose=False))
        self._radon_var_map.append(
            pyradon.frt.frt(self._input_var,
                            expand=self.useExpand(),
                            partial=True,
                            transpose=True))

    return self._radon_var_map
def precompute_minimal(data, settings):
    param = empty()
    cache = {}
    assert settings.optype == 'class'
    if settings.optype == 'class':
        param.alpha = settings.alpha
        param.alpha_per_class = float(param.alpha) / data['n_class']
        cache['y_train_counts'] = hist_count(data['y_train'], range(data['n_class']))
        cache['range_n_class'] = range(data['n_class'])
        param.base_measure = (np.ones(data['n_class']) + 0.) / data['n_class']
        param.alpha_vec = param.base_measure * param.alpha
    return (param, cache)
def __senderThread(self):
    while True:
        if self.__closeConnection:
            break
        self.__senderMutex.acquire()
        try:
            while not utils.empty(self.__senderBuffer):
                message = self.__senderBuffer.pop(0)
                # print('sending', message)
                self.__sendConnection.sendall(pickle.dumps(message))
        finally:
            self.__senderMutex.release()
        time.sleep(0.5)
def listPixels(self):
    if empty(self.im_size):
        raise Exception("Cannot listPixels without an image size!")
    if empty(self.x1) or empty(self.x2) or empty(self.y1) or empty(self.y2):
        raise Exception("Cannot listPixels without x1,x2,y1,y2!")

    S = self.im_size
    if scalar(S):
        S = (S, S)

    x1_pix = S[0] * self.x1
    x2_pix = S[0] * self.x2
    y1_pix = S[1] * self.y1
    y2_pix = S[1] * self.y2

    x, y, N = listPixels(x1_pix, x2_pix, y1_pix, y2_pix, self.im_size)

    # these are lists because we may later add support for multiple lines
    self.x_list = x[0]
    self.y_list = y[0]
    self.num_pixels = N[0]
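# Both subtractStreak() and the Streak method above call a module-level
# listPixels() helper that shares the method's name. Judging from the call
# sites, it rasterizes a line into pixel coordinates clipped to the image and
# returns lists of x-arrays, y-arrays and pixel counts (one entry per line,
# hence the [0] indexing). A rough sketch under those assumptions; the real
# pyradon helper may handle subpixel endpoints differently:
import numpy as np

def listPixels(x1, x2, y1, y2, im_size):
    n = int(max(abs(x2 - x1), abs(y2 - y1))) + 1  # one sample per pixel step
    xs = np.round(np.linspace(x1, x2, n)).astype(int)
    ys = np.round(np.linspace(y1, y2, n)).astype(int)
    keep = (xs >= 0) & (xs < im_size[0]) & (ys >= 0) & (ys < im_size[1])
    xs, ys = xs[keep], ys[keep]
    return [xs], [ys], [xs.size]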
def change_password(self, password, new_password1, new_password2):
    Logger.Info('%s - UserController.change_password - started' % __name__)
    Logger.Debug('%s - UserController.change_password - started with password:%s and new_password1:%s and new_password2:%s' % (__name__, password, new_password1, new_password2))
    errors = []
    if not self.user:
        errors.append(constants.USER_DOES_NOT_EXIST)
    if empty(password):
        errors.append(constants.PASSWORD_BLANK)
    if empty(new_password1):
        errors.append(constants.NEW_PASSWORD_BLANK)
    if empty(new_password2):
        errors.append(constants.CONFIRM_PASSWORD_BLANK)
    if new_password1 != new_password2:
        errors.append(constants.NEW_PASSWORD_MISMATCH)
    if len(errors) > 0:
        return False, errors

    if not self.user.check_password(password):
        return False, [constants.PASSWORD_INCORRECT]

    # Verify password rules
    passed, rules_errors = _check_password_rules(new_password1)
    if not passed:
        return False, rules_errors

    # Request is valid. Let's change the password.
    self.user.set_password(new_password1)
    self.user.save()

    Logger.Info('%s - UserController.change_password - finished' % __name__)
    return True, []
def predict(self, x: Tensor, z_mu: Optional[Tensor] = None, z_logvar: Optional[Tensor] = None):
    """Predict classes.

    Args:
        x (Tensor): Tensor input.
        z_mu (Optional[Tensor], optional): Already computed mu from encoder. Defaults to None.
        z_logvar (Optional[Tensor], optional): Already computed logvar from encoder. Defaults to None.

    Returns:
        List: List of predicted labels.
    """
    # Allows us to pass already computed z_mu and z_logvar
    if z_mu is None or z_logvar is None:
        output = self(x)
        z_mu, z_logvar = output['z_mu'], output['z_logvar']

    z_var = calculate_var(z_logvar)
    device = self.device()
    batch_size = x.size(0)

    prediction = [None] * batch_size
    min_distances = [math.inf] * batch_size

    if empty(self.class_idents):
        print('⚠ No registered class identifiers')

    for label, prototype in self.class_idents.items():
        proto_mu, proto_var = prototype['mu'], prototype['var']
        proto_mu = proto_mu.repeat(batch_size, 1)
        proto_var = proto_var.repeat(batch_size, 1)

        mu_distances = cosine_distance(z_mu, proto_mu, dim=1)
        var_distances = cosine_distance(z_var, proto_var, dim=1)
        distances = self.rho * mu_distances + (1.0 - self.rho) * var_distances

        for i in range(0, batch_size):
            distance = distances[i].cpu().item()
            if distance < min_distances[i]:
                min_distances[i] = distance
                prediction[i] = label

    return prediction
def change_email_opt_in(self, opt_in_status):
    Logger.Info('%s - UserController.change_email_opt_in - started' % __name__)
    Logger.Debug('%s - UserController.change_email_opt_in - started with opt_in_status: %s' % (__name__, opt_in_status))
    if empty(opt_in_status):
        return False, [constants.OPT_IN_STATUS_MISSING]

    profile = self.user.profile
    if 'email' not in profile.contact_options:
        profile.contact_options['email'] = {}
    profile.contact_options['email']['opt_in_status'] = (opt_in_status == 'Y')
    profile.save()

    Logger.Info('%s - UserController.change_email_opt_in - finished' % __name__)
    return True, []
def __mainThread(self):
    while True:
        if self.__closeConnection:
            break
        self.__listenerMutex.acquire()
        try:
            if not utils.empty(self.__listenerBuffer):
                # mutex.acquire()
                message = self.__listenerBuffer.pop(0)
                messageCode = message[0]
                if messageCode == ComCodes.CAN_TRAIN:
                    if len(models) < maxDevices:
                        mutex.acquire()
                        models[self.__address] = []
                        mutex.release()
                        response = (ComCodes.CAN_TRAIN, True)
                    else:
                        response = (ComCodes.CAN_TRAIN, False)
                    # response = (ComCodes.CAN_TRAIN, True)
                    self.__sendResponse(response)
                elif messageCode == ComCodes.GET_STRUCTURE:
                    self.__sendResponse((ComCodes.GET_STRUCTURE, structure))
                elif messageCode == ComCodes.GET_WEIGHTS:
                    response = (ComCodes.GET_WEIGHTS, mainModel.getTrainableWeights())
                    self.__sendResponse(response)
                elif messageCode == ComCodes.POST_WEIGHTS:
                    try:
                        mutex.acquire()
                        models[self.__address] = message[1]
                    finally:
                        mutex.release()
        finally:
            # mutex.release()
            self.__listenerMutex.release()
        time.sleep(0.5)
def grib_nearest(gid, target_lats, target_lons, mv):
    num_cells = target_lons.size
    indices = np.indices(target_lons.shape)
    valid_target_coords = (target_lons > -1.0e+10) & (target_lons != mv)
    xs = np.where(valid_target_coords, indices[0], int_fill_value).ravel()
    ys = np.where(valid_target_coords, indices[1], int_fill_value).ravel()
    idxs = empty(num_cells, fill_value=int_fill_value, dtype=int)

    back_char, progress_step = progress_step_and_backchar(num_cells)
    format_progress = '{}Nearest neighbour interpolation: {}/{} [outs: {}] ({}%)'.format
    i = 0
    outs = 0
    stdout.write('Start interpolation: {}\n'.format(now_string()))
    stdout.write(format_progress(back_char, 0, num_cells, outs, 0))
    stdout.flush()

    for lat, lon in itertools.izip(target_lats.flat, target_lons.flat):
        if i % progress_step == 0:
            stdout.write(format_progress(back_char, i, num_cells, outs, i * 100. / num_cells))
            stdout.flush()
        if not (lon <= -1.0e+10 or lon == mv):
            try:
                # TODO CHECK IF asscalar is really needed here
                n_nearest = gribapi.grib_find_nearest(gid, np.asscalar(lat), np.asscalar(lon))
            except gribapi.GribInternalError:
                outs += 1
                xs[i] = int_fill_value
                ys[i] = int_fill_value
            else:
                idxs[i] = n_nearest[0]['index']
        i += 1

    stdout.write('{}{:>100}'.format(back_char, ' '))
    stdout.write(format_progress(back_char, i, num_cells, outs, 100))
    stdout.write('End interpolation: {}\n\n'.format(now_string()))
    stdout.flush()
    return xs[xs != int_fill_value], ys[ys != int_fill_value], idxs[idxs != int_fill_value]
def startNodes(self):
    return [n for n in self.nodes if empty(n.predecessors)]
def scan(self, subframe, transpose):
    if self.debug_bit > 1:
        print("scan in progress... transpose=%d subframe.shape= %dx%dx%d" % (
            transpose, subframe.shape[0], subframe.shape[1], subframe.shape[2]))

    m = math.log(subframe.shape[0] + 1, 2) - 1

    if not self.use_short and 2 ** m < self.im_size[int(transpose)]:
        return  # short circuit this function if not looking for short streaks

    if 2 ** m < self.min_length / 8:
        return  # don't bother looking for streaks much shorter than the minimal length

    V = self.getRadonVariance(transpose, m)

    S = (subframe.shape[0] + 1) // 2
    th = np.arctan(np.arange(-S + 1, S) / float(S))
    G = np.maximum(np.fabs(np.cos(th)), np.fabs(np.sin(th)))
    G = G[:, np.newaxis, np.newaxis]

    SNR = subframe / np.sqrt(V * self.psfNorm(self.psf) * G)
    SNR_final = SNR

    # add exclusion here
    if self.use_exclude:
        if transpose and not empty(self.exclude_y_pix):
            # offset is the index of dx=+1, i.e. how many pixels cover angles 0<=th<=45 in this subframe
            offset = (subframe.shape[0] + 1) // 2
            scale = self._im_size_tr[1] / offset  # scaling factor for smaller subframes
            idx1 = int(offset + math.ceil(self.exclude_y_pix[0] / scale)) - 1
            idx2 = int(offset + math.floor(self.exclude_y_pix[1] / scale)) - 1
            SNR_final[idx1:idx2, :, :] = 0
        elif not empty(self.exclude_x_pix):
            offset = (subframe.shape[0] + 1) // 2
            scale = self._im_size_tr[0] / offset
            idx1 = int(offset + math.ceil(self.exclude_x_pix[0] / scale))
            idx2 = int(offset + math.floor(self.exclude_x_pix[1] / scale))
            SNR_final[idx1:idx2, :, :] = 0

    idx = np.unravel_index(np.nanargmax(SNR_final), SNR_final.shape)
    mx = SNR_final[idx]

    if self.debug_bit > 1:
        print("SNR found is: " + str(mx))

    if empty(self.last_snr) or mx > self.last_snr:
        self.last_snr = mx

    if mx > self.threshold and (empty(self.last_streak) or mx > self.last_streak.snr):
        self.last_streak = Streak(finder=self,
                                  subframe=SNR,
                                  log_step=m,
                                  transposed=transpose,
                                  count=subframe[idx],
                                  index=idx)
url = 'http://localhost:9000/api/situations/' + situation_id + '/openfisca-request'
r = requests.get(url)
situation = r.json()

situations = copy.deepcopy(situation)
for i in range(1, 800):
    situations = utils.merge(situations, utils.prefix(str(i), situation))

log.info('import server')
import server
#s = server.getBogusSituations()
#s = server.getDailySituations(int(sys.argv[-1]))
s = server.getSituationSubset()
remoteSituations = server.processSituations(s, remote=False)

situations = utils.empty()
for prefix, situation in remoteSituations.iteritems():
    situations = utils.merge(situations, utils.prefix(prefix, situation))

allMonths = ['2018-10']
calculs = {
    'aspa': allMonths,
    'acs': allMonths,
    'asi': allMonths,
    'cmu_c': allMonths,
    'af': allMonths,
    'cf': allMonths,
    'asf': allMonths,
    'paje_base': allMonths,
    'rsa': allMonths,
    'aide_logement': allMonths,
# test (reload object)
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    print("this is a test for Simulator and Finder...")

    if 's' not in locals() or not isinstance(s, Simulator):
        s = Simulator()
        s.debug_bit = 1

    s.x1 = 3.0 / 8
    s.x2 = 0.5
    s.y1 = 1.0 / 3
    s.y2 = 1.0 / 2

    # override with a different test streak
    s.x1 = 0.2
    s.y1 = 0.01
    s.x2 = 1
    s.y2 = 1.5

    s.finder.use_subtract_mean = 0

    s.run()

    fig, ax = plt.subplots()
    ax.imshow(s.image)

    if not empty(s.finder.streaks):
        for st in s.finder.streaks:
            st.plotLines(ax)
def __init__(self, finder=None, subframe=[], log_step=[], transposed=False, count=[], index=[]):
    self.im_size = []  # size of original image (after transpose). updated directly from finder
    self.input_image = []  # subtracted image, as given to finder. updated directly from finder
    self.radon_image = []  # full Radon image for the correct transposition. updated directly from finder
    self.subframe = subframe  # subframe where streak is detected. For non-short streaks, equal to "radon_image". from function argument
    self.psf = []  # the PSF image itself, normalized, that was used to do the filter. updated directly from finder

    # these help find the correct image where the streak exists
    self.frame_num = []  # which frame in the batch. updated directly from finder
    self.batch_num = []  # which batch in the run. updated directly from finder
    self.filename = ''  # which file it came from. updated directly from finder
    self.threshold = []  # which threshold was used. updated directly from finder
    self.is_short = []  # if yes, subframe is smaller than the full Radon image. updated in "calculate()"

    # how bright the streak was
    self.I = []  # intensity, or brightness per unit length. found in "calculate()"
    self.snr = []  # calculated from subframe and index. found in "calculate()"
    self.snr_fwhm = []  # S/N per resolution element
    self.count = count  # how many photons across the whole streak. from function argument

    # calculated parameters
    self.L = []  # length of streak (pixels). from "calculate()"
    self.th = []  # angle of streak (th=0 is on the y axis). from "calculate()"
    self.x0 = []  # intercept of line with the x axis. from "calculate()"
    self.a = []  # slope parameter (y=ax+b). from "calculate()"
    self.b = []  # intercept parameter (y=ax+b). from "calculate()"
    self.x1 = []  # streak starting point in x. from "calculate()"
    self.x2 = []  # streak end point in x. from "calculate()"
    self.y1 = []  # streak starting point in y. from "calculate()"
    self.y2 = []  # streak end point in y. from "calculate()"

    # raw coordinates from the Radon result / subframe
    self.transposed = transposed  # was the image transposed? from function argument
    self.radon_step = log_step  # in what step in the FRT the streak was found. from function argument
    self.radon_max_idx = index  # 3D index of the maximum of the subframe (streak position). from function argument
    self.radon_x0 = []  # position coordinate in the Radon image. using subframe and index
    self.radon_dx = []  # slope coordinate in the Radon image. using subframe and index
    self.radon_x_var = []  # error estimate on "radon_x"
    self.radon_dx_var = []  # error estimate on "radon_dx"
    self.radon_xdx_cov = []  # cross correlation of the slope-position errors
    self.radon_y1 = []  # start of the subframe
    self.radon_y2 = []  # end of the subframe

    # switches and parameters for user
    self.noise_var = []  # updated directly from finder
    self.psf_sigma = []  # updated directly from finder
    self.subtract_psf_widths = 3  # how many PSF widths to remove around streak position (overridden by finder)

    # internal switches and parameters
    self.was_expanded = False  # check if original image was expanded before FRT. updated manually from finder
    self.was_convolved = False  # check if original image was convolved with PSF. updated manually from finder
    self.num_psfs_peak_region = 5  # rough estimate of the region around the Radon peak we want to cut for error estimates
    self.num_snr_peak_region = 2  # how many S/N units below maximum is still inside the peak region
    self.peak_region = []  # a map of the peak region, with only the part above the cut (peak - num_snr_peak_region) not zeroed out (used for error estimates)

    self._version = 1.01

    if not empty(finder):
        self.update_from_finder(finder)
        self.calculate()
def input(self, images, variance=None, psf=None, filename=None, batch_num=None):
    """
    Input images and search for streaks in them.
    Inputs: -images (expects a numpy array, can be 3D)
            -variance (can be a scalar or a map of noise variance)
            -psf: for the images (2D) or for each image individually (3D, first dim equal to number of images)
            -filename: for tracking the source of discovered streaks
            -batch_num: if we are running many batches in this run
    """
    if empty(images):
        raise Exception("Cannot do streak finding without images!")

    self.clear()

    if images.ndim == 2:
        images = images[np.newaxis, :, :]  # so we can loop over axis 0

    if self.use_crop:
        images = crop2size(images, self.crop_size)

    self.im_size = imsize(images)
    self.input_images = images

    # input the variance, if given!
    if not empty(variance):
        if self.use_crop:
            self._input_var = crop2size(variance, self.crop_size)
        else:
            self._input_var = variance

    # input the PSF if given!
    if not empty(psf):
        self._input_psf = psf

    # housekeeping
    if not empty(filename):
        self.filename = filename
    if not empty(batch_num):
        self.batch_num = batch_num

    # need these to normalize each Radon image
    V = np.transpose(self.getRadonVariance(transpose=False), (0, 2, 1))[:, :, 0]
    VT = np.transpose(self.getRadonVariance(transpose=True), (0, 2, 1))[:, :, 0]

    th = np.arctan(np.arange(-self.im_size[0] + 1, self.im_size[0]) / float(self.im_size[0]))
    G = np.maximum(np.fabs(np.cos(th)), np.fabs(np.sin(th)))
    thT = np.arctan(np.arange(-self.im_size[1] + 1, self.im_size[1]) / float(self.im_size[1]))
    GT = np.maximum(np.fabs(np.cos(thT)), np.fabs(np.sin(thT)))

    for i in range(images.shape[0]):  # loop over all input images
        if self.debug_bit:
            sys.stdout.write("running streak detection on batch %d | frame %d " % (self.batch_num, i))

        image_single = images[i, :, :]
        self.frame_num = i
        self.last_snr = []
        self.streaks = []
        self.last_streak = []

        if self.psf.ndim > 2:
            this_psf = self.psf[i, :, :]
        else:
            this_psf = self.psf

        if self.use_subtract_mean:
            image_single = image_single - np.nanmean(image_single)  # masked stars don't influence the mean
        # get rid of nans (used to mask stars...). for some reason copy=False is not working!
        image_single = np.nan_to_num(image_single)

        if self.use_conv and not empty(this_psf):
            image_single = scipy.signal.fftconvolve(image_single, np.rot90(this_psf, 2), mode='same')

        R = pyradon.frt.frt(image_single, expand=self.useExpand(), finder=self, transpose=False)

        temp_streaks = self.streaks
        self.streaks = []  # make a new, empty list for the transposed FRT
        self.last_streak = []

        if self.use_only_one == 0 or self.use_recursive:
            RT = pyradon.frt.frt(self.subtracted_image, expand=self.useExpand(), finder=self, transpose=True)
        else:
            RT = pyradon.frt.frt(image_single, expand=self.useExpand(), finder=self, transpose=True)

        self.streaks.extend(temp_streaks)  # combine the lists from the regular and transposed FRT

        self.radon_image = R / np.sqrt(V * self.psfNorm(this_psf) * G[:, np.newaxis])
        self.radon_image_trans = RT / np.sqrt(VT * self.psfNorm(this_psf) * GT[:, np.newaxis])

        if self.use_only_one and not self.use_recursive:
            self.streaks = [self.best]

        if not empty(self.streaks):
            if self.use_save_images:
                for s in self.streaks:
                    s.input_image = np.array(images[i, :, :])
                    if s.transposed:
                        s.radon_image = np.array(self.radon_image_trans)
                    else:
                        s.radon_image = np.array(self.radon_image)
                    if not empty(s.subframe):
                        s.subframe = np.array(s.subframe)
            else:
                for s in self.streaks:
                    s.input_image = []
                    s.radon_image = []
                    s.subframe = []

        if not empty(self.streaks):
            best_snr = self.best.snr
        else:
            # no streaks, just take the best S/N in the final Radon images
            if not empty(self.last_snr) and self.last_snr > 0:
                best_snr = self.last_snr
            else:
                best_snr = max(np.max(self.radon_image), np.max(self.radon_image_trans))

        if self.debug_bit:
            sys.stdout.write("best S/N found was %f\n" % best_snr)

        self.prev_streaks.extend(self.streaks)
        self.snr_values.append(best_snr)
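# A minimal usage sketch for the input() API above. The Finder class name and
# the noise-only test frames are illustrative assumptions, not taken from the
# package docs:
import numpy as np

f = Finder()
frames = np.random.normal(size=(2, 512, 512))  # two noise-only frames
f.input(frames, variance=1.0, psf=2.0, filename='example.fits', batch_num=0)
print(f.snr_values)  # best S/N recorded per frame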
@property
def paramname(self):  # accessed as an attribute in run()
    name = self.name.value
    if empty(name):
        raise FlowError(_("Error in getting name of CLI Parameter"))
    return name
    lower_lims = np.array([-1, -1])
    upper_lims = np.array([1, 1]) * 1.3
else:
    if k == 1:
        A = np.array([[0.5, 0.57], [0.57, 1]])
        b = np.array([1.6, 2.1])
    elif k == 2:
        A = np.array([[0.3, 0.1], [0.1, 0.7]])
        b = np.array([1, 1.5])
    elif k == 3:
        A = np.array([[0.5, 0.57], [0.57, 1]])
        b = np.array([3.1, 4.3])
    b = b * 0.
    lower_lims = np.array([-1, -1])
    upper_lims = np.array([8, 6])

params = empty()
params.b = b
params.A = A
# params.Achol = chol(A)

# FIXME: not sure why real is required below: is it float?
if box:
    # log_step_func = lambda t: -1e10 * np.real(np.logical_or(t<-1, t>1))
    log_step_func = lambda t: -1e10 * np.logical_or(t < -1, t > 1)
else:
    # log_step_func = lambda t: -1e10 * np.real(t<0)
    log_step_func = lambda t: -1e10 * (t < 0)

# first function: step function in each of the directions
log_f_vec = lambda t: log_step_func(t[:, 0]) + log_step_func(t[:, 1])
log_f = lambda t: log_step_func(t[0]) + log_step_func(t[1])