def post_thumbnails(self, oid: str, detection: Mapping):
    """
    Post Fritz-style (~1'x1') cutout images centered around the detected candidate to Fritz

    :param oid: Fritz obj id
    :param detection: Tails detection dict
    :return:
    """
    candid = f"{detection['id']}_{detection['ni']}"

    for thumbnail_type in ("new", "ref", "sub"):
        with timer(
            f"Making {thumbnail_type} thumbnail for {oid} {candid}",
            self.verbose > 1,
        ):
            thumb = self.make_thumbnail(oid, detection, thumbnail_type)

        with timer(
            f"Posting {thumbnail_type} thumbnail for {oid} {candid} to Fritz",
            self.verbose > 1,
        ):
            response = self.api_fritz("POST", "/api/thumbnail", thumb)

        if response.json()["status"] == "success":
            log(f"Posted {oid} {candid} {thumbnail_type} cutout to Fritz")
        else:
            log(f"Failed to post {oid} {candid} {thumbnail_type} cutout to Fritz")
            log(response.json())
def main(img_path, model_name, patch_dimension):
    """
    Driver for recursive forward chop. Takes an image path and model name to upsample the image.
    """
    # Loading model and image
    img = None
    model = None
    print("\nLoading model and image... \n")
    if model_name in ["EDSR"]:
        img = ut.load_image(img_path)
        img = img.unsqueeze(0)
        model = md.load_edsr(device="cuda")
    else:
        img = ut.npz_loader(img_path)
        img = img.unsqueeze(0)
        model = md.load_rrdb(device="cuda")

    # Timers for saving stats: [cpu2gpu, upsampling, merging, gpu2cpu, total]
    timer = [0, 0, 0, 0, 0]

    print("Processing...")

    # Shifting input image to CUDA
    total_time = ut.timer()
    cpu2gpu_time = ut.timer()
    img = img.to("cuda")
    cpu2gpu_time = cpu2gpu_time.toc()

    # Forward chopping and upsampling
    output = forward_chop(
        img, model, timer=timer, min_size=patch_dimension * patch_dimension
    )

    # Shifting output image to CPU
    gpu2cpu_time = ut.timer()
    output = output.to("cpu")
    gpu2cpu_time = gpu2cpu_time.toc()
    if model_name in ["EDSR"]:
        output = output.int()
    total_time = total_time.toc()

    timer[0] = cpu2gpu_time
    timer[-2] = gpu2cpu_time
    timer[-1] = total_time

    # Saving output
    np.savez("results/recursive_outputx4.npz", output)
    np.save("results/recursive_outputx4", output)

    # Printing processing times
    print("\nCPU 2 GPU time: ", timer[0])
    print("\nUpsampling time: ", timer[1])
    print("\nMerging time: ", timer[2])
    print("\nGPU 2 CPU time: ", timer[3])
    print("\nTotal execution time: ", timer[4])
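# Illustrative usage sketch (added, not part of the original source): how this
# driver might be invoked for the RRDB path. The .npz path and the 293-pixel
# patch size are placeholders borrowed from other experiments in this code, and
# the example is wrapped in a function so that it is not executed on import.
def example_recursive_chop_run():
    # Upsample a single .npz slice with the RRDB model using ~293x293 patches
    main("data/slices/0.npz", model_name="RRDB", patch_dimension=293)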
def helper_rrdb_experiment(img_dimension, patch_dimension):
    # Loading model and image
    img = None
    model = None
    # =============================================================================
    #     print("\nLoading model and image... \n")
    # =============================================================================
    img = np.load("data/slices/0.npz")
    img = img.f.arr_0
    img = np.resize(img, (img_dimension, img_dimension))
    img = img[np.newaxis, :, :]
    img = torch.from_numpy(img)
    img = img.unsqueeze(0)
    model = md.load_rrdb(device="cuda")

    # Timers for saving stats
    timer = [0, 0, 0, 0, 0]

    # =============================================================================
    #     print("Processing...")
    # =============================================================================

    # Shifting input image to CUDA
    total_time = ut.timer()
    cpu2gpu_time = ut.timer()
    img = img.to("cuda")
    cpu2gpu_time = cpu2gpu_time.toc()

    # Forward chopping and upsampling
    output = forward_chop(
        img, model, timer=timer, min_size=patch_dimension * patch_dimension
    )

    # Shifting output image to CPU
    gpu2cpu_time = ut.timer()
    output = output.to("cpu")
    gpu2cpu_time = gpu2cpu_time.toc()
    total_time = total_time.toc()

    timer[0] = cpu2gpu_time
    timer[-2] = gpu2cpu_time
    timer[-1] = total_time

    # Printing processing times (upsampling and total only)
    # =============================================================================
    #     print("\nCPU 2 GPU time: ", timer[0])
    #     print("\nUpsampling time: ", timer[1])
    #     print("\nMerging time: ", timer[2])
    #     print("\nGPU 2 CPU time", timer[3])
    #     print("\nTotal execution time: ", timer[4])
    # =============================================================================
    print(timer[1])
    print(timer[4])
def main():
    was_off = False  # was machine off before script began; initialize False
    parser = ArgumentParser(
        description='Pull backup for specified host, waking and shutting if desired',
        usage='{} host --aggressive --verbose'.format(sys.argv[0]))
    parser.add_argument('host', help='host from which to pull backup')
    parser.add_argument(
        '--aggressive',
        help='wake and shut machine if necessary',
        action='store_true'  # false by default
    )
    parser.add_argument(
        '--verbose',
        help='set for print statements',
        action='store_true'  # false by default
    )
    args = parser.parse_args()

    timer_host_alive = timer(is_alive,
                             run_until=True,
                             interval=10,
                             verbose=args.verbose)
    timer_host_dead = timer(is_alive,
                            run_until=False,
                            interval=10,
                            verbose=args.verbose)

    if is_alive(args.host):  # host online
        if args.verbose:
            print('{} is alive, calling run_backup'.format(args.host))
        backup_retval = run_backup(args.host, verbose=args.verbose)
    elif args.aggressive:  # host offline and we need to wake
        was_off = True
        if wake_machine(MAC_ADDRS[args.host]):
            alive_retval = timer_host_alive(args.host)
            if alive_retval:  # host now online
                backup_retval = run_backup(args.host, wait=30, verbose=args.verbose)
                if shutdown(args.host):
                    retval_host_dead = timer_host_dead(args.host)
                    if retval_host_dead:
                        print('{} is dead; duration: {}'.format(
                            args.host, retval_host_dead))
                    else:
                        print('{} is not killable'.format(args.host))
                else:
                    print('shutdown failed')  # , file=sys.stderr)
            else:
                print('timeout waiting for {} to wake'.format(
                    args.host))  # , file=sys.stderr)
        else:
            print('wake command failed', file=sys.stderr)
def save_image(model_name, img, scale=4, output_path="results/result_imagex4.png"):
    b, c, h, w = img.shape
    if model_name in ["EDSR"]:
        img = img.int()
    save_time = ut.timer()
    fig = plt.figure(figsize=((4 * h) / 1000, (4 * w) / 1000), dpi=100, frameon=False)
    ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
    ax.set_axis_off()
    fig.add_axes(ax)
    # fig = plt.figure(figsize=(4*h, 4*w))
    ax.imshow(img[0].permute((1, 2, 0)))
    fig.savefig(
        output_path,
        bbox_inches="tight",
        transparent=True,
        pad_inches=0,
        dpi=1000,
    )
    save_time = save_time.toc()
    return save_time
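# Illustrative usage sketch (added, not part of the original source): saving a
# 4x-upsampled EDSR-style output tensor of shape (1, c, 4h, 4w). The random
# tensor stands in for a real model output, and the sketch assumes the
# "results/" directory already exists.
def example_save_image_usage():
    dummy_sr = torch.randint(0, 256, (1, 3, 400, 400))  # stand-in SR output
    seconds = save_image(
        "EDSR", dummy_sr, scale=4, output_path="results/example_sr.png"
    )
    print("Image saved in {} s".format(seconds))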
def post_candidate(self, oid: str, detection: Mapping):
    """
    Post a candidate comet detection to Fritz

    :param oid: candidate id
    :param detection: from self.process_frame
    :return:
    """
    candid = f"{detection['id']}_{detection['ni']}"
    meta = {
        "id": oid,
        "ra": detection["ra"],
        "dec": detection["dec"],
        "score": detection["p"],
        "filter_ids": [self.config["sentinel"]["fritz"]["filter_id"]],
        "origin": candid,
        "passed_at": arrow.utcnow().format("YYYY-MM-DDTHH:mm:ss.SSS"),
    }
    if self.verbose > 1:
        log(meta)

    with timer(
        f"Posting metadata of {oid} {candid} to Fritz",
        self.verbose > 1,
    ):
        response = self.api_fritz("POST", "/api/candidates", meta)
    if response.json()["status"] == "success":
        log(f"Posted {oid} {candid} metadata to Fritz")
    else:
        log(f"Failed to post {oid} {candid} metadata to Fritz")
        log(response.json())
def trt_helper_upsampler_piterative_experiment(
    model_name, trt_engine_path, img_path, patch_dimension, shave=10, use_fp16=False
):
    """
    Driver function to run a TensorRT engine

    Parameters
    ----------
    model_name : str
        name of the model (i.e. "EDSR", "RRDB").
    trt_engine_path : str
        path of the trt engine.
    img_path : str
        path of the image file.
    patch_dimension : int
        patch size.
    shave : int, optional
        patch overlapping size. The default is 10.
    use_fp16 : bool, optional
        run the engine with FP16 precision. The default is False.

    Returns
    -------
    output_image : torch.Tensor
        upsampled image.

    """
    # Loading model and image
    if model_name in ["EDSR"]:
        img = ut.load_image(img_path)
        input_image = img.unsqueeze(0)
    elif model_name in ["RRDB"]:
        img = ut.npz_loader(img_path)
        input_image = img.unsqueeze(0)
    else:
        print("Unknown model!")
        return

    b, c, h, w = input_image.shape

    total_time = ut.timer()
    out_tuple = trt_forward_chop_iterative_v2(
        input_image,
        trt_engine_path=trt_engine_path,
        shave=shave,
        min_size=patch_dimension * patch_dimension,
        device="cuda",
        use_fp16=use_fp16,
        print_result=True,
    )
    output_image = out_tuple[0]
    total_time = total_time.toc()
    # =============================================================================
    #     for i in out_tuple[1:]:
    #         print(i)
    # =============================================================================
    print("Total executing time: ", total_time)
    return output_image
def helper_upsampler_piterative_experiment(model_name, img_path, patch_dimension):
    """
    Driver function for running PyTorch model inference

    Parameters
    ----------
    model_name : str
        name of the model (i.e. "EDSR", "RRDB").
    img_path : str
        path of the image file.
    patch_dimension : int
        patch size.

    Returns
    -------
    output_image : torch.Tensor
        upsampled image.

    """
    # Loading model and image
    if model_name in ["EDSR"]:
        model = md.load_edsr(device="cuda")
        img = ut.load_image(img_path)
        input_image = img.unsqueeze(0)
    elif model_name in ["RRDB"]:
        model = md.load_rrdb(device="cuda")
        img = ut.npz_loader(img_path)
        input_image = img.unsqueeze(0)
    else:
        print("Unknown model!")
        return

    b, c, h, w = input_image.shape

    total_time = ut.timer()
    out_tuple = forward_chop_iterative(
        input_image,
        shave=10,
        min_size=patch_dimension * patch_dimension,
        model=model,
        device="cuda",
        print_result=True,
    )
    model.cpu()
    del model
    output_image = out_tuple[0]
    total_time = total_time.toc()
    # =============================================================================
    #     for i in out_tuple[1:]:
    #         print(i)
    # =============================================================================
    print("Total executing time: ", total_time)
    return output_image
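# Illustrative usage sketch (added, not part of the original source): running
# the iterative-chop driver above with the RRDB model. The .npz path and the
# 293-pixel patch size are placeholders reused from other experiments here.
def example_piterative_run():
    sr_image = helper_upsampler_piterative_experiment(
        model_name="RRDB", img_path="data/slices/0.npz", patch_dimension=293
    )
    return sr_image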
def helper_rrdb_piterative_experiment(img_dimension, patch_dimension):
    """
    Driver function for running PyTorch model inference

    Parameters
    ----------
    img_dimension : int
        image one side dimension.
    patch_dimension : int
        patch size.

    Returns
    -------
    None.

    """
    # Loading model and image
    img = None
    model = None
    img = np.load("data/slices/0.npz")
    img = img.f.arr_0
    img = np.resize(img, (img_dimension, img_dimension))
    img = img[np.newaxis, :, :]
    img = torch.from_numpy(img)
    img = img.unsqueeze(0)
    input_image = img
    b, c, h, w = input_image.shape
    # input_image = input_image.reshape((1, c, h, w))

    # Loading model
    model = md.load_rrdb("cuda")
    model.eval()

    total_time = ut.timer()
    out_tuple = forward_chop_iterative(
        input_image,
        shave=10,
        min_size=patch_dimension * patch_dimension,
        model=model,
        device="cuda",
        print_result=True,
    )
    model.cpu()
    del model
    output_image = out_tuple[0]
    total_time = total_time.toc()

    for i in out_tuple[1:]:
        print(i)
    print(total_time)
def batch_forward_chop(
    patch_list,
    batch_size,
    channel,
    img_height,
    img_width,
    dim,
    shave,
    scale,
    model,
    device="cuda",
    print_timer=True,
):
    """
    Create SR image from batches of patches

    Parameters
    ----------
    patch_list : list
        list of patches.
    batch_size : int
        batch size.
    channel : int
        input image channel.
    img_height : int
        input image height.
    img_width : int
        input image width.
    dim : int
        patch dimension.
    shave : int
        shave value for patch.
    scale : int
        scale for LR to SR.
    model : nn.Module
        SR model.
    device : str, optional
        GPU or CPU. The default is 'cuda'.
    print_timer : bool, optional
        Print result or not. The default is True.

    Raises
    ------
    Exception
        if the batch size is greater than the total number of patches,
        or if the model runs out of CUDA memory.

    Returns
    -------
    3D matrix, tuple
        output_image, tuple of timings.

    """
    logger = ut.get_logger()
    total_patches = len(patch_list)
    if batch_size > total_patches:
        raise Exception("Batch size greater than total number of patches")

    output_image = torch.tensor(
        np.zeros((channel, img_height * scale, img_width * scale)))

    cpu_to_gpu_time = 0
    gpu_to_cpu_time = 0
    batch_creating_time = 0
    total_EDSR_time = 0
    cuda_clear_time = 0
    merging_time = 0
    for start in range(1, total_patches + 1, batch_size):
        info = ""
        try:
            # Build the next batch of patches
            batch_creating_timer = ut.timer()
            batch = []
            end = start + batch_size
            if start + batch_size > total_patches:
                end = total_patches + 1
            for p in range(start, end):
                batch.append(patch_list[p][4])
            batch_creating_time += batch_creating_timer.toc()

            # Move the batch to the GPU
            torch.cuda.synchronize()
            cpu_to_gpu_timer = ut.timer()
            batch = torch.stack(batch).to(device)
            torch.cuda.synchronize()
            cpu_to_gpu_time += cpu_to_gpu_timer.toc()
            info = (info + "C2G Starts: " + str(cpu_to_gpu_timer.t0) +
                    "C2G total: " + str(cpu_to_gpu_time))
            # =============================================================================
            #             print(batch.shape)
            #             subprocess.run("gpustat", shell=True)
            # =============================================================================
            with torch.no_grad():
                # =============================================================================
                #                 print(start, end)
                #                 print(sys.getsizeof(batch))
                # =============================================================================
                torch.cuda.synchronize()
                start_time = time.time()
                sr_batch = model(batch)
                torch.cuda.synchronize()
                end_time = time.time()
                processing_time = end_time - start_time
                total_EDSR_time += processing_time
                info = (info + "\tModel Starts: " + str(start_time) +
                        "Model total: " + str(total_EDSR_time))

            # Move the SR batch back to the CPU
            torch.cuda.synchronize()
            gpu_to_cpu_timer = ut.timer()
            sr_batch = sr_batch.to("cpu")
            torch.cuda.synchronize()
            gpu_to_cpu_time += gpu_to_cpu_timer.toc()
            info = (info + "\tGPU 2 CPU Starts: " + str(gpu_to_cpu_timer.t0) +
                    "G2C total: " + str(gpu_to_cpu_time))

            _, _, patch_height, patch_width = sr_batch.size()
            logger.info(info)

            # Merge the SR patches into the output image
            batch_id = 0
            merging_timer = ut.timer()
            for p in range(start, end):
                output_image[
                    :,
                    patch_list[p][3][0]:patch_list[p][3][1],
                    patch_list[p][3][2]:patch_list[p][3][3],
                ] = sr_batch[batch_id][
                    :,
                    patch_list[p][2][0]:patch_list[p][2][1],
                    patch_list[p][2][2]:patch_list[p][2][3],
                ]
                batch_id += 1
            merging_time += merging_timer.toc()

            cuda_clear_timer = ut.timer()
            ut.clear_cuda(batch, None)
            cuda_clear_time += cuda_clear_timer.toc()
        except RuntimeError as err:
            ut.clear_cuda(batch, None)
            raise Exception(err)

    model = model.to("cpu")
    if print_timer:
        print("Total upsampling time: {}\n".format(total_EDSR_time))
        print("Total CPU to GPU shifting time: {}\n".format(cpu_to_gpu_time))
        print("Total GPU to CPU shifting time: {}\n".format(gpu_to_cpu_time))
        print("Total batch creation time: {}\n".format(batch_creating_time))
        print("Total merging time: {}\n".format(merging_time))
        print("Total CUDA clear time: {}\n".format(cuda_clear_time))
        print("Total time: {}\n".format(total_EDSR_time + cpu_to_gpu_time +
                                        gpu_to_cpu_time + batch_creating_time +
                                        cuda_clear_time + merging_time))
    return output_image, (
        total_EDSR_time,
        cpu_to_gpu_time,
        gpu_to_cpu_time,
        batch_creating_time,
        cuda_clear_time,
        merging_time,
    )
def patch_batch_forward_chop(
    input_image,
    patch_dimension,
    patch_shave,
    scale,
    batch_size,
    model_type="EDSR",
    device="cuda",
    print_timer=True,
):
    """
    Create an SR image by chopping the input into patches and upsampling them in batches

    Parameters
    ----------
    input_image : 3D Matrix
        input image.
    patch_dimension : int
        patch dimension.
    patch_shave : int
        patch shave value.
    scale : int
        scale for LR to SR.
    batch_size : int
        batch size.
    model_type : str, optional
        model name. The default is 'EDSR'.
    device : str, optional
        GPU or CPU. The default is 'cuda'.
    print_timer : bool, optional
        print result or not. The default is True.

    Returns
    -------
    output_image : 3D Matrix
        output SR image.

    """
    model = None
    if model_type == "EDSR":
        model = md.load_edsr(device=device)
        model.eval()
    elif model_type == "RRDB":
        model = md.load_rrdb(device=device)
        model.eval()
    else:
        raise Exception("{} : Unknown model...".format(model_type))

    total_timer = ut.timer()
    channel, height, width = input_image.shape

    # Chop the input image into overlapping patches
    patch_list_timer = ut.timer()
    patch_list = {}
    create_patch_list(
        patch_list,
        input_image,
        patch_dimension,
        patch_shave,
        scale,
        channel,
        height,
        width,
    )
    patch_list_processing_time = patch_list_timer.toc()

    # Upsample the patches in batches and merge them into the output image
    total_batch_processing_timer = ut.timer()
    output_image, timer_results = batch_forward_chop(
        patch_list,
        batch_size,
        channel,
        height,
        width,
        patch_dimension,
        patch_shave,
        scale,
        model=model,
        device="cuda",
        print_timer=False,
    )
    total_batch_processing_time = total_batch_processing_timer.toc()

    if model_type == "EDSR":
        output_image = output_image.int()
    total_time = total_timer.toc()

    print(len(patch_list))
    if print_timer:
        print(patch_list_processing_time)
        for t in timer_results:
            print(t)
        print(total_batch_processing_time)
        print(total_time)
    model = model.cpu()
    del model
    return output_image
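# Illustrative usage sketch (added, not part of the original source): batched
# patch processing of a single .npz slice with the RRDB model. The input path,
# patch size, shave, and batch size are placeholder values, not settings taken
# from the original experiments.
def example_patch_batch_run():
    lr_image = ut.npz_loader("data/slices/0.npz").float()  # (c, h, w) tensor
    sr_image = patch_batch_forward_chop(
        lr_image,
        patch_dimension=293,
        patch_shave=10,
        scale=4,
        batch_size=2,
        model_type="RRDB",
        device="cuda",
        print_timer=True,
    )
    return sr_image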
def forward_chop(
    input_image, model, timer, shave=10, scale=4, n_GPUs=1, min_size=160000
):
    """
    Recursive forward chop

    Parameters
    ----------
    input_image : torch.Tensor
        input image batch of shape (b, c, h, w).
    model : nn.Module
        SR model.
    timer : list
        accumulators for [cpu2gpu, upsampling, merging, gpu2cpu, total] times.
    shave : int, optional
        overlapping value. The default is 10.
    scale : int, optional
        LR to HR scale. The default is 4.
    n_GPUs : int, optional
        number of GPUs. The default is 1.
    min_size : int, optional
        patch size. The default is 160000.

    Returns
    -------
    output : tensor
        4x output.

    """
    n_GPUs = min(n_GPUs, 4)
    b, c, h, w = input_image.size()
    h_half, w_half = h // 2, w // 2
    h_size, w_size = h_half + shave, w_half + shave
    # Split the input into four overlapping quadrants
    lr_list = [
        input_image[:, :, 0:h_size, 0:w_size],
        input_image[:, :, 0:h_size, (w - w_size):w],
        input_image[:, :, (h - h_size):h, 0:w_size],
        input_image[:, :, (h - h_size):h, (w - w_size):w],
    ]

    if w_size * h_size < min_size:
        # Quadrants are small enough: upsample them directly
        sr_list = []
        for i in range(0, 4, n_GPUs):
            model.eval()
            with torch.no_grad():
                lr_batch = torch.cat(lr_list[i:(i + n_GPUs)], dim=0)
                upsampling_time = ut.timer()
                sr_batch = model(lr_batch)
                upsampling_time = upsampling_time.toc()
                timer[1] += upsampling_time
            sr_list.extend(sr_batch.chunk(n_GPUs, dim=0))
    else:
        # Quadrants are still too large: recurse
        sr_list = [
            forward_chop(patch, model, timer, shave=shave, min_size=min_size)
            for patch in lr_list
        ]

    h, w = scale * h, scale * w
    h_half, w_half = scale * h_half, scale * w_half
    h_size, w_size = scale * h_size, scale * w_size
    shave *= scale

    # Merge the four SR quadrants, discarding the shaved overlap
    output = input_image.new(b, c, h, w)
    merging_time = ut.timer()
    output[:, :, 0:h_half, 0:w_half] = sr_list[0][:, :, 0:h_half, 0:w_half]
    output[:, :, 0:h_half, w_half:w] = sr_list[1][
        :, :, 0:h_half, (w_size - w + w_half):w_size
    ]
    output[:, :, h_half:h, 0:w_half] = sr_list[2][
        :, :, (h_size - h + h_half):h_size, 0:w_half
    ]
    output[:, :, h_half:h, w_half:w] = sr_list[3][
        :, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size
    ]
    merging_time = merging_time.toc()
    timer[2] += merging_time

    return output
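# Illustrative usage sketch (added, not part of the original source): calling
# the recursive chop directly with the five-slot timer list used by main()
# above; slots 1 and 2 are filled by forward_chop (upsampling and merging).
# The .npz path and 293-pixel patch size are placeholders.
def example_forward_chop_run():
    model = md.load_rrdb(device="cuda")
    lr = ut.npz_loader("data/slices/0.npz").unsqueeze(0).to("cuda")
    timings = [0, 0, 0, 0, 0]  # [cpu2gpu, upsampling, merging, gpu2cpu, total]
    sr = forward_chop(lr, model, timer=timings, min_size=293 * 293)
    print("upsampling: {}, merging: {}".format(timings[1], timings[2]))
    return sr.to("cpu")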
def sentinel(
    utc_start: Optional[str] = None,
    utc_stop: Optional[str] = None,
    twilight: Optional[bool] = False,
    test: Optional[bool] = False,
    verbose: Optional[bool] = False,
):
    """
    ZTF Sentinel service

    - Monitors the ZTF_ops collection on Kowalski for new ZTF data (Twilight only by default).
    - Uses dask.distributed to process individual ZTF image frames (ccd-quads).
      Each worker is initialized with a TailsWorker instance that maintains a Fritz connection
      and preloads Tails. The candidate comet detections, if any, are posted to Fritz together
      with auto-annotations (cross-matches from the MPC and SkyBot) and auxiliary data.

    :param utc_start: UTC start date/time in arrow-parsable format. If not set, defaults to (now - 24h)
    :param utc_stop: UTC stop date/time in arrow-parsable format. If not set, defaults to (now + 1h).
                     If set, program runs once
    :param twilight: process only the data of the ZTF Twilight survey
    :param test: run in test mode
    :param verbose: verbose?
    :return:
    """
    if verbose:
        log("Setting up MongoDB connection")
    init_db(config=config, verbose=verbose)
    mongo = Mongo(
        host=config["sentinel"]["database"]["host"],
        port=config["sentinel"]["database"]["port"],
        username=config["sentinel"]["database"]["username"],
        password=config["sentinel"]["database"]["password"],
        db=config["sentinel"]["database"]["db"],
        verbose=verbose,
    )
    if verbose:
        log("Set up MongoDB connection")

    collection = config["sentinel"]["database"]["collection"]

    # remove dangling entries in the db at startup
    mongo.db[collection].delete_many({"status": "processing"})

    # Configure dask client
    if verbose:
        log("Initializing dask.distributed client")
    dask_client = dask.distributed.Client(
        address=f"{config['sentinel']['dask']['host']}:{config['sentinel']['dask']['scheduler_port']}"
    )

    # init each worker with Worker instance
    if verbose:
        log("Initializing dask.distributed workers")
    worker_initializer = WorkerInitializer()
    dask_client.register_worker_plugin(worker_initializer, name="worker-init")

    if test:
        frame = "ztf_20191014495961_000570_zr_c05_o_q3"
        with timer(f"Submitting frame {frame} for processing", verbose):
            mongo.db[collection].update_one(
                {"_id": frame}, {"$set": {"status": "processing"}}, upsert=True
            )
            future = dask_client.submit(process_frame, frame, pure=True)
            dask.distributed.fire_and_forget(future)
            future.release()
            del future
        return True

    if verbose:
        log("Setting up Kowalski connection")
    kowalski = Kowalski(
        token=config["kowalski"]["token"],
        protocol=config["kowalski"]["protocol"],
        host=config["kowalski"]["host"],
        port=config["kowalski"]["port"],
        verbose=verbose,
    )
    if verbose:
        log(f"Kowalski connection OK: {kowalski.ping()}")

    while True:
        try:
            # monitor the past 24 hours as sometimes there are data processing/posting delays at IPAC
            start = (
                arrow.get(utc_start)
                if utc_start is not None
                else arrow.utcnow().shift(hours=-24)
            )
            stop = (
                arrow.get(utc_stop)
                if utc_stop is not None
                else arrow.utcnow().shift(hours=1)
            )
            if (stop - start).total_seconds() < 0:
                raise ValueError("utc_stop must be greater than utc_start")
            if verbose:
                log(f"Looking for ZTF exposures between {start} and {stop}")

            kowalski_query = {
                "query_type": "find",
                "query": {
                    "catalog": "ZTF_ops",
                    "filter": {
                        "jd_start": {
                            "$gt": Time(start.datetime).jd,
                            "$lt": Time(stop.datetime).jd,
                        }
                    },
                    "projection": {"_id": 0, "fileroot": 1},
                },
            }

            if twilight:
                kowalski_query["query"]["filter"]["qcomment"] = {"$regex": "Twilight"}

            response = kowalski.query(query=kowalski_query).get("data", dict())
            file_roots = sorted([entry["fileroot"] for entry in response])

            frame_names = [
                f"{file_root}_c{ccd:02d}_o_q{quad:1d}"
                for file_root in file_roots
                for ccd in range(1, 17)
                for quad in range(1, 5)
            ]

            if verbose:
                log(f"Found {len(frame_names)} ccd-quad frames")
                log(frame_names)

            processed_frames = [
                frame["_id"]
                for frame in mongo.db[collection].find(
                    {
                        "_id": {"$in": frame_names},
                        "status": {"$in": ["processing", "success"]},
                    },
                    {"_id": 1},
                )
            ]
            if verbose:
                log(processed_frames)

            unprocessed_frames = set(frame_names) - set(processed_frames)

            for frame in unprocessed_frames:
                with timer(f"Submitting frame {frame} for processing", verbose):
                    mongo.db[collection].update_one(
                        {"_id": frame}, {"$set": {"status": "processing"}}, upsert=True
                    )
                    future = dask_client.submit(process_frame, frame, pure=True)
                    dask.distributed.fire_and_forget(future)
                    future.release()
                    del future

        except Exception as e:
            log(e)

        # run once if utc_stop is set
        if utc_stop is not None:
            break
        else:
            log("Heartbeat")
            time.sleep(60)
def __init__(self, **kwargs):
    self.verbose = kwargs.get("verbose", 2)
    self.config = config

    # mongo connection
    self.mongo = Mongo(
        host=config["sentinel"]["database"]["host"],
        port=config["sentinel"]["database"]["port"],
        username=config["sentinel"]["database"]["username"],
        password=config["sentinel"]["database"]["password"],
        db=config["sentinel"]["database"]["db"],
        verbose=self.verbose,
    )

    # requests.Session to talk to Fritz
    self.session = requests.Session()
    self.session_headers = {
        "Authorization": f"token {config['sentinel']['fritz']['token']}"
    }

    retries = Retry(
        total=5,
        backoff_factor=2,
        status_forcelist=[405, 429, 500, 502, 503, 504],
        method_whitelist=["HEAD", "GET", "PUT", "POST", "PATCH"],
    )
    adapter = TimeoutHTTPAdapter(timeout=5, max_retries=retries)
    self.session.mount("https://", adapter)
    self.session.mount("http://", adapter)

    # init MPC and SkyBot - services used for cross-matching identified candidates
    # with known Solar system objects
    self.mpc = MPC(verbose=self.verbose)
    self.imcce = IMCCE(verbose=self.verbose)

    # check Fritz connection
    try:
        with timer("Checking connection to Fritz", self.verbose > 1):
            response = self.api_fritz("GET", "/api/sysinfo")
        if response.json()["status"] == "success":
            log("Fritz connection OK")
        else:
            log("Failed to connect to Fritz")
            raise ValueError("Failed to connect to Fritz")
    except Exception as e:
        log(e)

    # load Tails
    self.path = pathlib.Path(config["sentinel"]["app"]["path"])
    self.checkpoint = f"/app/models/{config['sentinel']['app']['checkpoint']}/tails"
    self.model = Tails()
    self.model.load_weights(self.checkpoint).expect_partial()

    self.score_threshold = float(
        config["sentinel"]["app"].get("score_threshold", 0.5)
    )
    if not (0 <= self.score_threshold <= 1):
        raise ValueError(
            "sentinel.app.score_threshold must be (0 <= score_threshold <=1), check config"
        )

    self.cleanup = config["sentinel"]["app"]["cleanup"]
    if self.cleanup not in ("all", "none", "ref", "sci"):
        raise ValueError(
            "sentinel.app.cleanup value not in ('all', 'none', 'ref', 'sci'), check config"
        )

    self.num_threads = mp.cpu_count()
dimension = int(sys.argv[2]) if len(sys.argv) > 2 else 293
shave = int(sys.argv[3]) if len(sys.argv) > 3 else 10
batch_size = int(sys.argv[4]) if len(sys.argv) > 4 else 1
print_result = bool(int(sys.argv[5])) if len(sys.argv) > 5 else True
device = str(sys.argv[6]) if len(sys.argv) > 6 else "cuda"

# Reading image
# img = torchvision.io.read_image(img_path)
img = ut.npz_loader(img_path)
c, h, w = img.shape
# img = img.reshape((1, c, h, w))
# plt.imshow(img[0].permute((1,2,0)))
input_image = img.float()

# Creating patch list from the image
patch_list_timer = ut.timer()
patch_list = {}
create_patch_list(patch_list, input_image, dimension, shave, 4, c, h, w)
patch_list_processing_time = patch_list_timer.toc()
print("Total patch list creating time: {}".format(patch_list_processing_time))
print(len(patch_list))

# Loading model
model = md.load_rrdb(device=device)
model.eval()

min_dim = min(dimension, h, w)
if min_dim != dimension:
    print(
        "\nPatch dimension is greater than the input image's minimum dimension. Changing patch dimension to input image's minimum dimension... \n "
    )
def post_annotations(self, oid: str, detection: Mapping):
    """
    Post candidate annotations to Fritz

    :param oid: candidate id
    :param detection: from self.process_frame
    :return:
    """
    candid = f"{detection['id']}_{detection['ni']}"
    data = {
        "candid": candid,
        "score": detection["p"],
        "jd": detection["jd"],
        "datestr": detection["datestr"],
        "x": detection["x"],
        "y": detection["y"],
        "ra": detection["ra"],
        "dec": detection["dec"],
        "radecstr": detection["radecstr"],
        "tails_v": self.config["sentinel"]["app"]["checkpoint"],
        "sci_ipac_url": detection["sci_ipac_url"],
        "dif_ipac_url": detection["dif_ipac_url"],
    }

    if (
        detection["x_match_mpc"]["status"] == "success"
        and len(detection["x_match_mpc"]["data"]) > 0
    ):
        nearest_match = detection["x_match_mpc"]["data"][0]
        data["mpc_nearest_id"] = nearest_match.get("designation")
        data["mpc_nearest_offset_arcsec"] = nearest_match.get("offset__arcsec")
        data["mpc_nearest_orbit"] = nearest_match.get("orbit")

    if (
        detection["x_match_skybot"]["status"] == "success"
        and len(detection["x_match_skybot"]["data"]) > 0
    ):
        nearest_match = detection["x_match_skybot"]["data"][0]
        data["imcce_nearest_id"] = str(nearest_match["Name"])
        data["imcce_nearest_offset_arcsec"] = float(nearest_match["centerdist"].value)
        data["imcce_nearest_Vmag"] = float(nearest_match["V"].value)

    annotations = {
        "origin": "tails:twilight",
        "data": data,
        "group_ids": [self.config["sentinel"]["fritz"]["group_id"]],
    }
    if self.verbose:
        log(annotations)

    with timer(
        f"Posting annotations for {oid} {candid} to Fritz",
        self.verbose > 1,
    ):
        response = self.api_fritz(
            "POST", f"/api/sources/{oid}/annotations", annotations
        )
    if response.json()["status"] == "success":
        log(f"Posted {oid} {candid} annotation to Fritz")
    else:
        log(f"Failed to post {oid} {candid} annotation to Fritz")
        log(response.json())
def process_frame(self, frame: str) -> Mapping[str, Union[Sequence, str]]:
    """
    Process a ZTF observation

    - Fetch the epochal science, reference, and difference image for a ZTF observation
    - Re-project the reference image onto the epochal science image
    - Stack all three together
    - Tessellate into a 13x13 grid of overlapping tiles of size 256x256 pix
    - Execute Tails on each tile
    - For each candidate detection, query MPC and IMCCE for cross-matches with known SS objects

    :param frame: ZTF frame id formatted as in ztf_20201114547512_000319_zr_c01_o_q1, where
                  20201114: date string
                  547512: time tag
                  000319: zero-padded field id
                  zr: filter code <zg|zr|zi>
                  c01: zero-padded CCD id in [1, 16]
                  q1: quadrant id in [1, 4]
    :return: detections (if any), packed into the result dict
    """
    path_base = self.path.resolve()

    date_string = frame.split("_")[1][:8]
    path_date = self.path / "runs" / date_string
    if not path_date.exists():
        path_date.mkdir(parents=True, exist_ok=True)

    results = {"detections": [], "status": "success"}

    try:
        box_size_pix = config["sentinel"]["app"]["box_size_pix"]

        dim_last = self.model.inputs[0].shape[-1]
        if dim_last == 2:
            stack_class = Dvoika
        elif dim_last == 3:
            stack_class = Troika
        else:
            raise ValueError(
                "bad dim_last: only know how to operate on duplets and triplets"
            )

        with timer("Making stack", self.verbose > 1):
            stack = stack_class(
                path_base=str(path_base),
                name=frame,
                secrets=self.config,
                verbose=self.verbose,
            )

        # re-project ref
        with timer("Re-projecting", self.verbose > 1):
            ref_projected = stack.reproject_ref2sci(
                how="swarp", nthreads=self.num_threads
            )

        # tessellate
        with timer("Tessellating", self.verbose > 1):
            xboxes, yboxes = stack.tessellate_boxes(
                box_size_pix=box_size_pix, offset=20
            )

        tessellation = []
        with timer("Preprocessing tiles", self.verbose > 1):
            for i, xbox in enumerate(xboxes):
                for j, ybox in enumerate(yboxes):
                    # stack and preprocess image stack
                    if dim_last == 2:
                        s = np.stack(
                            [
                                stack.sci[xbox[0]:xbox[1], ybox[0]:ybox[1]],
                                ref_projected[xbox[0]:xbox[1], ybox[0]:ybox[1]],
                            ],
                            axis=2,
                        )
                    elif dim_last == 3:
                        s = np.stack(
                            [
                                stack.sci[xbox[0]:xbox[1], ybox[0]:ybox[1]],
                                ref_projected[xbox[0]:xbox[1], ybox[0]:ybox[1]],
                                stack.zogy[xbox[0]:xbox[1], ybox[0]:ybox[1]],
                            ],
                            axis=2,
                        )
                    else:
                        raise ValueError(
                            "bad dim_last: only know how to operate on duplets and triplets"
                        )
                    s_raw = deepcopy(s)
                    preprocess_stack(s)
                    tessellation.append(
                        {
                            "i": i,
                            "j": j,
                            "xbox": xbox,
                            "ybox": ybox,
                            "stack": s,
                            "stack_raw": s_raw,
                        }
                    )

        stacks = np.array([t["stack"] for t in tessellation])

        with timer("Running Tails", self.verbose > 1):
            predictions = self.model.predict(stacks)
        # log(predictions[0], predictions.shape)

        # URL to fetch original images from IPAC
        sci_name = stack.name + "_sciimg.fits"
        tmp = sci_name.split("_")[1]
        y, p1, p2 = tmp[:4], tmp[4:8], tmp[8:]
        sci_ipac_url = os.path.join(
            config["irsa"]["url"], "sci", y, p1, p2, sci_name
        )

        # detections
        ind = np.where(predictions[:, 0].flatten() > self.score_threshold)[0]
        for ni, ii in enumerate(ind):
            x_o, y_o = predictions[ii, 1:] * stacks.shape[1]

            # save png
            with timer("Saving png", self.verbose > 1):
                plot_stack(
                    # tessellation[ii]['stack'],
                    tessellation[ii]["stack_raw"],
                    reticles=((x_o, y_o),),
                    w=6,
                    h=2,
                    dpi=360,
                    save=str(path_date / f"{frame}_{ni}.png"),
                    # cmap=cmr.arctic,
                    # cmap=plt.cm.viridis,
                    # cmap=plt.cm.cividis,
                    cmap="bone",
                )
            # save npy:
            with timer("Saving npy", self.verbose > 1):
                np.save(
                    str(path_date / f"{frame}_{ni}.npy"), tessellation[ii]["stack"]
                )

            x, y = (
                tessellation[ii]["ybox"][0] + x_o,
                tessellation[ii]["xbox"][0] + y_o,
            )
            ra, dec = stack.pix2world_sci(x, y)
            sky_coord = SkyCoord(ra=ra, dec=dec, unit="deg")
            radecstr = sky_coord.to_string(style="hmsdms")
            epoch = Time(stack.header_sci["OBSJD"], format="jd")
            jd = float(epoch.jd)
            dt = epoch.datetime

            # make cutouts for posting to Fritz
            with timer("Making cutouts", self.verbose > 1):
                cutout_xbox, cutout_ybox, _, _ = stack.make_box(
                    ra=ra, dec=dec, box_size_pix=63, min_offset=0, random=False
                )
                if self.verbose:
                    log((cutout_xbox, cutout_ybox))
                cutouts = np.stack(
                    [
                        stack.sci[
                            cutout_ybox[0]:cutout_ybox[1],
                            cutout_xbox[0]:cutout_xbox[1],
                        ],
                        ref_projected[
                            cutout_ybox[0]:cutout_ybox[1],
                            cutout_xbox[0]:cutout_xbox[1],
                        ],
                        stack.zogy[
                            cutout_ybox[0]:cutout_ybox[1],
                            cutout_xbox[0]:cutout_xbox[1],
                        ],
                    ],
                    axis=2,
                )

            # query MPC and SkyBot
            x_match_query = {
                "id": f"{frame}_{ni}",
                "position": sky_coord,
                "radius": 2,
                "epoch": epoch,
                "timeout": 30,
            }
            with timer("Querying MPC", self.verbose > 1):
                x_match_mpc = self.mpc.query(query=x_match_query)
            with timer("Querying IMCCE", self.verbose > 1):
                x_match_skybot = self.imcce.query(query=x_match_query)
            if self.verbose:
                log(x_match_mpc)
                log(x_match_skybot)

            detection = {
                "id": frame,
                "ni": ni,
                "jd": jd,
                "datestr": f"{dt.year} {dt.month} {dt.day + (dt.hour + dt.minute / 60 + dt.second / 3600) / 24}",
                "p": float(predictions[ii, 0]),
                "x": x,
                "y": y,
                "ra": ra,
                "dec": dec,
                "radecstr": radecstr,
                "tails_v": self.config["sentinel"]["app"]["checkpoint"],
                "sci_ipac_url": sci_ipac_url,
                "dif_ipac_url": sci_ipac_url.replace(
                    "sciimg.fits", "scimrefdiffimg.fits.fz"
                ),
                "cutouts": cutouts,
                "x_match_mpc": x_match_mpc,
                "x_match_skybot": x_match_skybot,
            }

            results["detections"].append(detection)

        if len(results["detections"]) > 0:
            df_dets = pd.DataFrame.from_records(results["detections"])
            df_dets.to_csv(str(path_date / f"{frame}.csv"), index=False)

        if self.cleanup.lower() != "none":
            cleanup_ref = True if self.cleanup.lower() in ("all", "ref") else False
            cleanup_sci = True if self.cleanup.lower() in ("all", "sci") else False
            stack.cleanup(ref=cleanup_ref, sci=cleanup_sci)

    except Exception as e:
        log(e)
        results["status"] = "error"

    return results
import onnxruntime

ort_session = onnxruntime.InferenceSession("edsr.onnx")


def to_numpy(tensor):
    return (
        tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
    )


img_path = "test2.jpg"
input_batch = ut.load_image(img_path).unsqueeze(0)
print(input_batch.shape)
print(type(input_batch))

ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(input_batch)}
execution_time = ut.timer()
ort_outs = ort_session.run(None, ort_inputs)
execution_time = execution_time.toc()
print(execution_time)

output = ort_outs[0]
print(output)
print(output.shape)
output = torch.tensor(output).int()
output_folder = "."
file_name = img_path.split("/")[-1].split(".")[0]
ut.save_image(output[0], output_folder, 100, 100,