def main(arguments):
    """Start the image-processing workflow selected on the command line.

    :param arguments: the parsed arguments passed by the user;
        ``arguments.procedure`` determines which procedure is performed.
        When it is ``None`` the function silently does nothing.
    :raises RuntimeError: when the selected procedure is unknown or one of
        its required arguments is missing.
    """
    if arguments.procedure is None:
        # No procedure requested: keep the original silent no-op behavior.
        return

    if arguments.procedure == 'tiling_raster':
        if (arguments.image is not None) and (arguments.output is not None) \
                and (arguments.width is not None) and (arguments.height is not None):
            tiling.Tiling().tiling_raster(arguments.image, arguments.output,
                                          arguments.width, arguments.height, True)
        else:
            # Message fixed: the required options are width/height ("weight" was a typo).
            logging.error(
                ">> One of arguments (image_folder, output_folder, width, height) are "
                "incorrect or empty. Try it again!")
            raise RuntimeError
    elif arguments.procedure == 'tiling_vector':
        if (arguments.image_tiles is not None) and (arguments.shapefile_reference is not None) and \
                (arguments.output is not None):
            tiling.Tiling().tiling_vector(arguments.image_tiles,
                                          arguments.shapefile_reference,
                                          arguments.output)
        else:
            logging.error(
                ">> One of arguments (image_tiles, shapefile_reference, output_folder) are "
                "incorrect or empty. Try it again!")
            raise RuntimeError
    elif arguments.procedure == 'shp2png':
        if (arguments.image is not None) and (arguments.shapefile_folder is not None) and \
                (arguments.output is not None) and (arguments.width is not None) and \
                (arguments.height is not None):
            tiling.Tiling().shp2png(arguments.image, arguments.shapefile_folder,
                                    arguments.output, arguments.width,
                                    arguments.height, settings.CLASSES,
                                    label_type='rgb')
        else:
            # Message fixed: this branch actually checks image, shapefile_folder,
            # output, width and height.
            logging.error(
                ">> One of arguments (image_folder, shapefile_folder, output_folder, "
                "width, height) are incorrect or empty. Try it again!")
            raise RuntimeError
    elif arguments.procedure == 'split_samples':
        if (arguments.training_folder is not None) and (arguments.validation_folder is not None) and \
                (arguments.percentage is not None):
            utils.Utils().split_samples(arguments.training_folder,
                                        arguments.validation_folder,
                                        arguments.percentage)
        else:
            logging.error(
                ">> One of arguments (training_folder, validation_folder, percentage) are "
                "incorrect or empty. Try it again!")
            raise RuntimeError
    else:
        logging.error(">> Procedure option not found. Try it again!")
        raise RuntimeError
def check_requirements():
    """Verify that the external tools required by the pipeline are on PATH.

    Uses ``Utils.which`` to look up each required executable; the lookup is
    expected to raise when a tool cannot be found, and that exception is
    propagated unchanged to the caller.

    :raises Exception: propagated from ``Utils.which`` for a missing tool.
    """
    checker = u.Utils()
    # The original `except Exception as e: raise (e)` wrapper added nothing
    # and only obscured the traceback, so the exceptions now propagate directly.
    for tool in ('bwa', 'bedtools', 'samtools', 'R'):
        checker.which(tool)
    # TODO: also check the sort version.
    # TODO: also check the samtools version.
import discord from discord.ext import commands import asyncio from utils import utils from utils.storage import Storage Utils = utils.Utils() class LFC(commands.Cog, name='Looking for Crew'): def __init__(self, client): self.client = client self.Storage = Storage() async def auto_remove(self, user: discord.User): await asyncio.sleep(7200) for guild in self.Storage.get_lfc_enabled_guilds(self.client): if user in guild.members: role = self.Storage.get_lfc_role(guild) try: await guild.get_member(user.id).remove_roles(role) except discord.Forbidden: continue @Utils.matchLFCChannel() @commands.command( brief='Sets the user into `Looking for Crew` status for 2 hours.', description='This command gives the user the set `Looking for Crew` role. \n\ You can only use this command in the set channels.\n\ Type ?nlfc once you\'re in a crew to avoid getting further notifications.\n\
# API server for controlling settings from flask import Flask, render_template, request from flask_socketio import SocketIO, emit from utils import utils import json, os cwd = os.getcwd() utilities = utils.Utils() app = Flask(__name__) socketio = SocketIO(app) # GET route for main HTML page so user can edit his settings @app.route("/") def index(): with open(f"{cwd}/config.json", "r") as f: config = json.load(f) return render_template('index.html', config=config) # POST route for updating data @app.route("/config", methods=['POST']) def config(): try: ip = request.form.get("ip") light = request.form.get("light") # Get current config with open(f"{cwd}/config.json", "r") as f: config = json.load(f) config["ip"] = ip config["light"] = light
def check_arguments(args):
    """Validate the command-line arguments and collect the run inputs.

    Checks that the input file, BWA index, genome file and optional
    sponges/macs2 resources exist, creates the output directory, and
    resolves the fragment-length / spp options.

    :param args: dict of parsed command-line options
        ('f', 'o', 'b', 'g', 'sponges', 'macs2', 'fraglen', 'spp', 't', 'c').
    :return: dict with the validated inputs for the pipeline.
    :raises Exception: when a required file/tool is missing, the output
        directory already exists, or mutually exclusive options are combined.
    """
    checker = u.Utils()
    inputs = {}

    # Check that the input file exists and is well formatted.
    # (Bug fix: the original discarded the result of os.path.isfile.)
    if not os.path.isfile(args['f']):
        raise Exception("The input file '%s' does not exist." % (args['f']))
    inputs['inputs'], inputs['controls'], inputs['prefix_inputs'], inputs[
        'prefix_controls'] = input_integrity(args['f'])

    # The output folder must not exist yet; create it.
    if os.path.isdir(args['o']):
        raise Exception(
            "mkdir: cannot create directory '%s': File exists." % (args['o']))
    os.makedirs(args['o'])
    inputs['outdir'] = args['o']

    # Check that the BWA index exists (accept either the prefix or the .bwt file).
    if args['b'].endswith('.bwt'):
        prefix_index = os.path.splitext(args['b'])[0]
    else:
        prefix_index = args['b']
    for suffix in ['.bwt', '.amb', '.ann', '.pac', '.sa']:
        if not os.path.isfile("%s%s" % (prefix_index, suffix)):
            raise Exception("The BWA index file '%s%s' does not exist." %
                            (prefix_index, suffix))
    inputs['index'] = prefix_index

    # Check that the genome chromInfo file exists.
    if not os.path.isfile(args['g']):
        raise Exception("The genome chromInfo file '%s' does not exist." %
                        (args['g']))
    inputs['genome'] = args['g']

    # Optional sponges file.
    if args['sponges'] is not None:
        if not os.path.isfile(args['sponges']):
            raise Exception("The sponges file '%s' does not exist." %
                            (args['sponges']))
        inputs['sponges'] = args['sponges']
    else:
        inputs['sponges'] = None

    # macs2: use the path supplied by the user, otherwise fall back to PATH.
    if args['macs2'] is not None:
        if os.path.exists(args['macs2']):
            inputs['macs2'] = args['macs2']
        else:
            raise Exception("Macs2 not found in '%s'" % (args['macs2']))
    else:
        checker.which('macs2')
        inputs['macs2'] = "macs2"

    # Fragment length: an explicit --fraglen and --spp are mutually exclusive.
    inputs['fraglen'] = -1
    if args['fraglen'] is not None:
        if args['spp']:
            raise Exception(
                "'--spp' and '--fraglen' are mutually exclusive. Please select only one."
            )
        inputs['fraglen'] = args['fraglen']
    else:
        if args['spp']:
            # Run a tiny R job to make sure the spp package actually loads.
            try:
                j = jr.JobRunner()
                j.append([['Rscript ./common/test.R']])
                j.run()
                inputs['spp'] = True
            except Exception:
                raise Exception(
                    "Spp is not currently installed or cannot be loaded.")
        else:
            inputs['spp'] = False
            inputs['fraglen'] = -1
            print('Picard tools will be used to estimate insert size.')

    inputs['threads'] = args['t']
    inputs['cpus'] = args['c']
    inputs['common'] = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "common")
    inputs['picard'] = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "common/picard-tools-1.141/picard.jar")

    # If not enough controls are supplied, use pooled controls.
    inputs['pooled'] = len(inputs['inputs']) != len(inputs['controls'])

    print("Argument Verification completed")
    return inputs
def __init__(self):
    """Create the helper instances this object relies on."""
    # One instance of each helper, kept for the lifetime of the object;
    # the tuple on the right is built left-to-right, preserving the
    # original construction order.
    self.driver, self.urlUtils, self.json = (
        utils.Utils(),
        urlutils.UrlUtils(),
        jsondatas.JsonData(),
    )
def predict_deep_network(self, model, load_param):
    """ Initiate the process of inferences. The weight matrix from trained deep learning,
    which represents the knowledge, is loaded and the images are then presented. Each one
    is processed (multiclass or not) and submitted to the polygonization, where the raster
    is interpreted and a correspondent geographic format is created

    Images larger than the configured slice size are cut into tiles, each
    tile is predicted and segmented, and the per-tile predictions are merged
    back into one PNG; smaller images are predicted in a single pass.
    Geographic inputs are additionally polygonized into a shapefile.

    :param model: the compiled keras deep learning architecture
    :param load_param: a dict with the keras deep learning architecture parameters
        (keys used here: image_prediction_folder, width_slice, height_slice,
        tmp_slices, tmp_slices_predictions, input_size_w, input_size_h,
        output_prediction, output_prediction_shp, classes)
    """
    logging.info(">> Performing prediction...")
    path_val_images = os.path.join(load_param['image_prediction_folder'])
    pred_images = loader.Loader(path_val_images)
    for item in pred_images.get_list_images():
        filename = os.path.basename(item)
        name = os.path.splitext(filename)[0]
        complete_path = os.path.join(path_val_images, item)
        # check_image_format returns (dims, is_geographic_file); both are None
        # when the extension is not recognized.
        dims, is_geographic_file = self.check_image_format(complete_path)
        if dims is None or is_geographic_file is None:
            logging.warning(
                ">>>>>> The filename {} does not match any accepted extension. "
                "Check it and try again!".format(filename))
            # NOTE(review): this `return` aborts the whole loop on the first
            # unrecognized file — `continue` would skip just that file; confirm
            # which behavior is intended.
            return
        if dims[0] > load_param['width_slice'] or dims[1] > load_param[
                'height_slice']:
            # Image exceeds the slice size: tile it, predict each tile,
            # then stitch the per-tile predictions back together.
            logging.info(
                ">>>> Image {} is bigger than the required dimension! "
                "Cropping and predicting...".format(filename))
            if is_geographic_file is True:
                list_images = slicer.Slicer().slice_geographic(
                    complete_path, load_param['width_slice'],
                    load_param['height_slice'], load_param['tmp_slices'])
            else:
                list_images = slicer.Slicer().slice_bitmap(
                    complete_path, load_param['width_slice'],
                    load_param['height_slice'], load_param['tmp_slices'])
            logging.info(
                ">>>> Predicting each of {} slices and predicting...".
                format(len(list_images)))
            prediction_path_list = []
            for path in list_images:
                # Resize to the network input size, scale to [0, 255] float32,
                # and add the batch dimension before predicting.
                images_array = load_img(
                    path, target_size=(load_param['input_size_w'],
                                       load_param['input_size_h']))
                images_array = image.img_to_array(images_array)
                images_array = np.expand_dims(images_array, axis=0)
                images_array = cv2.normalize(images_array, None, 0, 255,
                                             cv2.NORM_MINMAX, cv2.CV_32F)
                prediction = model.get_model().predict(images_array)
                # segment_image writes the segmented slice and returns its path.
                prediction_path_list.append(
                    self.segment_image(path, prediction, load_param))
            logging.info(
                ">>>> Merging the {} predictions in image with {} x {}...".
                format(len(prediction_path_list), dims[0], dims[1]))
            complete_path_to_merged_prediction = os.path.join(
                load_param['output_prediction'], name + ".png")
            slicer.Slicer().merge_images(
                prediction_path_list, dims[0], dims[1],
                complete_path_to_merged_prediction)
            if is_geographic_file is True:
                # Turn the merged raster prediction into vector polygons.
                logging.info(">>>> Polygonizing segmented image...")
                self.poligonize(complete_path_to_merged_prediction,
                                load_param['classes'], complete_path,
                                load_param['output_prediction_shp'])
        else:
            # Image fits in a single network input: predict it directly.
            # NOTE(review): here expand_dims happens AFTER cv2.normalize,
            # while the tiled branch normalizes after expand_dims — confirm
            # the two orders are equivalent for this normalization.
            image_to_predict = load_img(
                complete_path, target_size=(load_param['input_size_w'],
                                            load_param['input_size_h']))
            images_array = image.img_to_array(image_to_predict)
            images_array = cv2.normalize(images_array, None, 0, 255,
                                         cv2.NORM_MINMAX, cv2.CV_32F)
            images_array = np.expand_dims(images_array, axis=0)
            prediction = model.get_model().predict(images_array)
            prediction_path = self.segment_image(complete_path, prediction,
                                                 load_param)
            complete_path_to_prediction = os.path.join(
                load_param['output_prediction'], name + ".png")
            # Move the segmented output to its final name/location.
            os.replace(prediction_path, complete_path_to_prediction)
            if is_geographic_file is True:
                logging.info(">>>> Polygonizing segmented image...")
                self.poligonize(complete_path_to_prediction,
                                load_param['classes'], complete_path,
                                load_param['output_prediction_shp'])
    # Clean up the temporary slice and slice-prediction folders.
    utils.Utils().flush_files(load_param['tmp_slices'])
    utils.Utils().flush_files(load_param['tmp_slices_predictions'])