import os

from requests.exceptions import MissingSchema


def main(url: str, pdf_filename: str) -> None:
    # Create a 'pdf' output folder next to this script; ignore the error if it already exists.
    your_dir = os.path.dirname(os.path.abspath(__file__))
    path_to_pdf_folder = os.path.join(your_dir, 'pdf')
    try:
        os.mkdir(path_to_pdf_folder)
    except OSError:
        pass
    try:
        converter(url, pdf_filename, your_dir)
    except MissingSchema:
        print('Check that the link you entered is correct!')
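# Minimal usage sketch (assumption: not part of the original module; the URL and
# filename are illustrative, converter() is the project-local helper called above).
if __name__ == '__main__':
    main('https://example.com/article', 'article.pdf')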
def __init__(self, data):
    self.id = data['id']
    self.world = data['world']
    self.lang = self.world[:2]
    self.name = utils.converter(data['name'])
    self.points = data['points']
    self.rank = data['rank']
import numpy as np


def extend_callgrad(x, Q, P, M, L, r, mu, sigma, sm, maxdegree, vecsize,
                    experiment, opt_var, small_k):
    # opt_var flags which quantities are optimization variables
    # (1: optimization variable, 0: fixed):
    #   opt_var[0] -> pwm (r), opt_var[1] -> sm, opt_var[2] -> sigma, opt_var[3] -> mu
    # Example: opt_var = [1, 0, 1, 0] optimizes pwm and sigma only.
    # Unpack the flat optimization vector x into the active variables.
    if opt_var[0] == 1:
        r = utils.converter(x[:vecsize[0]], P, M)
    if opt_var[1] == 1:
        sm = utils.listconverter(x[vecsize[0]:vecsize[1]], P, M)
    if opt_var[2] == 1:
        sigma = utils.listconverter(x[vecsize[1]:vecsize[2]], P, M)
    if opt_var[3] == 1:
        mu = utils.listconverter(x[vecsize[2]:], P, M)

    gradmu, gradsig, gradpwm, gradsm = extend_gradient(
        Q, P, M, L, r, mu, sigma, sm, maxdegree, opt_var, small_k)

    # Flatten the gradients of the active variables back into one vector,
    # in the same order as the blocks appear in x.
    gradient = []
    if opt_var[0] == 1:
        gradpwm, vecsize1 = utils.matr2vec(gradpwm, P, M)
        gradient = np.concatenate((gradient, gradpwm))
    if opt_var[1] == 1:
        gradsm, vecsize2 = utils.list2vec(gradsm, P, M)
        gradient = np.concatenate((gradient, gradsm))
    if opt_var[2] == 1:
        gradsig, vecsize3 = utils.list2vec(gradsig, P, M)
        gradient = np.concatenate((gradient, gradsig))
    if opt_var[3] == 1:
        gradmu, vecsize4 = utils.list2vec(gradmu, P, M)
        gradient = np.concatenate((gradient, gradmu))
    return gradient
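# Usage sketch (assumption, not from the original project): x concatenates the
# active variable blocks in opt_var order and vecsize holds the cumulative
# offsets used by the slices above. With all four variables active:
# opt_var = [1, 1, 1, 1]
# x = np.concatenate((pwm_vec, sm_vec, sigma_vec, mu_vec))
# vecsize = [len(pwm_vec),
#            len(pwm_vec) + len(sm_vec),
#            len(pwm_vec) + len(sm_vec) + len(sigma_vec)]
# grad = extend_callgrad(x, Q, P, M, L, r, mu, sigma, sm,
#                        maxdegree, vecsize, experiment, opt_var, small_k)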
async def fetch_bulk(self, world, iterable, table=None, *, name=False, dictionary=False):
    dsobj = utils.DSType(table or 0)
    base = f'SELECT * FROM {dsobj.table} WHERE world = $1'

    if not name:
        # Lookup by numeric id.
        query = f'{base} AND id = ANY($2)'
    else:
        if dsobj.table == "village":
            # Village "names" are coordinates like "500|500"; match on the
            # concatenated x/y columns instead.
            iterable = [vil.replace("|", "") for vil in iterable]
            query = f'{base} AND CAST(x AS TEXT)||CAST(y AS TEXT) = ANY($2)'
        else:
            iterable = [utils.converter(obj, True) for obj in iterable]
            if dsobj.table == "tribe":
                # Tribes may be matched by either name or tag.
                query = f'{base} AND ARRAY[LOWER(name), LOWER(tag)] && $2'
            else:
                query = f'{base} AND LOWER(name) = ANY($2)'

    async with self.pool.acquire() as conn:
        res = await conn.fetch(query, world, iterable)

    if dictionary:
        return {rec[1]: dsobj.Class(rec) for rec in res}
    else:
        return [dsobj.Class(rec) for rec in res]
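# Usage sketch (assumption: world and player names are illustrative; the pool is
# the asyncpg-style pool used above). Fetch several players by name and get them
# back keyed by name:
# players = await self.fetch_bulk("de172", ["playerA", "playerB"],
#                                 table="player", name=True, dictionary=True)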
def extend_callfunc(x, Q, P, M, L, r, mu, sigma, sm, maxdegree, vecsize,
                    experiment, opt_var, small_k):
    # Unpack the flat optimization vector x (same opt_var convention as
    # extend_callgrad) and evaluate the objective.
    if opt_var[0] == 1:
        r = utils.converter(x[:vecsize[0]], P, M)
    if opt_var[1] == 1:
        sm = utils.listconverter(x[vecsize[0]:vecsize[1]], P, M)
    if opt_var[2] == 1:
        sigma = utils.listconverter(x[vecsize[1]:vecsize[2]], P, M)
    if opt_var[3] == 1:
        mu = utils.listconverter(x[vecsize[2]:], P, M)
    fval = extend_func_POIM(Q, P, M, L, r, mu, sigma, sm, maxdegree, small_k)
    return fval
async def fetch_player(self, world, searchable, *, name=False, archive=''):
    table = f"player{archive}"
    if name:
        # Name lookups are case-insensitive.
        searchable = utils.converter(searchable, True)
        query = f'SELECT * FROM {table} WHERE world = $1 AND LOWER(name) = $2'
    else:
        query = f'SELECT * FROM {table} WHERE world = $1 AND id = $2'
    async with self.pool.acquire() as conn:
        result = await conn.fetchrow(query, world, searchable)
    return utils.Player(result) if result else None
def __init__(self, data):
    super().__init__(data)
    self.alone = False
    self.tag = utils.converter(data['tag'])
    self.member = data['member']
    self.villages = data['villages']
    self.all_points = data['all_points']
    self.att_bash = data['att_bash']
    self.att_rank = data['att_rank']
    self.def_bash = data['def_bash']
    self.def_rank = data['def_rank']
    self.all_bash = data['all_bash']
    self.all_rank = data['all_rank']
import os


def file_flow(file_name):
    # Convert the raw image to PNG (if not already done), locate the blobs,
    # and record the measured intensity of every blob in each row.
    new_img_name = file_name[:-3] + 'png'
    origin = os.path.join(in_folder, file_name)
    png_dest = os.path.join(out_folder, new_img_name)
    raw_img_data = utils.load_data(origin)
    if not os.path.isfile(png_dest):
        utils.converter(raw_img_data, png_dest)
    identified_blobs = utils.blob_finding(png_dest)
    # Sanity check: a plausible image should contain between 10 and 20 blobs.
    if len(identified_blobs) > 20 or len(identified_blobs) < 10:
        print('weird number of blobs found')
        print(png_dest)
        return
    centers = utils.sort_blobs(identified_blobs)
    print(centers)
    processed_rows = []
    for row in centers:
        results = []
        for center in row:
            intensity = utils.process_blob(center, raw_img_data)
            results.append(intensity)
        processed_rows.append(results)
    utils.save_results(processed_rows, png_dest)
    return
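# Usage sketch (assumption: in_folder is the module-level input folder implied
# above; this loop is illustrative, not part of the original script).
# for file_name in os.listdir(in_folder):
#     file_flow(file_name)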
async def fetch_tribe(self, world, searchable, *, name=False, archive=None):
    table = f"tribe{archive}" if archive else "tribe"
    if name:
        # Match either the tribe tag or the full name, case-insensitively.
        searchable = utils.converter(searchable, True)
        query = f'SELECT * FROM {table} WHERE world = $1 ' \
                f'AND (LOWER(tag) = $2 OR LOWER(name) = $2)'
    else:
        query = f'SELECT * FROM {table} WHERE world = $1 AND id = $2'
    async with self.pool.acquire() as conn:
        result = await conn.fetchrow(query, world, searchable)
    return utils.Tribe(result) if result else None
import logging
import os


def executeQuery(SPARQLEndpoint, ontologyPrefix, dataPropertyToFuzzify, domainClass,
                 rangeClass="", auxiliaryClass="", objectProperty=None):
    """
    Executes the SPARQL query and stores the result set in a CSV file.
    The behaviour depends on the input parameters: if rangeClass, auxiliaryClass
    and objectProperty are all provided, the operation is ternary; otherwise it
    is binary.

    :param SPARQLEndpoint: the SPARQL endpoint
    :param ontologyPrefix: the ontology prefix
    :param dataPropertyToFuzzify: the data property to fuzzify
    :param domainClass: the domain class
    :param rangeClass: the range class
    :param auxiliaryClass: the auxiliary class
    :param objectProperty: the pair of object properties linking the domain,
        auxiliary and range classes
    """
    # The comparisons below expect a two-element list, so normalise the default.
    if objectProperty is None:
        objectProperty = ["", ""]
    nameProperty = createNameProperty(dataPropertyToFuzzify)
    # Obtain a connection to the endpoint.
    sparql = SPARQLEndpointInterface(SPARQLEndpoint)

    targetDomainClass = ""
    targetRangeClass = ""
    targetauxiliaryClass = ""
    targetdataPropertyToFuzzify = ""

    if rangeClass == "" and auxiliaryClass == "" and objectProperty == ["", ""]:
        # Binary case: only the domain class and the data property to fuzzify.
        targetDomainClass = " ?" + domainClass
        targetdataPropertyToFuzzify = " ?" + nameProperty
        whereClause = (" ?" + domainClass + " a ontology:" + domainClass +
                       ". ?" + domainClass + " ontology:" + dataPropertyToFuzzify +
                       " ?" + nameProperty)
    elif rangeClass != "" and auxiliaryClass != "" and objectProperty != ["", ""]:
        # Ternary case: domain, auxiliary and range classes linked by two object properties.
        targetDomainClass = " ?" + domainClass
        targetRangeClass = " ?" + rangeClass
        targetauxiliaryClass = " ?" + auxiliaryClass
        targetdataPropertyToFuzzify = " ?" + nameProperty
        whereClause = ("?" + domainClass + " a ontology:" + domainClass +
                       ". ?" + rangeClass + " a ontology:" + rangeClass +
                       ". ?" + domainClass + " ontology:" + objectProperty[0] + " ?" + auxiliaryClass +
                       ". ?" + auxiliaryClass + " ontology:" + objectProperty[1] + " ?" + rangeClass +
                       ". ?" + auxiliaryClass + " ontology:" + dataPropertyToFuzzify + " ?" + nameProperty)

    query = ("PREFIX ontology: " + ontologyPrefix + " SELECT " + targetDomainClass +
             targetRangeClass + targetauxiliaryClass + targetdataPropertyToFuzzify +
             " WHERE { " + whereClause + "}")

    # Send the query.
    rs, rs2, fields, fields2 = converter(sparql(query))
    resultset = []

    if rangeClass == "" and auxiliaryClass == "" and objectProperty == ["", ""]:
        # Store only the identifiers of the domain class.
        resultset = [{domainClass: row.get(domainClass).split("#", 1)[1],
                      nameProperty: row.get(nameProperty)} for row in rs2]
        # Save the result set to a CSV file.
        if resultset:
            if not os.path.exists('output/' + Main._time + '/csv_files/'):
                os.makedirs('output/' + Main._time + '/csv_files/')
            csvhandler = CSVHandler('output/' + Main._time + '/csv_files/' + domainClass + ".csv")
            csvhandler.writeDict(resultset, fields2)
    elif rangeClass != "" and auxiliaryClass != "" and objectProperty != ["", ""]:
        resultset = [{domainClass: row.get(domainClass).split("#", 1)[1],
                      auxiliaryClass: row.get(auxiliaryClass).split("#", 1)[1],
                      rangeClass: row.get(rangeClass).split("#", 1)[1],
                      nameProperty: row.get(nameProperty)} for row in rs]
        # Save the result set to a CSV file.
        if resultset:
            if not os.path.exists('output/' + Main._time + '/csv_files/'):
                os.makedirs('output/' + Main._time + '/csv_files/')
            csvhandler = CSVHandler('output/' + Main._time + '/csv_files/' +
                                    auxiliaryClass + domainClass + rangeClass + ".csv")
            csvhandler.writeDict(resultset, fields)

    if not resultset:
        logging.warning("\nSomething went wrong with the query:\n\n" + query)
        raise Exception("SPARQL query returned an empty result set")
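# Usage sketch (assumption: the endpoint, prefix and class/property names below
# are illustrative, not from the original project).
# Binary case - fuzzify a data property of a single class:
# executeQuery("http://localhost:3030/ds/sparql", "<http://example.org/onto#>",
#              "hasAge", "Person")
# Ternary case - domain and range classes linked through an auxiliary class by
# two object properties:
# executeQuery("http://localhost:3030/ds/sparql", "<http://example.org/onto#>",
#              "hasScore", "Student", rangeClass="Course",
#              auxiliaryClass="Enrollment",
#              objectProperty=["hasEnrollment", "refersToCourse"])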