def main():
    os.chdir(path)
    html = HTML(url="http://www.genenames.org/cgi-bin/hgnc_downloads.cgi")
    # Check html for attributes.
    attributes = html.find_between("</td> <td>", "</td>", '"', all=True)  # Retrieve all available attributes.
    print("Number of attributes: %s" % len(attributes))  # Check number of attributes.
    # Build the url:
    url_begin = "http://www.genenames.org/cgi-bin/hgnc_downloads.cgi?title=Core+Data"
    url_context = ";col=" + ";col=".join(attributes)
    # col=gd_hgnc_id;col=gd_app_sym;col=gd_app_name;col=gd_status;col=gd_prev_sym;col=gd_aliases;col=gd_pub_chrom_map;col=gd_pub_acc_ids;col=gd_pub_refseq_ids;
    url_end = ";status=Approved;status=Approved+Non-Human;status=Entry+Withdrawn;status_opt=3;=on;where=;order_by=gd_app_sym_sort;limit=;format=text;submit=submit;.cgifields=;.cgifields=status;.cgifields=chr"
    url = url_begin + url_context + url_end
    f = File(name="hgnc.txt", url=url, path=path)
    contents = f.parse(printing=False, header=True)
    genes.name = "HGNC"
    genes.key = "hgnc"
    genes.taxid = 9606
    genes.addData(contents)
    genes.save()
    genes.buildMappings()
def main(self, argv):
    f = File()
    f2 = File()
    f2.open()
    f.open()
    f.write()
    f.close()
    f2.close()
def del_comment(data):
    data = data.split('\n')
    book_id = int(data[1])
    starting_text = data[2]
    date = data[3]
    try:
        comments = json.loads(
            f.open('./books/comments/book_{}.db'.format(book_id), 'r').read())
    except Exception:
        f.open('./books/comments/book_{}.db'.format(book_id),
               'w').write(json.dumps({'all': 0}))
        comments = {'all': 0}
    for user in list(comments.keys()):
        if user == 'all':
            continue
        # Iterate over a copy so matching posts can be removed safely.
        for post in list(comments[user]['posts']):
            if post['text'].lower().startswith(
                    starting_text.lower()) and post['date'] == date:
                comments['all'] -= 1
                if len(comments[user]['posts']) > 1:
                    comments[user]['posts'].remove(post)
                else:
                    comments.pop(user)
                    break
    result = maker(ind=book_id, comments=comments)
    all_books = start()
    client.edit_page(path=all_books[book_id]['url'][18:],
                     title=all_books[book_id]['title'],
                     content=result,
                     author_name='Hops',
                     author_url='https://t.me/Hopsrobot')
    f.open('./books/comments/book_{}.db'.format(book_id),
           'w').write(json.dumps(comments))
def initFromObject(cls, propertyObject, propertyID=None):
    if 'addressCoordinate' in propertyObject:
        longitude = propertyObject['addressCoordinate']['lon']
        latitude = propertyObject['addressCoordinate']['lat']
    else:
        longitude = None
        latitude = None
    photos = []
    # Create a File array from the photos object inside the propertyObject.
    for photo in propertyObject['photos']:
        propertyImage = File(FILE_TYPE_Images, OBJECT_Property, propertyID,
                             None, 'Property ' + photo['imageType'], None)
        propertyImage.addImageDetails(photo['advertId'], photo['date'],
                                      photo['fullUrl'], None)
        photos.append(propertyImage)
    new_property = cls(
        propertyObject['id'],
        propertyObject.get("cadastreType"),
        VarIf(propertyObject['status'] == PROPERTY_MARKET_STATUS_ONMARKET,
              True, False),
        Address(propertyObject['address'], propertyObject['streetName'],
                propertyObject['suburb'], propertyObject['streetNumber'],
                propertyObject.get("zone"), propertyObject.get("lotNumber"),
                longitude, latitude, None),
        propertyObject.get("areaSize"),
        propertyObject.get('numBedrooms'),
        propertyObject.get('numBathrooms'),
        propertyObject.get('features'),
        json.dumps(propertyObject),
        propertyObject.get('history'),
        photos)
    return new_property
def add_book(data):
    data = data.split('\n')
    title = data[1]
    author = data[2]
    lang = data[3]
    tip = data[4]
    size = data[5]
    download = data[6]
    book_id = len(books)
    new_book = {
        'title': title,
        'author': author,
        'lang': lang,
        'type': tip,
        'size': size,
        'download': download
    }
    result = maker(book=new_book, ind=book_id)
    page = client.create_page(title=title,
                              content=result,
                              author_name='Hops',
                              author_url='https://t.me/Hopsrobot')
    new_book['url'] = page.url
    books.append(new_book)
    f.open('./books/all.db', 'w').write(json.dumps(books))
def get_comment(m):
    text = m.text
    uid = m.from_user.id
    cid = m.chat.id
    if text == '/cancel':
        bot.send_message(cid, get_string('book_comment_cancelled'))
        return
    name = m.from_user.first_name
    if m.from_user.last_name:
        name += " " + m.from_user.last_name
    name = resize(name)
    book_steps = json.loads(f.open('./books/steps.json', 'r').read())
    book_id = book_steps['{}'.format(uid)]
    try:
        books = json.loads(f.open('./books/all.json', 'r').read())
        try:
            books[int(book_id)]
        except Exception as e:
            bot.send_message(uid, get_string('error_in_comment'))
            log('ERROR: comment => {}'.format(e))
    except Exception:
        book.start()
    book.add_comment(book_id=book_id, uid=uid, text=text, name=name)
    bot.send_message(cid, get_string('book_comment_saved'))
def begin_test(m):
    text = m.text
    uid = m.from_user.id
    if text.startswith('/'):
        if text == '/cancel':
            bot.send_message(uid, get_string('test_cancelled'))
        else:
            bot.send_chat_action(uid, 'typing')
    else:
        qtns = tools.questions
        data = tools.check_name(text)
        if data.valid:
            info = {"name": data.name, "surname": data.surname}
            status = {"score": 0, "limit": 0, "date": None}
            f.open('./user/{}/info.json'.format(uid),
                   'w').write(json.dumps(info))
            f.open('./user/{}/status.json'.format(uid),
                   'w').write(json.dumps(status))
            k = tools.btn_maker(1)
            bot.send_message(uid,
                             text="<b>Question {}</b>\n{}".format(
                                 1, qtns['1']['question']),
                             reply_markup=k,
                             parse_mode='html')
        else:
            if text == '/cancel':
                bot.send_message(uid, get_string('test_cancelled'))
            else:
                a = bot.send_message(uid,
                                     get_string('test_invalid_credentials'))
                bot.register_next_step_handler(a, begin_test)
class Run:
    def __init__(self):
        self.file = File()
        self.file.file_menu()
        self.run = tk.Menu(menubar.toolbar, tearoff=False)
        self.run.add_command(label="Run", command=lambda: self.run_it())
        menubar.toolbar.add_cascade(label="Run", menu=self.run)
        self.binding_keys()

    def run_it(self):
        self.file.save_file()
        try:
            if self.file.f.name.endswith(".py"):
                subprocess.Popen(["python", self.file.f.name], shell=True)
            elif self.file.f.name.endswith(".js"):
                subprocess.Popen(["node", self.file.f.name], shell=True)
            elif self.file.f.name.endswith(".exs"):
                subprocess.Popen(["elixir", self.file.f.name], shell=True)
            else:
                toplevel = tk.Toplevel(w.root)
                toplevel.title("Warning")
                txt = tk.Text(toplevel)
                txt.insert("1.0", "File cannot run.")
                toplevel.geometry("215x50")
                toplevel.resizable(False, False)
                txt.config(state=tk.DISABLED)
                txt.pack()
        except AttributeError:
            return

    def binding_keys(self):
        w.root.bind_all("<F5>", lambda event: self.run_it())
def create_file():
    global makeChanges
    file_name = input("Enter File name: ")
    file = File(file_id_assigner(), file_name, 0, {})
    JSON_structure["files"].update(file.create_f())
    print("File Created Successfully!")
    makeChanges = 1
def editor(m):
    try:
        langs = {'24': '#py', '5': '#py2', '8': '#php'}
        uid = m.from_user.id
        cid = m.chat.id
        request_id = m.message_id
        data = json.loads(
            f.open('./codes/{}/{}/{}_code.json'.format(cid, uid, request_id),
                   'r').read())
        lang = data['lang']
        response_id = data['response_id']
        code = m.text.replace('{}\n'.format(langs[str(lang)]), '', 1)
        modified = modify_the_code(code)
        code = modified[0]
        k = types.InlineKeyboardMarkup()
        b = types.InlineKeyboardButton(text="Info",
                                       callback_data="info_{}/{}/{}".format(
                                           cid, uid, request_id))
        k.add(b)
        try:
            bot.send_chat_action(cid, 'typing')
            compiled = run(lang, code)
            if len(str(compiled.result)) <= 3000:
                if compiled.success:
                    bot.edit_message_text(chat_id=cid,
                                          message_id=response_id,
                                          text=get_string('output').format(
                                              html_converter(compiled.result)),
                                          parse_mode='html',
                                          reply_markup=k)
                else:
                    bot.edit_message_text(
                        chat_id=cid,
                        message_id=response_id,
                        text=get_string('output_error').format(
                            html_converter(compiled.errors)),
                        parse_mode='html',
                        reply_markup=k)
            else:
                bot.edit_message_text(chat_id=cid,
                                      message_id=response_id,
                                      text=get_string('output_too_large'),
                                      parse_mode='html',
                                      reply_markup=k)
            data = {
                "response_id": response_id,
                "stats": compiled.stats,
                "lang": lang
            }
            f.open('./codes/{}/{}/{}_code.json'.format(cid, uid, request_id),
                   'w').write(json.dumps(data))
        except Exception as e:
            log(e)
    except Exception as e:
        log(e)
def msg_id(mid=False):
    try:
        data = f.open('./mid.txt', 'r').read()
    except Exception:
        data = '0'
        f.open('./mid.txt', 'w').write(data)
    if mid:
        f.open('./mid.txt', 'w').write(str(mid))
    else:
        return int(data)
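# A minimal usage sketch for msg_id() above, assuming the same module-level
# File wrapper `f`; 12345 is a hypothetical message id, not from the source.
last_id = msg_id()  # read the persisted id (0 on first run)
msg_id(mid=12345)   # overwrite the persisted id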
def _create_files(self):
    """Generate a new File object for every file the metainfo file
    told us about.
    """
    try:
        yield File(self.piece_length,
                   self.info_dict['info']['name'],
                   self.info_dict['info']['length'])
    except KeyError:
        for f in self.info_dict['info']['files']:
            yield File(self.piece_length, f['path'], f['length'])
def reduce_limit(self):
    atm = (datetime.now() + timedelta(hours=3)).strftime('%Y-%m-%d')
    if self.date != atm:
        self.limit = self.score - 3 if self.score else None
        self.date = atm
    self.limit -= 1
    f.open('./user/{}/status.db'.format(self.uid), 'w').write(
        json.dumps({
            "score": self.score,
            "limit": self.limit,
            "date": self.date
        }))
def right_click_options(self):
    self.rightclickmenu.add_command(label="Copy",
                                    command=lambda: Edit.copy(self))
    self.rightclickmenu.add_command(label="Paste",
                                    command=lambda: Edit.paste(self))
    self.rightclickmenu.add_command(label="Cut",
                                    command=lambda: Edit.cut(self))
    self.rightclickmenu.add_command(label="Select all",
                                    command=lambda: Edit.select_all(self))
    self.rightclickmenu.add_command(label="New File",
                                    command=lambda: File.new_file(self))
    self.rightclickmenu.add_command(label="Close File",
                                    command=lambda: File.close_file(self))
def start():
    global books
    try:
        books = json.loads(f.open('./books/all.db', 'r').read())
    except Exception:
        bot.send_message(dev, str(books))
    result = maker(book=books[0], ind=0)
    page = client.create_page(title=books[0]['title'],
                              content=result,
                              author_name='Hops',
                              author_url='https://t.me/Hopsrobot')
    books[0]['url'] = page.url
    f.open('./books/all.db', 'w').write(json.dumps(books))
    return books
def all_pages(uid):
    try:
        data = eval(f.open('./comments/{}/all.db'.format(uid), 'r').read())
        log(1, data)
    except Exception:
        data = False
    if data:
        pages = data['all']
        log(3, pages)
        url = data['url']
        result = []
        stat = {
            'tag': 'b',
            'children': ['Comments for {} codes'.format(len(pages))]
        }
        result.append(stat)
        result.append({'tag': 'br'})
        if len(pages) > 0:
            for page in pages:
                result.append({
                    'tag': 'a',
                    'attrs': {
                        'href': page
                    },
                    'children': ['• {}\n'.format(page[18:])]
                })
        else:
            return False
        log(4, result)
        if url:
            log(5, url)
            try:
                page = client.edit_page(path=data['url'][18:],
                                        title='All code and comments',
                                        content=result[:-1],
                                        author_name='Hops',
                                        author_url='https://t.me/Hopsrobot')
            except Exception as e:
                log(7, e)
        else:
            log(6, 'started to create')
            page = client.create_page(title='All code and comments',
                                      content=result,
                                      author_name='Hops',
                                      author_url='https://t.me/Hopsrobot')
            data['url'] = page.url
            log(2, data)
        f.open('./comments/{}/all.db'.format(uid), 'w').write(str(data))
        return page.url
def create_file(file_name):
    global makeChanges
    flag = False
    for fileIndexes in list(JSON_structure["files"]):
        if JSON_structure["files"][fileIndexes]["name"] == file_name:
            flag = True
            break
    if flag:
        display_msg("File with the same name already Exists!")
    else:
        file = File(file_id_assigner(), file_name, 0, {})
        JSON_structure["files"].update(file.create_f())
        JSON_structure["meta_data"]["files"] += 1
        display_msg("File Created Successfully!")
        makeChanges = 1
def _create_files(self):
    """Generate a new File object for every file the metainfo file
    told us about.
    """
    try:
        yield File(self.piece_length,
                   self.info_dict['info']['name'],
                   self.info_dict['info']['length'])
        self.logger.info('Appended file {} of length {}'.format(
            self.info_dict['info']['name'],
            self.info_dict['info']['length']))
    except KeyError:
        for f in self.info_dict['info']['files']:
            self.logger.info('Appending file {} of length {}'.format(
                f['path'][-1], f['length']))
            yield File(self.piece_length, f['path'], f['length'])
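# A hedged usage sketch for _create_files() above, assuming the owning class
# follows the BitTorrent metainfo convention (info_dict parsed from a
# .torrent file); `torrent` is a hypothetical instance, not from the source.
for file_obj in torrent._create_files():
    print(file_obj)  # one File per entry in info_dict['info']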
def __init__(self, uid):
    self.uid = uid
    self.expired = False
    try:
        data = json.loads(
            f.open('./user/{}/status.db'.format(uid), 'r').read())
        self.cert = data['score'] > 5
    except Exception:
        self.cert = False
    if self.cert:
        self.score = data['score']
        self.date = data['date']
        self.limit = data['limit']
    else:
        self.score = None
        self.date = None
        self.limit = None
    if self.limit is not None and self.limit <= 0:
        if self.date == datetime.now().strftime('%Y-%m-%d'):
            self.expired = True
        else:
            self.limit = self.score - 3 if self.score else None
            self.date = datetime.now().strftime('%Y-%m-%d')
            self.expired = False
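# A hedged construction sketch for the status object above; the class name
# UserStatus and the user id 111111 are hypothetical, since only __init__
# appears in the source.
status = UserStatus(111111)
if status.expired:
    pass  # certified user has used up today's limit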
def get_user_data(forwarded_message):
    user = forwarded_message.forward_from.id
    a = bot.get_chat_member(python_uz, user)
    if a.status == 'left':
        return False
    data = tools.get_info(user)
    try:
        ban_info = json.loads(
            f.open('./restricted/user_{}.json'.format(user),
                   'r').read())['times']
    except Exception:
        ban_info = 0
    result = get_string('user_info')
    rest = is_restricted(python_uz, user)
    rest_date = get_string('user_info_date').format(
        get_restrict_time(python_uz, user)) if rest else ''
    rest = '{} {}'.format(get_string('yes'),
                          rest_date) if rest else get_string('no')
    certified = get_string('yes') if data.cert else get_string('no')
    limit = data.limit if data.limit else '0'
    score = data.score if data.score else '0'
    result = result.format(
        rest, ban_info, certified,
        get_string('user_info_scores').format(score, limit)
        if data.cert else '')
    return result
def decode(socket):
    # Fixed header: byte 0 = path length, byte 1 = is-directory flag,
    # bytes 2-5 = last-modified timestamp (unsigned int).
    fixed = bytearray(6)
    socket.recv_into(fixed, flags=MSG_WAITALL)
    file_path_length = byte_utils.bytes_to_char(fixed, 0)
    file_is_directory = byte_utils.bytes_to_boolean(fixed, 1)
    file_last_modified = byte_utils.bytes_to_unsigned_int(fixed, 2)
    file_size = None
    if not file_is_directory:
        fixed = bytearray(4)
        socket.recv_into(fixed, flags=MSG_WAITALL)
        file_size = byte_utils.bytes_to_unsigned_int(fixed, 0)
    strings = bytearray(file_path_length)
    socket.recv_into(strings, flags=MSG_WAITALL)
    file_path = byte_utils.bytes_to_string(strings, file_path_length, 0)
    if utils.DEBUG_LEVEL >= 3:
        utils.log_message("DEBUG", "Decoded send file packet: ")
        utils.log_message("DEBUG",
                          "File path length: " + str(file_path_length))
        utils.log_message("DEBUG", "Is directory: " + str(file_is_directory))
        utils.log_message(
            "DEBUG", "Last modified: " +
            str(utils.format_timestamp(file_last_modified)))
        utils.log_message("DEBUG", "File size: " + str(file_size))
        utils.log_message("DEBUG", "File Path: " + str(file_path))
    # Write the file's contents through File().write() in chunks
    # if the packet does not describe a directory.
    if not file_is_directory:
        chunk_size = min(SendFilePacket.CHUNK_SIZE, file_size)
        remaining = file_size
        file_wrapper = File()
        received_bytes_acc = 0
        while remaining > 0:
            if utils.DEBUG_LEVEL >= 3:
                utils.log_message("DEBUG", "Chunk size: " + str(chunk_size))
            chunk = bytearray(chunk_size)
            received_bytes = socket.recv_into(chunk, flags=MSG_WAITALL)
            received_bytes_acc += received_bytes
            file_wrapper.write(chunk)
            remaining -= received_bytes
            chunk_size = min(chunk_size, remaining)
        file_wrapper.close()
        if utils.DEBUG_LEVEL >= 1:
            utils.log_message(
                "DEBUG", "File size is " + str(file_size) +
                " and received bytes are " + str(received_bytes_acc))
            utils.log_message(
                "DEBUG",
                "File is located in " + str(file_wrapper.get_path()))
    else:
        file_wrapper = Directory()
    packet = SendFilePacket(
        FileInfo(file_path, file_is_directory, file_last_modified, file_size,
                 file_wrapper))
    return packet
def template(self):
    """
    File's template. Note that this template is not associated with
    any collection.

    @rtype: L{File}
    """
    return self._get_field("template", lambda x: File(None, x))
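# A hedged usage sketch for the template getter above, assuming it is exposed
# as a property on a File-like object; `some_file` is a hypothetical instance.
tpl = some_file.template  # a File built from the raw "template" field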
def add_comment(book_id, uid, text, name):
    all_books = json.loads(f.open('./books/all.db', 'r').read())
    atm = str(time.time()).split('.')[0]
    date = (datetime.now() + timedelta(hours=5)).strftime('%Y.%m.%d')
    user = '******'.format(atm, uid)
    try:
        comments = json.loads(
            f.open('./books/comments/book_{}.db'.format(book_id), 'r').read())
    except Exception:
        f.open('./books/comments/book_{}.db'.format(book_id),
               'w').write(json.dumps({'all': 0}))
        comments = {'all': 0}
    if comments['all'] > 0:
        recorded = False
        for key in list(comments.keys()):
            if str(uid) in key:
                comments[key]['posts'].append({'text': text, 'date': date})
                recorded = True
        if not recorded:
            comments[user] = {
                'name': name,
                'posts': [{
                    'text': text,
                    'date': date
                }]
            }
    else:
        comments[user] = {
            'name': name,
            'posts': [{
                'text': text,
                'date': date
            }]
        }
    comments['all'] += 1
    f.open('./books/comments/book_{}.db'.format(book_id),
           'w').write(json.dumps(comments))
    result = maker(all_books[book_id], ind=book_id, comments=comments)
    if all_books[book_id]['url']:
        client.edit_page(path=all_books[book_id]['url'][18:],
                         title=all_books[book_id]['title'],
                         content=result,
                         author_name='Hops',
                         author_url='https://t.me/Hopsrobot')
    else:
        page = client.create_page(title=all_books[book_id]['title'],
                                  content=result,
                                  author_name='Hops',
                                  author_url='https://t.me/Hopsrobot')
        all_books[book_id]['url'] = page.url
        f.open('./books/all.db', 'w').write(json.dumps(all_books))
def files_():
    logdir = os.path.join(os.getcwd(), "logs")
    f = File("av_log.bin", path=logdir)
    f.put({
        "interested": [
            "coet", "lluis_tgn", "Mafoso-Espieta", "krls-ca", "Vriullop",
            "Lohen", "Paucabot", "SMP_ca"
        ],
        "authorized": ["pasqual", "krls-ca", "Lohen", "SMP", "Vriullop"],
        "usersQueue": [],
        "channels": {}
    })
    f.close()
    data = f.get()
    print data
    f.put(data)
def notify(message, mark=False):
    user = message.reply_to_message.from_user.id
    code_id = message.reply_to_message.message_id
    code = message.reply_to_message.text
    try:
        info = json.loads(
            f.open(
                './comments/{}/codes/{}/info_{}.db'.format(
                    user, code_id, code_id), 'r').read())
    except Exception:
        info = {
            'likes': {
                'all': 0,
                'clicked': []
            },
            'people': 0,
            'comments': 0,
            'code': code,
            'url': None,
            'notified': False
        }
        f.open(
            './comments/{}/codes/{}/info_{}.db'.format(
                user, code_id, code_id), 'w').write(json.dumps(info))
        info = json.loads(
            f.open(
                './comments/{}/codes/{}/info_{}.db'.format(
                    user, code_id, code_id), 'r').read())
    if mark:
        info['notified'] = True
        f.open(
            './comments/{}/codes/{}/info_{}.db'.format(
                user, code_id, code_id), 'w').write(json.dumps(info))
    return info['notified']
def _processfile(self, path_or_file, *args, **kwds):
    self.info("START")
    self._file = path_or_file
    if isinstance(path_or_file, (str, basestring)):
        self._file = File.guess(path_or_file, **getkwds(kwds, pd.read_csv))
    self.filename = self._file.basename()
    if kwds.get('testbadlines') and isinstance(self._file, Csv):
        self.info("Checking rows in '%s' for embedded delimiters." %
                  self.filename)
        self.badlines = Csv.locate_badlines(self._file.path,
                                            delimiter=self._file.delimiter)
        self.badlinescount = len(self.badlines)
        if self.badlinescount >= 1:
            self.warning("%s bad lines have been found in '%s'." %
                         (self.badlinescount, self.filename))
    _ = newfolder(kwds.get('outdir', 'processed'))
    outfile = kwds.get('outfile')
    if not outfile:
        outfile = self._file.get_outfile(self.filename, dirname=_)
    createcsv(outfile, self.fields)
    for df in self._file.dfreader:
        try:
            df = self.process(df, *args, **kwds)
            self.countsout += self.countvalues(df)
            self.normalized += len(df)
        except IncompleteExcelFile as e:
            self.incomplete_excel += 1
        File.append(outfile, df.to_csvstring(header=False))
        self.info("%s rows written to %s" % (len(df), outfile))
        gc.disable()
        gc.collect()
    self.emptysheets = getattr(self._file, 'emptysheets', None)
    self.info("END")
    print
    return self.evaluate()
def StoreListings(listingObject):
    # Prepare the secondary dictionaries.
    listing_sales = listingObject.get('saleDetails')
    listing_inspections = listingObject.get('inspectionDetails')
    listing_prices = listingObject.get('priceDetails')
    try:
        # Insert the raw listing first: get the listing_status.
        listing_status = QueryWithSingleValue('listing_status_lkp',
                                              'description',
                                              listingObject['status'],
                                              'listing_status_id', True)
        # Build the JSON from listingObject.
        raw_listing_JSON = json.dumps(listingObject)
        # Get the value that will be used with listing_insert_statement.
        listings_id = returnNextSerialID('listings', 'listings_id')
        if listing_inspections is not None:
            isByAppointmentOnly = listing_inspections.get('isByAppointmentOnly')
        else:
            isByAppointmentOnly = None
        cur.execute(
            """
            INSERT INTO listings(
                domain_listings_id, headline, price_displayed, display_price,
                price, price_from, price_to, seo_url, listing_objective,
                listing_status, land_area, building_area, energy_efficiency,
                is_new_development, date_updated, date_created, entered_when,
                entered_by, raw_listing, inspection_appointment_only )
            VALUES( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
                    to_timestamp( %s, 'YYYY-MM-DD HH24:MI:SS' ),
                    to_timestamp( %s, 'YYYY-MM-DD HH24:MI:SS' ),
                    current_timestamp, 1, %s, %s )
            """,
            (listingObject.get('id'), listingObject.get('headline'),
             listing_prices.get('canDisplayPrice'),
             listing_prices.get('displayPrice'), listing_prices.get('price'),
             listing_prices.get('priceFrom'), listing_prices.get('priceTo'),
             listingObject.get('seoUrl'), listingObject.get('objective'),
             listing_status, listingObject.get('landAreaSqm'),
             listingObject.get('buildingAreaSqm'),
             listingObject.get('energyEfficiencyRating'),
             listingObject.get('isNewDevelopment'),
             convertJSONDate(listingObject['dateUpdated']),
             convertJSONDate(listingObject['dateListed']),
             cleanForSQL(raw_listing_JSON), isByAppointmentOnly))
        # Insert the features, but only if the listing has a features object.
        if 'features' in listingObject:
            link_object_type = OBJECT_Listing
            for feature in listingObject['features']:
                storeFeatures(listings_id, link_object_type, feature)
        # Store any media attached to the listing.
        if 'media' in listingObject:
            for media in listingObject['media']:
                mediaObject = File(FILE_TYPE_Images, OBJECT_Listing,
                                   str(listings_id), None,
                                   "listing_" + media['type'])
                mediaObject.addImageDetails(None, None, media['url'])
                mediaObject.storeFile(False)
        # Store the sales information, if the listing has any attached.
        if listing_sales is not None:
            storeListingSalesDetails(listing_sales, listings_id)
        # Store the inspection information, if any.
        if listing_inspections is not None:
            storeInspectionDetails(listings_id, listing_inspections)
        return listings_id
    except (Exception, psycopg2.DatabaseError) as error:
        print("Error in INSERTING New Listing with Domain Listing ID " +
              str(listingObject.get('id')) + "\n" + "Error: " + str(error))
        return None
def main(): """Testing suits here:""" #print Map.dbs #print Map.it('zbtb16', 9606) #print m("ZBTB16") ## res = m("eat-1", 6239) ## print res ## return res print m('chico', 7227) f = File(name='MappedIDs.txt') data = f.parse(header=True, printing=False) for i in data: if not i: continue # correct this. #print i id = i['Input Id'] print id, m([id], 6239)[0], try: print mo([id], 6239)#[0] except: print #print data[1] print m("cha-1", 6239) print m("unc-17", 6239)
def convertToClasses(self):
    """
    This function converts all instance variables of the Agency object
    that are meant to be classes (i.e. agency_banner) into their
    respective class. (These include the ContactDetails and File classes.)
    """
    # Check whether the instance has its agency_id set or has already had
    # its variables converted into classes.
    try:
        if self.agency_id is None or self.classesConverted:
            raise Exception(
                "Cannot convert classes due to them already being converted "
                "or the Agency instance having a null Agency ID")
        else:
            agency_banner = File(FILE_TYPE_Images, OBJECT_Agency,
                                 self.agency_id, None, "agency_banner")
            agency_banner.addImageDetails(None, None, self.agency_banner)
            self.agency_banner = agency_banner
            self.agency_website = ContactDetails('agency_website',
                                                 OBJECT_Agency,
                                                 self.agency_id,
                                                 self.agency_website)
            agency_logo_standard = File(FILE_TYPE_Images, OBJECT_Agency,
                                        self.agency_id, None,
                                        "agency_logo_standard")
            agency_logo_standard.addImageDetails(None, None,
                                                 self.agency_logo_standard)
            self.agency_logo_standard = agency_logo_standard
            self.contact_details_converted = []
            for contactType, detailsType in self.contact_details.items():
                # For now, only store dictionaries. We can handle exceptions
                # by logging them to a debug file.
                if isinstance(detailsType, dict):
                    for contact, details in detailsType.items():
                        if details != '':
                            self.contact_details_converted.append(
                                ContactDetails(
                                    'agency_' + contactType + '_' + contact,
                                    OBJECT_Agency, self.agency_id, details))
                        #else:
                        #    self.contact_details_converted.append(ContactDetails('agency_' + contactType + '_' + contact, self.agency_id, OBJECT_Agency, details))
            self.classesConverted = True
    except Exception as error:
        print(error)
def main(): """Testing suits here:""" #print Map.dbs #print Map.it('zbtb16', 9606) #print m("ZBTB16") ## res = m("eat-1", 6239) ## print res ## return res print m('chico', 7227) f = File(name='MappedIDs.txt') data = f.parse(header=True, printing=False) for i in data: if not i: continue # correct this. #print i id = i['Input Id'] print id, m([id], 6239)[0], try: print mo([id], 6239) #[0] except: print #print data[1] print m("cha-1", 6239) print m("unc-17", 6239)
def main(self, argv):
    f = File()
    f.open()
    f.write()
try:
    if commands:
        for command in commands:
            output, error = c.run_command(command, host)
            if error != []:
                raise Exception(error[0])
            else:
                print("\n*** Output of '{}' on '{}'".format(command, host))
                for o in output:
                    print(o.strip('\n'))
    elif install_pkgs:
        p = Packages()
        p.install_package(host, install_pkgs)
        s = Service()
        s.manageService(pkg_service_deps, 'restart', service_pkg_deps, host)
    elif uninstall_pkgs:
        p = Packages()
        p.uninstall_package(host, uninstall_pkgs)
    elif upgrade_pkgs:
        p = Packages()
        p.upgrade_package(host, upgrade_pkgs)
    elif file_data and file_metadata:
        f = File()
        f.create_file(host, file_data, file_metadata)
    elif not file_data and file_metadata:
        f = File()
        f.delete_file(host, file_metadata)
    elif services:
        s = Service()
        s.manageService(services, service_action, service_pkg_deps, host)
except Exception as e:
    print(e)
def __init__(self, path, project):
    File.__init__(self, path, project)
def main(self, argv):
    f = File()
    f.open()
    f.write()
    f.close()
    print "Done."
def main(interactions=False, download=True, parse=True, withdrawn=True,
         cleanup=True):
    """Performs the download of interaction and annotation files from MGI.
    Builds a gene annotation file and mapping tables.
    TODO:
    - Inspect and eventually use the interaction file, else discard it from
      this module.
    - Also check whether other information from MGI is worth integrating,
      such as homology or phenotypes."""
    os.chdir(path)
    genes.name = "MGI"
    genes.key = "mgi"
    folder = Folder(path)
    if interactions:
        ftp = FTP(url="ftp://ftp.informatics.jax.org/pub/protein-interaction-data/",
                  path=path)
        ftp.download(path)
    if download:
        url = "ftp://ftp.informatics.jax.org/pub/reports/"
        files = [
            "MRK_List1.rpt",
            "MRK_List2.rpt",
            "MGI_Coordinate.rpt",
            "MRK_Sequence.rpt",
            "MRK_SwissProt_TrEMBL.rpt",
            "MRK_VEGA.rpt",
            "MRK_ENSEMBL.rpt",
            "MGI_EntrezGene.rpt",
        ]  # MPheno_OBO.ontology, VOC_MammalianPhenotype.rpt, MGI_PhenotypicAllele.rpt, HMD_HumanPhenotype.rpt
        for f in files:
            f = File(url=url + f)  # automatically does f.download()
            res = f.parse(header=True, printing=False)
            folder.downloads.append(f.name)
    if parse:
        folder.update()
        if withdrawn:
            filename = "MRK_List1.rpt"
        else:
            filename = "MRK_List2.rpt"
        data = folder[filename].parse(header=True, printing=False)
        genes.addData(data, key="mgi", taxid=10090)
        data = folder["MGI_Coordinate.rpt"].parse(header=True, printing=False)
        for i in data:
            i = change_keys(i)
            i["taxid"] = 10090
            genes.add(i)
        data = folder["MRK_Sequence.rpt"].parse(header=True, printing=False)
        genes.addData(data, key="mgi", taxid=10090)
        header = ("mgi symbol status name cm_position chromosome type "
                  "secondary_accession_ids id synonyms feature_types start "
                  "stop strand biotypes".split())
        data = folder["MGI_EntrezGene.rpt"].parse(header=header,
                                                  printing=False)
        genes.addData(data, key="mgi", taxid=10090)
        print len(genes)
    if cleanup:
        if interactions:
            ftp.remove(confirm=False)
        for f in folder.downloads:
            folder.remove(f)
    genes.keep("category", "Gene")
    genes.remove("name", "withdrawn")
    genes.save()
    genes.buildMappings()