def render(cls, media_type=None, id=None, params=None, cursor=None, path=None):
    v = None
    if id == 'new' and not params:
        m = None
    else:
        m = cls._instance(media_type=media_type, id=id, params=params, cursor=cursor)
    if type(m) is Collection:
        v = views[media_type + 's']
    elif media_type:
        v = views[media_type]
    if not v and path == '':
        m = App()
        v = views['app']
    if v:
        doc = render.imprint(m, v)
        return doc
    else:
        try:
            v = files.read(path)
        except Exception:
            try:
                v = files.read('front/' + path)
            except Exception:
                return views.view404
            finally:
                if not v:
                    v = views.view404
        return v
def main():
    files.read('tweets', process_file)
    files.read('urls', process_file2)
    triples = 0
    with open('./data/triples_urls.txt', 'r') as f:
        for line in f:
            triples += 1
    # url_tw_count and url_files_count are module-level counters updated by the callbacks
    print('number of URLs in tweets:', url_tw_count)
    print('number of URLs in URL files:', url_files_count)
    print('number of URLs in triples:', triples)
def main():
    PATH = '/Users/mquezada/Tesis/stats/'
    with open(PATH + 'urls.txt', 'r') as urls_file:
        for line in urls_file:
            pair = line.split()
            short = pair[0]
            if len(pair) > 1:
                expanded = pair[1]
            else:
                expanded = pair[0]
            urls[short] = expanded
    files.read('urls', process_file)
def setup():
    """Read book info from file, if file exists."""
    global counter

    # Read the BOOKS_FILE_NAME file, passing make_book_list() as a callback
    file.read(BOOKS_FILE_NAME, cb=make_book_list)
    try:
        # Read COUNTER_FILE_NAME and load it into memory
        counter = file.read(COUNTER_FILE_NAME)
        if not counter:
            counter = 0
    except IOError:
        counter = len(book_list)
def select_view_api(userid, journalid, anyway=False, increment_views=False):
    rating = d.get_rating(userid)
    journal = _select_journal_and_check(
        userid, journalid,
        rating=rating, ignore=anyway, anyway=anyway, increment_views=increment_views)
    content = files.read(files.make_resource(userid, journalid, 'journal/submit'))

    return {
        'journalid': journalid,
        'title': journal['title'],
        'owner': journal['username'],
        'owner_login': d.get_sysname(journal['username']),
        'owner_media': api.tidy_all_media(media.get_user_media(journal['userid'])),
        'content': text.markdown(content),
        'tags': searchtag.select(journalid=journalid),
        'link': d.absolutify_url('/journal/%d/%s' % (journalid, text.slug_for(journal['title']))),
        'type': 'journal',
        'rating': ratings.CODE_TO_NAME[journal['rating']],
        'views': journal['page_views'],
        'favorites': favorite.count(journalid, 'journal'),
        'comments': comment.count(journalid, 'journal'),
        'favorited': favorite.check(userid, journalid=journalid),
        'friends_only': 'f' in journal['settings'],
        'posted_at': d.iso8601(journal['unixtime']),
    }
def select_latest(userid, rating, otherid=None, config=None):
    if config is None:
        config = d.get_config(userid)

    statement = ["SELECT jo.journalid, jo.title, jo.unixtime FROM journal jo WHERE"]
    if userid:
        if d.is_sfw_mode():
            statement.append(" (jo.rating <= %i)" % (rating,))
        else:
            statement.append(" (jo.userid = %i OR jo.rating <= %i)" % (userid, rating))
        if not otherid:
            statement.append(m.MACRO_IGNOREUSER % (userid, "jo"))
            statement.append(m.MACRO_BLOCKTAG_JOURNAL % (userid, userid))
    else:
        statement.append(" jo.rating <= %i" % (rating,))
    if otherid:
        statement.append(
            " AND jo.userid = %i AND jo.settings !~ '[%sh]'"
            % (otherid, "" if frienduser.check(userid, otherid) else "f"))
    # Leading space needed so the clause doesn't fuse with the preceding fragment
    statement.append(" ORDER BY jo.journalid DESC LIMIT 1")

    query = d.execute("".join(statement), options="single")
    if query:
        return {
            "journalid": query[0],
            "title": query[1],
            "unixtime": query[2],
            "content": files.read("%s%s%i.txt" % (m.MACRO_SYS_JOURNAL_PATH,
                                                  d.get_hash_path(query[0]), query[0])),
            "comments": d.execute(
                "SELECT COUNT(*) FROM journalcomment WHERE targetid = %i AND settings !~ 'h'",
                [query[0]], ["element"]),
        }
def test_read(self):
    f = files.open.return_value
    self.assertEqual(f.read.return_value, 'content')  # read() always returns 'content'
    self.assertEqual(files.read('pathname'), 'content')
    files.open.assert_called_once_with('pathname', 'rb')
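# A minimal sketch of the fixture test_read() above appears to assume:
# files.open has been replaced with a mock whose file handle returns 'content'.
# The patch target and setUp style here are assumptions; the actual fixture is
# not part of this snippet.
import unittest
from unittest import mock

import files


class FilesReadTest(unittest.TestCase):
    def setUp(self):
        # create=True lets us shadow the builtin open used inside the files module
        patcher = mock.patch.object(files, 'open', create=True)
        self.addCleanup(patcher.stop)
        self.mock_open = patcher.start()
        self.mock_open.return_value.read.return_value = 'content'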
async def on_ready():
    t = time.localtime()
    current_time = time.strftime("[%H:%M]", t)
    print(f"{current_time}:login successful as {bot.user}")
    servers = list_servers()
    while True:
        # List the known servers and let the operator pick one
        place = 1
        for i in servers:
            print(f"{place}:{await bot.fetch_guild(i)}")
            place += 1
        server_select = input(">>>")
        try:
            data = files.read(f"data/{servers[int(server_select) - 1]}.pkl")
        except (IndexError, ValueError):
            print("You have to select a valid server")
            continue
        while True:
            # List the entries in the guild file and let the operator pick one
            while True:
                action_counter = 1
                for i in data:
                    if type(i) == str:
                        print(f"{action_counter}:{i} = {data[i]}")
                    else:
                        print(f"{action_counter}:{await bot.fetch_user(i)} = {data[i]}")
                    action_counter += 1
                action_select = input(">>>")
                try:
                    int(action_select)
                    break
                except ValueError:
                    print("Not a valid number")
            action_counter = 1
            for i in data:
                if action_counter == int(action_select):
                    action = i
                    break
                action_counter += 1
            try:
                confirmation = input(f"Do you want to edit {await bot.fetch_user(action)}? \n>>>")
            except discord.errors.HTTPException:
                confirmation = input(f"Do you want to edit {action}? \n>>>")
            if confirmation.lower() != "y":
                continue
            new_value = input("Set new value \n>>>")
            try:
                data[action] = int(new_value)
            except ValueError:
                data[action] = new_value
            files.write(f"data/{servers[int(server_select) - 1]}.pkl", data)
def update_diag(job):
    """
    Filters out WallTime from the diag file if present and replaces it
    with output from the batch system. It also adds StartTime and EndTime
    for accounting.

    :param job: job object
    :type job: :py:obj:`object`
    """
    content = read(job.diag_file)
    diag_dict = {}
    for line in content:
        line = line.strip(' \n#')
        if not line:
            continue
        key, value = line.split('=', 1)
        if key[:9] == 'frontend_':
            continue
        if key not in diag_dict:
            diag_dict[key] = []
        diag_dict[key].append(value)

    # Do not save the 'frontend_*' and 'ExecutionUnits' keys,
    # they are set on the front-end. Not to be overwritten.
    diag_dict.pop('ExecutionUnits', None)

    keys = ['nodename', 'WallTime', 'UserTime', 'KernelTime',
            'AverageTotalMemory', 'AverageResidentMemory', 'exitcode',
            'LRMSStartTime', 'LRMSEndTime', 'LRMSExitcode', 'LRMSMessage']
    for key in keys:
        if key in diag_dict:
            diag_dict[key] = diag_dict[key][-1:]

    if hasattr(job, 'Processors'):
        diag_dict['Processors'] = [job.Processors]
    if hasattr(job, 'LRMSStartTime'):
        diag_dict['LRMSStartTime'] = [job.LRMSStartTime.str(arc.common.MDSTime)]
    if hasattr(job, 'LRMSEndTime'):
        diag_dict['LRMSEndTime'] = [job.LRMSEndTime.str(arc.common.MDSTime)]
    if hasattr(job, 'WallTime'):
        diag_dict['WallTime'] = ['%ds' % (job.WallTime.GetPeriod())]
    if 'exitcode' not in diag_dict and hasattr(job, 'exitcode'):
        diag_dict['exitcode'] = [job.exitcode]

    buf = ''
    for k, vs in diag_dict.items():
        buf += '\n'.join('%s=%s' % (k, v) for v in vs) + '\n'

    if write(job.diag_file, buf, 0o644):
        # Set job user as owner
        os.chown(job.diag_file, job.uid, job.gid)
def select_view(userid, rating, journalid, ignore=True, anyway=None):
    journal = d.engine.execute("""
        SELECT jo.userid, pr.username, jo.unixtime, jo.title, jo.rating, jo.settings,
               jo.page_views, pr.config
        FROM journal jo
            JOIN profile pr ON jo.userid = pr.userid
        WHERE jo.journalid = %(id)s
    """, id=journalid).fetchone()

    if journal and userid in staff.MODS and anyway == "true":
        pass
    elif not journal or "h" in journal.settings:
        raise WeasylError("journalRecordMissing")
    elif journal.rating > rating and ((userid != journal[0] and userid not in staff.MODS) or d.is_sfw_mode()):
        raise WeasylError("RatingExceeded")
    elif "f" in journal.settings and not frienduser.check(userid, journal.userid):
        raise WeasylError("FriendsOnly")
    elif ignore and ignoreuser.check(userid, journal.userid):
        raise WeasylError("UserIgnored")
    elif ignore and blocktag.check(userid, journalid=journalid):
        raise WeasylError("TagBlocked")

    page_views = journal.page_views
    if d.common_view_content(userid, journalid, "journal"):
        page_views += 1

    return {
        "journalid": journalid,
        "userid": journal.userid,
        "username": journal.username,
        "user_media": media.get_user_media(journal.userid),
        "mine": userid == journal.userid,
        "unixtime": journal.unixtime,
        "title": journal.title,
        "content": files.read(files.make_resource(userid, journalid, "journal/submit")),
        "rating": journal.rating,
        "settings": journal.settings,
        "page_views": page_views,
        "reported": report.check(journalid=journalid),
        "favorited": favorite.check(userid, journalid=journalid),
        "friends_only": "f" in journal.settings,
        "hidden_submission": "h" in journal.settings,
        # todo
        "fave_count": d.execute(
            "SELECT COUNT(*) FROM favorite WHERE (targetid, type) = (%i, 'j')",
            [journalid], ["element"]),
        "tags": searchtag.select(journalid=journalid),
        "comments": comment.select(userid, journalid=journalid),
    }
def read_local_file(job):
    """
    Read the local file and set ``job.localid`` and ``job.sessiondir`` attributes.

    :param job: job object
    :type job: :py:obj:`object`
    :return: ``True`` if successful, else ``False``
    :rtype: :py:obj:`bool`
    """
    try:
        content = dict(item.split('=', 1) for item in read(job.local_file) if item)
        job.localid = content['localid'].strip()
        job.sessiondir = content['sessiondir'].strip()
        return True
    except Exception:
        error('Failed to get local ID or sessiondir from local file (%s)'
              % job.globalid, 'common.scan')
        return False
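# A minimal, self-contained demonstration of the key=value parsing used in
# read_local_file() above. The sample values are invented for illustration,
# not taken from a real job's local file.
sample_lines = ['localid=12345', 'sessiondir=/var/spool/arc/session/abc', '']
parsed = dict(item.split('=', 1) for item in sample_lines if item)
assert parsed['localid'].strip() == '12345'
assert parsed['sessiondir'].strip() == '/var/spool/arc/session/abc'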
def write_comments(job):
    """
    Write content of comment file to errors file.

    :param job: job object
    :type job: :py:obj:`object`
    """
    comments = read(job.comment_file)
    if comments:
        buf = ('------- '
               'Contents of output stream forwarded by the LRMS '
               '---------\n')
        buf += ''.join(comments)
        buf += ('------------------------- '
                'End of output '
                '-------------------------')
        write(job.errors_file, buf, 0o644, True)
def set_exit_code_from_diag(job):
    """
    Retrieve exit code from the diag file and set the ``job.exitcode`` attribute.

    :param job: job object
    :type job: :py:obj:`object`
    :return: ``True`` if exit code was found, else ``False``
    :rtype: :py:obj:`bool`
    """
    # In case of a non-NFS setup it may take some time until the
    # diagnostics file is delivered. Wait for it at most 2 minutes.
    time_to_wait = 10 if Config.shared_filesystem else 120
    time_slept = 0
    time_step = 0.5
    previous_mtime = current_mtime = getmtime(job.diag_file)
    while True:
        content = read(job.diag_file, 1000)
        for line in content:
            if line[:9] == 'exitcode=':
                job.exitcode = int(line[9:])
                job.state = 'COMPLETED' if job.exitcode == 0 else 'FAILED'
                return True
        previous_mtime = current_mtime
        current_mtime = getmtime(job.diag_file)
        # Non-successful read, but mtime changed. Reload file.
        if current_mtime > previous_mtime:  # Possibly infinite loop?
            continue
        # Wait and retry until the timeout is exhausted
        if time_slept >= time_to_wait:
            break
        time.sleep(time_step)
        time_slept += time_step
    warn('Failed to get exit code from diag file', 'common.scan')
    return False
def select_view(userid, rating, journalid, ignore=True, anyway=None):
    journal = _select_journal_and_check(
        userid, journalid,
        rating=rating, ignore=ignore, anyway=anyway == 'anyway')

    return {
        'journalid': journalid,
        'userid': journal['userid'],
        'username': journal['username'],
        'user_media': media.get_user_media(journal['userid']),
        'mine': userid == journal['userid'],
        'unixtime': journal['unixtime'],
        'title': journal['title'],
        'content': files.read(files.make_resource(userid, journalid, 'journal/submit')),
        'rating': journal['rating'],
        'settings': journal['settings'],
        'page_views': journal['page_views'],
        'reported': report.check(journalid=journalid),
        'favorited': favorite.check(userid, journalid=journalid),
        'friends_only': 'f' in journal['settings'],
        'hidden_submission': 'h' in journal['settings'],
        'fave_count': favorite.count(journalid, 'journal'),
        'tags': searchtag.select(journalid=journalid),
        'comments': comment.select(userid, journalid=journalid),
    }
def test_read_file_contents(self):
    self.assertEqual(files.read('test.txt'), """Beautiful is better than ugly.
Explicit is better than implicit.""")
    self.assertEqual(len(files.read('zen.txt')), 856)
def select_view(userid, submitid, rating, ignore=True, anyway=None):
    query = d.execute("""
        SELECT
            su.userid, pr.username, su.folderid, su.unixtime, su.title, su.content,
            su.subtype, su.rating, su.settings, su.page_views, su.sorttime, pr.config,
            fd.title
        FROM submission su
            INNER JOIN profile pr USING (userid)
            LEFT JOIN folder fd USING (folderid)
        WHERE su.submitid = %i
    """, [submitid], options=["single", "list"])

    # Sanity check
    if query and userid in staff.MODS and anyway == "true":
        pass
    elif not query or "h" in query[8]:
        raise WeasylError("submissionRecordMissing")
    elif query[7] > rating and ((userid != query[0] and userid not in staff.MODS) or d.is_sfw_mode()):
        raise WeasylError("RatingExceeded")
    elif "f" in query[8] and not frienduser.check(userid, query[0]):
        raise WeasylError("FriendsOnly")
    elif ignore and ignoreuser.check(userid, query[0]):
        raise WeasylError("UserIgnored")
    elif ignore and blocktag.check(userid, submitid=submitid):
        raise WeasylError("TagBlocked")

    # Get submission filename
    submitfile = media.get_submission_media(submitid).get('submission', [None])[0]

    # Get submission text
    if submitfile and submitfile['file_type'] in ['txt', 'htm']:
        submittext = files.read(submitfile['full_file_path'])
    else:
        submittext = None

    embedlink = d.text_first_line(query[5]) if "v" in query[8] else None

    google_doc_embed = None
    if 'D' in query[8]:
        db = d.connect()
        gde = d.meta.tables['google_doc_embeds']
        q = (sa.select([gde.c.embed_url])
             .where(gde.c.submitid == submitid))
        results = db.execute(q).fetchall()
        if not results:
            raise WeasylError("can't find embed information")
        google_doc_embed = results[0]

    tags, artist_tags = searchtag.select_with_artist_tags(submitid)
    settings = d.get_profile_settings(query[0])

    return {
        "submitid": submitid,
        "userid": query[0],
        "username": query[1],
        "folderid": query[2],
        "unixtime": query[3],
        "title": query[4],
        "content": (d.text_first_line(query[5], strip=True) if "v" in query[8] else query[5]),
        "subtype": query[6],
        "rating": query[7],
        "settings": query[8],
        "page_views": (
            query[9] + 1
            if d.common_view_content(userid, 0 if anyway == "true" else submitid, "submit")
            else query[9]),
        "fave_count": d.execute(
            "SELECT COUNT(*) FROM favorite WHERE (targetid, type) = (%i, 's')",
            [submitid], ["element"]),
        "mine": userid == query[0],
        "reported": report.check(submitid=submitid),
        "favorited": favorite.check(userid, submitid=submitid),
        "friends_only": "f" in query[8],
        "hidden_submission": "h" in query[8],
        "collectors": collection.find_owners(submitid),
        "no_request": not settings.allow_collection_requests,
        "text": submittext,
        "sub_media": media.get_submission_media(submitid),
        "user_media": media.get_user_media(query[0]),
        "submit": submitfile,
        "embedlink": embedlink,
        "embed": embed.html(embedlink) if embedlink is not None else None,
        "google_doc_embed": google_doc_embed,
        "tags": tags,
        "artist_tags": artist_tags,
        "removable_tags": searchtag.removable_tags(userid, query[0], tags, artist_tags),
        "can_remove_tags": searchtag.can_remove_tags(userid, query[0]),
        "folder_more": select_near(userid, rating, 1, query[0], query[2], submitid),
        "folder_title": query[12] if query[12] else "Root",
        "comments": comment.select(userid, submitid=submitid),
    }
from __future__ import print_function

import random
import sys

import numpy as np
from keras.callbacks import LambdaCallback
from keras.layers import Dense
from keras.layers import LSTM
from keras.models import Sequential
from keras.optimizers import RMSprop

from soundex import to_soundex, to_text
from files import read, write, add

style_text = read('style.txt')
content_text = read('content.txt')

print('Soundex encoding...')
style_soundex, _ = to_soundex(style_text)
_, content_soundex_dictionary = to_soundex(content_text)

chars = sorted(set(style_soundex))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))

print('Sentences creation...')
max_len = 10
sentences = []
next_chars = []
for i in range(0, len(style_soundex) - max_len):
    sentences.append(style_soundex[i:i + max_len])
import random

import files
import hangman_pics

n = 0
print(hangman_pics.HANGMAN_PICS[n])
all_words = files.read("words_list.txt")

guesses = ""  # letters guessed so far; must be initialized before appending
guess = input("Guess a letter: ")
guesses = guesses + guess
def check_login(username, password):
    account_list = from_buffer(read(admin_file))
    for account in account_list:
        if account.isEqual(username, password):
            return True
    return False
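# A hypothetical sketch of the Account helper assumed by check_login() above;
# from_buffer() is presumed to deserialize the admin file into Account objects.
# The class and field names here are illustrative, not the real implementation.
class Account:
    def __init__(self, username, password):
        self.username = username
        self.password = password

    def isEqual(self, username, password):
        # Plain comparison, mirroring how check_login() calls it
        return self.username == username and self.password == password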
async def on_message(message):
    t = time.localtime()
    current_time = time.strftime("[%H:%M]", t)

    # Ignore own messages unless they are leaderboard messages; if so, save them to the guild's file
    if message.author == bot.user:
        data = files.read(f"data/{message.guild.id}.pkl")
        # Write the channel id and message id (dependent on channel) into the guild file
        try:
            if message.channel.id == data["leaderboard_message_channel_id"]:
                data["leaderboard_message_id"] = message.id
                files.write(f"data/{message.guild.id}.pkl", data)
            return
        except KeyError:
            if str(message.channel).lower() == "leaderboard":
                data["leaderboard_message_id"] = message.id
                data["leaderboard_message_channel_id"] = message.channel.id
                files.write(f"data/{message.guild.id}.pkl", data)

    if message.author.bot:
        forbidden = ["leaderboard", "counting"]
        data = files.read(f"data/{message.guild.id}.pkl")
        if (str(message.channel) in forbidden
                or message.channel.id == data["leaderboard_message_channel_id"]) \
                and message.author != bot.user:
            await message.delete()
            return

    # Put the server's data into var data for efficiency
    try:
        data = files.read(f"data/{message.guild.id}.pkl")
    except FileNotFoundError:
        # If the file is not found, initialize the guild with the counter at 1
        files.write(f"data/{message.guild.id}.pkl", {"count": 1})
        data = files.read(f"data/{message.guild.id}.pkl")
    except AttributeError:
        # This only occurs in a private chat
        await message.channel.send("Sorry, you have to be on a server to count")
        return

    try:
        expected_number = int(data["count"])
    except ValueError:
        # This prevents breaking of functionality when the value is a string
        # as a result of corrections.py
        expected_number = 1

    # Don't count if messages are not sent in the counting channel
    if str(message.channel) == "counting":
        # Check if the message is a number; if not, disregard and delete it
        try:
            message_number = int(message.content)
        except ValueError:
            print(f"{current_time} on {message.guild}: was looking for {expected_number} got {message.content} instead")
            await message.delete()
            return

        # Ignore if the author of the message was the last one counting
        try:
            if message.author.id == data["last_counter"]:
                await message.channel.send(f"You were the last one to count, **{message.author}**!")
                await message.delete()
                return
        except KeyError:
            pass

        # Delete the message if the number isn't the one we're looking for
        if message_number == expected_number:
            # Update values for counting
            data["count"] = expected_number + 1
            data["last_counter"] = message.author.id
            try:
                # Read participation data and add 1 to the user's value
                participation_user = data[message.author.id]
                participation_user = participation_user + 1
                data[message.author.id] = participation_user
            except KeyError:
                # If participation is not there yet, set it to one
                data[message.author.id] = 1

            # This is used for updating the leaderboard
            try:
                data["till_update"] = data["till_update"] - 1
            except KeyError:
                data["till_update"] = 99

            # <= out of caution, in case something decreases the till_update key
            # to a negative value, or unintended use of corrections.py
            if data["till_update"] <= 0:
                data["till_update"] = 100
                # No clue yet why AttributeErrors are caught here; to be tested later
                try:
                    # Ignore if leaderboard messages haven't been configured yet; normal on first run
                    try:
                        channel = await bot.fetch_channel(data["leaderboard_message_channel_id"])
                        msg = await channel.fetch_message(data["leaderboard_message_id"])
                        # Delete the old message so there aren't multiple leaderboards.
                        # Done first because even when something goes wrong, there shall
                        # be no indication of that on the Discord user's end, only logs.
                        await msg.delete()
                    except KeyError:
                        print(f"{current_time} on {message.guild}: No old leaderboard msg found")
                        for channel in message.guild.channels:
                            # This could potentially reconfigure the leaderboard channel
                            # unintentionally; however, for this to happen the KeyError has
                            # to be raised by the "leaderboard_message_id" key. This key is
                            # always defined except on first run, i.e. when no leaderboard
                            # channel is defined anyway, the only exception being someone
                            # working with corrections.py.
                            # TODO: make this specific to the "leaderboard_message_channel_id" key
                            if str(channel) == "leaderboard":
                                if type(channel) == discord.channel.CategoryChannel:
                                    pass
                                else:
                                    print(f"{current_time} on {message.guild}: setup for leaderboard channel successful")
                                    data["leaderboard_message_channel_id"] = channel.id
                        try:
                            # This is only undefined when the setup above fails to
                            # locate any channel named "leaderboard"
                            data["leaderboard_message_channel_id"]
                        except KeyError:
                            print(f"{current_time} on {message.guild}: FATAL no leaderboards channel found")
                            # User counts are deleted to ensure that no errors will keep
                            # the count from being valid; maybe a leaderboardless version later
                            await message.delete()
                            return
                    except discord.errors.NotFound:
                        print(f"{current_time} on {message.guild}: FATAL leaderboard msg not found!")

                    # List all counters
                    counters = []
                    for user in data:
                        if type(user) == int:
                            counters.append([data[user], user])
                    # Sort output; we don't want the lowest at the top of the leaderboard
                    counters.sort()
                    counters.reverse()

                    board = "---------Leaderboard-----------------------------\n"
                    for place in range(10):
                        try:
                            name = await bot.fetch_user(counters[place][1])
                            board += f"{place + 1}:{name}:{counters[place][0]}\n"
                        except IndexError:
                            pass

                    # I should probably move this check to the others
                    try:
                        channel = await bot.fetch_channel(data["leaderboard_message_channel_id"])
                    except discord.errors.NotFound:
                        print(f"{current_time} on {message.guild}: FATAL leaderboard channel deleted")
                        data = files.read(f"data/{message.guild.id}.pkl")
                        try:
                            del data["leaderboard_message_channel_id"]
                        except KeyError:
                            pass
                        files.write(f"data/{message.guild.id}.pkl", data)
                        await message.delete()
                        return
                    await channel.send(board)
                    print(f"{current_time} on {message.guild}: updated leaderboard")
                except AttributeError:
                    pass

            # Update the guild file. This prevents overwriting the
            # leaderboard_message_id key, as the updating is handled async and
            # is faster. Not ideal; to be improved.
            try:
                data_with_id = files.read(f'data/{message.guild.id}.pkl')
                data["leaderboard_message_id"] = data_with_id["leaderboard_message_id"]
            except KeyError:
                pass

            if (data["count"] - 1) % 100 == 0:
                print(f"{current_time} on {message.guild}: Server reached {message.content}")
            files.write(f"data/{message.guild.id}.pkl", data)
        else:
            await message.delete()

    if str(message.channel) == "leaderboard":
        # Safe, as all bot actions quit the function before this point
        await message.delete()
        return
import os
import time

import discord

import files

bot = discord.Client()

# Load the token into a variable, or ask for it
try:
    token = files.read("data/token.pkl")
except FileNotFoundError:
    token = input("INPUT TOKEN:")

# Writing the token.pkl file does not automatically create the data folder;
# create it if it is missing
try:
    files.write("data/token.pkl", token)
except FileNotFoundError:
    os.makedirs("data", exist_ok=True)  # portable replacement for os.system("mkdir data")
    files.write("data/token.pkl", token)

# TODO
# nothing rn


@bot.event
async def on_ready():
    t = time.localtime()
    current_time = time.strftime("[%H:%M]", t)
    print(f"{current_time}:login successful as {bot.user}")
def GET(self):
    return files.read()
def read(filename):
    tmp = files.read(filename)
    if tmp is None and files.copy(filename):
        tmp = files.read(filename)
    return tmp
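# A brief, hypothetical usage of the wrapper above: the first read may return
# None when the file is absent locally; files.copy() is assumed to fetch it,
# after which the read is retried once. 'settings.cfg' is an illustrative name.
config = read('settings.cfg')
if config is None:
    raise IOError('settings.cfg unavailable even after files.copy()')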
def files_list_view(self):
    if 'password' not in self.request.POST and self.check():
        print(self.request.POST)
        return HTTPFound(location='/login')

    url = USB_LOCATION + get_url(self.request.matchdict['list'])
    url_parsed = '/'
    for i in range(len(url.split('/')) - 3):
        url_parsed += url.split('/')[i + 1] + '/'
    action = url.split('/')[-2]
    filename = str(url_parsed.split('/')[-2])

    if 'password' in self.request.POST:
        if self.request.POST['password'] != '':
            password = self.request.POST['password']
            content = encrypt(self.request.POST['notecontent'], password)
            write('/'.join(url.split('/')[:-2]), content)
        return HTTPFound(location='/files' + url[:-1])
    elif 'file' in self.request.POST:
        filename = self.request.POST['file'].filename
        print(filename)
        input_file = self.request.POST['file'].file
        upload(input_file, url, filename)
        print('/files' + url)
        return HTTPFound(location='/files' + url)
    elif 'dir_name' in self.request.POST:
        dirname = self.request.POST['dir_name']
        make_dir(dirname, url)
        return HTTPFound(location='/files' + url)
    elif 'note_name' in self.request.POST:
        write(url + self.request.POST['note_name'], '')
        return HTTPFound(location='/files' + url)
    elif 'notecontent' in self.request.POST:
        content = encrypt(self.request.POST['notecontent'],
                          decrypt(self.request.session['enpass'], self.request.cookies['r']))
        write('/'.join(url.split('/')[:-2]), content)
        return HTTPFound(location='/files' + url)
    elif action == 'edit':
        content = decrypt(read(url_parsed[:-1]),
                          decrypt(self.request.session['enpass'], self.request.cookies['r']))
        re = {
            'page_title': 'Notes',
            'edit': True,
            'contents': content,
            'url': url,
        }
        re.update(self.get_header())
        return re
    elif action == 'rename':
        file_old = '/'.join(url_parsed.split('/')[:-2])
        if not is_file(file_old) and not is_dir(file_old):
            print('not filename')
            return HTTPFound(location='/files')
        rename(file_old, filename)
        return HTTPFound('/files' + '/'.join(url.split('/')[:-4]))
    elif is_file(url_parsed[:-1]):
        if action == 'download':
            re = FileResponse(url_parsed[:-1])
            re.headers['Content-Disposition'] = 'attachment; filename="' + filename + '"'
            re.headers['Content-Type'] = 'application/force-download'
            re.headers['Accept-Ranges'] = 'bytes'
            return re
        elif action == 'delete':
            delete(url_parsed[:-1])
            return HTTPFound(self.request.url[:-7 - len(filename.replace(' ', '%20'))])
    elif is_dir(url[:-7]) and action == 'delete':
        delete(url[:-7])
        return HTTPFound(self.request.url[:-7 - len(filename)])
    elif not is_dir(url) or len(url.split('/')) < 5:
        return HTTPFound(location='/files')

    temp = [str(part) for part in list_dir(url)]
    temp.sort(key=lambda s: s.lower())
    item_list = [
        {
            'url': '/files/' + url[1:] + part if is_dir(url + part) else '/files/' + url[1:] + part + '/download',
            'url_ren': '/files/' + url[1:] + part,
            'url_del': '/files/' + url[1:] + part + '/delete',
            'name': part,
            'is_file': is_file(url + part),
            'size': size(url + part),
        }
        for part in temp
    ]
    re = {
        'page_title': LINKS['files_list']['title'],
        'list': item_list,
        'up_dir': '/files/' + url_parsed[1:],
        'url': url,
        'edit': False,
    }
    re.update(self.get_header())
    return re
def readwordlist(self):
    return files.read(os.path.dirname(os.path.realpath(__file__)) + '/words.txt')
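# Nearly every snippet above leans on a small `files` helper module that is
# not shown in this collection. A minimal sketch of what such a module might
# look like, assuming read() returns file contents (or None on failure) and
# write() stores them; the real implementations clearly differ per project
# (some variants take callbacks, byte limits, modes, or pickle paths).
import os


def read(path):
    """Return the contents of `path`, or None if it cannot be read."""
    try:
        with open(path, 'rb') as f:
            return f.read()
    except OSError:
        return None


def write(path, data):
    """Write `data` to `path`, creating the parent directory as needed."""
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    mode = 'wb' if isinstance(data, bytes) else 'w'
    with open(path, mode) as f:
        f.write(data)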