def fetchMedia(self):
    f = Fetch(self.mediaUrl)
    if f.connected:
        self.media = f.site
        self.mediaData = self.media.read()
        self.urlinfo = self.media.info()
        self.fetched = True
def load(self, loadUrl):
    o = urlparse(loadUrl)
    print o.scheme
    print o.port
    print o
    o = urlsplit(loadUrl)
    print o.geturl()
    reader = Fetch()
    htmlFile = reader.get(loadUrl)
    soup = BeautifulSoup(htmlFile)
    form = soup.find('form')
    print form
    print form['action']
    print form['method']
    #print form['onsubmit']
    # Tag.get checks for the attribute; a plain `in` test would search the
    # tag's children instead of its attributes.
    if form.get('action') is not None:
        print form['action']
    if form.get('method') is not None:
        print form['method']
    if form.get('onsubmit') is not None:
        print form['onsubmit']
    data = {"username": "******", "password": "******"}
    response = reader.post('https://210.154.183.61:4443/mantis/login.php', data)
    print response
def __init__(self, auth: str, logger=logging.getLogger()):
    self.auth = auth
    self.logger = logger
    self.fetch = Fetch(auth, logger)
    self.user = self.fetch.fetch("/v1/me")
    self.userId = self.user["id"]
    self.imageUrl = self.user["images"][0]["url"]
    self.market = self.user["country"]
    logger.debug("Initialized Spotify: {0} @ {1}".format(self.userId, self.market))
    self.fetch.fetch("/v1/me/tracks", limit=1, market=self.market)
def __init__(self):
    self.config = {
        "max_result": 1000,
        "topic_score": 10.0,
        "threshold": 1.0,
        "base_url": "http://export.arxiv.org/api/query?",
    }
    self.config = Bunch(self.config)
    self.fetch = Fetch(self.config)
def subscribe_to_feed(self, feed_url):
    feed_id = db.get_feed_id(feed_url)
    if feed_id < 0:
        # Unknown feed: fetch it and create the database records first.
        feed_fetch = Fetch(feed_url)
        feed_id = db.create_new_feed(feed_fetch.get_feed_details())
        db.add_new_item(feed_fetch.get_entries())
    user_feed_id = db.add_user_to_feed(self.uid, feed_id)
    return feed_id
def execute():
    """Run script every X minutes."""
    # Get google api credentials.
    my_creds = Creds()
    # Construct a service for interacting with sheets api.
    service = build('sheets', 'v4', credentials=my_creds.creds)
    print(CONSOLE_COLOR_START_DONE + f'{datetime.now()} running script' + CONSOLE_ENDC)
    main_start = timer()
    # Create new spreadsheet.
    start = timer()
    print('creating spreadsheet...')
    sheet = Spreadsheet(service, f'everyday_sheet{datetime.now()}')
    print(CONSOLE_COLOR + f'new sheet created in {round(timer() - start, 3)}s' + CONSOLE_ENDC)
    # Fetch data from all data sources defined in json file.
    fetcher = Fetch('reddit_inputs.json')
    start = timer()
    print('fetching data...')
    data = fetcher.fetch_all()
    print(CONSOLE_COLOR + f'data fetched in {round(timer() - start, 3)}s' + CONSOLE_ENDC)
    # Convert data into format needed for spreadsheet.
    sheet.convert_data(data)
    # Insert data into spreadsheet.
    start = timer()
    print('inserting data into spreadsheet...')
    sheet.insert_data()
    print(CONSOLE_COLOR + f'data inserted into a sheet in {round(timer() - start, 3)}s' + CONSOLE_ENDC)
    # Adjust column sizes.
    sheet.set_col_size()
    print(CONSOLE_COLOR + f'elapsed time since start {round(timer() - main_start, 3)}s' + CONSOLE_ENDC)
    print('added ', datetime.now())
    print(CONSOLE_COLOR_START_DONE + f'{datetime.now()} script finished' + CONSOLE_ENDC)
    return f'https://docs.google.com/spreadsheets/d/{sheet.sheet_id}/edit#gid=0'
def set_wallpaper(self, img_path):
    if system == "Linux":
        if "~" in img_path:
            img_path = img_path.replace("~", HOME)
        if img_path == ".":
            img_path = start_path
        if os.path.isfile(img_path):
            self.linux_wallpaper(img_path)
        elif os.path.isfile(join(start_path, img_path)):
            self.linux_wallpaper(join(start_path, img_path))
        elif os.path.isdir(img_path):
            Fetch().custom_folder([img_path])
        elif os.path.isdir(join(start_path, img_path)):
            Fetch().custom_folder([join(start_path, img_path)])
        else:
            print(f"{colors['red']}Error, file path not recognized{colors['normal']}")
    else:
        print(f"{colors['red']}Sorry, your system is not supported yet.{colors['normal']}")
def main_menu(self, message=message):
    refresh(message)
    choice = input(f"""{colors['green']}
    Welcome to Redpaper. This is a TUI used to control
    the underlying Redpaper program.
    Select an option:\n{colors['normal']}
    {colors['red']} 1 {colors['normal']}: {colors['blue']} Download wallpapers {colors['normal']}\n
    {colors['red']} 2 {colors['normal']}: {colors['blue']} Next wallpaper{colors['normal']}\n
    {colors['red']} 3 {colors['normal']}: {colors['blue']} Previous wallpaper{colors['normal']}\n
    {colors['red']} 4 {colors['normal']}: {colors['blue']} Settings{colors['normal']}\n
    {colors['red']} 5 {colors['normal']}: {colors['blue']} Help {colors['normal']}\n
    {colors['red']} x {colors['normal']}: {colors['blue']} exit {colors['normal']}\n
    >>> """)
    if choice == "1":
        refresh(message)
        Fetch().wall_dl()
    elif choice == "2":
        message = f"{colors['green']} Changed wallpaper {colors['normal']}\n"
        refresh(message)
        img_path = WallSet().sequetial(0)
        WallSet().set_wallpaper(img_path)
        self.main_menu()
    elif choice == "3":
        message = f"{colors['green']} Changed wallpaper {colors['normal']}\n"
        refresh(message)
        img_path = WallSet().sequetial(1)
        WallSet().set_wallpaper(img_path)
        self.main_menu()
    elif choice == "4":
        message = ""
        Settings().main_settings()
    elif choice == "5":
        # TODO: create a help page
        message = "HELP\n"
        refresh(message)
        print(f"""
    {colors['green']}You can check the wiki for help:
    https://github.com/keystroke3/redpaper/wiki{colors['normal']}""")
        self.main_menu()
    elif choice == "x" or choice == "X":
        clear()
    else:
        Home().main_menu()
def sequetial(self, go_back):
    """Chooses the wallpaper in the order in which they were downloaded."""
    try:
        with open(wall_data_file, encoding="utf-8") as data:
            saved_walls = json.load(data)
    except (FileNotFoundError, ValueError):
        Fetch().wall_dl()
    with open(wall_data_file, "r") as data:
        saved_walls = json.load(data)
    with open("point.pickle", "rb+") as wall_point:
        # selection_point stores the value of the current wallpaper.
        # It is necessary so that wallpapers don't repeat.
        selection_point = pickle.load(wall_point)
    if selection_point > len(saved_walls):
        selection_point = 1
    elif selection_point == len(saved_walls) and go_back == 1:
        selection_point -= 1
    elif selection_point == len(saved_walls) and go_back == 0:
        selection_point = 1
    elif (selection_point < len(saved_walls) and selection_point != 1
          and go_back == 1):
        selection_point -= 1
    elif selection_point < len(saved_walls) and go_back == 0:
        selection_point += 1
    elif (selection_point < len(saved_walls) and selection_point == 1
          and go_back == 0):
        selection_point += 1
    elif (selection_point < len(saved_walls) and selection_point == 1
          and go_back == 1):
        selection_point = len(saved_walls)
    elif (selection_point < len(saved_walls) and selection_point == 0
          and go_back == 0):
        selection_point = 1
    elif (selection_point < len(saved_walls) and selection_point == 0
          and go_back == 1):
        selection_point = len(saved_walls)
    img_name = str(saved_walls.get(str(selection_point)))
    # The new value of selection_point is stored for the next run.
    print(f"selection point is {selection_point}")
    with open("point.pickle", "wb") as point:
        pickle.dump(selection_point, point)
    return join(pictures, str(img_name))
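# A worked example of the pointer logic above, assuming five saved wallpapers:
# at selection_point == 5 with go_back == 0 the pointer wraps forward to 1,
# and at selection_point == 1 with go_back == 1 it wraps back to
# len(saved_walls), so stepping in either direction cycles through the set.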
def parseFeed(self):
    print "Start to fetch and parse Feed list"
    seed = self.seed
    f = Fetch(seed.prefixurl, seed.charset, self.seed.timeout)
    if f.isReady():
        feed = feedparser.parse(f.read())
        items = feed["entries"]
        if len(items) > 0:
            for item in items:
                _item = Item({"url": item["link"], "type": self.seed_type})
                if self.guid_rule is None:
                    self.guid_rule = "url"
                guid = self.getItemGUID(item)
                self.items[guid] = _item
    print "List has finished parsing. It has %s docs." % ansicolor.red(
        self.__len__())
def fetchListPages(self, listtype="html"):
    print "Start to fetch and parse List"
    urls = self.listRule.getListUrls()
    for url in urls:
        print "Fetching list page: ", url, "charset:", safestr(
            self.seed["charset"]), "timeout:", safestr(self.seed["timeout"])
        f = Fetch(url, charset=self.seed["charset"], timeout=self.seed["timeout"])
        if f.isReady():
            doc = f.read()
            if listtype == "html":
                self.parseListPage(f, doc, url)
            elif listtype == "json":
                self.parseJsonPage(f, doc, url)
    print "List has finished parsing. It has %s docs." % ansicolor.red(
        self.__len__())
async def on_message(self, message):
    # Send to a fixed channel rather than the message's own channel.
    # (The original assigned to message.channel.id, which is read-only in
    # discord.py; looking the channel up by id is the likely intent.)
    channel = self.get_channel(842475416160698379)
    if message.content.startswith('tag everyone'):
        self.members_to_ping = Fetch().return_data()
        print(self.members_to_ping)
        self.ids, self.members_to_tag = [], []
        for member in self.members:
            if member[2] in self.members_to_ping or member[0] in self.members_to_ping:
                self.ids.append(member[1])
                self.members_to_tag.append(
                    member[2] if member[2] in self.members_to_ping else member[0])
        print(self.members_to_tag)
        s = ''
        for members in self.ids:
            s += '<@{}>'.format(members) if members == self.ids[-1] else '<@{}>,'.format(members)
        await channel.send("{} Please report for your shift".format(s))
def __init__(self, item, seed):
    '''document base url'''
    self.url = item["url"]
    self.data = item
    self.seed = seed
    item["tags"] = ",".join(self.seed.tags)
    # Article scraping rules.
    self.articleRule = seed.getRule().getArticleRule()
    print "Document %s is fetching" % ansicolor.green(self.url)
    firstContent = Fetch(self.url, charset=seed["charset"],
                         timeout=seed["timeout"]).read()
    if firstContent:
        self.parseDocument(firstContent)
async def on_ready(self):
    self.members = []
    self.required_members = []
    # for guild in self.guilds:
    #     for channel in guild.channels:
    #         print(channel.id, channel.name)
    for guild in self.guilds:
        data = guild.members
        for x in data:
            if x.name == 'REAPER':
                print(x.name, x.id, x.nick)
            self.members.append([x.name, x.id, x.nick])
    print('Logged on as {0}!'.format(self.user))
    # await asyncio.sleep(60 * 50 * 2)
    self.members_to_ping = Fetch().return_data()
    print(self.members_to_ping)
    self.ids, self.members_to_tag = [], []
    for member in self.members:
        if member[2] in self.members_to_ping or member[1] in self.members_to_ping:
            self.ids.append(member[1])
            self.members_to_tag.append(member[2])
import rospy
import time
import math
import cv2
import tf
from sensor_msgs.msg import Image
from ar_track_alvar_msgs.msg import AlvarMarkers
from geometry_msgs.msg import PoseStamped
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from fetch import Fetch

if (__name__ == "__main__") and (not rospy.is_shutdown()):
    rospy.init_node("fetch_builder", anonymous=True)
    Fetch_Robot = Fetch()
    rospy.loginfo("Initialization")
    Fetch_Robot.Head.look_at(0.7, 0, 0.5, "base_link")
    rospy.loginfo("Tilt head")
    Fetch_Robot.Gripper.Open()
    rospy.loginfo("Gripper Open")
    Fetch_Robot.Arm.Tuck()
    rospy.loginfo("Tuck Arm")
    rospy.sleep(rospy.Duration(2))
    # Take images
    RGB_image = Fetch_Robot.GetRGBImage()
    rospy.loginfo("Get RGB image")
def main(args=None):
    parser = argparse.ArgumentParser(description='Simple training script.')
    parser.add_argument('--cls_id', help='class id', type=int)
    parser.add_argument('--version', help='model version', type=float)
    parser.add_argument('--gamma', help='gamma for the SoftL1Loss', type=float, default=9.0)
    parser.add_argument('--lr', help='lr for optimization', type=float, default=1e-4)
    parser.add_argument('--epoches', help='num of epoches for optimization', type=int, default=4)
    parser.add_argument('--resume_epoch', help='trained model for resume', type=int, default=0)
    parser.add_argument('--batch_size', help='batch size for optimization', type=int, default=10)
    parser.add_argument('--checkpoints', help='checkpoints path', type=str, default='voc_checkpoints')
    parser = parser.parse_args(args)

    cls_name = classes[parser.cls_id]
    parser.checkpoints = '_'.join([parser.checkpoints, cls_name])
    if not os.path.isdir(parser.checkpoints):
        os.mkdir(parser.checkpoints)
    print('will save checkpoints in ' + parser.checkpoints)
    cls_dir = "../context_profile/voc_detection_{:s}_p10/".format(cls_name)
    batch_size = parser.batch_size

    print('[data prepare]....')
    dataloader_train = DataLoader(Fetch('train_benign', root_dir=cls_dir),
                                  batch_size=batch_size, num_workers=2, shuffle=True)

    print('[model prepare]....')
    use_gpu = torch.cuda.device_count() > 0
    model = AutoEncoder(parser.gamma)
    if use_gpu:
        model = torch.nn.DataParallel(model).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=parser.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2, verbose=True)
    if parser.resume_epoch > 0:
        checkpoint_name = os.path.join(
            parser.checkpoints,
            'model_{:1.1f}_epoch{:d}.pt'.format(parser.version, parser.resume_epoch))
        if not os.path.isfile(checkpoint_name):
            raise ValueError('No checkpoint file {:s}'.format(checkpoint_name))
        model.load_state_dict(torch.load(checkpoint_name))
        print('model loaded from {:s}'.format(checkpoint_name))

    print('[model training]...')
    loss_hist = []
    epoch_loss = []
    num_iter = len(dataloader_train)
    for epoch_num in range(parser.resume_epoch, parser.epoches):
        model.train()
        for iter_num, sample in enumerate(dataloader_train):
            optimizer.zero_grad()
            if use_gpu:
                data = sample['data'].cuda().float()
            else:
                data = sample['data'].float()
            loss = model(data).mean()
            if bool(loss == 0):
                continue
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
            optimizer.step()
            epoch_loss.append(float(loss))
            loss_hist.append(float(loss))
            if iter_num % 30 == 0:
                print('Epoch {:d}/{:d} | Iteration: {:d}/{:d} | loss: {:1.5f}'.format(
                    epoch_num + 1, parser.epoches, iter_num + 1, num_iter, float(loss)))
            if iter_num % 3000 == 0:
                scheduler.step(np.mean(epoch_loss))
                epoch_loss = []
        if epoch_num < 1:
            continue
        checkpoint_name = os.path.join(
            parser.checkpoints,
            'model_{:1.1f}_epoch{:d}.pt'.format(parser.version, epoch_num + 1))
        torch.save(model.state_dict(), checkpoint_name)
        print('Model saved as {:s}'.format(checkpoint_name))
    np.save('loss_hist.npy', loss_hist)
def coinpaprika():
    """Return coinpaprika json data."""
    fetcher = Fetch()
    data = fetcher.fetch_coinpaprika()
    return jsonify(data)
def newsapi(topic):
    """Return newsapi json data according to topic."""
    fetcher = Fetch()
    data = fetcher.fetch_newsapi(topic)
    return jsonify(data)
def get_preds(args=None):
    parser = argparse.ArgumentParser(description='Simple testing script.')
    parser.add_argument('--cls_id', help='class id', type=int)
    parser.add_argument('--version', help='model version', type=float)
    parser.add_argument('--resume_epoch', help='trained model for resume', type=int)
    parser.add_argument('--set_name', help='imply attack goal', type=str,
                        default='test_digi_ifgsm_hiding')
    parser.add_argument('--gamma', help='gamma for the SoftL1Loss', type=float, default=9.0)
    parser.add_argument('--checkpoints', help='checkpoints path', type=str,
                        default='voc_checkpoints')
    parser.add_argument('--saves_dir', help='the save path for tested reconstruction error',
                        type=str, default='voc_reconstruction_error')
    parser.add_argument('--batch_size', help='batch size for optimization', type=int, default=1)
    parser = parser.parse_args(args)

    batch_size = parser.batch_size
    if not os.path.isdir(parser.saves_dir):
        os.mkdir(parser.saves_dir)
    cls_name = classes[parser.cls_id]
    parser.checkpoints = '_'.join([parser.checkpoints, cls_name])
    checkpoint_name = os.path.join(
        parser.checkpoints,
        'model_{:1.1f}_epoch{:d}.pt'.format(parser.version, parser.resume_epoch))
    if not os.path.isfile(checkpoint_name):
        raise ValueError('No checkpoint file {:s}'.format(checkpoint_name))
    assert batch_size == 1

    print('[data prepare]....')
    cls_dir = "../context_profile/voc_detection_{:s}_p10/".format(cls_name)
    dataloader_test = DataLoader(Fetch(parser.set_name, root_dir=cls_dir),
                                 batch_size=batch_size, num_workers=1, shuffle=False)

    print('[model prepare]....')
    use_gpu = torch.cuda.device_count() > 0
    model = AutoEncoder(parser.gamma)
    if use_gpu:
        model = torch.nn.DataParallel(model).cuda()
    model.load_state_dict(torch.load(checkpoint_name))
    print('model loaded from {:s}'.format(checkpoint_name))

    print('[model testing]...')
    model.eval()
    preds = []
    with torch.no_grad():
        for sample in iter(dataloader_test):
            if use_gpu:
                data = sample['data'].cuda().float()
            else:
                data = sample['data'].float()
            loss = model(data)
            preds.append(float(loss))
    preds_name = '_model{:1.1f}_' + parser.set_name
    save_name = os.path.join(parser.saves_dir, cls_name + preds_name.format(parser.version))
    np.save(save_name, preds)
    print('save preds in {:s}'.format(save_name))
def reddit():
    """Return reddit json data."""
    fetcher = Fetch('reddit_inputs.json')
    data = fetcher.fetch_reddit()
    return jsonify(data)
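# A minimal registration-and-usage sketch for the Flask-style JSON handlers
# in this section (coinpaprika, newsapi, reddit). The route paths and port
# are assumptions for illustration, not taken from the original code:
#
#   app.add_url_rule('/coinpaprika', view_func=coinpaprika)
#   app.add_url_rule('/newsapi/<topic>', view_func=newsapi)
#   app.add_url_rule('/reddit', view_func=reddit)
#
#   $ curl http://localhost:5000/newsapi/bitcoin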
def get_data(url):
    req_obj = Fetch(url)
    ret_data = req_obj.get_data()
    if ret_data is not None:
        return json.loads(ret_data)
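# A minimal usage sketch for get_data. It returns parsed JSON on success and
# falls through to an implicit None when the fetch yields nothing, so callers
# should guard for that. The URL is a placeholder, not from the original code:
#
#   payload = get_data("https://example.com/api/items.json")
#   if payload is None:
#       print("fetch failed or returned no data")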
def parseDocument(self, doc):
    doc = pq(doc)
    wrapparent = self.articleRule.wrapparent
    pageparent = self.articleRule.pageparent
    content_re = ""
    # Sub-page URLs.
    urls = []
    # Text content of the document.
    content = ""
    article = doc.find(wrapparent)
    # Pages.
    if pageparent:
        urls = self.parsePage(article, pageparent)
    # Need title and tags.
    extrarules = self.articleRule.extrarules
    # Only articles have content.
    # TODO: some features are still missing here.
    if len(extrarules):
        for key, rule, fetch_all, page_type in extrarules:
            field = Field(name=key, rule=rule)
            value = getElementData(doc, rule, self.data["images"], fetch_all)
            self.data[field.get('name')] = field
            if self.is_article_content(field):
                content_re = field.get("rule")
                content = value
            elif self.is_gallery_content(field):
                content_re = field.get("rule")
                content = []
                if isinstance(value, list):
                    content += value
            else:
                field.value = value
    # Fetch paginated content.
    if len(urls) > 0 and content_re:
        for next_url in urls:
            next_page = Fetch(next_url, charset=self.seed["charset"],
                              timeout=self.seed["timeout"]).read()
            if next_page is not None:
                next_page = self._getContent(next_page, wrapparent, content_re)
                if next_page:
                    if isinstance(content, list):
                        content.append(next_page)
                    else:
                        content += next_page
    if content and content_re:
        if isinstance(content, list):
            self.data['content'].value = content
            self.data['images'] += content
        else:
            content = Readability(content, self.url, self.articleRule.filters)
            images = content.getImages()
            self.data['content'].value = content.getContent()
            self.data['images'] += images
lst.write('E', cur.regE)
lst.write('M', cur.regM)
lst.write('W', cur.regW)

mem = Memory()
InsCode = {}
Init(InsCode, mem)
reg = Register()
pipereg = PipeRegister()
tmp_pipereg = PipeRegister()
CC = ConditionCode()
Stat = Status()
PC = 0
while Stat.stat == 'AOK':
    print 'Current Time:', PC
    tmp_pipereg = PipeRegister()
    Fetch(tmp_pipereg, InsCode[hex(PC)], PC)
    Decode(pipereg, tmp_pipereg, reg)
    Execute(pipereg, tmp_pipereg)
    Memory(pipereg, tmp_pipereg)
    WriteBack(pipereg, tmp_pipereg)
    PC = pipereg.regF['predPC']
    Update(cur=tmp_pipereg, lst=pipereg)
    print 'RegF:', reg.regF
    print 'RegD:', reg.regD
    print 'RegE:', reg.regE
    print 'RegM:', reg.regM
    print 'RegW:', reg.regW
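# Each iteration of the loop above advances the simulated pipeline by one
# step, running all five classic stages (Fetch -> Decode -> Execute ->
# Memory -> WriteBack); Update then commits the freshly computed pipeline
# registers, and regF['predPC'] supplies the next fetch address.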
from fetch import Fetch
from parsing import Parser, idGenerator
from flask import Flask, Response, request
from database import addDB, updateDB, removeDBbyCID, removeDBbyURL, getDBbyCID, getDBbyURL, getAllDB
import json
import re

# Todo: user system.
# Todo 2: database implementation.
# Todo 3: on wrong RSS, remove the added URL and send back a response.
# Todo 4: if the URL already exists, reject it.
# Todo 5: proper error messaging in responses.

newFetcher = Fetch()
app = Flask(__name__)


def checkURL(url):
    match = re.match(r'http[s]?://.*', url)
    if match:
        return True
    else:
        return False


@app.route("/", methods=['GET'])
def index():
    r = {'success': True, 'msg': "Server Index Point ..."}
    resp = Response(json.dumps(r))
    resp.headers['Content-Type'] = 'application/json'
    return resp
import datetime as dt
from fetch import Fetch
from display import Display
import pprint

if __name__ == '__main__':
    # Performs queries:
    fetchy = Fetch()
    # fetchy.fetchTweets("prayforsyria", 1000, begindate=dt.date(2011, 1, 21), enddate=dt.date.today())

    # Defines words that we want to use in the visualization. All other words will be ignored:
    chosenWords = [
        "Syria", "people", "you", "all", "no", "we", "world", "will", "about",
        "innocent", "Allah", "paz", "just", "pray", "heart", "Syrian", "mundo",
        "children", "who", "one", "don't", "please", "help", "Pray", "more",
        "like", "apoyo", "oración", "#Aleppo", "going", "war", "God", "uno",
        "need", "prayers", "ayuno", "happening", "killed", "being", "peace",
        "sad", "NO", "know", "now", "because", "stop", "many", "everyone",
        "live", "unimos", "really", "lives", "orar", "much", "love",
        "suffering", "protect", "guerra", "morning", "even", "todos", "hope",
        "country", "#LLAP", "forget", "never", "over", "every", "still",
        "brothers", "think", "llamado", "ayunar", "human", "time", "feel",
        "praying", "keep", "bomb", "bombing", "kids", "Virgen", "today",
        "Assad", "better", "diligencia", "killing", "breaks", "trabajo",
        "video", "life", "firmemente", "rezar", "where", "lost", "kill",
        "pidiendo", "humanity", "deserve", "always", "gente", "little",
        "take", "safe", "end", "say", "sisters", "doesn't", "any", "dying",
        "dead", "after", "things", "good", "Trump", "some", "child", "needs",
        "heartbreaking", "other", "Dios", "care", "Syrians", "US", "personas",
        "news", "civilians", "não", "against", "inocentes", "unidos", "media",
from network import Network
from fetch import Fetch

co = Network()
co.send("Test\n")
num = co.receive()
dataFetching = Fetch(num)
data = co.receive()
print(dataFetching.fetchMessage(data))
# Stdlib / third-party imports implied by the snippet, added for completeness.
import os
from flask import Flask
from flask_cors import CORS
from dotenv import load_dotenv

# App Modules
from fetch import Fetch
from processor import Processor

## Load ENV
load_dotenv()
isDev = os.getenv('FLASK_ENV') == 'development'
accepted = '*' if isDev else 'https://wilsonj806.github.io/*'

app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
CORS(app, origins=accepted)

fetcher = Fetch()


@app.route('/')
def initial_ping():
    return """NYC Tree Data Fetcher; Version: 0.0.0"""


@app.route('/data')
def fetch_all():
    json = fetcher.check_cache()
    return {'_data_length': len(json), 'data': json}


@app.route('/data/count')
def count_per_boro():
# import sys
# sys.path.append('/home/ubuntu/projects/financial')
from fetch import Fetch

fetch = Fetch()
params = [('sc', 431)]
stocklist = fetch.fetch_stocks(params)
print stocklist
def main():
    parser = argparse.ArgumentParser(
        description="""This is a simple program that allows you to change your
        desktop wallpaper. It fetches the best wallpapers from Reddit
        and sets one as the wallpaper.""")
    parser.add_argument("-d", "--download", action="store_true",
                        help="Downloads new wallpapers")
    parser.add_argument("-c", "--change", action="store_true",
                        help="sets a wallpaper without downloading new ones")
    parser.add_argument("-a", "--all", action="store_true",
                        help="Download new wallpapers and set one of them")
    parser.add_argument("-l", "--limit",
                        help="Number of wallpapers to look for. Default = 1")
    parser.add_argument("-p", "--path", metavar="PATH",
                        help="Sets the download location for new wallpapers\n"
                             "The img_path has to be in quotes")
    parser.add_argument("-i", "--image",
                        help="Sets a user specified image as wallpaper.\n")
    parser.add_argument("-r", "--sub",
                        help="Sets a user specified subreddit(s) as source.\n")
    parser.add_argument("-f", "--folder",
                        help="Uses images stored in the specified folders\n"
                             "Path has to be in quotes")
    parser.add_argument("-s", "--settings", action="store_true",
                        help="change settings permanently")
    parser.add_argument("-b", "--back", action="store_true",
                        help="Sets the previous image as wallpaper")
    # args = parser.parse_args()
    args, unknown = parser.parse_known_args()
    if len(sys.argv) <= 1:
        Home().main_menu()
    if args.settings:
        if args.path:
            Settings().change_dl_path(args.path, True)
            return
        elif args.limit:
            Settings().max_dl_choice(args.limit, True)
            return
        elif args.sub:
            Settings().change_subs(args.sub, True)
            return
        else:
            print("No option selected or selection not understood")
            return
    if args.download:
        sub_list = ""
        if args.sub:
            sub_list = args.sub
        if args.limit:
            Fetch().d_limit = int(args.limit)
        Fetch(sub_list).wall_dl()
    elif args.change:
        if args.back:
            img_path = WallSet().sequetial(1)
            WallSet().set_wallpaper(img_path)
        else:
            img_path = WallSet().sequetial(0)
            WallSet().set_wallpaper(img_path)
    elif args.image:
        WallSet().set_wallpaper(args.image)
    elif args.folder:
        Fetch().custom_folder(args.folder[0])
    elif args.all:
        img_path = WallSet().sequetial(0)
        if args.limit:
            Fetch().d_limit = int(args.limit)
        Fetch().wall_dl()
        WallSet().set_wallpaper(img_path)
    elif args.back:
        img_path = WallSet().sequetial(1)
        WallSet().set_wallpaper(img_path)
    elif unknown:
        if len(unknown) == 1:
            WallSet().set_wallpaper(unknown[0])
        else:
            Fetch().custom_folder(unknown)