Example #1
def template_function(vars_dict):
    """
    :param vars_dict: All scraping functions are passed vars_dict, which contains all variables needed for operation
    :return: Nothing, passes the documents to lib.archive_engine()
    """
    lib.print_status("Starting <enter service name> run")
    # Connect to the archive page of the service and create a soup object
    template_page = lib.connect("https://templatebin.com/archive")
    template_soup = BeautifulSoup(template_page.text, 'html.parser')
    # parse the archive page to get links to individual documents.
    # The actual code here will vary depending on the HTML of your target service
    table = template_soup.find("table", attrs={'class': 'table_of_documents'})
    parameters = [a['href'] for a in table.findAll('a', href=True)]
    # Loop through each parameter and get the document:
    for param in parameters:
        # connect to document and make a soup object:
        document_page = lib.connect(f"https://templatebin.com/{param}")
        document_soup = BeautifulSoup(document_page.text, 'html.parser')
        # Do whatever html work (if any) you need to get the raw text.
        # If it's just in a <pre> tag, you can simply do str(document_soup)
        unprocessed = document_soup.find('textarea').contents[0]
        # the identifier is used to name the file:
        identifier = f"service_name-{param}"
        # Pass the text to lib.archive_engine() for matching and saving:
        lib.archive_engine(unprocessed, identifier, vars_dict)
        # and wait for the amount of time specified by limiter:
        sleep(vars_dict['limiter'])
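A minimal sketch of how such a scraper might be driven is below. The exact contents of vars_dict come from the project's config() and are not shown in these excerpts; 'limiter' is the only key the loop above reads directly, so everything else here is an assumption.

if __name__ == '__main__':
    # Hypothetical driver; the real config() likely returns more keys,
    # since lib.archive_engine() also consumes vars_dict.
    vars_dict = {
        'limiter': 5,  # seconds to wait between documents (assumed key)
    }
    template_function(vars_dict)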
Example #2
def slexy(vars_dict):
    """
    Scraping function for slexy. This one is almost identical to ix.io, with the exception of having some
    tables to dig through. It also has a heavier rate limit, so a minimum limiter is enforced

    :param vars_dict: dict of necessary variables returned from config()
    :return: nothing
    """
    lib.print_status("Starting slexy run...")
    # Connect to archive and get parameters for individual documents
    soup = BeautifulSoup(
        lib.connect("https://slexy.org/recent", verify_ssl=False).text,
        'html.parser')
    table = soup.find("table", attrs={'id': "recent_pastes"})
    parameters = set([a['href'] for a in table.findAll('a', href=True)])
    # Loop through parameters
    for param in parameters:
        # Connect and fetch the raw text
        document_soup = BeautifulSoup(
            lib.connect(f'https://slexy.org{param}', verify_ssl=False).text,
            'html.parser')
        document_table = document_soup.findAll("table")
        raw_parameter = [
            a['href'] for a in document_table[1].findAll('a', href=True)
            if 'raw' in a['href']
        ][0]
        unprocessed = BeautifulSoup(
            lib.connect(f'https://slexy.org{raw_parameter}',
                        verify_ssl=False).text, 'html.parser')
        # Pass to archive engine
        # We remove the /view/ from the param for file naming purposes
        identifier = f'slexy-{param.split("/view/")[1]}'
        lib.archive_engine(str(unprocessed), identifier, vars_dict)
        sleep(max(5, vars_dict['limiter']))
    lib.print_success("All slexy pastes processed.")
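lib.connect() itself is not shown in any of these excerpts; judging from the call sites it returns a requests-style response object and accepts an optional verify_ssl flag. A rough sketch of such a wrapper, assuming requests is the underlying HTTP library, could look like this (the real implementation may add retries, custom headers, or proxy handling):

import requests

def connect(url, verify_ssl=True):
    # Hypothetical reconstruction of lib.connect(): fetch a URL and
    # return the response so callers can read .text, as above.
    response = requests.get(url, verify=verify_ssl, timeout=30)
    response.raise_for_status()
    return response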
Example #3
def pastebin(vars_dict):
    """
    This function fetches the pastebin archive and all the pastes in it. It passes them to archive_engine(),
    then sleeps for the interval specified by vars_dict['limiter'].

    :param vars_dict: dict of necessary variables returned from config()
    :return: Nothing
    """
    # Fetch the pastebin public archive
    lib.print_status("Starting pastebin run...")
    arch_page = lib.connect("https://pastebin.com/archive")
    arch_soup = BeautifulSoup(arch_page.text, 'html.parser')
    sleep(2)
    # Parse the archive HTML to get the individual document URLs
    table = arch_soup.find("table", attrs={'class': "maintable"})
    parameters = [
        a['href'] for a in table.findAll('a', href=True)
        if 'archive' not in a['href']
    ]
    # For each paste listed, connect and pass the text to archive_engine()
    for param in parameters:
        param = param[1:]  # removes the leading forward slash
        document_page = lib.connect(f"https://pastebin.com/{param}")
        document_soup = BeautifulSoup(document_page.text, 'html.parser')
        # Fetch the raw text and pass to archive_engine()
        unprocessed = document_soup.find('textarea').contents[0]
        identifier = f'pastebin-{param}'
        lib.archive_engine(unprocessed, identifier, vars_dict)
        sleep(vars_dict['limiter'])
    lib.print_success("All pastebin pastes processed.")
Example #4
def main(server_name, image_name_prefix):
    lib.connect(config.RS_USER, config.RS_KEY)
    servers = lib.server_list()
    for server in servers:
        if server['name'].startswith(server_name):
            image_name = '%s%s' % (image_name_prefix, server['name'])
            logging.info('Creating image from server %s with name %s' %
                    (server['name'], image_name))
            data = lib.image_create(server, image_name)
            logging.info('  Id of new image: %d' % (data['id'],))
Example #5
def main(image_id, flavor_id, server_name):
    lib.connect(config.RS_USER, config.RS_KEY)
    image = lib.image_details({'id': int(image_id)})
    logging.info('Creating server from image %s' % (image['name'],))
    flavor = lib.flavor_details({'id': int(flavor_id)})
    logging.info('Creating server from flavor %s' % (flavor['name'],))
    data = lib.server_create(image, flavor, server_name)
    logging.info('  Id of new server: %d' % (data['id'],))
    logging.info('  Admin pass (IMPORTANT): %s' % (data['adminPass'],))
    logging.info('  Public IP: %s' % (', '.join(data['addresses']['public']),))
    logging.info('  Private IP: %s' % (', '.join(data['addresses']['private']),))
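These Rackspace-style main() entry points take their parameters positionally; the surrounding script is not part of the excerpt, so the wiring below is a guess. A sketch for the create-server variant, with the logging setup and argument handling assumed:

import logging
import sys

if __name__ == '__main__':
    # Assumed wiring: usage string and logging format are illustrative only.
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    if len(sys.argv) != 4:
        sys.exit('usage: create_server.py IMAGE_ID FLAVOR_ID SERVER_NAME')
    main(sys.argv[1], sys.argv[2], sys.argv[3])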
Example #6
def ixio(vars_dict):
    """
    This is the scraping function for ix.io. It works very similarly to the pastebin() function,
    and fetches a list of documents from an archive, processes them, and cools down

    :param vars_dict: dict of necessary variables returned from config()
    :return: nothing
    """
    lib.print_status("Starting ix.io run...")
    # Connect to archive and gather individual document parameters
    soup = BeautifulSoup(lib.connect("http://ix.io/user/").text, 'html.parser')
    # The parameter is sanitized (has its leading and trailing forward slashes removed) during this comprehension
    parameters = set(
        [a['href'].replace('/', '') for a in soup.findAll('a', href=True)])
    # Loop through parameters and get raw text
    for param in parameters:
        document_soup = BeautifulSoup(
            lib.connect(f'http://ix.io/{param}').text, 'html.parser')
        # Pass raw text to archive engine
        identifier = f'ixio-{param}'
        lib.archive_engine(str(document_soup), identifier, vars_dict)
        sleep(vars_dict['limiter'])
    lib.print_success("All ix.io pastes processed.")
Example #7
    def do_connect(self, url):
        """usage: connect [URL]

        Connect to the given endpoint URL.
        """
        if self.url:
            self.do_disconnect(None)

        try:
            uuid = connect(url, SECRET_KEY)
        except Exception as e:
            print(f"{Fore.RED}Connection failed{Fore.RESET}: {e}")
            return

        available_commands = get_available_commands(url, SECRET_KEY)
        for key, value in available_commands.items():
            self.add_cmd(key, value)

        print(f"{Fore.GREEN}Connected to {uuid}{Fore.RESET}\n")
        self.prompt = f"{Fore.BLUE}sl{Fore.RESET} > "
        self.url = url
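do_connect() follows the cmd.Cmd convention (do_-prefixed handlers, self.prompt, docstring shown by help). The enclosing class is not part of the excerpt, so the minimal shell sketched below, including the add_cmd() helper, is an assumption rather than the project's actual code:

import cmd

class RemoteShell(cmd.Cmd):
    # Hypothetical enclosing shell for do_connect()/do_disconnect().
    prompt = 'sl > '

    def __init__(self):
        super().__init__()
        self.url = None

    def add_cmd(self, name, handler):
        # Assumed helper: expose a server-advertised command as do_<name>.
        setattr(self, f'do_{name}', handler)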
Example #8
def login():
    data = request.json or request.form
    username = data['username']
    passwd = data['passwd']
    domain = data.get('domain', 'google.com')
    server = data.get('server', 'talk.google.com')
    port = int(data.get('port', 5222))
    client = lib.connect(username, passwd, domain, server, port)
    if not client:
        data = {'status': 'nok'}
        return jsonify(data)

    client.last = datetime.datetime.now()
    token = lib.newtoken()
    clients[token] = client

    if not running:
        disconnect()

    data = {'status': 'ok',
            'token': token}
    return jsonify(data)
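login() reads like a Flask view that accepts either JSON or form data and returns a token on success. A hedged example of exercising it with the requests library; the host, port, and route path are assumptions, and the credentials are placeholders:

import requests

resp = requests.post(
    'http://localhost:5000/login',
    json={'username': 'alice@example.com', 'passwd': 'secret',
          'domain': 'google.com', 'server': 'talk.google.com', 'port': 5222},
)
print(resp.json())  # expected shape: {'status': 'ok', 'token': '...'} on success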
Example #9
def main():
    lib.connect(config.RS_USER, config.RS_KEY)
    images = lib.image_list()
    logging.info('id - name - status - progress')
    for image in images:
        logging.info(image)
Example #10
def main():
    lib.connect(config.RS_USER, config.RS_KEY)
    flavors = lib.flavor_list()
    for flavor in flavors:
        logging.info(flavor)
Example #11
def main(server_id, image_name):
    lib.connect(config.RS_USER, config.RS_KEY)
    server = lib.server_details({'id': int(server_id)})
    logging.info('Creating image from server %s' % (server['name'],))
    data = lib.image_create(server, image_name)
    logging.info('  Id of new image: %d' % (data['id'],))
Example #12
def main():
    lib.connect(config.RS_USER, config.RS_KEY)
    servers = lib.server_list()
    for server in servers:
        logging.info(server)
Example #13
def main(server_id):
    lib.connect(config.RS_USER, config.RS_KEY)
    lib.server_delete({'id': int(server_id)})
Example #14
def main(image_id):
    lib.connect(config.RS_USER, config.RS_KEY)
    image = lib.image_details({'id': int(image_id)})
    logging.info(image)
Example #15
def main(image_id):
    lib.connect(config.RS_USER, config.RS_KEY)
    lib.image_delete({'id': int(image_id)})
Example #16
# First initialization
import webrepl_setup
from machine import reset
import ubinascii as binascii
import network
import lib
import webrepl

# Connect with a dynamic IP
lib.connect('dynamic', 'Jarinya', '19771977')

webrepl.start()

mac = binascii.hexlify(network.WLAN().config('mac'), ':').decode()
print('Controller MAC is: ' + mac)

# Cloud update config file for this MAC
lib.updateConfig(mac)

# Cloud populate tags
lib.popTags()

# Only need to set the networking once

# Station Config : Connect to the nearest AP
lib.roaming()

# AP Config
lib.setAP()

#import webrepl_setup
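Here lib.connect('dynamic', ssid, password) presumably brings up the Wi-Fi station interface and waits for a DHCP lease; only the result (a controller reachable for WebREPL) is visible in the script. A rough sketch of what such a helper could do with MicroPython's network module, with the 'dynamic' mode handling assumed:

import network
import time

def connect(mode, ssid, password):
    # Hypothetical reconstruction: join the given access point as a station.
    # The real lib.connect() may also support a 'static' mode with a fixed
    # ifconfig; only the DHCP path is sketched here.
    sta = network.WLAN(network.STA_IF)
    sta.active(True)
    if not sta.isconnected():
        sta.connect(ssid, password)
        while not sta.isconnected():
            time.sleep(1)
    print('Network config:', sta.ifconfig())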
Example #17
def should_act(exchange, margin_buy, margin_sell, please_skip=False, extra=""):
    currency = exchange[4:]
    pList = lib.returnTicker(forceCache=True)
    data = lib.tradeHistory(exchange, forceCache=True)
    balanceMap = lib.returnCompleteBalances(forceCache=True)

    strike = find_next(data)
    if strike:
        sell_price = strike * (1 + margin_sell)
        buy_price = strike * (1 - margin_buy)
    # if we can't find anything then we can go off our averages
    else:
        analyzed = lib.analyze(data)
        sortlist = sorted(data, key=lambda x: x['rate'])
        sell_price = analyzed['lowestBuy'] * (1 + margin_sell)
        buy_price = analyzed['lowestBuy'] * (1 - margin_buy)

    order = False

    market_low = pList[exchange]['highestBid']
    market_high = pList[exchange]['lowestAsk']
    buy_rate = pList[exchange]['highestBid'] + 0.00000001
    sell_rate = pList[exchange]['lowestAsk'] - 0.00000001

    graph = graph_make(buy_price, market_low, market_high, sell_price,
                       margin_buy, margin_sell)
    market_graphic = "{:.8f} {}{:.8f}{:.8f} {}{:.8f} {}".format(
        buy_price, ' ' if buy_price < buy_rate else '>', buy_rate, sell_rate,
        ' ' if sell_price > sell_rate else '>', sell_price, graph)

    if please_skip:
        lib.plog("{:5} {:6} {} {:4}".format(currency, '*SKIP*', market_graphic,
                                            extra))
        return False

    if buy_rate < buy_price:

        p = lib.connect()
        amount_to_trade = unit / buy_rate
        order = p.buy(exchange, buy_rate, amount_to_trade)
        rate = buy_rate
        trade_type = 'buy'

    elif sell_rate > sell_price:

        p = lib.connect()
        amount_to_trade = unit / sell_rate
        if amount_to_trade < balanceMap[currency]['available']:
            #try:
            order = p.sell(exchange, sell_rate, amount_to_trade)
            rate = sell_rate
            trade_type = 'sell'
            #except:
            #    lib.plog("{:9} Failed sell {:.8f} @ {:.8f} (bal: {:.8f})".format(exchange, amount_to_trade, buy_price, balanceMap[currency]['available']))

    else:
        lib.plog("{:5} {:6} {} {:4}".format(currency, "", market_graphic,
                                            extra))
        return False

    if order:
        lib.showTrade(order,
                      exchange,
                      source='bot',
                      trade_type=trade_type,
                      rate=rate,
                      amount=amount_to_trade,
                      doPrint=False)
        lib.plog("{:5} {:6} {}".format(currency, trade_type, market_graphic))
        return True
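To make the margin arithmetic above concrete: with a last strike of 0.00100000 and 2% margins on both sides, the bot only buys once the highest bid drops below 0.00098000 and only sells once the lowest ask rises above 0.00102000. The numbers below are made up for illustration:

strike = 0.00100000
margin_buy = margin_sell = 0.02
sell_price = strike * (1 + margin_sell)  # 0.00102000
buy_price = strike * (1 - margin_buy)    # 0.00098000
# An order is placed only when the market crosses one of these thresholds.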
"""
Connection module.
"""
from lib import connect

DB = connect()
Example #19
def main(server_id):
    lib.connect(config.RS_USER, config.RS_KEY)
    server = lib.server_details({'id': int(server_id)})
    logging.info(server)
Example #20
#!/usr/bin/python3
import lib
import json
import sys

p = lib.connect()
for i in sys.argv[1:]:
    print("{} {}".format(i, p.cancelOrder(i)))
Example #21
#!/usr/bin/python3 -i

import vrep, math, random, time, copy, lib as ml
from deap import base, creator, tools
import vector as vv
from decimal import Decimal

NB_GENERATIONS = 100000
NB_ROBOTS = 10
NB_GENES = 20

#Initialisation
vrep.simxFinish(-1)
ml.connect()
ml.stopSimulation()
ml.loadScene()

random.seed()
wrist = ml.getHandle("WristMotor")
elbow = ml.getHandle("ElbowMotor")
shoulder = ml.getHandle("ShoulderMotor")
robot = ml.getHandle("2W1A")

rb_cur = 0
rbtLst = [[robot, wrist, elbow, shoulder]]
initPos = ml.getPosition(robot)
ml.setPosition(robot, [initPos[0], initPos[1] - 1.5, initPos[2]])
initPos = ml.getPosition(robot)
print(initPos)
for i in range(1, NB_ROBOTS):
    new_rb = ml.copyRobot(robot, i - 1)