Example #1
def get_machines(num_instances_to_use, aws_group_name):
    machines = []
    #connect to AWS
    ec2 = boto3.client('ec2')
    autoscale = boto3.client('autoscaling')

    #how many machines are currently running?
    instances = get_instances_in_group(autoscale, aws_group_name)
    num_instances = len(instances)
    print(get_time(), 'Number of instances online:', num_instances)

    #switch on more machines if we need them
    if num_instances < num_instances_to_use:
        print(get_time(), 'Launching instances...')
        autoscale.set_desired_capacity(AutoScalingGroupName=aws_group_name,
                                       DesiredCapacity=num_instances_to_use)

        #tell us status every few seconds
        while num_instances < num_instances_to_use:
            instances = get_instances_in_group(autoscale, aws_group_name)
            num_instances = len(instances)
            print(get_time(), 'Number of instances online:', num_instances)
            sleep(3)

    #grab instance IDs
    instance_ids = [i['InstanceId'] for i in instances]
    print(get_time(), "These instances are online:", instance_ids)

    for instance_id in instance_ids:
        print(get_time(), 'Waiting for instance', instance_id, 'to boot...')
        while True:
            try:
                state = state_name_of(instance_id, ec2)
                if state == 'running':
                    print(get_time(), instance_id, 'is running!')
                    break
                elif state == 'pending':
                    pass
                else:
                    print(get_time(), instance_id, 'in state', state + ', not usable')
                    return False
            except IndexError:
                print(get_time(), instance_id, 'not queryable yet')
                return False
            sleep(3)
    for instance_id in instance_ids:
        print(get_time(), 'Waiting for instance', instance_id,
              'to report OK...')
        while True:
            try:
                if status_of(instance_id, ec2) == 'ok':
                    print(get_time(), instance_id, 'reported OK!')
                    break
            except IndexError:
                print(get_time(), 'Instance', instance_id, 'disappeared!')
                return False
            sleep(3)
    for instance_id in instance_ids:
        machines.append(Machine(ip_address_of(instance_id, ec2)))
    return machines
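The function above relies on several helpers that the excerpt does not show. The sketch below is a rough guess at what they might look like: the names come from the snippet, but the bodies are assumptions based on the usual boto3 calls (status_of and ip_address_of would follow the same pattern, via describe_instance_status and describe_instances).

import time

def get_time():
    # Timestamp prefix used by the log messages; the exact format is a guess.
    return time.strftime('[%H:%M:%S]')

def get_instances_in_group(autoscale, aws_group_name):
    # Instances currently attached to the Auto Scaling group.
    response = autoscale.describe_auto_scaling_groups(
        AutoScalingGroupNames=[aws_group_name])
    return response['AutoScalingGroups'][0]['Instances']

def state_name_of(instance_id, ec2):
    # 'pending', 'running', 'stopped', ... for a single instance.  Indexing an
    # empty result with [0] is the likely source of the IndexError the caller
    # catches while an instance is not yet queryable.
    reservations = ec2.describe_instances(InstanceIds=[instance_id])['Reservations']
    return reservations[0]['Instances'][0]['State']['Name']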
Example #2
def setup(self, codec):
    print(get_time(), 'Connecting to', self.host)
    if self.rsync('./', self.work_root + '/rd_tool/') != 0:
        print(get_time(), 'Couldn\'t set up machine ' + self.host)
        sys.exit(1)
    self.check_shell('rm -rf ' + self.work_root + '/' + codec)
    for binary in binaries[codec]:
        self.check_shell('mkdir -p ' + self.work_root + '/' + codec + '/' + os.path.dirname(binary))
        if self.rsync('../' + codec + '/' + binary, self.work_root + '/' + codec + '/' + binary) != 0:
            print(get_time(), 'Couldn\'t upload codec binary ' + binary + ' to ' + self.host)
            sys.exit(1)
Example #3
def get_machines(num_instances_to_use, aws_group_name):
    machines = []
    # connect to AWS
    ec2 = boto3.client("ec2")
    autoscale = boto3.client("autoscaling")

    # how many machines are currently running?
    instances = get_instances_in_group(autoscale, aws_group_name)
    num_instances = len(instances)
    print(get_time(), "Number of instances online:", num_instances)

    # switch on more machines if we need them
    if num_instances < num_instances_to_use:
        print(get_time(), "Launching instances...")
        autoscale.set_desired_capacity(AutoScalingGroupName=aws_group_name, DesiredCapacity=num_instances_to_use)

        # tell us status every few seconds
        while num_instances < num_instances_to_use:
            instances = get_instances_in_group(autoscale, aws_group_name)
            num_instances = len(instances)
            print(get_time(), "Number of instances online:", num_instances)
            sleep(3)

    # grab instance IDs
    instance_ids = [i["InstanceId"] for i in instances]
    print(get_time(), "These instances are online:", instance_ids)

    for instance_id in instance_ids:
        print(get_time(), "Waiting for instance", instance_id, "to boot...")
        while True:
            try:
                state = state_name_of(instance_id, ec2)
                if state == "running":
                    print(get_time(), instance_id, "is running!")
                    break
                elif state == "pending":
                    pass
                else:
                    print(get_time(), instance_id, "in state", state + ", not usable")
                    return False
            except IndexError:
                print(get_time(), instance_id, "not queryable yet")
                return False
            sleep(3)
    for instance_id in instance_ids:
        print(get_time(), "Waiting for instance", instance_id, "to report OK...")
        while True:
            try:
                if status_of(instance_id, ec2) == "ok":
                    print(get_time(), instance_id, "reported OK!")
                    break
            except IndexError:
                print(get_time(), "Instance", instance_id, "disappeared!")
                return False
            sleep(3)
    for instance_id in instance_ids:
        machines.append(Machine(ip_address_of(instance_id, ec2)))
    return machines
Example #4
def run(work_items, slots):
    retries = 0
    max_retries = 5000
    free_slots = slots
    taken_slots = []
    work_done = []
    total_num_of_jobs = len(work_items)
    while True:
        for slot in taken_slots:
            if not slot.busy:
                if not slot.work.failed:
                    work_done.append(slot.work)
                    print(get_time(), len(work_done), 'out of',
                          total_num_of_jobs, 'finished.')
                elif retries >= max_retries:
                    break
                else:
                    retries = retries + 1
                    print(get_time(), 'Retrying work...', retries, 'of',
                          max_retries, 'retries.')
                    work_items.append(slot.work)
                taken_slots.remove(slot)
                free_slots.append(slot)

        #have we finished all the work?
        if len(work_items) == 0:
            if len(taken_slots) == 0:
                print(get_time(), 'All work finished.')
                break
        elif retries >= max_retries:
            print(get_time(), 'Max number of failed retries reached!')
            sys.exit(1)
        else:
            if len(free_slots) != 0:
                slot = free_slots.pop()
                work = work_items.pop()
                slot.work = work
                print(get_time(), 'Encoding', work.get_name(), 'on',
                      slot.machine.host)
                work_thread = threading.Thread(target=slot.execute,
                                               args=(work, ))
                work_thread.daemon = True
                slot.busy = True
                work_thread.start()
                taken_slots.append(slot)
        sleep(0.2)
    return work_done
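For context, run() only needs a small contract from each slot: the loop sets slot.busy and slot.work before starting the worker thread, and expects execute() to clear busy when it finishes and to record success or failure on the work item. A minimal, hypothetical sketch of such a slot (the real Slot class and the work.run() call are not part of this excerpt):

class Slot:
    def __init__(self, machine):
        self.machine = machine
        self.busy = False
        self.work = None

    def execute(self, work):
        # Runs on a worker thread.  run() polls self.busy to detect completion
        # and reads work.failed to decide whether to count the item as done or
        # re-queue it for a retry.
        try:
            work.run(self.machine)      # assumed Work API
            work.failed = False
        except Exception:
            work.failed = True
        finally:
            self.busy = False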
Example #5
def run(work_items, slots):
    retries = 0
    max_retries = 10
    free_slots = slots
    taken_slots = []
    work_done = []
    total_num_of_jobs = len(work_items)
    while True:
        for slot in taken_slots:
            if not slot.busy:
                if not slot.work.failed:
                    work_done.append(slot.work)
                    print(get_time(),len(work_done),'out of',total_num_of_jobs,'finished.')
                elif retries >= max_retries:
                    break
                else:
                    retries = retries + 1
                    print(get_time(),'Retrying work...',retries,'of',max_retries,'retries.')
                    work_items.append(slot.work)
                taken_slots.remove(slot)
                free_slots.append(slot)

        #have we finished all the work?
        if len(work_items) == 0:
            if len(taken_slots) == 0:
                print(get_time(),'All work finished.')
                break
        elif retries >= max_retries:
            print(get_time(),'Max number of failed retries reached!')
            sys.exit(1)
        else:
            if len(free_slots) != 0:
                slot = free_slots.pop()
                work = work_items.pop()
                slot.work = work
                print(get_time(),'Encoding',work.get_name(),'on',slot.machine.host)
                work_thread = threading.Thread(target=slot.execute, args=(work,))
                work_thread.daemon = True
                slot.busy = True
                work_thread.start()
                taken_slots.append(slot)
        sleep(0.2)
    return work_done
Example #6
def get():
    # get the entered amount and set the input field to default
    
    n = amnt.get()
    amnt.set('Enter Amount')

    nt = note.get()
    note.set('Enter Note')
    t = get_time()


    # create a new dictionary of the entered amount and a timestamp as a key
    record = { f"{t}" : [f"{n}", f"{nt}"]}

    # Append to transactions.json file

    with open('./transactions.json', 'r+') as f:
        data = load(f)
        data['Records'].append(record)

        # Get the new cash after the transaction
        data['Cash'] = str(int(data['Cash']) + int(n))
        
        # set the money variable to the new cash
        global money
        money = data['Cash']

        # move the cursor to the beginning of the file
        f.seek(0)

        # update the data in the file with the new details
        dump(data,f)

    #The decorated string
    string = f"{t}      {int(n)}            {nt}"

    # insert the transaction to the UI and the new cash 
    mylist.insert(END,string)
    tot.config(text=str(money))
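For reference, the bare load and dump calls above presumably come from json (from json import load, dump), and amnt, note, mylist and tot are tkinter variables and widgets defined elsewhere in the module. The reads and writes also imply a transactions.json shaped roughly as follows, shown here as a Python literal for illustration (the variable name and the values are made up):

# Inferred structure of transactions.json (illustrative values only):
example_transactions = {
    "Cash": "1500",
    "Records": [
        {"<timestamp from get_time()>": ["<amount>", "<note>"]}
    ]
}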
Example #7
    listOfres = df_fold['Resource'].unique()
    listOfresInt = list(range(1, unique_resources + 1))
    mapping_res = dict(zip(listOfres, listOfresInt))
    print(mapping_res)

    df_fold.Resource = [mapping_res[item] for item in df_fold.Resource]

    # group by activity, resource and timestamp by caseid
    act = df_fold.groupby('CaseID',
                          sort=False).agg({'Activity': lambda x: list(x)})
    res = df_fold.groupby('CaseID',
                          sort=False).agg({'Resource': lambda x: list(x)})
    temp = df_fold.groupby('CaseID',
                           sort=False).agg({'Timestamp': lambda x: list(x)})

    time_prefix = ut.get_time(temp, max_trace)
    i = 0
    time_prefix_new = []
    while i < len(time_prefix):
        time_val = [x for x in time_prefix[i] if x != 0.0]
        time_prefix_new.append(time_val)
        i = i + 1
    print(time_prefix_new[0])

    sequence_prefix = ut.get_sequence(act, max_trace)
    resource_prefix = ut.get_sequence(res, max_trace)

    i = 0
    list_sequence_prefix = []
    list_resource_prefix = []
Example #8
print('symbols with volume > 450 =', len(symbol_list))
# start order_book web socket > call back saves most recent data to disk
conn_key = bm.start_multiplex_socket(socket_list,
                                     ut.process_socket_pushes_order_book)
bm.start()
# start worker_get_klines_on_the_minute() thread
t = Thread(target=ut.worker_get_klines_on_the_minute,
           args=['1m', symbol_list, 230])
t.start()

# delay to start 5s past the minute (when we have candles)
secs = time.localtime().tm_sec
delay = 65 - secs
print('start superbot in', delay, 'seconds')
time.sleep(delay)
print('starting super bot now', ut.get_time())

# if current_state == dict, some sort of buying & selling >> always starts with buy(), always needs to end thread with sys.exit()
# if current_state == False, monitoring

# while True:
#     for s in symbols_trimmed:
#         # if s == 'ETCBTC':
#         #     print(ut.get_time()) # >> it is plenty fast  (10 per sec or so)
#         # current_state = load_current_state(symbol=s, file_number=1000, length='1m')
#         # if isinstance(current_state, dict):
#         #     print('loading state to sell coin..', current_state['symbol'])
#         #     ut.buy_coin_from_state(current_state)
#         #     continue
#         current_state = { 'status': 'monitoring' }
#         if current_state['status'] == 'monitoring':
# (starting with previous best params)

Example #9
import utility as ut
import utility_2 as ut2
import datetime
import time
from pprint import pprint

################################################################################

save_24hr_candle_data = True
save_24hr_drop_data = True

################################################################################

print('start @', ut.get_time())

# generated by optimization
future_candles_length = 15

min_volume_btc = 0
minutes = 1
datapoints_trailing = 0
drops_to_collect = 2
do_print = True

# update symbol list
ut.update_symbol_list()
symbols = ut.pickle_read(
    '/home/ec2-user/environment/botfarming/Development/binance_btc_symbols.pklz'
)
Example #10
def setup(self, codec):
    print(get_time(), 'Connecting to', self.host)
    if subprocess.call(['./transfer_git.sh', self.host, codec]) != 0:
        print(get_time(), 'Couldn\'t set up machine ' + self.host)
        sys.exit(1)
Example #11
    # ['20180201_24', '2018-01-31 12:00', '2018-02-01 12:00'],
    # ['20180202_24', '2018-02-01 12:00', '2018-02-02 12:00'],
    # ['20180203_24', '2018-02-02 12:00', '2018-02-03 12:00'],
    # ['20180204_24', '2018-02-03 12:00', '2018-02-04 12:00'],
    # ['20180205_24', '2018-02-04 12:00', '2018-02-05 12:00'],
    # ['20180206_24', '2018-02-05 12:00', '2018-02-06 12:00'],
    # ['20180210_24', '2018-02-09 12:00', '2018-02-10 12:00'],
    # ['20180211_24', '2018-02-10 12:00', '2018-02-11 12:00'],
    # ['20180212_24', '2018-02-11 12:00', '2018-02-12 12:00'],
    # ['20180213_24', '2018-02-12 12:00', '2018-02-13 12:00'],
    ['20180215_24', '2018-02-14 12:00', '2018-02-15 12:00'],
    ['20180216_24', '2018-02-15 12:00', '2018-02-16 12:00'],
    ['20180217_24', '2018-02-16 12:00', '2018-02-17 12:00'],
]

print('start @', ut.get_time())

ut2.save_data(save_params, datapoints_trailing, min_volume, minutes)

print('done @', ut.get_time())

################################################################################ OLD CODE
# **function now in utility_2.py....     def save_data(save_params, datapoints_trailing, min_volume, minutes)

########### Step Back Style
# day_folder = '20180126'
# minutes = [1]
# step_backs = [2]

# for settings in save_params:
Example #12
# `du_Parterre_d'Eau` must be converted into
#`'du_Parterre_d'"'"'Eau'
#                ^^^ Required to make sure the `'` is captured.
def shellquote(s):
    return "'" + s.replace("'", "'\"'\"'") + "'"
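# Illustration (not part of the original source): for an input containing a
# single quote, e.g. shellquote("d'Eau"), this returns 'd'"'"'Eau', which the
# shell re-joins into the literal d'Eau, so the embedded quote survives.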


if 'DAALA_ROOT' not in os.environ:
    rd_print(
        "Please specify the DAALA_ROOT environment variable to use this tool.")
    sys.exit(1)

daala_root = os.environ['DAALA_ROOT']

extra_options = ''
if 'EXTRA_OPTIONS' in os.environ:
    extra_options = os.environ['EXTRA_OPTIONS']
    print(get_time(),
          'Passing extra command-line options:"%s"' % extra_options)


class Work:
    def __init__(self):
        self.failed = False

    def parse(self, stdout, stderr):
        self.raw = stdout
        split = None
        try:
            split = self.raw.decode('utf-8').replace(')', ' ').split()
            self.pixels = split[1]
            self.size = split[2]
            self.metric = {}
Example #13
# `du_Parterre_d'Eau` must be converted into
#`'du_Parterre_d'"'"'Eau'
#                ^^^ Required to make sure the `'` is captured.
def shellquote(s):
    return "'" + s.replace("'", "'\"'\"'") + "'"

if 'DAALA_ROOT' not in os.environ:
    rd_print("Please specify the DAALA_ROOT environment variable to use this tool.")
    sys.exit(1)

daala_root = os.environ['DAALA_ROOT']

extra_options = ''
if 'EXTRA_OPTIONS' in os.environ:
    extra_options = os.environ['EXTRA_OPTIONS']
    print(get_time(),'Passing extra command-line options:"%s"' % extra_options)

class Work:
    def __init__(self):
        self.failed = False
    def parse(self, stdout, stderr):
        self.raw = stdout
        split = None
        try:
            split = self.raw.decode('utf-8').replace(')',' ').split()
            self.pixels = split[1]
            self.size = split[2]
            self.metric = {}
            self.metric['psnr'] = {}
            self.metric["psnr"][0] = split[6]
            self.metric["psnr"][1] = split[8]
Example #14
def process_socket_pushes_tickers(msg):
    # close and restart the socket, if socket can't reconnect itself
    if 'e' in msg and msg['e'] == 'error':
        print('restarting socket in process_socket_pushes_tickers()')
        bm.close()
        conn_key = bm.start_multiplex_socket(socket_list, process_socket_pushes_tickers)
        bm.start()
    else:
        if 'stream' in msg and 'data' in msg:
            s = msg['stream'].split('@')[0].upper()
            ticker_path = '/home/ec2-user/environment/botfarming/Development/recent_tickers/'+s+'.pklz'
            current_price = float(msg['data']['c'])
            # ut.pickle_write(ticker_path, msg['data'])
            ut.pickle_write(ticker_path, current_price)

            if s == 'ETHBTC' and time.localtime().tm_sec == 0:
                current_price = float(msg['data']['c'])
                print('STILL ALIVE process_socket_pushes_tickers()', s, current_price, ut.get_time())
        else:
            print('ERROR: unexpected socket response in process_socket_pushes_tickers(), printing msg so we know what is going on')
            pprint(msg)
Example #15
#!/usr/bin/python2.7
import sys
import utility as ut
import utility_2 as ut2

# DON'T FORGET to start:
# https://www.pythonanywhere.com/user/nelsonriley/files/home/nelsonriley/Development/binance_update_order_book.py?edit

bot_index = 1
total_bots = 2

the_ip = ut2.get_computer_ip()
allowed_ips = ['10.0.0.22', '10.0.0.212']
non_allowed_ips = ['10.0.0.98', '10.0.0.253']
if the_ip in allowed_ips:
    print(ut.get_time())
    ut2.run_24hr_1min_drop_strategy(bot_index, total_bots)
else:
    print(the_ip, 'is not allowed, start over')
Example #16

config_dir = os.getenv("CONFIG_DIR", os.getcwd())
runs_dst_dir = os.getenv("RUNS_DST_DIR", os.path.join(os.getcwd(), "../runs"))
codecs_src_dir = os.getenv("CODECS_SRC_DIR", os.path.join(os.getcwd(), ".."))

if 'DAALA_ROOT' not in os.environ:
    rd_print(None,"Please specify the DAALA_ROOT environment variable to use this tool.")
    sys.exit(1)

daala_root = os.environ['DAALA_ROOT']

extra_options = ''
if 'EXTRA_OPTIONS' in os.environ:
    extra_options = os.environ['EXTRA_OPTIONS']
    print(get_time(),'Passing extra command-line options:"%s"' % extra_options)

work_items = []

#load all the different sets and their filenames
video_sets_f = codecs.open(os.path.join(config_dir, 'sets.json'),'r',encoding='utf-8')
video_sets = json.load(video_sets_f)

parser = argparse.ArgumentParser(description='Collect RD curve data.')
parser.add_argument('set',metavar='Video set name',nargs='+')
parser.add_argument('-codec',default='daala')
parser.add_argument('-bindir',default='./')
parser.add_argument('-prefix',default='.')
parser.add_argument('-awsgroup', default='Daala')
parser.add_argument('-machines', default=14)
parser.add_argument('-mode', default='metric')