Example 1
import numpy as np

import options as op
from utilities import store_results


def multiRunWrapper(modelTrainerFunction, modelName):
    options = op.read()

    run_results = {
        "train_seen_domains": [],
        "valid_seen_domains": [],
        "test_seen_domains": [],
        "train_unseen_domains": [],
        "valid_unseen_domains": [],
        "test_unseen_domains": [],
    }
    for run_id in range(options['num_runs']):
        print(str(modelName) + " - starting run " + str(run_id))
        current_res = modelTrainerFunction(run_id)
        for key, obj in run_results.items():
            obj.append(current_res[key])

    # calculate the mean and std of each metric across runs, then store the final results
    final_results = {
        key: {"mean": np.mean(values), "std": np.std(values)}
        for key, values in run_results.items()
    }
    print("################################ FINAL AVERAGE RESULTS ###################################################")
    print(final_results)
    store_results(final_results, modelName, "average", options)
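
A minimal usage sketch for the wrapper above. The dummy trainer below is purely illustrative (the project passes a real training function such as Example 7's runMAMLtrain); all it has to do is return the six metric keys the wrapper collects:

from multiRunWrapper import multiRunWrapper

def dummyTrainer(run_id):
    # illustrative stand-in for a real training run
    return {
        "train_seen_domains": 0.90,
        "valid_seen_domains": 0.85,
        "test_seen_domains": 0.84,
        "train_unseen_domains": 0.70,
        "valid_unseen_domains": 0.65,
        "test_unseen_domains": 0.63,
    }

# runs options['num_runs'] times, then prints and stores the averaged metrics
multiRunWrapper(dummyTrainer, "dummy-model")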
Example 2
import os
from datetime import datetime

import torch

import options
from dataloader import ShapesQADataset


def load_dataset_prepare_opt():
    OPT = options.read()
    # seed random for reproducibility
    if OPT.get("use_gpu"):
        torch.cuda.manual_seed_all(1337)
    else:
        torch.manual_seed(1337)

    dataset = ShapesQADataset(OPT)
    # pull a few attributes out of the dataset into the main opts for other bots to use
    OPT["props"] = dataset.properties
    OPT["task_vocab"] = len(dataset.task_defn)
    # make a directory to save checkpoints
    timestamp = datetime.strftime(datetime.utcnow(), "%d-%b-%Y-%X")
    OPT["save_path"] = os.path.join(OPT["save_path"],
                                    "world-{}".format(timestamp))
    os.makedirs(OPT["save_path"])
    return OPT, dataset
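
A short usage sketch of the helper above (the surrounding file layout is assumed, but the keys printed are exactly those the function sets):

OPT, dataset = load_dataset_prepare_opt()
print(OPT["task_vocab"])  # number of task definitions pulled from the dataset
print(OPT["save_path"])   # the timestamped checkpoint directory created above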
Example 3
import socketserver, connections, time, options, log, sqlite3, ast, socks, hashlib, os, random, re, keys, base64
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
(port, genesis_conf, verify_conf, version_conf, thread_limit_conf,
 rebuild_db_conf, debug_conf, purge_conf, pause_conf, ledger_path_conf,
 hyperblocks_conf, warning_list_limit_conf, tor_conf, debug_level_conf,
 allowed, pool_ip_conf, sync_conf, mining_threads_conf, diff_recalc_conf,
 pool_conf, pool_address, ram_conf, pool_percentage_conf,
 node_ip_conf) = options.read()
(key, private_key_readable, public_key_readable, public_key_hashed,
 address) = keys.read()  # load keys
app_log = log.log("pool.log", debug_level_conf)


def percentage(percent, whole):
    return int((percent * whole) / 100)


def payout():
    shares = sqlite3.connect('shares.db')
    shares.text_factory = str
    s = shares.cursor()

    conn = sqlite3.connect('static/ledger.db')
    conn.text_factory = str
    c = conn.cursor()

    #get unique addresses
    addresses = []
    for row in s.execute("SELECT * FROM shares"):
        shares_address = row[0]
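
The payout snippet above is truncated mid-loop, but the percentage helper is self-contained. A tiny illustrative sketch (the 95 percent pool cut and the reward of 15 are made-up numbers, not project settings):

reward = 15
miner_cut = percentage(95, reward)  # int((95 * 15) / 100) == 14
pool_fee = reward - miner_cut       # the remaining 1 stays with the pool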
Example 4
# author: satwik kottur

import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim

import itertools, pdb, random, pickle, os
import numpy as np
from chatbots import Team
from dataloader import Dataloader
import options
from time import gmtime, strftime

# read the command line options
options = options.read()
#------------------------------------------------------------------------
# setup experiment and dataset
#------------------------------------------------------------------------
data = Dataloader(options)
numInst = data.getInstCount()

params = data.params
# merge the command-line options into params
for key, value in options.items():
    params[key] = value

#------------------------------------------------------------------------
# build agents, and set up the optimizer
#------------------------------------------------------------------------
team = Team(params)
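
The key-by-key copy above can be written as one dict update; a behavior-equivalent one-liner:

# equivalent to the key/value loop above
params.update(options)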
Example 5
import sqlite3, options, db, time
(port, genesis_conf, verify_conf, version_conf, thread_limit_conf,
 rebuild_db_conf, debug_conf, purge_conf, pause_conf, ledger_path_conf,
 hyperblocks_conf, warning_list_limit_conf, tor_conf, debug_level_conf,
 allowed, mining_ip_conf, sync_conf, mining_threads_conf, diff_recalc_conf,
 pool_conf, pool_address, ram_conf) = options.read()


def diffget():
    conn = sqlite3.connect(ledger_path_conf)
    c = conn.cursor()

    db.execute(
        c,
        ("SELECT timestamp,block_height FROM transactions WHERE reward != 0 ORDER BY block_height DESC LIMIT 1;"
         ),
    )
    result = c.fetchall()
    db_timestamp_last = float(result[0][0])

    # calculate difficulty
    db.execute_param(c, (
        "SELECT block_height FROM transactions WHERE CAST(timestamp AS INTEGER) > ? AND reward != 0"
    ), (db_timestamp_last - 1800, ))  # 1800=30 min
    blocks_per_30 = len(c.fetchall())

    diff = blocks_per_30 * 2

    # drop diff per minute if over target
    time_drop = time.time()
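
The function is truncated just after the retarget begins. What it computes so far is simple: the number of rewarded blocks found in the 1800 seconds before the last block, doubled (the 30-minute count scaled to an hourly rate). A standalone sketch with illustrative numbers:

blocks_per_30 = 45        # illustrative: 45 rewarded blocks in the last 30 minutes
diff = blocks_per_30 * 2  # 90: the 30-minute count scaled to an hourly rate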
Example 6
import base64, sqlite3, hashlib, time, socks, keys, log, sys, connections, ast, re, options
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
from Crypto import Random
from multiprocessing import Process, freeze_support

try:
    import quickbismuth
except ImportError:
    quickbismuth = None

# load config
(port, genesis_conf, verify_conf, version_conf, thread_limit_conf,
 rebuild_db_conf, debug_conf, purge_conf, pause_conf, ledger_path_conf,
 hyperblocks_conf, warning_list_limit_conf, tor_conf, debug_level_conf,
 allowed, mining_ip_conf, sync_conf, mining_threads_conf, diff_recalc_conf,
 pool_conf, pool_address, ram_conf) = options.read()

def check_uptodate(interval, app_log):
    # check if blocks are up to date
    while sync_conf == 1:
        conn = sqlite3.connect("static/ledger.db")  # open to select the last tx to create a new hash from
        conn.text_factory = str
        c = conn.cursor()

        execute(c, ("SELECT timestamp FROM transactions WHERE reward != 0 ORDER BY block_height DESC LIMIT 1;"), app_log)
        timestamp_last_block = c.fetchone()[0]
        time_now = str(time.time())
        last_block_ago = float(time_now) - float(timestamp_last_block)

        if last_block_ago > interval:
            app_log.warning("Local blockchain is {} minutes behind ({} seconds), waiting for sync to complete".format(int(last_block_ago // 60), last_block_ago))
            time.sleep(5)
        else:
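
check_uptodate is truncated in its else branch; the staleness test itself is plain arithmetic. A standalone sketch with illustrative values:

import time

interval = 300                            # illustrative: treat 5 minutes as "behind"
timestamp_last_block = time.time() - 900  # pretend the last block is 15 minutes old
last_block_ago = time.time() - timestamp_last_block
print(last_block_ago > interval)          # True: the node would wait for sync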
Example 7
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim

import itertools, pdb, random, os
import numpy as np
from chatbots import Team
from dataloader import Dataloader
import options as op
from utilities import saveModel, load_best_results, store_results, makeDirs
from time import gmtime, strftime
from multiRunWrapper import multiRunWrapper

from copy import deepcopy
from random import sample, shuffle
options = op.read()
MODELNAME = 'maml'


def runMAMLtrain(runName='single'):

    #------------------------------------------------------------------------
    # setup experiment and dataset
    #------------------------------------------------------------------------
    data = Dataloader(options)
    numInst = data.getInstCount()

    params = data.params
    # merge the command-line options into params
    for key, value in options.items():
        params[key] = value
Example 8
import os

import numpy as np
import torch
from torch import optim
from torch.autograd import Variable

from parlai.core.params import ParlaiParser

import options
from bots import Questioner, Answerer
from dataloader import ShapesQADataset
from world import QAWorld


OPT = options.read()

# seed random for reproducibility
if OPT.get('use_gpu'):
    torch.cuda.manual_seed_all(1337)
else:
    torch.manual_seed(1337)

#-------------------------------------------------------------------------------------------------
# setup dataset and opts
#-------------------------------------------------------------------------------------------------
dataset = ShapesQADataset(OPT)
# pull a few attributes out of the dataset into the main opts for other bots to use
OPT['props'] = dataset.properties
OPT['task_vocab'] = len(dataset.task_defn)
Example 9
import socketserver, threading, options, connections, log, time, socks

# load config
(port, genesis_conf, verify_conf, version_conf, thread_limit_conf,
 rebuild_db_conf, debug_conf, purge_conf, pause_conf, ledger_path_conf,
 hyperblocks_conf, warning_list_limit_conf, tor_conf,
 debug_level_conf) = options.read()

app_log = log.log("pool.log", debug_level_conf)


def execute(cursor, what):
    # secure execute for slow nodes
    passed = 0
    while passed == 0:
        try:
            cursor.execute(what)
            passed = 1
        except Exception as e:
            app_log.info("Retrying database execute due to {}".format(e))
            time.sleep(0.1)
    return cursor


def execute_param(cursor, what, param):
    # secure execute for slow nodes
    passed = 0
    while passed == 0:
        try:
            cursor.execute(what, param)
            passed = 1
        except Exception as e:
            app_log.info("Retrying database execute due to {}".format(e))
            time.sleep(0.1)
    return cursor
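
A hedged usage sketch of the two retry helpers above; the database file and query are illustrative (the shares table is borrowed from Example 3):

import sqlite3

conn = sqlite3.connect("shares.db")  # illustrative database
c = conn.cursor()
execute(c, "SELECT COUNT(*) FROM shares")
print(c.fetchone()[0])
execute_param(c, "SELECT * FROM shares WHERE address = ?", ("some_address",))
print(c.fetchall())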