def train_2_hidden_layer_anns(lwr=10,
                              upr=50,
                              eta=0.25,
                              mini_batch_size=10,
                              num_epochs=10):
    # Train 4-layer ANNs whose second hidden layer sweeps from lwr to upr
    # neurons in steps of 10, for each learning rate in eta_vals; the eta
    # parameter is only a default and is overwritten inside the loop.
    assert 100 >= upr > lwr >= 10
    assert lwr % 10 == 0 and upr % 10 == 0  # both bounds divisible by 10
    hl1 = 10  # first hidden layer is fixed at 10 neurons

    eta_vals = (2, 1.5, 1.0, 0.5, 0.25)
    # results[0, 1:] holds the hidden-layer sizes, results[1:, 0] the etas;
    # cell (i+1, hl2//10) stores the evaluation score for that combination
    results = np.zeros([6, 6], dtype=float)
    results[0, 0] = -1
    results[1:, 0] = np.array(eta_vals)
    for y in range(len(results) - 1):
        results[0, y + 1] = 10 * (y + 1)
    for i in range(len(eta_vals)):
        eta = eta_vals[i]
        hl2 = lwr
        while hl2 <= upr:
            print(
                f"==== Training {input_layer}x{hl1}x{hl2}x{output_layer} - eta:{eta} ANN ======"
            )
            net = ann([input_layer, hl1, hl2, output_layer])
            net.mini_batch_sgd(train_d, num_epochs, mini_batch_size, eta,
                               test_d)
            result = net.evaluate(test_d)
            results[i + 1, hl2 // 10] = result
            print(
                f"==== Training {input_layer}x{hl1}x{hl2}x{output_layer} ANN - DONE - eta:{eta} ======"
            )
            hl2 += 10

    print(f"Training DONE {input_layer}x{hl1}x10-50x{output_layer}")
    print(results)
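
# Hedged usage sketch (not part of the original listing): the function reads
# the module-level globals input_layer, output_layer, train_d and test_d; the
# values below assume the MNIST setup used later in this file (784 inputs,
# 10 outputs, data from mnist_loader.load_data_wrapper()).
input_layer, output_layer = 784, 10
train_d, valid_d, test_d = load_data_wrapper()
train_2_hidden_layer_anns(lwr=10, upr=50, mini_batch_size=10, num_epochs=10)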
Example #2
    def __init__(self, suffix, name, main, num_episodes):
        self.total_steps = 0
        self.batch_size = 64
        self.gamma = 0.99  # reward discount factor

        # step intervals controlling how often gradients are applied and how
        # often the worker syncs with the main network
        self.gradient_update_step = 1
        self.main_update_step = 2

        self.grads = {}

        self.env = gym.make(name)

        # the worker's environment must match the shared main network
        oshape = self.env.observation_space.shape
        assert main.output_size == self.env.action_space.n
        assert oshape[0] == main.input_shape

        self.steps = history.history(self.batch_size)

        self.main = main
        self.num_episodes = num_episodes

        self.current_state = state.state(oshape[0], main.state_size)

        # per-worker copy of the network, initialised from the main network's
        # current parameters
        self.network = ann.ann('thread_' + suffix, main.input_size,
                               main.output_size, main.swriter)
        self.network.import_params(main.network.export_params())

        self.thread = threading.Thread(target=self.run)
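
# Standalone sketch, separate from the class above, of the two environment
# checks in __init__ using only gym; CartPole-v0 is merely an example id.
import gym

env = gym.make('CartPole-v0')
print(env.observation_space.shape)  # e.g. (4,) -> must equal main.input_shape
print(env.action_space.n)           # e.g. 2   -> must equal main.output_size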
Example #3
    def __init__(self, name, state_size, output_path):
        self.name = name

        self.state_size = state_size
        self.input_shape = 2
        self.output_size = 3
        self.input_size = self.state_size * self.input_shape

        # one summary directory per launch, stamped with the current unix time
        # (tf.summary.FileWriter is the TensorFlow 1.x API)
        output_path += '/run.%d' % (time.time())
        self.swriter = tf.summary.FileWriter(output_path)

        self.network = ann.ann("main", self.input_size, self.output_size,
                               self.swriter)
def train_1_hidden_layer_anns(lwr=10,
                              upr=50,
                              eta=0.25,
                              mini_batch_size=10,
                              num_epochs=10):
    # Train 3-layer ANNs whose single hidden layer sweeps from lwr to upr
    # neurons in steps of 10; reuses the module-level input_layer,
    # output_layer, train_d and test_d globals.
    assert 100 >= upr > lwr >= 10
    assert lwr % 10 == 0 and upr % 10 == 0  # both bounds divisible by 10
    while lwr <= upr:
        print(f"==== Training {input_layer}x{lwr}x{output_layer} ANN ======")
        net = ann([input_layer, lwr, output_layer])
        net.mini_batch_sgd(train_d, num_epochs, mini_batch_size, eta, test_d)

        print(
            f"==== Training {input_layer}x{lwr}x{output_layer} ANN - DONE - eta:{eta} ======"
        )
        lwr += 10
import pathlib as path
from ann import ann
from pickle import dump
from mnist_loader import load_data_wrapper
train_d, valid_d, test_d = load_data_wrapper()


dir_name = "pck_nets"
p = path.Path(dir_name)
assert p.is_dir()

# convert every saved .json network in pck_nets/ into a pickled .pkl twin
nets = []
dummy_sizes = [10, 10]  # placeholder layer sizes; net.load() restores the real ones
for json_file in p.iterdir():
    if json_file.is_file() and json_file.name.endswith(".json"):
        net = ann(dummy_sizes)
        net.load(json_file)
        # nets.append(net)
        name = json_file.name.replace(".json", ".pkl")
        with open(p / name, "wb") as my_path:
            dump(net, my_path)

# print(nets)
# best_3 = ""
# best_3_score = 0
# best_4 = ""
# best_4_score = 0
# best_5 = ""
# best_5_score = 0
# for net in nets:
#     accuracy = net.accuracy(valid_d)
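
# Hedged companion sketch (not in the original script): load one pickled net
# back and score it on the validation split; the file name is hypothetical,
# and accuracy() is the method used in the commented block above.
from pickle import load

with open(p / "net.pkl", "rb") as fh:
    restored = load(fh)
print(restored.accuracy(valid_d))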
def main():
    myann = ann.ann()
    myann.request()
Example #7
def load_csq(self, pos_subset=None):
    #pos_subset = ["1","2","3"]
    obj_ann = ann.ann(self.params["annfile"], self.params["tabix"])
    results = {"loci": [], "variants": []}
    for l in open(self.params["dr_vcffile"]):
        if l[0] == "#": continue  # skip VCF header lines
        arr = l.rstrip().split()
        if pos_subset and arr[1] not in pos_subset: continue
        # re_dp4 is a module-level regex defined outside this snippet; dp4r is
        # the fraction of reads supporting the alternate allele
        dp4 = [int(x) for x in re_dp4.search(l).group(1).split(",")]
        dp4r = (dp4[2] + dp4[3]) / (dp4[0] + dp4[1] + dp4[2] + dp4[3])

        var = {}
        var["pos"] = arr[1]
        var["chr"] = arr[0]
        var["ref"] = arr[3]
        var["alt"] = arr[4]
        var["dp4r"] = dp4r

        # lines without a BCSQ consequence are recorded as intergenic
        if "BCSQ" not in l:
            var["bcsq"] = "intergenic"
            results["variants"].append(var)
            results["loci"].append((arr[0], arr[1]))
            continue
        csq = re.search("BCSQ=(.*)", arr[7]).group(1).split("|")
        if csq[0][0] == "@": continue  # "@" marks a consequence pointing at another record
        var["bcsq"] = csq[0]
        if csq[0] == "non_coding":
            var["csq"] = {
                "locus_tag": csq[1],
                "nt_change": "-",
                "aa_change": "-"
            }
        elif csq[0] == "stop_lost&inframe_deletion":
            var["csq"] = {
                "locus_tag": csq[1],
                "nt_change": "-",
                "aa_change": "-"
            }
        elif csq[0] == "inframe_deletion&start_lost":
            var["csq"] = {
                "locus_tag": csq[1],
                "nt_change": "-",
                "aa_change": "-"
            }
        elif csq[0] == "frameshift":
            var["csq"] = {
                "locus_tag": csq[1],
                "nt_change": "-",
                "aa_change": "-"
            }
        else:
            if len(csq) == 7:
                var["csq"] = {
                    "locus_tag": csq[1],
                    "aa_change": csq[5],
                    "nt_change": csq[6]
                }
            else:
                var["csq"] = {
                    "locus_tag": csq[1],
                    "nt_change": "-",
                    "aa_change": "-"
                }
        results["variants"].append(var)
        results["loci"].append((arr[0], arr[1]))

    if len(results["loci"]) == 0:
        print("Warning! No Variants")
    pos_tup = [("Chromosome", x["pos"]) for x in results["variants"]]
    dict_ann = obj_ann.pos2ann(pos_tup)
    for var in results["variants"]:
        tann = dict_ann["Chromosome"][var["pos"]]
        if var["bcsq"] == "intergenic" or var["bcsq"] == "non_coding" or var[
                "bcsq"] == "frameshift":
            locus_type = "intergenic" if tann["ncr"] == "inter" else "ncRNA"
            change = "%s%s>%s" % (tann["gene_nt"], var["ref"], var["alt"])
            var["csq"] = {
                "var_type": locus_type,
                "locus_tag": tann["rv"],
                "nt_change": change,
                "aa_change": "-"
            }
        var["csq"]["gene"] = tann["gene"]
    return results
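
# Standalone illustration (not from the original source) of how the BCSQ INFO
# field is unpacked above; the sample value is made up but follows the
# pipe-separated layout the code indexes into.
import re
info = "BCSQ=missense|Rv0667|gene1|protein_coding|+|450S>450L|761110G>T"
csq = re.search("BCSQ=(.*)", info).group(1).split("|")
print(csq[0], csq[1], csq[5], csq[6])  # consequence, locus_tag, aa_change, nt_change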
Example #8
import game as myGame
from board import Board
import random
import os
import ann as annie
import numpy
from load import normalize, append_snake
from ai2048demo import welch
import scipy

moves = [Board.LEFT, Board.UP, Board.RIGHT, Board.DOWN]

# module-level network shared by the helpers below ([1000] is presumably the
# layer-size spec the ann constructor expects)
ann = annie.ann([1000])

def setup_ai(silent=False):
    ann.main(silent)

def get_ai_move(game):
    # get_board_weird is defined elsewhere in this module; predict_move
    # returns the index of the chosen direction
    board = get_board_weird(game)
    board = numpy.asarray(board)
    prob = ann.predict_move(board)
    #print("prob", prob)
    return moves[prob]

def get_ai_moves(game):
    board = get_board_weird(game)
    board = numpy.asarray(board)
    board = normalize(board)
    #board = append_snake(board)
    return ann.predict_move(board)
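
# Standalone illustration (assumption: predict_move returns an integer index
# into moves, as get_ai_move implies); the score vector is made up.
probs = numpy.asarray([0.1, 0.2, 0.6, 0.1])  # one score per direction
print(moves[int(probs.argmax())])            # -> Board.RIGHT for this vector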
import pandas as p
import sys
from ann import ann
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
#from data_plotter import data_plotter

# the first CSV column is the sample id (used as the index); the diagnosis
# letter is mapped to a numeric label: M (malignant) -> 1, B (benign) -> 0
data = p.read_csv("data_cancer.txt", header=None, index_col=0)
data = data.replace("M", 1)
data = data.replace("B", 0)

# rescale every feature column onto [0, 1]
x = data.values  # returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
data = p.DataFrame(x_scaled)

train, test = train_test_split(data, test_size=0.5)

machine = ann()
machine.create_ann(input_nodes=30,
                   hidden_layer_lenght=30,  # (sic) spelling as defined by create_ann
                   number_hidden_layers=4,
                   output_nodes=1)

# after rescaling, column 0 is the label and columns 1-30 are the features
machine.train(train[list(range(1, 31))], train[[0]])
machine.test(test[list(range(1, 31))], test[[0]])
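
# Standalone illustration of the MinMaxScaler step above: each column is
# mapped linearly onto [0, 1] (numpy and sklearn only; the matrix is made up).
import numpy as np

toy = np.array([[1.0, 200.0], [2.0, 400.0], [3.0, 600.0]])
print(preprocessing.MinMaxScaler().fit_transform(toy))
# -> [[0.  0. ]
#     [0.5 0.5]
#     [1.  1. ]]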
#meta = computeMeta(ds)
qds = quantifyData(learning, dum, False)
#testing = ds[:200000]
print("Quantifying testing data with dum")
tds = quantifyData(testing, dum, False)
td = quantifyData(ts, dum, True)
#print len(qds[0])

#tds = quantifyData(testing, dum)
# s, i, g = learn(10)
print("Doing gradient descent...")
#s = gdescent(0.1, 20, 6)
print("ANN")
# earlier runs with other hyper-parameters:
# s = ann.ann(qds, 0.005, 8000)
# s = ann.ann(qds, 0.008, 12000)
s = ann.ann(qds, 0.01, 10000)  # args presumably (data, learning rate, iterations)
#print "Resulting weights of the gradient descent"
#print s
# s = loadWeights()
print("Before doing prediction")
prediction = ann.predict(tds, False)
#prediction = predict(s,tds, False)
#prediction = loadPrediction()
print(accuracy(prediction, testing, 0.5))
print("Printing ROC")
x, y = plotROC(prediction, testing)
print("AUC")
print(AUC(x, y))

prediction = ann.predict(td, True)


# define your networks
net1 = ann([input_layer, 50, 60, output_layer])  # 4 layers
net2 = ann([input_layer, 50, 60, 30, output_layer])  # 5 layers
net3 = ann([input_layer, 10, 30, output_layer])  # 4 layers
net4 = ann([input_layer, 60, 30, 20, 40, 20, output_layer])  # 7 layers
net5 = ann([input_layer, 10, 50, 60, 30, output_layer])  # 6 layers

# define an ensemble of 5 nets
networks = (net1, net2, net3, net4, net5)
eta_vals = (0.1, 0.25, 0.3, 0.4, 0.5)

mini_batch_sizes = (5, 10, 15, 20)


# train networks
def train_nets(networks, eta_vals, mini_batch_sizes, num_epochs, path):
    assert pathlib.Path(path).is_dir()
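    # The body of train_nets is cut off in this listing. A hedged sketch of
    # the grid loop it plausibly performs, reusing only calls that appear
    # elsewhere in this file (mini_batch_sgd, evaluate); saving results to
    # `path` is a guess and is left out:
    #
    # for net in networks:
    #     for eta in eta_vals:
    #         for mbs in mini_batch_sizes:
    #             net.mini_batch_sgd(train_d, num_epochs, mbs, eta, test_d)
    #             print(net.evaluate(test_d))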
 # rows s..e-1 form the held-out fold; x_train1/y_train1 come from earlier in
 # the enclosing loop (outside this fragment). Note that row e itself lands in
 # neither split, an apparent off-by-one kept from the original.
 x_train2 = train_X[e + 1:len(train_X)]
 y_train2 = train_Y[e + 1:len(train_X)]

 x_test = train_X[s:e]
 y_test = train_Y[s:e]
 x_train = np.vstack((x_train1, x_train2))
 y_train = np.vstack((y_train1, y_train2))
 #print len(x_train1),len(x_train2),len(x_train),len(x_test)
 #print len(y_train1),len(y_train2),len(y_train),len(y_test)
 #x_train=pd.DataFrame(x_train)
 train['x'] = x_train
 train['y'] = y_train
 val['x'] = x_test
 val['y'] = y_test
 print('------------------------k=', i, '--------------------')
 model = ann.ann()
 #model=logistic_regression.LRegression()
 model.learning_rate = 0.0007
 model.model_restore = False
 model.batch_size = 100000
 epoch = 2000
 model.epochs = epoch
 model.no_of_features = X.shape[1]
 model.no_of_classes = no_of_classes
 model.working_dir = str(i)
 #model.hidden_layer_list=[100,100,100]
 model.hidden_layer_list = lay
 model.activation_list = ['relu', 'relu', 'tanh']
 model.loss_type = l
 model.optimizer_type = opt
 model.setup()
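
# Hedged sketch (assumption, not from the source) of the enclosing k-fold loop
# this fragment appears to live in, with s/e marking the current fold's rows:
#
# k = 5
# fold = len(train_X) // k
# for i in range(k):
#     s, e = i * fold, (i + 1) * fold
#     x_train1, y_train1 = train_X[:s], train_Y[:s]
#     ...  # fragment above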
Example #13
from ann import ann
import mnist_basics as mnist

EPOCHS_PER_GAME = 3
BATCH = 100
NEURONS_IN_HIDDEN_LAYERS = [784, 500, 500, 10]
LIST_OF_FUNCTIONS = ["rectify", "rectify", "softmax"]
LEARNING_RATE = 0.001
MOMENTUM_RATE = 0.9

a = ann(neuronsInHiddenLayers=NEURONS_IN_HIDDEN_LAYERS,
        listOfFunctions=LIST_OF_FUNCTIONS,
        learningRate=LEARNING_RATE,
        momentumRate=MOMENTUM_RATE,
        errorFunc="RMSprop")
a.run(BATCH, EPOCHS_PER_GAME)
mnist.minor_demo(a)
Example #15
# files.saveData(eann, 'eann-all.db')
# files.saveData(eann.w, 'eann-w-all.db')


# init ann: restore an apparently pre-evolved network and its weights, then
# fine-tune it with backprop
eann = files.loadData('eann-all.db')
eann_w = files.loadData('eann-w-all.db')
opt = {
    'architecture': eann.architecture,
    'learningRate': 9,
    'error': 0.001,
    'epochs': 50,
    'batch': 100
}
nn = ann.ann(opt)
eann_w = np.asarray(eann_w)
for i in range(len(eann_w)):
    eann_w[i] = eann_w[i].astype(float)
nn.w = eann_w

nn.train(train_data, train_result)
# files.saveData(nn, 'eann-bp.db')

_results = nn.sim(test_data)
_results = _results.transpose()

# print predicted vs. true class for the first 20 test samples
accuracy = 0
for i in range(len(test_result)):
    if i < 20:
        print(_results[i].argmax(), " : ", test_result[i].argmax())
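
# The accuracy tally is cut off in this listing; a hedged completion sketch
# (assumption: count matching argmax predictions, as the loop above implies):
#
# for i in range(len(test_result)):
#     if _results[i].argmax() == test_result[i].argmax():
#         accuracy += 1
# print(float(accuracy) / len(test_result))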
Example #16
    "Stop": "*"
}
re_mut = re.compile("([A-Za-z]+)([\s-]*[0-9]+)([A-Za-z]+)")
infile = sys.argv[1]
outfile = sys.argv[2]
drdb = defaultdict(lambda: defaultdict(list))

positions = set()
for l in open(infile):
    arr = l.rstrip().split()
    for p in arr[1].split("/"):
        if p == "-": continue
        positions.add(int(p))

obj_ann = ann.ann(
    "/Users/jody/github/TBProfiler/ref/MTB-h37rv_asm19595v2-eg18.tab.ann.gz",
    "/Users/jody/github/TBProfiler/bin/tabix")

annot = obj_ann.pos2ann([("Chromosome", str(x)) for x in sorted(positions)])

for l in open(infile):
    #    print l.rstrip()
    #AMINOGLYCOSIDES 1473247 C       A       rrs     C1402A
    arr = l.rstrip().split()
    re_obj = re_mut.match(arr[5])
    ref = re_obj.group(1)
    gene_pos = re_obj.group(2)
    alt = re_obj.group(3)
    if arr[1] == "-": continue
    #    print "%s\t%s\t%s" % (ref,gene_pos,alt)
    tmp = annot["Chromosome"][arr[1].split("/")[0]]
def main():
    myann = ann.ann()
    myann.test()