Example 1
if not os.path.exists(train_env.path):
    os.makedirs(train_env.path)

if np.size(os.listdir(train_env.path)) > 0:
    resume = FLAGS.resume
else:
    resume = False
""" Main training step """
with tf.Session() as sess:
    # Re-create the session with an explicit device config: allow a GPU when
    # requested, otherwise hide all GPUs so the run stays on the CPU.
    if FLAGS.use_GPU:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 1}))
    else:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))

    sess.run(init)
    agent = Agent(FLAGS, True, train_env, train_model)

    if resume:
        print('Loading Model...')
        ckpt = tf.train.get_checkpoint_state(train_env.path)
        saver.restore(sess, ckpt.model_checkpoint_path)

    startdate = time.localtime()
    for i in range(train_model.param.num_episodes):
        rT1, rT2, processingtime, j = agent.Global(sess)
        print("Train scenario  :", i, agent.total_steps, rT1, rT2,
              str(float("{0:.2f}".format(processingtime))) + "s")

        # Periodically test the model.
        if i % train_model.param.test_freq == 0 and agent.total_steps > agent.param.pre_train_steps:
            filename = "result/" + FLAGS.model_name + "/training%04d%02d%02d%02d%02d%02d_%d_%d.txt" % (
Example 2
    print('Loading Model...')
    ckpt = tf.train.get_checkpoint_state(train_env.path)
    saver.restore(sess, ckpt.model_checkpoint_path)

    start = time.time()
    startdate = time.localtime()

    # SNP case: sweep substitution probabilities, 10 random trials per setting
    SNPprob = [0.05, 0.1, 0.2]
    for _ in range(3):
        print(_)
        for __ in range(10):
            train_env.maxI = 2
            train_env.p = [SNPprob[_], 0]
            agent = Agent(FLAGS, False, train_env, train_model)
            seq1 = agent.env.seq1
            seq2 = agent.env.seq2
            start1, start2, lcslen = lcs.longestSubstring(seq1, seq2)

            if FLAGS.show_align:
                dot_plot = 255 * np.ones((len(seq1), len(seq2)))
                for i in range(lcslen):
                    dot_plot[start1 + i, start2 + i] = 0
            if FLAGS.print_align:
                record = recordalign.record_align()

            print("test", _, __)
            print("raw seq len", len(seq1), len(seq2))
            print("lcs len 1", start1, lcslen, len(seq1) - start1 - lcslen)
            print("lcs len 2", start2, lcslen, len(seq2) - start2 - lcslen)
Example 3
    sess.run(init)

    if resume:
        print('Loading Model...')
        ckpt = tf.train.get_checkpoint_state(game_env().path)
        saver.restore(sess, ckpt.model_checkpoint_path)
        
    startdate = time.localtime()
        
    copyGraph(train_model.copyOps, sess)
    startdate = time.localtime()
    # Find seed hits between the two sequences with REMiner2, then sort their
    # coordinates: (uX1, uX2) span seq1 and (uY1, uY2) span seq2.
    SeedNum = REMiner2.REMiner2(1, seq1, seq2)
    uX1, uX2, uY1, uY2 = REMiner2.GetSEED(SeedNum, True)
    uX1, uX2, uY1, uY2 = function.sortalign(uX1, uX2, uY1, uY2)
    X = 200          # Greedy-X algorithm parameter
    endlen = 20000   # flank window length taken on each side of a seed
    print(len(uX1))  # number of seeds found

    for i in range(param.num_finetune):
        index = random.randrange(len(uX1))
        agent = Agent(FLAGS, False, game_env(), train_model, seq1[uX2[index]:uX2[index]+endlen]+"A", seq2[uY2[index]:uY2[index]+endlen]+"A", ismeta=True)
        rT11, rT21, j1, _ = agent.metatrain(sess, X=X)
        agent = Agent(FLAGS, False, game_env(), train_model, seq1[uX1[index]-endlen:uX1[index]][::-1]+"A", seq2[uY1[index]-endlen:uY1[index]][::-1]+"A", ismeta=True)
        rT12, rT22, j2, _ = agent.metatrain(sess, X=X)
        print(i,index,j1+np.abs(uX2[index]-uX1[index])+j2,rT21+np.abs(uX2[index]-uX1[index])+rT22,rT11+np.abs(uX2[index]-uX1[index])+rT12)

        copyGraph(train_model.copyOps2, sess)
        saver.save(sess, game_env().path + '/tuned-' + str(i) + '.ckpt')
        print(str(i)+"th Model Saved")

Example 4
    os.makedirs(train_env.path)

if np.size(os.listdir(train_env.path)) > 0:
    resume = FLAGS.resume
else:
    resume = False

""" Main test step """
with tf.Session() as sess:
    if FLAGS.use_GPU:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 1}))
    else:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))

    sess.run(init)
    agent = Agent(FLAGS, False, train_env, train_model)

    print('Loading Model...')
    ckpt = tf.train.get_checkpoint_state(train_env.path)
    saver.restore(sess, ckpt.model_checkpoint_path)
    
    start = time.time()
    startdate = time.localtime()

    for _ in range(47):
        #print(_)
        for __ in range(_+1,47):
            seq1 = seq[_]
            seq2 = seq[__]
            rT2o = 0
            jo = 0
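
The nested loop above enumerates every unordered pair from the 47 test sequences. The same iteration can be written with itertools; a sketch, with seqs standing in for the preloaded seq list:

from itertools import combinations

def all_pairs(seqs):
    """Yield each unordered pair (seq1, seq2) of sequences exactly once."""
    for a, b in combinations(range(len(seqs)), 2):
        yield seqs[a], seqs[b]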
Example 5
if not os.path.exists(train_env.path):
    os.makedirs(train_env.path)

if np.size(os.listdir(train_env.path)) > 0:
    resume = FLAGS.resume
else:
    resume = False
""" Main test step """
with tf.Session() as sess:
    if FLAGS.use_GPU:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 1}))
    else:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))

    sess.run(init)
    agent = Agent(FLAGS, False, train_env, train_model)

    print('Loading Model...')
    ckpt = tf.train.get_checkpoint_state(train_env.path)
    saver.restore(sess, ckpt.model_checkpoint_path)

    start = time.time()
    startdate = time.localtime()

    X = 100  # Greedy-X algorithm parameter

    agent.set(seq1, seq2)
    path = []
    score = []
    ptime = []
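
This example times a single pair with time.time() and collects per-run outputs in the path, score and ptime lists. A minimal sketch of that measure-and-collect step; the timed function is an assumption, not project code:

import time

def timed(fn, *args, **kwargs):
    """Run fn(*args, **kwargs) and return (result, elapsed_seconds)."""
    t0 = time.time()
    result = fn(*args, **kwargs)
    return result, time.time() - t0

For instance, something like timed(agent.Global, sess) would record one run together with its processing time, assuming the agent interface shown in Example 1.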
Example 6
    os.makedirs(train_env.path)

if np.size(os.listdir(train_env.path)) > 0:
    resume = FLAGS.resume
else:
    resume = False

""" Main test step """
with tf.Session() as sess:
    if FLAGS.use_GPU:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 1}))
    else:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))

    sess.run(init)
    agent = Agent(FLAGS, False, train_env, train_model)

    print('Loading Model...')
    ckpt = tf.train.get_checkpoint_state(train_env.path)
    saver.restore(sess, ckpt.model_checkpoint_path)
    
    start = time.time()
    startdate = time.localtime()
    rT2o = 0
    jo = 0
    processingtimeo = 0

    #start1,start2,lcslen = lcs.longestSubstring(seq1,seq2)
    if FLAGS.show_align:
        dot_plot = 255*np.ones((len(seq1),len(seq2)))
    if FLAGS.print_align:
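
When FLAGS.show_align is set, these scripts build a dot-plot matrix with 255 as background and 0 at aligned positions. A hedged sketch of writing that matrix to an image file; matplotlib is an assumed dependency, not imported by the example:

import matplotlib.pyplot as plt

def save_dot_plot(dot_plot, filename="dot_plot.png"):
    """Save the alignment dot plot (255 = background, 0 = aligned) as a grayscale image."""
    plt.imshow(dot_plot, cmap="gray", vmin=0, vmax=255)
    plt.xlabel("seq2 position")
    plt.ylabel("seq1 position")
    plt.savefig(filename, dpi=150)
    plt.close()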
Example 7
        ckpt = tf.train.get_checkpoint_state(game_env().path)
        saver.restore(sess, ckpt.model_checkpoint_path)

    startdate = time.localtime()

    copyGraph(train_model.copyOps, sess)
    startdate = time.localtime()
    for i in range(param.num_finetune):
        pair = random.sample(range_seq, 2)
        seq1 = seq[pair[0]]
        seq2 = seq[pair[1]]
        start1, start2, lcslen = lcs.longestSubstring(seq1, seq2)
        agent = Agent(FLAGS,
                      False,
                      game_env(),
                      train_model,
                      seq1[start1 + lcslen:] + "A",
                      seq2[start2 + lcslen:] + "A",
                      ismeta=True)
        rT11, rT21, j1, _ = agent.metatrain(sess)
        agent = Agent(FLAGS,
                      False,
                      game_env(),
                      train_model,
                      seq1[start1 - 1::-1] + "A",
                      seq2[start2 - 1::-1] + "A",
                      ismeta=True)
        rT12, rT22, j2, _ = agent.metatrain(sess)
        print(i, pair[0], pair[1], j1 + lcslen + j2, rT21 + lcslen + rT22,
              rT11 + lcslen + rT12)
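
Each iteration above splits the pair at its longest common substring and meta-trains on both flanks: the suffixes after the anchor in the forward direction and the reversed prefixes before it, with the anchor contributing lcslen exact matches to the printed totals. A sketch of that split; the start > 0 guard is an addition, since seq1[start1 - 1::-1] returns the entire reversed sequence when start1 is 0:

def split_at_anchor(seq1, seq2, start1, start2, lcslen):
    """Return (right1, right2, left1, left2) flanks around the anchor match."""
    right1 = seq1[start1 + lcslen:] + "A"
    right2 = seq2[start2 + lcslen:] + "A"
    left1 = (seq1[start1 - 1::-1] if start1 > 0 else "") + "A"
    left2 = (seq2[start2 - 1::-1] if start2 > 0 else "") + "A"
    return right1, right2, left1, left2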
Example 8
if not os.path.exists(train_env.path):
    os.makedirs(train_env.path)

if np.size(os.listdir(train_env.path)) > 0:
    resume = FLAGS.resume
else:
    resume = False
""" Main test step """
with tf.Session() as sess:
    if FLAGS.use_GPU:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 1}))
    else:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))

    sess.run(init)
    agent = Agent(FLAGS, False, train_env, train_model)

    print('Loading Model...')
    ckpt = tf.train.get_checkpoint_state(train_env.path)
    saver.restore(sess, ckpt.model_checkpoint_path)

    start = time.time()
    startdate = time.localtime()

    for _ in range(47):
        #print(_)
        for __ in range(_ + 1, 47):
            seq1 = seq[_]
            seq2 = seq[__]

            print("test", _, __)
Example 9
if not os.path.exists(train_env.path):
    os.makedirs(train_env.path)

if np.size(os.listdir(train_env.path)) > 0:
    resume = FLAGS.resume
else:
    resume = False
""" Main test step """
with tf.Session() as sess:
    if FLAGS.use_GPU:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 1}))
    else:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))

    sess.run(init)
    agent = Agent(FLAGS, False, train_env, train_model)

    print('Loading Model...')
    ckpt = tf.train.get_checkpoint_state(train_env.path)
    saver.restore(sess, ckpt.model_checkpoint_path)

    start = time.time()
    startdate = time.localtime()

    for pairs in [["Homo_sapiens_BRCA", "Mus_musculus_BRCA"],
                  ["Mus_musculus_BRCA", "Rattus_norvegicus_BRCA"],
                  ["Rattus_norvegicus_BRCA", "Homo_sapiens_BRCA"],
                  ["Homo_sapiens_ELK1", "Mus_musculus_ELK1"],
                  ["Mus_musculus_ELK1", "Rattus_norvegicus_ELK1"],
                  ["Rattus_norvegicus_ELK1", "Homo_sapiens_ELK1"],
                  ["Homo_sapiens_CCDC91", "Mus_musculus_CCDC91"],
Example 10
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))

    sess.run(init)

    if resume:
        print('Loading Model...')
        ckpt = tf.train.get_checkpoint_state(game_env().path)
        saver.restore(sess, ckpt.model_checkpoint_path)
        
    startdate = time.localtime()
    for i in range(param.num_episodes):
        mainBuffer = experience_buffer()
        p = [random.choice(range_SNP), random.choice(range_indel)]
        maxI = random.choice(range_maxI)
        print(i, p, maxI)
        agent = Agent(FLAGS, True, game_env(p, maxI), train_model, ismeta=True)

        for k in range(param.K):
            copyGraph(agent.copyOps, sess)
            rT1, rT2, j, mainBuffer = agent.metatrain2(sess, mainBuffer)
            print(i,k,j,rT1)

        for t in range(param.meta_train_step):
            # update the main network
            trainBatch = mainBuffer.sample(param.meta_batch_size)  # Select the batch from the experience buffer
        
            # Q1: greedy next actions chosen by the main network; Q2: the Q-values
            # used to evaluate those actions (also taken from the main network here)
            Q1 = sess.run(agent.mainQN.predict, feed_dict={agent.mainQN.scalarInput: np.vstack(trainBatch[:, 3])})
            Q2 = sess.run(agent.mainQN.Qout, feed_dict={agent.mainQN.scalarInput: np.vstack(trainBatch[:, 3])})
            
            # trainBatch[:, 4] flags whether the action ended the episode
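
The update above is assembling a double-DQN-style target: Q1 picks the greedy next action, Q2 supplies the values used to evaluate it, and trainBatch[:, 4] marks episode ends. A self-contained sketch of that target computation; gamma and the argument names here are assumptions, not the project's identifiers:

import numpy as np

def double_dqn_targets(rewards, dones, greedy_actions, eval_q, gamma=0.99):
    """Return r + gamma * eval_q[greedy action] * (1 - done) for each sample."""
    batch = np.arange(len(greedy_actions))
    double_q = eval_q[batch, greedy_actions]
    end_multiplier = 1.0 - np.asarray(dones, dtype=np.float32)
    return np.asarray(rewards, dtype=np.float32) + gamma * double_q * end_multiplier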
Example 11
    os.makedirs(train_env.path)

if np.size(os.listdir(train_env.path)) > 0:
    resume = FLAGS.resume
else:
    resume = False

""" Main test step """
with tf.Session() as sess:
    if FLAGS.use_GPU:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 1}))
    else:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))

    sess.run(init)
    agent = Agent(FLAGS, False, train_env, train_model)

    print('Loading Model...')
    ckpt = tf.train.get_checkpoint_state(train_env.path)
    saver.restore(sess, ckpt.model_checkpoint_path)
    
    start = time.time()
    startdate = time.localtime()

    #start1,start2,lcslen = lcs.longestSubstring(seq1,seq2)
    if FLAGS.print_align:
        record = recordalign.record_align()

    print("Ecoli test")
    print("raw seq len",len(seq1),len(seq2))
    #print("lcs len 1",start1,lcslen,len(seq1)-start1-lcslen)
Example 12
    os.makedirs(train_env.path)

if np.size(os.listdir(train_env.path)) > 0:
    resume = FLAGS.resume
else:
    resume = False

""" Main test step """
with tf.Session() as sess:
    if FLAGS.use_GPU:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 1}))
    else:
        sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))

    sess.run(init)
    agent = Agent(FLAGS, False, train_env, train_model)

    print('Loading Model...')
    ckpt = tf.train.get_checkpoint_state(train_env.path)
    saver.restore(sess, ckpt.model_checkpoint_path)
    
    start = time.time()
    startdate = time.localtime()
    rT2o = 0
    jo = 0
    processingtimeo = 0

    #start1,start2,lcslen = lcs.longestSubstring(seq1,seq2)
    if FLAGS.show_align:
        dot_plot = 255*np.ones((len(seq1),len(seq2)))
    if FLAGS.print_align:
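
The zero-initialised rT2o, jo and processingtimeo above read as running totals accumulated over sub-alignments (an interpretation, not something the excerpt states). A sketch of that accumulate-then-summarise pattern:

def summarize(results):
    """results: iterable of (exact_matches, aligned_length, seconds) per sub-alignment."""
    total_exact = total_len = total_time = 0
    for exact, length, seconds in results:
        total_exact += exact
        total_len += length
        total_time += seconds
    identity = total_exact / total_len if total_len else 0.0
    return identity, total_time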