def worker(job_id):
    """Fetch one queued job by its MongoDB id string and run the matching trainer."""
    # Experiment database holding the job queue for the MNIST study.
    db = TensorDB(ip='146.169.33.34', port=27020, db_name='TransferGan', user_name='akara', password='******', studyID="MNIST")

    from bson.objectid import ObjectId

    # The job document tells us which model family and hyper-parameters to use.
    job = db.find_one_job(args={'_id': ObjectId(job_id)})
    model_kind = job['model']
    if model_kind == 'cnn':
        train_cnn(db=db, n_cnn_layers=job['n_cnn_layers'], lr=job['lr'], n_epochs=job['n_epochs'])
    elif model_kind == 'mlp':
        train_mlp(db=db, n_layers=job['n_layers'], lr=job['lr'], n_epochs=job['n_epochs'])
Beispiel #2
0
def main():
    """Populate the MNIST study, enqueue the cross-validation jobs and submit them to eAE."""
    # Experiment database that stores the dataset and the job queue.
    db = TensorDB(ip='146.169.33.34', port=27020, db_name='TransferGan', user_name='akara', password='******', studyID="MNIST")

    create_mnist_dataset(db=db)

    # Hyper-parameter grid: one job per column of each model's lists.
    cnn_grid = {
        "lr": [0.01, 0.001, 0.001],
        "n_cnn_layers": [1, 2, 2],
        "n_filters": [64, 128, 256],
        "n_epochs": [10, 10, 10],
    }
    mlp_grid = {
        "lr": [0.05, 0.0001],
        "n_layers": [1, 2],
        "n_epochs": [10, 10],
    }
    create_jobs(db=db, job_name="cv_mnist", models_dict={"cnn": cnn_grid, "mlp": mlp_grid})

    # Setting up the connection to interface
    eae = eAE("interfaceeae.doc.ic.ac.uk", 443)

    # Abort early when the interface does not answer with HTTP 200.
    if eae.is_eae_alive() != 200:
        raise Exception("!!!")

    # One parameter line per queued job id; each line becomes one worker invocation.
    job_ids = [str(j['_id']) for j in db.get_all_jobs()]
    parameters_set = "\n".join(job_ids)

    # We submit a dummy job
    job = eae.submit_jobs(parameters_set,
                          "gpu_dev",
                          "GPU",
                          "/home/akara/Workspace/tl_paper/tutorial_tensordb_cv_mnist_worker.py",
                          ['/home/akara/Workspace/tl_paper/tensorlayer'],
                          "dsihuaweiroom.doc.ic.ac.uk",
                          "22")
    print(job)
Beispiel #3
0
def main():
    """Template: create the MNIST study, enqueue CV jobs and submit them to an eAE cluster."""
    # This is to initialize the connection to your MondonDB server
    # Note: make sure your MongoDB is reachable before changing this line
    db = TensorDB(ip='IP_ADDRESS_OR_YOUR_MONGODB', port=27017, db_name='DATABASE_NAME', user_name=None, password=None, studyID='ANY_ID (e.g., mnist)')

    create_mnist_dataset(db=db)

    # One job per column of each hyper-parameter list below.
    models = {
        "cnn": {
            "lr": [0.01, 0.001, 0.001],
            "n_cnn_layers": [1, 2, 2],
            "n_filters": [64, 128, 256],
            "n_epochs": [10, 10, 10],
        },
        "mlp": {
            "lr": [0.05, 0.0001],
            "n_layers": [1, 2],
            "n_epochs": [10, 10],
        },
    }
    create_jobs(db=db, job_name="cv_mnist", models_dict=models)

    # Setting up the connection to interface
    eae = eAE("IP_ADDRESS_OF_EAE (e.g., interfaceeae.doc.ic.ac.uk)", 443)

    # Testing if the interface is Alive
    if eae.is_eae_alive() != 200:
        raise Exception("!!!")

    # Build one parameter line per queued job id.
    parameters_set = "\n".join(str(j['_id']) for j in db.get_all_jobs())

    # We submit a dummy job
    job = eae.submit_jobs(parameters_set,
                          "NAME_OF_CLUSTER (e.g., gpu_dev)",
                          "COMPUTATION_TYPE (e.g., GPU)",
                          "ABSOLUTE_PATH_TO_MAIN_FILE",
                          ['ABSOLUTE_PATH_TO_DIRECTORY_OR_FILES_TO_BE_COPIED_TO_RUN_THE_MAIN_FILE'],
                          "IP_ADDRESS_OF_HOST_MACHINE_RUNNING_THIS_SCRIPT",
                          "SSH_PORT_OF_HOST_MACHINE")
    print(job)
def worker(job_id):
    """Template worker: run one queued training job identified by its ObjectId string."""
    # This is to initialize the connection to your MondonDB server
    # Note: make sure your MongoDB is reachable before changing this line
    db = TensorDB(ip='IP_ADDRESS_OR_YOUR_MONGODB', port=27017, db_name='DATABASE_NAME', user_name=None, password=None, studyID='ANY_ID (e.g., mnist)')

    from bson.objectid import ObjectId

    # Fetch the job document describing which model and hyper-parameters to run.
    job = db.find_one_job(args={'_id': ObjectId(job_id)})
    model = job['model']
    if model == 'cnn':
        train_cnn(db=db, n_cnn_layers=job['n_cnn_layers'], lr=job['lr'], n_epochs=job['n_epochs'])
    elif model == 'mlp':
        train_mlp(db=db, n_layers=job['n_layers'], lr=job['lr'], n_epochs=job['n_epochs'])
Beispiel #5
0
def main():
    """Queue five DRL jobs, then dispatch every waiting job to the eAE GPU cluster."""
    # db = TensorDB(ip='146.169.33.34', port=27020, db_name='DRL', user_name='tensorlayer', password='******', studyID="20170524_1")
    db = TensorDB(ip='146.169.15.140', port=27017, db_name='DRL', user_name=None, password=None, studyID="1")

    # Create jobs
    for job_index in range(5):
        db.submit_job(args={
            "id": job_index,
            "name": "Deep Reinforcement Learning",
            "file": "tutorial_tensordb_atari_pong_generator.py",
            "args": "",
        })

    # Setting up the connection to interface
    eae = eAE("interfaceeae.doc.ic.ac.uk", 443)

    # Testing if the interface is Alive
    if eae.is_eae_alive() != 200:
        raise Exception("!!!")

    # Dispatch each waiting job to the cluster and mark it as running.
    for waiting in db.get_jobs(status=JobStatus.WAITING):
        job = eae.submit_jobs("--job_id={}".format(str(waiting["_id"])),
                              "gpu",
                              "GPU",
                              waiting["file"],
                              ['tensorlayer'],
                              "dsigpu2.ict-doc.ic.ac.uk",
                              "22222")
        db.change_job_status(job_id=waiting["_id"], status=JobStatus.RUNNING)
        print(job)
Beispiel #6
0
# Use a non-interactive backend so figures can be rendered headlessly (no display).
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix, f1_score

from deepsleep.data_loader import NonSeqDataLoader, SeqDataLoader
from deepsleep.model import DeepFeatureNet, DeepSleepNet
from deepsleep.optimize import adam, adam_clipping_list_lr
from deepsleep.utils import iterate_minibatches, iterate_batch_seq_minibatches

from tensorlayer.db import TensorDB
from tensorlayer.db import JobStatus

# Experiment database for the DeepSleepNet study (local MongoDB, no credentials).
db = TensorDB(ip='localhost', port=27017, db_name='DeepSleepNet', studyID='1')


class Trainer(object):
    def __init__(self,
                 interval_plot_filter=50,
                 interval_save_model=15,
                 interval_print_cm=10):
        self.interval_plot_filter = interval_plot_filter
        self.interval_save_model = interval_save_model
        self.interval_print_cm = interval_print_cm

    def print_performance(self, sess, output_dir, network_name,
                          n_train_examples, n_valid_examples, train_cm,
                          valid_cm, epoch, n_epochs, train_duration,
                          train_loss, train_acc, train_f1, valid_duration,
from tensorlayer.db import TensorDB
from tensorlayer.db import JobStatus

# Experiment database tracking job state for the DRL study.
db = TensorDB(ip='146.169.15.140', port=27017, db_name='DRL', user_name=None, password=None, studyID="1")

# Terminate running jobs
jobs = db.get_jobs(status=JobStatus.RUNNING)
for j in jobs:
    # BUG FIX: `print db.change_job_status(...)` is Python 2 syntax and is a
    # SyntaxError under Python 3; the rest of this file uses print() calls.
    print(db.change_job_status(job_id=j["_id"], status=JobStatus.TERMINATED))
import gym
import numpy as np
import time, os
import argparse
from bson.objectid import ObjectId

# Hide all GPUs so this process runs on CPU only.
os.environ["CUDA_VISIBLE_DEVICES"] = ""  # CPU

from tensorlayer.db import TensorDB
from tensorlayer.db import JobStatus

# This is to initialize the connection to your MondonDB server
# Note: make sure your MongoDB is reachable before changing this line
db = TensorDB(ip='IP_ADDRESS_OR_YOUR_MONGODB',
              port=27017,
              db_name='DATABASE_NAME',
              user_name=None,
              password=None,
              studyID='ANY_ID (e.g., mnist)')


def main(args):
    # hyperparameters
    image_size = 80
    D = image_size * image_size
    H = 200
    batch_size = 10
    # learning_rate = 1e-4
    gamma = 0.99
    # decay_rate = 0.99
    # render = False  # display the game environment
    # resume = False      # load existing policy network
# Hyper-parameters for the Pong policy-gradient agent.
image_size = 80
D = image_size * image_size  # flattened frame length (80 * 80 pixels)
H = 200  # number of hidden units
batch_size = 10
learning_rate = 1e-4
gamma = 0.99  # reward discount factor
decay_rate = 0.99
# render = False      # display the game environment
# resume = False      # load existing policy network
# model_file_name = "model_pong"

import sys

# BUG FIX: threshold=np.nan is rejected by modern NumPy (threshold must be an
# int; non-numeric values raise). sys.maxsize is the documented way to print
# arrays without summarization.
np.set_printoptions(threshold=sys.maxsize)

from tensorlayer.db import TensorDB
# db = TensorDB(ip='localhost', port=27017, db_name='atari', user_name=None, password=None) #<- if none password
# db = TensorDB(ip='146.169.33.34', port=27020, db_name='DRL', user_name='tensorlayer', password='******', studyID='1')
db = TensorDB(ip='146.169.15.140', port=27017, db_name='DRL', user_name=None, password=None, studyID='1')

# def prepro(I):
#     """ prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector """
#     I = I[35:195]
#     I = I[::2,::2,0]
#     I[I == 144] = 0
#     I[I == 109] = 0
#     I[I != 0] = 1
#     return I.astype(np.float).ravel()

# env = gym.make("Pong-v0")
# observation = env.reset()
# prev_x = None
# running_reward = None
# reward_sum = 0
Beispiel #10
0
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix, f1_score

from deepsleep.data_loader import NonSeqDataLoader, SeqDataLoader
from deepsleep.model import DeepFeatureNet, DeepSleepNet
from deepsleep.optimize import adam, adam_clipping_list_lr
from deepsleep.utils import iterate_minibatches, iterate_batch_seq_minibatches

from tensorlayer.db import TensorDB
from tensorlayer.db import JobStatus

# Experiment database for the DeepSleepNet study (remote MongoDB, authenticated;
# password masked in this published example).
db = TensorDB(ip='146.169.33.34',
              port=27020,
              db_name='DeepSleepNet',
              user_name='tensorlayer',
              password='******',
              studyID='1')


class Trainer(object):
    def __init__(self,
                 interval_plot_filter=50,
                 interval_save_model=100,
                 interval_print_cm=10):
        self.interval_plot_filter = interval_plot_filter
        self.interval_save_model = interval_save_model
        self.interval_print_cm = interval_print_cm

    def print_performance(self, sess, output_dir, network_name,
                          n_train_examples, n_valid_examples, train_cm,