Example #1
    def __init__(self, control_port, max_mb, patience, validFreq):
        """
        Initialize the LSTMController

        Parameters
        ----------
        max_mb : int
            Max number of minibatches to train on.
        patience : int
            Training stops when this many minibatches have been trained on
            without any reported improvement.
        validFreq : int
            Number of minibatches to train on between every monitoring step.
        """

        Controller.__init__(self, control_port)
        self.patience = patience
        self.max_mb = int(max_mb)

        self.validFreq = validFreq
        self.uidx = 0
        self.eidx = 0
        self.history_errs = []
        self.bad_counter = 0

        self.valid = False
        self.start_time = None
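The docstring above describes an early-stopping protocol: validate every validFreq minibatches and stop once the patience budget is exhausted without improvement. Below is a minimal sketch of the request handler such a controller might pair with, assuming a Platoon-style handle_control(req, worker_id) hook; the message names ('next', 'train', 'valid', 'stop', 'valid_err') are illustrative, not the project's actual protocol:

    def handle_control(self, req, worker_id):
        # Sketch: answer worker requests and apply the early-stopping rule.
        if req == 'next':
            self.uidx += 1
            if self.uidx >= self.max_mb or self.bad_counter >= self.patience:
                return 'stop'
            if self.uidx % self.validFreq == 0:
                return 'valid'  # ask the worker to run a validation pass
            return 'train'
        if isinstance(req, dict) and 'valid_err' in req:
            err = req['valid_err']
            if not self.history_errs or err < min(self.history_errs):
                self.bad_counter = 0  # improvement: reset the patience counter
            else:
                self.bad_counter += 1
            self.history_errs.append(err)
        return 'ok'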
Example #2
def lstm_control(saveFreq=1110, saveto=None):
    parser = Controller.default_parser()
    parser.add_argument('--max-mb',
                        default=(5000 * 1998) // 10,
                        type=int,
                        required=False,
                        help='Maximum mini-batches to train upon in total.')
    parser.add_argument(
        '--patience',
        default=10,
        type=int,
        required=False,
        help='Maximum patience when failing to get better validation results.')
    parser.add_argument(
        '--valid-freq',
        default=370,
        type=int,
        required=False,
        help='How often (in mini-batches) the prediction function '
             'should be validated.')
    args = parser.parse_args()

    l = LSTMController(max_mb=args.max_mb,
                       patience=args.patience,
                       valid_freq=args.valid_freq,
                       default_args=Controller.default_arguments(args))

    print("Controller is ready")
    return l.serve()
Example #3
    def __init__(self, control_port, max_mb, valid_freq):
        """
        Initialize the LSTMController

        Parameters
        ----------
        max_mb : int
            Max number of minibatches to train on.
        valid_freq : int
            Number of minibatches to train on between every monitoring step.
        """
        Controller.__init__(self, control_port)

        self.max_mb = max_mb

        self.valid_freq = valid_freq
        self.nb_mb = 0

        self.valid = False
        self.start_time = None

        config = get_config()

        self.experiment_dir = "{}exp_{}".format(config['plot_output_directory'], time.strftime("%Y-%m-%d_%H-%M-%S"))
        os.mkdir(self.experiment_dir)
Example #4
    def __init__(self, control_port, max_mb, patience, validFreq):
        """
        Initialize the LSTMController

        Parameters
        ----------
        max_mb : int
            Max number of minibatches to train on.
        patience : int
            Training stops when this many minibatches have been trained on
            without any reported improvement.
        validFreq : int
            Number of minibatches to train on between every monitoring step.
        """

        Controller.__init__(self, control_port)
        self.patience = patience
        self.max_mb = int(max_mb)

        self.validFreq = validFreq
        self.uidx = 0
        self.eidx = 0
        self.history_errs = []
        self.bad_counter = 0

        self.valid = False
        self.start_time = None
Example #5
    def __init__(self, control_port, batch_port, dataset, batch_size):
        Controller.__init__(self, control_port, None)
        # The data socket should be initialized in the process that will handle the batch.
        # That is why it's not initialized in the parent constructor. Second param = None
        self._batch_port = batch_port

        self._start_time = None
        self._should_stop = False

        self._batch_size = batch_size
        self._dataset = dataset

        self._nb_batch_processed = 0
        self._nb_batch_to_process = (dataset.shape[0] // batch_size)
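The comment above explains why the data socket is deferred: it has to be created in the process that actually serves the batches. Below is a minimal sketch of how a start_batch_server method could arrange that with multiprocessing; _init_data_socket and _send_batch are hypothetical helpers standing in for the real socket setup, not Platoon API:

    def start_batch_server(self):
        # Sketch: run the batch feeder in its own process so that the data
        # socket is opened there rather than in the controller process.
        from multiprocessing import Process
        self._batch_process = Process(target=self._batch_loop, daemon=True)
        self._batch_process.start()

    def _batch_loop(self):
        self._init_data_socket(self._batch_port)  # hypothetical helper
        for i in range(self._nb_batch_to_process):
            start = i * self._batch_size
            self._send_batch(self._dataset[start:start + self._batch_size])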
Example #6
def wavenet_control(saveFreq=1110, saveto=None):
    parser = Controller.default_parser()
    parser.add_argument('--max-mb',
                        default=(5000 * 1998) // 10,
                        type=int,
                        required=False,
                        help='Maximum mini-batches to train upon in total.')

    args = parser.parse_args()

    l = WaveNetController(max_mb=args.max_mb,
                          saveFreq=1000,
                          default_args=Controller.default_arguments(args))

    print("Controller is ready")
    return l.serve()
Example #7
    def __init__(self, control_port, batch_port, dataset, batch_size):
        Controller.__init__(self, control_port, None)
        # The data socket should be initialized in the process that will handle
        # the batch.
        # That is why it's not initialized in the parent constructor. Second
        # param = None
        self._batch_port = batch_port

        self._start_time = None
        self._should_stop = False

        self._batch_size = batch_size
        self._dataset = dataset

        self._nb_batch_processed = 0
        self._nb_batch_to_process = (dataset.shape[0] // batch_size)
Example #8
    def __init__(
            self,
            port=CONTROLLER_PORT,
            easgd_alpha=0.5,
            # Following arguments can be received from workers
            start_halving_at=6,
            end_at=10,
            step_len=10,
            valid_freq=1500,
            learning_rate=0.1,
            log_path=None):
        """
        Initialize the controller.

        Args:
            port (int): port on which the controller listens
            easgd_alpha (float): elastic averaging coefficient applied when a
                worker synchronizes with the central parameters
        """

        Controller.__init__(self, port)
        self.epoch_start_halving = start_halving_at
        self.end_at = end_at
        self.step_len = step_len
        self.start_time = None
        self.rand = np.random.RandomState(3)
        self.epoch = 0
        self._current_iter = 0
        self._iters_from_last_valid = 0
        self._evaluating = False
        self._valid_freq = valid_freq
        self._done = False
        self._lr = learning_rate
        self._easgd_alpha = easgd_alpha
        self._training_names = []
        self._evaluation_names = []
        self._best_valid_cost = sys.float_info.max
        self._lock = Lock()

        self.num_train_batches = 0
        self.batch_pool = []
        self._train_costs = []
        self._epoch_start_time = None
        self.prepared_worker_pool = set()
        self.log_file = open(log_path, "w") if log_path else None
        if log_path:
            logging.info("write logs into {}".format(log_path))
        logging.info("multi-gpu server is listening port {}".format(port))
Example #9
def parse_arguments():
    parser = Controller.default_parser()
    parser.add_argument('--batch_port', default=5566, type=int, required=False,
                        help='Port on which the batches will be transferred.')
    parser.add_argument('--batch-size', default=1000, type=int, required=False,
                        help='Size of the batches.')

    return parser.parse_args()
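Note the mixed spelling of the two options: --batch_port is declared with an underscore while --batch-size uses a hyphen. argparse converts hyphens in long option names to underscores when building attribute names, so both are read back as args.batch_port and args.batch_size, which is what the spawn_controller examples below assume.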
Example #10
def lstm_control(saveFreq=1110, saveto=None):
    parser = Controller.default_parser()
    parser.add_argument('--max-mb', default=(5000 * 1998) // 10, type=int,
                        required=False, help='Maximum mini-batches to train upon in total.')
    parser.add_argument('--patience', default=10, type=int,
                        required=False, help='Maximum patience when failing to get better validation results.')
    parser.add_argument('--valid-freq', default=370, type=int,
                        required=False, help='How often (in mini-batches) the prediction function should be validated.')
    args = parser.parse_args()

    l = LSTMController(max_mb=args.max_mb,
                       patience=args.patience,
                       valid_freq=args.valid_freq,
                       default_args=Controller.default_arguments(args))

    print("Controller is ready")
    return l.serve()
Example #11
def lstm_control(saveFreq=1110, saveto=None):
    parser = Controller.default_parser()
    parser.add_argument('--seed',
                        default=1234,
                        type=int,
                        required=False,
                        help='Seed for the random number generator.')
    parser.add_argument(
        '--patience',
        default=10,
        type=int,
        required=False,
        help='Maximum patience when failing to get better validation results.')
    args = parser.parse_args()

    l = LSTMController(seed=args.seed,
                       patience=args.patience,
                       default_args=Controller.default_arguments(args))

    print("Controller is ready")
    return l.serve()
Example #12
    def __init__(self, control_port, max_mb, valid_freq):
        """
        Initialize the LSTMController

        Parameters
        ----------
        max_mb : int
            Max number of minibatches to train on.
        valid_freq : int
            Number of minibatches to train on between every monitoring step.
        """
        Controller.__init__(self, control_port)

        self.max_mb = max_mb

        self.valid_freq = valid_freq
        self.nb_mb = 0

        self.valid = False
        self.start_time = None

        config = get_config()
Example #13
    def __init__(self, port=CONTROLLER_PORT, easgd_alpha=0.5,
                 # Following arguments can be received from workers
                 start_halving_at=6, end_at=10, step_len=10,
                 valid_freq=1500, learning_rate=0.1, log_path=None):
        """
        Initialize the controller.

        Args:
            port (int): port on which the controller listens
            easgd_alpha (float): elastic averaging coefficient applied when a
                worker synchronizes with the central parameters
        """

        Controller.__init__(self, port)
        self.epoch_start_halving = start_halving_at
        self.end_at = end_at
        self.step_len = step_len
        self.start_time = None
        self.rand = np.random.RandomState(3)
        self.epoch = 0
        self._current_iter = 0
        self._iters_from_last_valid = 0
        self._evaluating = False
        self._valid_freq = valid_freq
        self._done = False
        self._lr = learning_rate
        self._easgd_alpha = easgd_alpha
        self._training_names = []
        self._evaluation_names = []
        self._best_valid_cost = sys.float_info.max
        self._lock = Lock()

        self.num_train_batches = 0
        self.batch_pool = []
        self._train_costs = []
        self._epoch_start_time = None
        self.prepared_worker_pool = set()
        self.log_file = open(log_path, "w") if log_path else None
        if log_path:
            logging.info("write logs into {}".format(log_path))
        logging.info("multi-gpu server is listening port {}".format(port))
Example #14
def parse_arguments():
    parser = Controller.default_parser()
    parser.add_argument('--batch_port',
                        default=5566,
                        type=int,
                        required=False,
                        help='Port on which the batches will be transferred.')
    parser.add_argument('--batch-size',
                        default=1000,
                        type=int,
                        required=False,
                        help='Size of the batches.')

    return parser.parse_args()
Example #15
    def __init__(self, control_port, max_mb, validFreq):
        """
        Initialize the ConvNetController

        Parameters
        ----------
        max_mb : int
            Max number of minibatches to train on.
        validFreq : int
            Number of minibatches to train on between every monitoring step.
        """

        Controller.__init__(self, control_port)

        self.validFreq = validFreq
        self.max_mb = int(max_mb)
        self.uidx = {}
        self.valid = {}
        self.start_time = None
        self.uepoch = 0
        self.last_uepoch = 0
        self.epoch_time = []
        self.last = None
        self.last_uidx = 0
Example #16
def spawn_controller():
    args = parse_arguments()

    mnist_path = "../data/mnist.pkl.gz"

    get_mnist(mnist_path)

    with gzip.open(mnist_path, 'rb') as f:
        kwargs = {}
        if six.PY3:
            kwargs['encoding'] = 'latin1'
        train_set, _, _ = cPickle.load(f, **kwargs)

    controller = BatchedPixelSumController(batch_port=args.batch_port,
                                           dataset=train_set[0],
                                           batch_size=args.batch_size,
                                           default_args=Controller.default_arguments(args))
    controller.start_batch_server()
    return controller.serve()
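With the standard mnist.pkl.gz split (an assumption here), train_set[0] is a 50000 x 784 array of flattened images, so the controller's _nb_batch_to_process works out to 50000 // batch_size, i.e. 50 batches with the default --batch-size of 1000.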
Example #17
def spawn_controller():
    args = parse_arguments()

    mnist_path = "../data/mnist.pkl.gz"

    get_mnist(mnist_path)

    with gzip.open(mnist_path, 'rb') as f:
        kwargs = {}
        if six.PY3:
            kwargs['encoding'] = 'latin1'
        train_set, _, _ = cPickle.load(f, **kwargs)

    controller = BatchedPixelSumController(
        batch_port=args.batch_port,
        dataset=train_set[0],
        batch_size=args.batch_size,
        default_args=Controller.default_arguments(args))
    controller.start_batch_server()
    return controller.serve()
Example #18
    def __init__(self, control_port):
        Controller.__init__(self, control_port)
        self.wps = {}