Example #1
    def __init__(self, control_port, max_mb, patience, validFreq):
        """
        Initialize the LSTMController

        Parameters
        ----------
        max_mb : int
            Max number of minibatches to train on.
        patience : int
            Training stops when this many minibatches have been trained on
            without any reported improvement.
        validFreq : int
            Number of minibatches to train on between every monitoring step.
        """

        Controller.__init__(self, control_port)
        self.patience = patience
        self.max_mb = int(max_mb)

        self.validFreq = validFreq
        self.uidx = 0
        self.eidx = 0
        self.history_errs = []
        self.bad_counter = 0

        self.valid = False
        self.start_time = None
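The attributes initialized above drive the patience-based early-stopping policy the docstring describes. A minimal, self-contained sketch of that policy, assuming the controller is notified of each trained minibatch and each validation result (the class and method names here are illustrative, not Platoon's API):

    class EarlyStopState:
        def __init__(self, max_mb, patience, validFreq):
            self.max_mb = int(max_mb)
            self.patience = patience
            self.validFreq = validFreq
            self.uidx = 0                 # minibatches trained so far
            self.history_errs = []        # reported validation errors
            self.last_improvement = 0     # uidx at the last best error

        def on_minibatch_done(self):
            # Called once per trained minibatch; returns the next action.
            self.uidx += 1
            if self.uidx >= self.max_mb:
                return 'stop'             # minibatch budget exhausted
            if self.uidx % self.validFreq == 0:
                return 'valid'            # time for a monitoring step
            return 'train'

        def on_valid_result(self, err):
            # Record a validation error and apply the patience rule.
            if not self.history_errs or err < min(self.history_errs):
                self.last_improvement = self.uidx
            self.history_errs.append(err)
            if self.uidx - self.last_improvement > self.patience:
                return 'stop'             # patience minibatches without improvement
            return 'train'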
Example #2
    def __init__(self, control_port, max_mb, valid_freq):
        """
        Initialize the LSTMController

        Parameters
        ----------
        max_mb : int
            Max number of minibatches to train on.
        valid_freq : int
            Number of minibatches to train on between every monitoring step.
        """
        Controller.__init__(self, control_port)

        self.max_mb = max_mb

        self.valid_freq = valid_freq
        self.nb_mb = 0

        self.valid = False
        self.start_time = None

        config = get_config()

        self.experiment_dir = "{}exp_{}".format(config['plot_output_directory'], time.strftime("%Y-%m-%d_%H-%M-%S"))
        os.mkdir(self.experiment_dir)
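The last two lines assume that os and time are imported and that get_config() supplies a 'plot_output_directory' value ending in a path separator, since the format string concatenates directly. A runnable sketch with a hypothetical config stub:

    import os
    import time

    def get_config():
        # Hypothetical stand-in for the project's real get_config();
        # only the key the snippet reads is modelled here.
        return {'plot_output_directory': '/tmp/plots/'}

    config = get_config()
    # No separator is added between the directory and the "exp_" prefix,
    # so the configured path must end with one.
    experiment_dir = "{}exp_{}".format(config['plot_output_directory'],
                                       time.strftime("%Y-%m-%d_%H-%M-%S"))
    os.makedirs(experiment_dir)   # os.mkdir, as above, requires the parent to exist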
Example #3
    def __init__(self, control_port, batch_port, dataset, batch_size):
        Controller.__init__(self, control_port, None)
        # The data socket should be initialized in the process that will handle the batch.
        # That is why it's not initialized in the parent constructor. Second param = None
        self._batch_port = batch_port

        self._start_time = None
        self._should_stop = False

        self._batch_size = batch_size
        self._dataset = dataset

        self._nb_batch_processed = 0
        self._nb_batch_to_process = (dataset.shape[0] // batch_size)
Example #4
    def __init__(self, control_port, batch_port, dataset, batch_size):
        Controller.__init__(self, control_port, None)
        # The data socket should be initialized in the process that will handle
        # the batch.
        # That is why it's not initialized in the parent constructor. Second
        # param = None
        self._batch_port = batch_port

        self._start_time = None
        self._should_stop = False

        self._batch_size = batch_size
        self._dataset = dataset

        self._nb_batch_processed = 0
        self._nb_batch_to_process = (dataset.shape[0] // batch_size)
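Note that the floor division on the last line silently drops any final partial batch. A quick check with a made-up dataset shape:

    import numpy as np

    dataset = np.zeros((1050, 32))   # hypothetical: 1050 examples, 32 features
    batch_size = 100
    nb_batch_to_process = dataset.shape[0] // batch_size
    print(nb_batch_to_process)                                    # 10, not 11
    print(dataset.shape[0] - nb_batch_to_process * batch_size)    # 50 examples never served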
Example #5
    def __init__(
            self,
            port=CONTROLLER_PORT,
            easgd_alpha=0.5,
            # Following arguments can be received from workers
            start_halving_at=6,
            end_at=10,
            step_len=10,
            valid_freq=1500,
            learning_rate=0.1,
            log_path=None):
        """
        Initialize the controller.

        Args:
            port (int): port for the controller to listen on
            easgd_alpha (float): elastic-averaging coefficient for EASGD
            step_len (int): batches in one training step
        """

        Controller.__init__(self, port)
        self.epoch_start_halving = start_halving_at
        self.end_at = end_at
        self.step_len = step_len
        self.start_time = None
        self.rand = np.random.RandomState(3)
        self.epoch = 0
        self._current_iter = 0
        self._iters_from_last_valid = 0
        self._evaluating = False
        self._valid_freq = valid_freq
        self._done = False
        self._lr = learning_rate
        self._easgd_alpha = easgd_alpha
        self._training_names = []
        self._evaluation_names = []
        self._best_valid_cost = sys.float_info.max
        self._lock = Lock()

        self.num_train_batches = 0
        self.batch_pool = []
        self._train_costs = []
        self._epoch_start_time = None
        self.prepared_worker_pool = set()
        self.log_file = open(log_path, "w") if log_path else None
        if log_path:
            logging.info("write logs into {}".format(log_path))
        logging.info("multi-gpu server is listening port {}".format(port))
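The start_halving_at and end_at arguments suggest a stepwise learning-rate decay: train at the base rate, then halve the rate once per epoch until training ends. This is one plausible reading of those parameters, not taken from the controller's code:

    def learning_rate(epoch, base_lr=0.1, start_halving_at=6, end_at=10):
        # Constant rate before halving begins, then halve once per epoch.
        if epoch < start_halving_at:
            return base_lr
        return base_lr * 0.5 ** (epoch - start_halving_at + 1)

    for epoch in range(10):
        print(epoch, learning_rate(epoch))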
Example #6
    def __init__(self, control_port, max_mb, valid_freq):
        """
        Initialize the LSTMController

        Parameters
        ----------
        max_mb : int
            Max number of minibatches to train on.
        valid_freq : int
            Number of minibatches to train on between every monitoring step.
        """
        Controller.__init__(self, control_port)

        self.max_mb = max_mb

        self.valid_freq = valid_freq
        self.nb_mb = 0

        self.valid = False
        self.start_time = None

        config = get_config()
Example #7
    def __init__(self, port=CONTROLLER_PORT, easgd_alpha=0.5,
                 # Following arguments can be received from workers
                 start_halving_at=6, end_at=10, step_len=10,
                 valid_freq=1500, learning_rate=0.1, log_path=None):
        """
        Initialize the controller.

        Args:
            port (int): port for the controller to listen on
            easgd_alpha (float): elastic-averaging coefficient for EASGD
            step_len (int): batches in one training step
        """

        Controller.__init__(self, port)
        self.epoch_start_halving = start_halving_at
        self.end_at = end_at
        self.step_len = step_len
        self.start_time = None
        self.rand = np.random.RandomState(3)
        self.epoch = 0
        self._current_iter = 0
        self._iters_from_last_valid = 0
        self._evaluating = False
        self._valid_freq = valid_freq
        self._done = False
        self._lr = learning_rate
        self._easgd_alpha = easgd_alpha
        self._training_names = []
        self._evaluation_names = []
        self._best_valid_cost = sys.float_info.max
        self._lock = Lock()

        self.num_train_batches = 0
        self.batch_pool = []
        self._train_costs = []
        self._epoch_start_time = None
        self.prepared_worker_pool = set()
        self.log_file = open(log_path, "w") if log_path else None
        if log_path:
            logging.info("write logs into {}".format(log_path))
        logging.info("multi-gpu server is listening port {}".format(port))
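easgd_alpha is the elastic-averaging coefficient from EASGD (Zhang, Choromanska & LeCun, 2015): on each synchronization, a worker's parameters and the central copy move toward each other by a fraction alpha of their difference. A NumPy sketch of that update (the exchange itself happens elsewhere, not in this constructor):

    import numpy as np

    def easgd_sync(worker, center, alpha=0.5):
        # Symmetric elastic update: worker and center are each pulled
        # toward the other by a fraction alpha of their difference.
        diff = worker - center
        return worker - alpha * diff, center + alpha * diff

    worker = np.array([1.0, 2.0])
    center = np.array([0.0, 0.0])
    worker, center = easgd_sync(worker, center, alpha=0.5)
    print(worker, center)   # [0.5 1. ] [0.5 1. ]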
Example #8
    def __init__(self, control_port, max_mb, validFreq):
        """
        Initialize the ConvNetController

        Parameters
        ----------
        max_mb : int
            Max number of minibatches to train on.
        validFreq : int
            Number of minibatches to train on between every monitoring step.
        """

        Controller.__init__(self, control_port)

        self.validFreq = validFreq
        self.max_mb = int(max_mb)
        self.uidx = {}
        self.valid = {}
        self.start_time = None
        self.uepoch = 0
        self.last_uepoch = 0
        self.epoch_time = []
        self.last = None
        self.last_uidx = 0
Example #9
 def __init__(self, control_port):
     Controller.__init__(self, control_port)
     self.wps = {}