示例#1
0
class IPNeuralNetwork(NeuralNetwork):
    """Neural network that parallelizes per-sample data augmentation across
    one worker process per CPU, communicating through multiprocessing queues.
    """

    def fit(self, training_data, validation_data=None):
        '''
        Override this function to create and destroy workers.

        Starts one Worker per CPU feeding from a shared jobs queue and
        writing to a shared results queue, runs the parent's fit, and
        guarantees every worker is terminated afterwards.
        '''
        self._jobs_queue = mp.Queue()
        self._res_queue = MyQueue()
        n_cpus = mp.cpu_count()
        # One worker per CPU; each consumes (data, label) jobs and produces
        # augmented (data, label) results.
        self._processes = [
            Worker(self._jobs_queue, self._res_queue) for _ in range(n_cpus)
        ]
        for p in self._processes:
            p.start()

        # try/finally replaces the original bare `except:` plus a duplicated
        # termination loop on the success path: workers are now terminated
        # exactly once, on both normal completion and any exception, and the
        # exception still propagates to the caller.
        try:
            super().fit(training_data, validation_data)
        finally:
            for p in self._processes:
                p.terminate()

    def create_batches(self, data, labels, batch_size):
        '''
        Override this function to return batches created by workers.

        Enqueues one augmentation job per (data, label) sample, collects
        exactly one result per job, and delegates batching of the augmented
        arrays to the parent implementation.
        '''
        n_samples = len(data)

        for d, l in zip(data, labels):
            self._jobs_queue.put((d, l))

        # Exactly one result comes back per job; `get()` blocks until each
        # worker result is available.
        result = [self._res_queue.get() for _ in range(n_samples)]

        data = np.asarray([r[0] for r in result])
        labels = np.asarray([r[1] for r in result])

        return super().create_batches(data, labels, batch_size)
示例#2
0
class IPNeuralNetwork(NeuralNetwork):
    """Neural network whose data augmentation runs in parallel worker
    processes: a SetJobsWorker feeds (image, label) jobs into a queue and
    one Worker per available CPU writes augmented samples to a result queue.
    """

    def __init__(self,
                 sizes=None,
                 learning_rate=1.0,
                 mini_batch_size=16,
                 number_of_batches=16,
                 epochs=10,
                 matmul=np.matmul):
        # `sizes=None` replaces the mutable default `list()`: a default list
        # is created once at def time and shared across every call.
        super().__init__([] if sizes is None else sizes, learning_rate,
                         mini_batch_size, number_of_batches, epochs, matmul)

        self.workers = []
        self.jobs = mp.Queue()  # Queue of tuples (image, label)
        self.results = ResultQueue()  # Queue of tuples (image, label)

    def _n_cpus(self):
        """Return the number of CPUs this process is allowed to use.

        On Linux, mp.cpu_count() reports every CPU in the machine rather
        than the subset this process may run on, so the Cpus_allowed
        bitmask from /proc/self/status is parsed instead.
        """
        if platform.system() == 'Windows':
            return mp.cpu_count(
            )  # Good for tests, but gets wrong number on CDP servers
        # `with` closes the file handle; the original `open(...).read()`
        # leaked it until garbage collection.
        with open('/proc/self/status') as status_file:
            status = status_file.read()
        m = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', status)
        # The mask is comma-separated hex; the popcount of its set bits is
        # the number of CPUs this process may use.
        num_cpu = bin(int(m.group(1).replace(',', ''), 16)).count('1')
        return num_cpu

    def fit(self, training_data, validation_data=None):
        '''
        Override this function to create and destroy workers.

        Starts one job-producing SetJobsWorker and one augmenting Worker
        per allowed CPU, runs the parent's fit, then joins all processes.
        '''
        #Create Workers and set jobs
        n_workers = self._n_cpus()

        data = training_data[0]
        labels = training_data[1]
        # Total jobs = samples consumed over the whole training run; the
        # producer also emits the termination markers the workers expect.
        jobs_worker = SetJobsWorker(
            n_workers,
            data,
            labels,
            self.jobs,
            n_jobs=(self.number_of_batches * self.mini_batch_size *
                    self.epochs))
        jobs_worker.start()

        for _ in range(n_workers):
            worker = Worker(self.jobs, self.results)
            worker.start()
            self.workers.append(worker)

        #Call the parent's fit
        super().fit(training_data, validation_data)

        #Stop Workers: workers exit on the producer's termination markers,
        # so join() (not terminate()) is safe here.
        for worker in self.workers:
            worker.join()
        self.workers = []
        jobs_worker.join()

    def create_batches(self, data, labels, batch_size):
        """
         Parameters
         ----------
         data : np.array of input data
         labels : np.array of input labels
         batch_size : int size of batch
    
         Returns
         -------
         list
             list of tuples of (data batch of batch_size, labels batch of batch_size)

        """
        batches_flat = [
        ]  # all augmented data in one list, without splitting into batches
        for k in range(self.number_of_batches * self.mini_batch_size):
            # Stop condition for results queue:
            #  we know that number of results is the same as number of jobs
            #  so here we don't use None-terminated queue like with jobs
            batches_flat.append(self.results.get())

        # Slice the flat result stream into mini-batches and stack each
        # batch's images and labels into arrays.
        batches = []
        sz = self.mini_batch_size
        for i in range(self.number_of_batches):
            batch = batches_flat[i * sz:(i + 1) *
                                 sz]  # list of tuples (image, label)
            batches.append(
                (np.array([tup[0] for tup in batch]),
                 np.array([tup[1] for tup in batch]))
            )  # tuple of (data batch of batch_size, labels batch of batch_size)
        return batches