def create(email, password, first_name=None, middle_name=None, last_name=None):
    """
    Create a new user and write it into the DB.

    :param email: email of a user
    :type email: str
    :param password: password of a user
    :type password: str
    :param first_name: first name of a user
    :type first_name: str
    :param middle_name: middle name of a user
    :type middle_name: str
    :param last_name: last name of a user
    :type last_name: str
    :return: the newly created user object
    :raises ValueError: if the email is invalid or the DB write fails
    """
    # Name fields default to '' when the optional arguments are omitted.
    user = CustomUser(
        first_name=first_name if first_name else '',
        last_name=last_name if last_name else '',
        middle_name=middle_name if middle_name else '',
        email=email,
    )
    user.set_password(password)
    try:
        # Validate before saving so a malformed email never reaches the DB.
        validate_email(user.email)
        user.save()
    except (IntegrityError, AttributeError, ValidationError, DataError) as err:
        LOGGER.error("Wrong attributes or relational integrity error")
        # Chain the original exception so the real cause stays in the
        # traceback (previously it was discarded).
        raise ValueError("Some troubles with creating user!") from err
    return user
def hello(request):
    """Render the hello page with a fixed list of adjectives."""
    LOGGER.info('\nWreszcie coś działa')
    context = {'adjectives': ['beautiful', 'cruel', 'wonderful']}
    return render(request, template_name='hello.xhtml', context=context)
def get_by_id(calculator_id):
    """
    Return the calculator with the given id.

    :param calculator_id: id of the Calculator row to fetch
    :return: Calculator object or False if no such row exists
    """
    try:
        return Calculator.objects.get(id=calculator_id)
    except Calculator.DoesNotExist:
        # Bug fix: this previously caught CustomUser.DoesNotExist, so a
        # missing Calculator escaped as an unhandled exception instead of
        # returning False.
        LOGGER.error("Calculator does not exist")
        return False
def post(self, request, *args, **kwargs):
    """
    Handle the creation form submission, then log the added movie's title.

    :param request: incoming HTTP request carrying the POST data
    :return: the response produced by the parent view's post()
    """
    result = super().post(request, *args, **kwargs)
    # Use the public request.POST QueryDict instead of the private
    # request._post attribute (an internal Django detail that may change).
    title = request.POST.get('title')
    LOGGER.info(f'Movie {title} has been added to database.')
    return result
def delete_by_user_id(user_id):
    """
    Delete every calculator task that belongs to the given user.

    :param user_id: id of the owning user
    :return: True when the tasks were removed, False on DoesNotExist
    """
    try:
        for task in Calculator.results(user_id):
            task.delete()
    except Calculator.DoesNotExist:
        LOGGER.error("Task does not exist")
        return False
    return True
def get_by_id(user_id):
    """
    :param user_id: SERIAL: the id of a user to be found in the DB
    :return: user object, or False if a user with such ID does not exist
        (docstring previously claimed None, but the code returns False)
    """
    try:
        return CustomUser.objects.get(id=user_id)
    except CustomUser.DoesNotExist:
        LOGGER.error("User does not exist")
        return False
def create(size, matrix_a, vector_b, e, user):
    """
    Create and persist a Calculator task.

    :param size: dimension of the system; also the length of the result list
    :param matrix_a: coefficient matrix A
    :param vector_b: right-hand-side vector b
    :param e: accuracy / epsilon parameter
    :param user: owning user object
    :return: the saved Calculator instance, or None if saving failed
    """
    calculator = Calculator(
        size=size,
        matrix_a=matrix_a,
        vector_b=vector_b,
        e=e,
        user=user,
        # Idiomatic zero-filled list (was: [0 for i in range(size)]).
        result=[0] * size,
    )
    try:
        calculator.save()
    except (IntegrityError, AttributeError, DataError, ValueError):
        # Dropped the unused `as err` binding; the failure path previously
        # fell off the end of the function — make the None return explicit.
        LOGGER.error("Wrong attributes or relational integrity error")
        return None
    return calculator
def get_by_email(email):
    """
    Return the user with the given email.

    :param email: email by which we need to find the user
    :type email: str
    :return: user object
    :raises ValueError: if no user with such email exists
        (docstring previously claimed a None return, but the code raises)
    """
    try:
        return CustomUser.objects.get(email=email)
    except CustomUser.DoesNotExist as err:
        LOGGER.error("User does not exist")
        # Chain the original exception so the cause is preserved.
        raise ValueError("Incorrect email") from err
def test_done_callback_raises(self):
    """A raising done-callback is logged and does not stop later callbacks."""
    # Swap the stderr handler for an in-memory stream so the logged
    # traceback can be inspected.
    LOGGER.removeHandler(STDERR_HANDLER)
    log_capture = io.StringIO()
    capture_handler = logging.StreamHandler(log_capture)
    LOGGER.addHandler(capture_handler)
    try:
        called = {'raising': False, 'plain': False}

        def raising_fn(callback_future):
            called['raising'] = True
            raise Exception('doh!')

        def fn(callback_future):
            called['plain'] = True

        future = Future()
        future.add_done_callback(raising_fn)
        future.add_done_callback(fn)
        future.set_result(5)

        self.assertTrue(called['raising'])
        self.assertTrue(called['plain'])
        self.assertIn('Exception: doh!', log_capture.getvalue())
    finally:
        # Always restore the original logging configuration.
        LOGGER.removeHandler(capture_handler)
        LOGGER.addHandler(STDERR_HANDLER)
def delete_by_id(user_id):
    """
    :param user_id: an id of a user to be deleted
    :type user_id: int
    :return: True if object existed in the db and was removed
        or False if it didn't exist
    """
    try:
        CustomUser.objects.get(id=user_id).delete()
    except CustomUser.DoesNotExist:
        LOGGER.error("User does not exist")
        return False
    return True
def set_running_or_notify_cancel(self):
    """Mark the future as running or process any cancel notifications.

    Should only be used by Executor implementations and unit tests.

    If the future has been cancelled (cancel() was called and returned
    True) then any threads waiting on the future completing (though calls
    to as_completed() or wait()) are notified and False is returned.

    If the future was not cancelled then it is put in the running state
    (future calls to running() will return True) and True is returned.

    This method should be called by Executor implementations before
    executing the work associated with this future. If this method returns
    False then the work should not be executed.

    Returns:
        False if the Future was cancelled, True otherwise.

    Raises:
        RuntimeError: if this method was already called or if set_result()
            or set_exception() was called.
    """
    with self._condition:
        # Normal path: transition a fresh future into the running state.
        if self._state == PENDING:
            self._state = RUNNING
            return True
        # Cancelled before it could start: flag it notified and wake the
        # waiters registered on this future.
        if self._state == CANCELLED:
            self._state = CANCELLED_AND_NOTIFIED
            for waiter in self._waiters:
                waiter.add_cancelled(self)
            # self._condition.notify_all() is not necessary because
            # self.cancel() triggers a notification.
            return False
        # Any other state means this method was misused.
        LOGGER.critical('Future %s in unexpected state: %s',
                        id(self), self._state)
        raise RuntimeError('Future in unexpected state')
def _invoke_callbacks(self): for callback in self._done_callbacks: try: callback(self) except BaseException: LOGGER.exception('exception calling callback for %r', self)
def form_invalid(self, form):
    """Warn that the submitted form failed validation, then delegate to
    the parent view's default invalid-form handling."""
    LOGGER.warning('Invalid data provided.')
    response = super().form_invalid(form)
    return response
def _process_worker(call_queue, result_queue, initializer, initargs,
                    processes_management_lock, timeout, worker_exit_lock,
                    current_depth):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A ctx.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A ctx.Queue of _ResultItems that will be written to
            by the worker.
        initializer: A callable initializer, or None
        initargs: A tuple of args for the initializer
        processes_management_lock: A ctx.Lock avoiding worker timeout while
            some workers are being spawned.
        timeout: maximum time to wait for a new item in the call_queue. If
            that time is expired, the worker will shutdown.
        worker_exit_lock: Lock to avoid flagging the executor as broken on
            workers timeout.
        current_depth: Nested parallelism level, to avoid infinite spawning.
    """
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            LOGGER.critical('Exception in initializer:', exc_info=True)
            # The parent will notice that the process stopped and
            # mark the pool broken
            return

    # set the global _CURRENT_DEPTH mechanism to limit recursive call
    global _CURRENT_DEPTH
    _CURRENT_DEPTH = current_depth

    # Memory-leak bookkeeping: the reference size is measured after the
    # first completed call; None means "not measured yet".
    _process_reference_size = None
    _last_memory_leak_check = None
    pid = os.getpid()

    mp.util.debug(f'Worker started with timeout={timeout}')
    while True:
        try:
            call_item = call_queue.get(block=True, timeout=timeout)
            if call_item is None:
                # None is the executor's shutdown sentinel.
                mp.util.info("Shutting down worker on sentinel")
        except queue.Empty:
            mp.util.info(f"Shutting down worker after timeout {timeout:0.3f}s")
            if processes_management_lock.acquire(block=False):
                processes_management_lock.release()
                # No worker spawn is in progress: treat the timeout like a
                # shutdown sentinel and exit through the clean path below.
                call_item = None
            else:
                # A spawn is in progress — keep waiting instead of dying,
                # to avoid worker timeout while workers are being created.
                mp.util.info("Could not acquire processes_management_lock")
                continue
        except BaseException:
            # Anything else while fetching work is fatal: try to ship the
            # traceback to the parent, then exit with an error code.
            previous_tb = traceback.format_exc()
            try:
                result_queue.put(_RemoteTraceback(previous_tb))
            except BaseException:
                # If we cannot format correctly the exception, at least print
                # the traceback.
                print(previous_tb)
            mp.util.debug('Exiting with code 1')
            sys.exit(1)
        if call_item is None:
            # Notify queue management thread about clean worker shutdown
            result_queue.put(pid)
            with worker_exit_lock:
                mp.util.debug('Exited cleanly')
                return
        try:
            r = call_item()
        except BaseException as e:
            # Wrap the exception so its traceback survives pickling back
            # to the parent process.
            exc = _ExceptionWithTraceback(e)
            result_queue.put(_ResultItem(call_item.work_id, exception=exc))
        else:
            _sendback_result(result_queue, call_item.work_id, result=r)
            del r

        # Free the resource as soon as possible, to avoid holding onto
        # open files or shared memory that is not needed anymore
        del call_item

        if _USE_PSUTIL:
            if _process_reference_size is None:
                # Make reference measurement after the first call
                _process_reference_size = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                continue
            if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY:
                mem_usage = _get_memory_usage(pid)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # Memory usage stays within bounds: everything is fine.
                    continue

                # Check again memory usage; this time take the measurement
                # after a forced garbage collection to break any reference
                # cycles.
                mem_usage = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # The GC managed to free the memory: everything is fine.
                    continue

                # The process is leaking memory: let the master process
                # know that we need to start a new worker.
                mp.util.info("Memory leak detected: shutting down worker")
                result_queue.put(pid)
                with worker_exit_lock:
                    mp.util.debug('Exit due to memory leak')
                    return
        else:
            # if psutil is not installed, trigger gc.collect events
            # regularly to limit potential memory leaks due to reference cycles
            if ((_last_memory_leak_check is None) or
                    (time() - _last_memory_leak_check >
                     _MEMORY_LEAK_CHECK_DELAY)):
                gc.collect()
                _last_memory_leak_check = time()