def __init__(self, workers_stop, pyro_stop, workers_started, daemon):
    """Store the shared stop/start primitives and the daemon handle,
    build the logger and announce that the Pyro service is ready."""
    # Coordination primitives handed in by the launcher.
    self.daemon = daemon
    self.pyro_stop = pyro_stop
    self.workers_started = workers_started
    self.workers_stop = workers_stop
    # No work item assigned yet.
    self.work = None
    self.log = AppLogger(name='syncdaemon')
    self.log.info('Pyro service started, ready for commands')
def __init__(self, workers_stop, pyro_stop, workers_started):
    """Initialise the 'pyro-object' thread and keep references to the
    shared coordination events supplied by the caller."""
    super().__init__(name='pyro-object')
    self.workers_started = workers_started
    self.workers_stop = workers_stop
    self.pyro_stop = pyro_stop
    self.log = AppLogger(name='syncdaemon')
def __init__(self, stopper, workers_started, restart=False):
    """Set up worker bookkeeping, the NIF integration client and a
    bounded semaphore capping concurrent sync connections."""
    self.log = AppLogger(name='syncdaemon')
    self.stopper = stopper
    self.workers_started = workers_started
    self.restart = restart
    # Bookkeeping for the worker threads this object manages.
    self.workers = []
    self.failed_clubs = []
    self.club_list = []
    self.integration = NifIntegration()
    # Caps how many workers may hold a sync connection at once.
    self.bound_semaphore = threading.BoundedSemaphore(
        value=SYNC_CONNECTIONPOOL_SIZE)
def __init__(self, limit: int = sys.maxsize, starting_page: int = 0, image_size: str = "s"):
    """Configure the async API client.

    Args:
        limit: maximum number of items to fetch (default: effectively unlimited).
        starting_page: zero-based page to start fetching from.
        image_size: one of 's', 'm', 'l'; selects the size argument sent to
            the API and the output sub-directory.

    Raises:
        ValueError: if image_size is unknown, or limit/starting_page is negative.
    """
    try:
        size_arg = ApiClient.__IMG_SIZE_ARGS[image_size]
    except KeyError:
        # BUG FIX: a dict subscript raises KeyError, not IndexError — the
        # original `except IndexError` never matched, so an invalid size
        # escaped as a raw KeyError instead of this ValueError.
        raise ValueError(
            f"Argument size has to be one from 's' (default), 'm', 'l'! Got '{image_size}' instead."
        ) from None
    if starting_page < 0 or limit < 0:
        raise ValueError(
            "Starting page number and limit must not be negative!")
    self.limit = limit
    self.starting_page = starting_page
    self.image_size = size_arg
    # Downloaded images land in images/training/<size>/sneakers.
    output_dir = Path("images") / "training" / image_size / "sneakers"
    output_dir.mkdir(exist_ok=True, parents=True)
    self.output_dir = output_dir
    self.log = AppLogger("AsyncApiClient")
    # Created lazily once the event loop / download pipeline starts.
    self.session = None
    self.download_queue = None
    self.save_queue = None
def __init__(self, module_name):
    """Remember the module name and attach a config-driven logger for it."""
    self.module = module_name
    cfg = Config()
    self.config = cfg
    self.log = AppLogger(cfg, module_name)
def __init__(self):
    """Set up the NIF change-stream worker: logging, resume-token state,
    Lungo API endpoint map, NIF API clients and the MongoDB handle.

    Exits the process if the NIF test authentication fails.
    """
    self.log = AppLogger(name='nif-stream', stdout=False, last_logs=0, restart=True)
    # Restart bookkeeping for the stream loop.
    self.restarts = 0
    self.max_restarts = 10
    self.token_reset = False
    # Resume token lets the change stream continue where it left off
    # after a restart; persisted at resume_token_path.
    self.resume_token = None
    self.resume_token_path = Path(STREAM_RESUME_TOKEN_FILE)
    # Plain boolean flag, not a real threading lock.
    self.resume_token_lock = False
    self.tz_local = tz.gettz("Europe/Oslo")
    self.tz_utc = tz.gettz('UTC')
    # Lungo Api — per-collection endpoint and id-field map.
    self.api_collections = {
        'Person': {
            'url': '{}/persons/process'.format(API_URL),
            'id': 'id'
        },
        'Function': {
            'url': '{}/functions/process'.format(API_URL),
            'id': 'id'
        },
        'Organization': {
            'url': '{}/organizations/process'.format(API_URL),
            'id': 'id'
        },
        'Competence': {
            'url': '{}/competences/process'.format(API_URL),
            'id': 'id'
        },
        'License': {
            'url': '{}/licenses/process'.format(API_URL),
            'id': 'id'
        },
        'Changes': {
            'url': '{}/integration/changes'.format(API_URL),
            'id': 'id'
        },
    }
    # NIF Api
    # Needs one of the clubs? Using platform user!
    self.api_license = NifApiIntegration(username=NIF_FEDERATION_USERNAME,
                                         password=NIF_FEDERATION_PASSWORD,
                                         log_file=STREAM_LOG_FILE,
                                         realm=NIF_REALM)
    self.api_competence = NifApiCompetence(
        username=NIF_FEDERATION_USERNAME,
        password=NIF_FEDERATION_PASSWORD,
        log_file=STREAM_LOG_FILE,
        realm=NIF_REALM)
    self.api = NifApiIntegration(username=ACLUBU,
                                 password=ACLUBP,
                                 log_file=STREAM_LOG_FILE,
                                 realm=NIF_REALM)
    # Smoke-test the credentials; bail out of the process on failure.
    # NOTE(review): exits with status 0 even though this is an error path.
    status, hello = self.api._test()
    if status is not True:
        self.log.error('[TERMINATING] Problems with NIF authentication')
        sys.exit(0)
    # Change stream — local MongoDB, database 'ka'.
    client = pymongo.MongoClient()
    self.db = client.ka
# You should have received a copy of the GNU General Public License # along with this program (the LICENSE.md file). If not, see <http://www.gnu.org/licenses/>. # import tkinter as tk # In python2 it's Tkinter from tkinter import font as tkfont, messagebox import datetime import glob from functools import partial from animated_gif_label import AnimatedGIFLabel from configuration import QConfiguration from display_controller import DisplayController from app_logger import AppLogger # Logger init the_app_logger = AppLogger("lumiclock") logger = the_app_logger.getAppLogger() class LumiClockApplication(tk.Frame): """ Main window of the application. Designed to be a singleton. """ def __init__(self, master=None, sensor=None, display=None): tk.Frame.__init__(self, master, bg='black') self._sensor = sensor self._display = display self.last_time = "" self.master = master self.toggle_ampm = True self.fullscreen = False
def __init__(self, club_id, create_delay=1, log_file='integration_user.log'):
    """Look up — or create — the NIF integration user for a club.

    Queries the Lungo REST API for an active user in the current realm:
    * exactly one hit  -> adopt its credentials, optionally verify login;
    * zero hits / 404  -> create a new integration user;
    * multiple hits    -> raise NifIntegrationUserError.

    create_delay > 0 enables the timed authentication check after
    lookup/creation; log_file is kept for use by the API clients.
    """
    # Alphabet presumably used elsewhere for credential generation
    # (note the duplicated trailing zero in '01234567890').
    self.ALPHNUM = ('abcdefghijklmnopqrstuvwxyz' +
                    'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + '01234567890')
    self.username = None
    self.password = None
    self.nif_user = {}
    self.user_id = None
    self.club_created = None
    self.club_name = None
    self.log_file = log_file
    self.log = AppLogger('klubb-{0}'.format(club_id))
    self.log.debug('[Integration user]')
    self.club_id = club_id
    self.test_client = None
    # Find active integration users for this club in this realm.
    api_user = requests.get(
        '%s/integration/users/?where={"club_id": %s, "_active": true, "_realm": "%s"}&max_results=1000'
        % (API_URL, self.club_id, NIF_REALM),
        headers=API_HEADERS)
    if api_user.status_code == 200:
        api_users_json = api_user.json()
        if len(api_users_json['_items']) > 1:  # multiple users
            self.log.error(
                'More than one active club in realm {} for club id {}'.
                format(NIF_REALM, self.club_id))
            raise NifIntegrationUserError('More than one active club')
        elif len(api_users_json['_items']) == 1:  # One user only
            api_user_json = api_users_json['_items'][0]
            # NOTE(review): '******' contains no {} placeholders, so
            # .format(...) returns it unchanged and all three arguments
            # are ignored — looks like a redacted format string; verify
            # against the original source.
            self.username = '******'.format(
                NIF_CLUB_APP_ID, api_user_json['function_id'],
                api_user_json['username'])
            self.password = api_user_json['password']
            self.user_id = api_user_json['id']
            self.log.debug('Using existing integration user {}'.format(
                self.username))
            if 'club_created' in api_user_json:
                self.club_created = api_user_json['club_created']
                self.club_name = api_user_json['club_name']
            else:
                # Older records lack club details; fetch them now.
                self.club_created, self.club_name = self._get_club_details(
                )
            if create_delay > 0:
                if self._time_authentication(
                        create_delay=create_delay) is True:
                    self.log.debug('Authentication ok')
                else:
                    # @TODO set user _active = False?
                    self.log.error('Failed authentication via Hello')
                    time.sleep(5)
        elif len(api_users_json['_items']
                 ) == 0:  # No users found but 200 anyway
            """Not found create user!"""
            self.log.debug(
                'No existing integration user found but http 200, creating...'
            )
            self.club_created, self.club_name = self._get_club_details()
            if self._create():
                self.log.debug(
                    'Created integration user for club id {}'.format(
                        self.club_id))
                if create_delay > 0:
                    self._time_authentication(create_delay=create_delay)
            else:
                raise NifIntegrationUserCreateError
        else:
            # len() < 0 can't happen; defensive branch.
            self.log.exception(
                'Creation of user for club id {} failed, got {} users and http 200'
                .format(self.club_id, len(api_users_json['_items'])))
    elif api_user.status_code == 404:
        """Not found create user!"""
        self.log.debug('No existing integration user found, creating...')
        self.club_created, self.club_name = self._get_club_details()
        if self._create():
            self.log.debug(
                'Created integration user for club id {}'.format(
                    self.club_id))
            # if create_delay > 0:
            #    self.log('Delaying {}s before proceeding'.format(create_delay))
            #    time.sleep(create_delay)
            # Test authentication of user
            # Only if create_delay > 0
            # This will try to loop in intervals until authenticated or timeout
            if create_delay > 0:
                self._time_authentication(create_delay=create_delay)
        else:
            raise NifIntegrationUserCreateError
    else:
        self.log.exception('Unknown error')
        raise NifIntegrationUserError
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils

from app_logger import AppLogger
from .nets import Discriminator
from .nets import Generator

# built with https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html

# NOTE(review): `torch`, `datetime` and `Path` are used below but not
# imported in this chunk — presumably imported earlier in the file; verify.
__log = AppLogger(__name__)

__image_size = 256
# Use the first CUDA device when one is available, otherwise CPU.
ngpu = torch.cuda.device_count()
__uses_cuda = torch.cuda.is_available() and ngpu > 0
device = torch.device("cuda:0" if __uses_cuda else "cpu")

# Per-run output directories stamped with the session start time (MMDD_HHMM).
__session_start = datetime.now().strftime("%m%d_%H%M")
models_dir = Path(__file__).parent.parent / Path("models") / __session_start
plots_dir = Path(__file__).parent.parent / Path("plots") / __session_start


# NOTE(review): signature truncated in this chunk; body continues elsewhere.
def start_training(img_root, num_epochs, batch_size=128, learning_rate=2e-4,
'''
This class handles the writing of APDM Opal sensor data to files.
'''
import csv
import settings
from app_logger import AppLogger
from datetime import datetime

logger = AppLogger(__name__)


class StreamCsvWriter:

    def __init__(self):
        """Open a timestamped CSV file (trial_YYYYMMDD-HHMMSS.csv) in the
        working directory and write the header row."""
        timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
        csv_name = 'trial_{0}.csv'.format(timestamp)
        # NOTE(review): the file handle is never closed and no reference
        # is kept to it — rows may be lost on abnormal exit; consider a
        # close() method or context-manager support.
        file = open(csv_name, 'w')
        self.sensor_data_csv = csv.writer(file)
        self._write_header()

    def _write_header(self):
        # Single header row describing all sensor channels, wrapped in a
        # list because write() takes row collections.
        column_names = [[
            'computer_unix_time_ms', 'sensor_unix_time_ms', 'device_id',
            'accel_x', 'accel_y', 'accel_z', 'gyro_x', 'gyro_y', 'gyro_z',
            'mag_x', 'mag_y', 'mag_z'
        ]]
        self.write(column_names)

    def write(self, sensor_data):
        # NOTE(review): body appears truncated in this chunk — only the
        # debug log is visible; the actual csv write presumably follows.
        logger.logger.debug(
            "Writing sensor data to CSV file: {0}".format(sensor_data))
sys.exit(0) # Signal maps to function living in global ex SIGKILL signal_map = { signal.SIGHUP: shutdown, signal.SIGINT: shutdown, signal.SIGTTIN: shutdown, signal.SIGTTOU: shutdown, signal.SIGTERM: shutdown, signal.SIGTSTP: shutdown, signal.SIGUSR1: shutdown, } if __name__ == '__main__': log = AppLogger(name='streamfixdaemon') log.info('[STARTUP]') log.info('Entering daemon context') with DaemonContext( signal_map=signal_map, detach_process=True, # False for running front stdin=None, stdout=sys.stdout, # None stderr=sys.stderr, # None pidfile=pidfile.PIDLockFile('{}/{}'.format(os.getcwd(), STREAMFIX_PID_FILE)), chroot_directory=None, # Same working_directory='{}/'.format(os.getcwd())): stream = NifStream()
import urllib.request
import urllib.parse
import urllib.error
from html.parser import HTMLParser
import ssl
import datetime
import json
import os
import os.path
from app_logger import AppLogger
import sys
import threading

# Logger init
app_logger = AppLogger("smf-extension")
logger = app_logger.getAppLogger()
# logger.debug("Python system path: %s", sys.path)

# Configuration lock
dialog_lock = threading.Lock()


class QConfiguration:
    """
    Encapsulates Intrinio configuration including credentials.
    """
    # Class-level defaults; presumably populated from a config file or
    # credentials dialog later in the class — confirm below this chunk.
    auth_user = ""
    auth_passwd = ""
    cacerts = ""
    # Base URL for Intrinio services
def __init__(self,
             org_id,
             username,
             password,
             created,
             stopper=False,
             restart=False,
             background=False,
             initial_timedelta=0,
             overlap_timedelta=0,
             lock=None,
             sync_type='changes',
             sync_interval=NIF_CHANGES_SYNC_INTERVAL,
             populate_interval=NIF_POPULATE_INTERVAL):
    """Per-club sync worker thread.

    Validates sync_type, sets up logging, timezone handling and the NIF
    sync client, then registers a recurring `sync` job on an APScheduler
    (background or blocking).

    Raises a plain Exception for an invalid sync_type or when the NIF
    sync client cannot be created.
    """
    self.state = SyncState()
    # Init thread
    super().__init__(name='klubb-{0}'.format(org_id))
    # Only these four sync types are supported.
    if sync_type in ['changes', 'license', 'competence', 'federation']:
        self.sync_type = sync_type
    else:
        raise Exception('{} is not a valid sync type'.format(sync_type))
    self.id = org_id
    self.username = username
    self.started = datetime.now()
    self.sync_errors = 0
    # self.sync_errors_max = 3  # Errors in a row!
    self.sync_interval = sync_interval  # minutes
    # NOTE(review): documented as days here but logged as hours below —
    # confirm the intended unit.
    self.populate_interval = populate_interval  # days
    self.initial_timedelta = initial_timedelta
    self.overlap_timedelta = overlap_timedelta
    self.messages = 0  # Holds number of successfully processed messages
    self.stopper = stopper
    self.background = background
    self.initial_start = None
    self.from_to = [None, None]
    self.sync_started = False
    self.tz_local = tz.gettz(LOCAL_TIMEZONE)
    self.tz_utc = tz.gettz('UTC')
    # Init logger
    self.log = AppLogger(name='klubb-{0}'.format(org_id),
                         stdout=not background,
                         last_logs=100,
                         restart=restart)
    # No stopper, started directly check for stream resume token!
    if self.stopper is False:
        from pathlib import Path
        resume_token = Path(STREAM_RESUME_TOKEN_FILE)
        if resume_token.is_file() is not True:
            self.log.warning(
                'No resume token at {}'.format(STREAM_RESUME_TOKEN_FILE))
            self.log.warning(
                'Requires stream to have or be running and a valid token file'
            )
    # Accept a caller-supplied semaphore; otherwise fall back to a no-op
    # FakeSemaphore so the worker can run single-threaded.
    if lock is not None and (isinstance(lock, threading.BoundedSemaphore)
                             or isinstance(lock, threading.Semaphore)):
        self.lock = lock
    else:
        self.lock = FakeSemaphore(
        )  # Be able to run singlethreaded as well
    # Lungo REST API
    self.api_integration_url = '%s/integration/changes' % API_URL
    # Make a startup log entry
    # NOTE(review): the plaintext password is written to the debug log.
    self.log.debug('[STARTUP]')
    self.log.debug('Org_id: {0}'.format(org_id))
    self.log.debug('Login: {0}'.format(username))
    self.log.debug('Pwd: {0}'.format(password))
    self.log.debug('Created: {0}'.format(created))
    self.log.debug('Skew: {0} seconds'.format(
        self.initial_timedelta))
    self.log.debug('Sync: {0} minutes'.format(self.sync_interval))
    self.log.debug('Populate: {0} hours'.format(self.populate_interval))
    self.log.debug('Api url: {0}'.format(self.api_integration_url))
    # Created
    self.org_created = dateutil.parser.parse(created)
    if self.org_created.tzinfo is None or self.org_created.tzinfo.utcoffset(
            self.org_created) is None:
        """self.org_created is naive, no timezone we assume CET"""
        self.org_created = self.org_created.replace(tzinfo=self.tz_local)
    self.org_id = org_id
    try:
        # test_login=False: credentials are only verified on first use.
        self.nif = NifApiSynchronization(username,
                                         password,
                                         realm=NIF_REALM,
                                         log_file=SYNC_LOG_FILE,
                                         test_login=False)
    except:
        # NOTE(review): bare except — narrows poorly; logs then re-raises.
        self.log.exception(
            'Sync client creation for {} failed, terminating'.format(
                username))
        # sys.exit(0)
        raise Exception('Could not create sync client')
    # Setup job scheduler
    if self.background:
        self.scheduler = BackgroundScheduler()
        self.log.info('Scheduler: BackgroundScheduler')
    else:
        self.scheduler = BlockingScheduler()
        self.log.info('Scheduler: BlockingScheduler')
    self.job_misfires = 0
    # Track executed and missed job events for misfire accounting.
    self.scheduler.add_listener(self._job_fire, EVENT_JOB_EXECUTED)
    self.scheduler.add_listener(self._job_misfire, EVENT_JOB_MISSED)
    self.job = self.scheduler.add_job(self.sync,
                                      'interval',
                                      minutes=self.sync_interval,
                                      max_instances=1)
    self.state.set_state(state='finished')
    Shuts down all workers, then restarts them"""


# Signal maps to function living in global ex SIGKILL
signal_map = {
    signal.SIGHUP: shutdown_workers,
    signal.SIGINT: shutdown_workers,
    signal.SIGTTIN: shutdown_workers,
    signal.SIGTTOU: shutdown_workers,
    signal.SIGTERM: shutdown_workers,
    signal.SIGTSTP: shutdown_workers,
    # SIGUSR1 triggers a restart instead of a plain shutdown.
    signal.SIGUSR1: reboot_workers,
}

if __name__ == '__main__':
    log = AppLogger(name='syncdaemon')
    log.info('[STARTUP]')
    log.info('** ENV: {} **'.format(NIF_REALM))
    log.info('Entering daemon context')
    # Daemonize: detach, install signal handlers, pin the working
    # directory and guard against double-starts with a PID lock file.
    # NOTE(review): the with-body is truncated in this chunk.
    with DaemonContext(signal_map=signal_map,
                       detach_process=True,  # False for running front
                       stdin=None,
                       stdout=None,  # sys.stdout,  # None
                       stderr=None,  # sys.stderr,  # None
                       pidfile=pidfile.PIDLockFile(
                           '{}/{}'.format(os.getcwd(), SYNCDAEMON_PID_FILE)),
                       chroot_directory=None,  # Same
                       working_directory='{}/'.format(os.getcwd())
                       ):