def __init__(self):
    """Initialize placeholder state; real values are defined in subclasses."""
    self.connected = False
    # Enforce a single running instance of the application.
    singleton.SingleInstance()
    # Work queues and worker thread are created by subclasses.
    self.q1 = None
    self.q2 = None
    self.th = None
def tendo_singleton():
    """Acquire the process-wide single-instance lock via tendo.

    Returns:
        The SingleInstance handle; the caller must keep it alive for the
        lifetime of the process, or the lock is released.

    Raises:
        OSError: if another instance of db_logger_gui already holds the lock.
    """
    from tendo import singleton
    try:
        me = singleton.SingleInstance()
    except singleton.SingleInstanceException as e:
        # Chain the original exception so the root cause shows in tracebacks.
        raise OSError('Only one instance of db_logger_gui allowed') from e
    return me
def main(argv):
    """Entry point: keep the desktop wallpaper synced on a schedule.

    Loops forever, setting the next wallpaper and pausing until its target
    time; exits on a ValueError from argument/wallpaper generation.
    """
    import sys
    try:
        # Will sys.exit(-1) if another instance is running (some tendo
        # versions raise SingleInstanceException instead).
        me = singleton.SingleInstance()
    except singleton.SingleInstanceException:
        # Use sys.exit rather than the site-builtin exit() in program code.
        sys.exit(-1)
    try:
        args = arguments(argv)
        sync_wallpapers, reset_wallpapers = generate_wallpapers(args)
        while True:
            wallpaper, target = sync_wallpapers()
            set_wallpaper(wallpaper)
            try:
                pause_until(target)
            except SysTimeModified:
                # Raised when the clock reads earlier than when the pause
                # began, i.e. a hard/manual clock change.  Reset the
                # wallpaper list so the program stays in a defined state;
                # it is resynced by sync_wallpapers on the next iteration.
                reset_wallpapers()
    except ValueError as e:
        print(e)
    __socket.close()
    return
def run(app):
    # Crawler entry point: loops forever, crawling either one site (when
    # 'site-id' appears in argv) or every Plone site under the Zope root.
    singleton.SingleInstance('crawler')
    app = spoof_request(app)  # noqa
    login_as_admin(app)  # noqa
    count = 0
    while True:
        try:
            if 'site-id' in sys.argv:
                # NOTE(review): sys.argv is a list, so indexing it with the
                # string 'site-id' raises TypeError, not the KeyError the
                # handler below expects -- looks broken; probably the value
                # following '--site-id' was intended. Confirm before fixing.
                siteid = sys.argv['site-id']
                setup_site(app[siteid])
                crawl_site(app[siteid])  # noqa
            else:
                for oid in app.objectIds():  # noqa
                    obj = app[oid]  # noqa
                    if IPloneSiteRoot.providedBy(obj):
                        try:
                            setup_site(obj)
                            # Sync the ZODB connection so we see fresh data.
                            obj._p_jar.sync()
                            # Every 10th pass gets a full crawl.
                            crawl_site(obj, count % 10 == 0)
                        except Exception:
                            logger.error('Error crawling site %s' % oid, exc_info=True)
        except KeyError:
            pass
        except Exception:
            logger.error('Error setting up crawling', exc_info=True)
        logger.info('Waiting to crawl again')
        time.sleep(10 * 60)
        count += 1
def main():
    """Application entry point: splash screen, login dialog, then main window."""
    # Make sure the taskbar icon is shown; when using pyinstaller to build an
    # exe you can comment this out.
    ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("BatterayDetection")
    me = singleton.SingleInstance()
    app = QtGui.QApplication(sys.argv)
    splash = QSplashScreen(QPixmap(":/Start/Resource/start.png"), Qt.WindowStaysOnTopHint)
    # Disables widget input events, or when mouse click splash will be hide.
    splash.setDisabled(True)
    splash.show()
    splash.showMessage("Start Application ......", Qt.AlignBottom, Qt.black)
    app.processEvents()
    # Keep the splash visible for at least one second while pumping events.
    start = time()
    while time() - start < 1:
        sleep(0.1)
        app.processEvents()
    login = Login()
    #login.show()
    splash.finish(login)
    # Only enter the main window / event loop when the login is accepted.
    if (login.exec_() == QtGui.QDialog.Accepted):
        main = MainWindow()
        main.show()
        sys.exit(app.exec_())
def __init__(self):
    """Create the work queues and start the engine worker thread."""
    self.connected = False
    # Enforce a single running instance of the application.
    singleton.SingleInstance()
    # q1/q2 carry requests/responses between the GUI and the engine thread.
    self.q1 = queue.Queue()
    self.q2 = queue.Queue()
    self.th = engine_moana(self.q1, self.q2)
    self.th.start()
def __init__(self, parent=None):
    """Build the Light Meter 3000 GUI and its spike-filter node tree.

    Exits the process when another instance is running or when the LabRAD
    manager / telecomm server are unavailable.
    """
    # Establish a connection to LabRAD.
    try:
        # will sys.exit(-1) if other instance is running; the bare except
        # below appears intended to catch that SystemExit too -- narrowing
        # it to Exception would change behavior.
        me = singleton.SingleInstance()
    except:
        print("Multiple instances cannot be running")
        time.sleep(2)
        sys.exit(1)
    try:
        cxn = labrad.connect()
    except:
        print("Please start the LabRAD manager")
        time.sleep(2)
        sys.exit(0)
    try:
        tele = cxn.telecomm_server
    except:
        print("Please start the telecomm server")
        time.sleep(2)
        sys.exit(1)
    self.gui = MGui.MGui()
    # Serial device on COM7; logging settings locked against user changes.
    lm3000 = RS232Device("Light Meter 3000", "COM7", baud=115200,
                         lock_logging_settings=True)
    lm3000.addButton("Off", 'b0', message="You are about to turn off the LED.")
    lm3000.addButton("20%", 'b2')
    lm3000.addButton("50%", 'b5')
    lm3000.addButton("80%", 'b8')
    lm3000.addButton("100%", 'b9')
    lm3000.addParameter("Light Level", "s", log=False, show=False)
    lm3000.setYLabel("Light Level")
    lm3000.addPlot()
    lm3000.begin()
    self.gui.addDevice(lm3000)
    # Node tree: raw light level -> spike filter -> filtered light anchor.
    self.nodeTree = MNodeTree.NodeTree()
    lightMeterNode = MDeviceNode.MDeviceNode(lm3000)
    self.nodeTree.addNode(lightMeterNode)
    rawLightOutput = lightMeterNode.getAnchorByName("Light Level")
    filtLight = lightMeterNode.addAnchor(name="Filtered Light Level",
                                         type="input", terminate=True)
    # avg = runningAverage.runningAverage()
    # avg.setWindowWidth(100)
    # avgInput = avg.getAnchorByName("data")
    # avgOutput = avg.getAnchorByName("running avg")
    spikeFilt = spikeFilter.spikeFilter()
    # You can set the data of an input anchor when nothing is connected.
    spikeFilt.getAnchorByName("threshold").setData(50)
    rawSpikeDataInput = spikeFilt.getAnchorByName("raw_data")
    deSpikedData = spikeFilt.getAnchorByName("filtered_data")
    self.nodeTree.connect(rawLightOutput, rawSpikeDataInput)
    self.nodeTree.connect(deSpikedData, filtLight)
    self.gui.startGui('Light Meter 3000 GUI', tele)
def handle(self, *args, **options):
    """Scrape every active board once, recording an Update row per board."""
    # Prevent multiple instances. Apparently fcntl.lockf is very useful and
    # does completely nothing.
    me = singleton.SingleInstance()
    boards = Board.objects.filter(active=True)
    # Show progress?
    if options['progress']:
        progress = True
    else:
        progress = False
    # Get new data for each board.
    for board in boards:
        # Info: record when processing started (UTC-aware).
        processing_start = datetime.datetime.utcnow().replace(tzinfo=utc)
        update = Update.objects.create(
            board=board,
            start=processing_start,
            used_threads=AppSettings.get('SCRAPER_THREADS_NUMBER'))
        try:
            # Actual update.
            # NOTE(review): if BoardScraper() itself raises, `scraper` stays
            # unbound and the stats calls below raise NameError -- confirm.
            scraper = BoardScraper(board, progress=progress)
            scraper.update()
            # Info.
            update.status = Update.COMPLETED
        except Exception as e:
            sys.stderr.write('%s\n' % (e))
        finally:
            # Info: mark failure and attach timing/stats, best-effort.
            try:
                if update.status != Update.COMPLETED:
                    update.status = Update.FAILED
                processing_end = datetime.datetime.utcnow().replace(
                    tzinfo=utc)
                processing_time = processing_end - processing_start
                update.end = processing_end
                update = scraper.stats.add_to_record(
                    update, processing_time)
            except Exception as e:
                sys.stderr.write('%s\n' % (e))
            finally:
                update.save()
        # Everything below is just info.
        print('%s Board: %s %s' % (
            datetime.datetime.now(),
            board,
            scraper.stats.get_text(processing_time),
        ))
def run(app):
    """Run forced-publish checks on every Plone site under the Zope root."""
    singleton.SingleInstance('forcedpublishalert')
    # Elevate to the admin user before touching site content.
    admin = app.acl_users.getUser('admin')  # noqa
    newSecurityManager(None, admin.__of__(app.acl_users))  # noqa
    for oid in app.objectIds():  # noqa
        candidate = app[oid]  # noqa
        if IPloneSiteRoot.providedBy(candidate):
            check_site(candidate)
def run(app):
    """Generate a link report for each Plone site under the Zope root."""
    singleton.SingleInstance('linkreport')
    # Elevate to the admin user before touching site content.
    admin = app.acl_users.getUser('admin')
    newSecurityManager(None, admin.__of__(app.acl_users))
    for oid in app.objectIds():
        candidate = app[oid]
        if IPloneSiteRoot.providedBy(candidate):
            run_link_report(candidate)
def run(app):
    """Retrieve social counts for each Plone site under the Zope root."""
    singleton.SingleInstance('socialcounts')
    # Elevate to the admin user before touching site content.
    admin = app.acl_users.getUser('admin')  # noqa
    newSecurityManager(None, admin.__of__(app.acl_users))  # noqa
    for oid in app.objectIds():  # noqa
        candidate = app[oid]  # noqa
        if IPloneSiteRoot.providedBy(candidate):
            retrieve(candidate)
def main(force_reprocess_all=False):
    """Sync AATAMS SATTAG DM zip files from FTP and stage a manifest.

    Args:
        force_reprocess_all: when True, list every ZIP under the output
            directory instead of only the files newly downloaded by this sync.
    """
    # will sys.exit(-1) if other instance is running
    me = singleton.SingleInstance()

    wip_soop_path = os.path.join(os.environ['WIP_DIR'], 'AATAMS_SATTAG_DM')
    lftp_output_path = os.path.join(wip_soop_path, 'zipped')
    log_filepath = os.path.join(wip_soop_path, 'aatams_sattag_dm.log')
    logging = IMOSLogging()
    logger = logging.logging_start(log_filepath)

    if not os.path.exists(lftp_output_path):
        os.makedirs(lftp_output_path)

    lftp_access = {
        'ftp_address': os.environ['IMOS_PO_CREDS_AATAMS_FTP_ADDRESS'],
        'ftp_subdir': '/',
        'ftp_user': os.environ['IMOS_PO_CREDS_AATAMS_FTP_USERNAME'],
        'ftp_password': os.environ['IMOS_PO_CREDS_AATAMS_FTP_PASSWORD'],
        'ftp_exclude_dir': '',
        'lftp_options': '--only-newer --exclude-glob TDR/* --exclude-glob *_ODV.zip',
        'output_dir': lftp_output_path,
    }
    lftp = LFTPSync()
    logger.info('Download new AATAMS SATTAG DM files')
    lftp.lftp_sync(lftp_access)

    # optional function argument to force the reprocess of all ZIP files
    if force_reprocess_all:
        manifest_list = list_files_recursively(lftp_output_path, '*.zip')
    else:
        manifest_list = lftp.list_new_files_path(check_file_exist=True)

    # Write the manifest through the mkstemp fd directly: the original code
    # opened the same path a second time and closed the raw fd separately.
    fd, manifest_file = mkstemp()
    with os.fdopen(fd, 'w') as f:
        for zip_file in manifest_list:
            if zip_file:  # skip empty entries (previously '' slipped through)
                f.write('%s\n' % zip_file)
    os.chmod(manifest_file, 0o664)  # since mkstemp creates 600 for security

    # only stage the manifest file when it is non empty
    if os.stat(manifest_file).st_size > 0:
        logger.info('ADD manifest to INCOMING_DIR')
        manifest_file_inco_path = os.path.join(
            os.environ['INCOMING_DIR'], 'AATAMS', 'AATAMS_SATTAG_DM',
            'aatams_sattag_dm_lftp.%s.manifest' %
            datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S'))
        if not os.path.exists(manifest_file_inco_path):
            shutil.copy(manifest_file, manifest_file_inco_path)
        else:
            logger.warning('File already exist in INCOMING_DIR')
            exit(1)

    lftp.close()
    logging.logging_stop()
def run(app):
    """Archive content in every Plone site under the Zope root."""
    singleton.SingleInstance('archivecontent')
    app = spoof_request(app)  # noqa
    for oid in app.objectIds():  # noqa
        obj = app[oid]  # noqa
        if IPloneSiteRoot.providedBy(obj):
            try:
                archive(obj)
            except Exception:
                # Narrowed from a bare except (matches the sibling scripts):
                # SystemExit/KeyboardInterrupt now propagate instead of being
                # swallowed.  Lazy %-args let logging do the interpolation.
                logger.error('Could not archive %s', oid, exc_info=True)
def cli(opacity, time, ntimepoints, simple, debug):
    """Simple focus animations for tiling window managers."""
    # Must run before any other local is bound: capture only the CLI args.
    params = locals()
    lock = singleton.SingleInstance()
    log('Initializing with parameters:')
    log('%s', params)
    flasher = Flasher(opacity, time, ntimepoints, simple)
    if not debug:
        flasher.monitor_focus()
    else:
        log('Flasher attributes: %s', flasher.__dict__)
def run(app):
    """Import an audit log into the site named on the command line."""
    singleton.SingleInstance('importauditlog')
    args = get_args()
    # Elevate to the admin user before importing.
    admin = app.acl_users.getUser('admin')
    newSecurityManager(None, admin.__of__(app.acl_users))
    target_site = app[args.site_id]
    setSite(target_site)
    doimport(args)
def launch():
    """Boot the Qt application and show the appropriate first window."""
    freeze_support()
    check_app_running()
    me = singleton.SingleInstance()
    windows_appusermodelid()
    app = QtGui.QApplication(sys.argv)
    QtGui.QApplication.setQuitOnLastWindowClosed(False)
    # Local references keep the window objects alive until exec_ returns.
    if check_settings() == False or is_user_info_invalid() == True:
        waiting = WaitingWindow()
    else:
        main = MainWindow()
    sys.exit(app.exec_())
def run(app):
    """Export the audit log, optionally scoped to a single site."""
    singleton.SingleInstance('exportauditlog')
    args = get_args()
    # Elevate to the admin user before exporting.
    admin = app.acl_users.getUser('admin')  # noqa: F821
    newSecurityManager(None, admin.__of__(app.acl_users))  # noqa: F821
    if args.site_id is not None:
        setSite(app[args.site_id])
    export(args)
def handle(self, *args, **options):
    """Run the Dailymotion harvesters under a single-instance lock."""
    me = singleton.SingleInstance(flavor_id="crondm")
    try:
        logger.info("Will run the Dailymotion harvesters.")
        dailymotionch.run_dailymotion_harvester()
    except Exception:
        # Narrowed from a bare except: SystemExit/KeyboardInterrupt now
        # propagate instead of being swallowed by the cron boundary.
        msg = u"Highest exception for the dailymotion cron. Not good."
        logger.exception(msg)
    logger.info("The harvest has end for the DailyMotion harvesters.")
def handle(self, *args, **options):
    """Run the Twitter harvesters under a single-instance lock."""
    me = singleton.SingleInstance(flavor_id="crontw")
    try:
        logger.info("Will run the Twitter harvesters.")
        twitterch.run_twitter_harvester()
    except Exception:
        # Narrowed from a bare except: SystemExit/KeyboardInterrupt now
        # propagate instead of being swallowed by the cron boundary.
        msg = u"Highest exception for the twitter cron. Not good."
        logger.exception(msg)
    logger.info("The harvest has end for the Twitter harvesters.")
def handle(self, *args, **options):
    """Run the Youtube harvesters under a single-instance lock."""
    me = singleton.SingleInstance(flavor_id="cronyt")
    try:
        logger.info("Will run the Youtube harvesters.")
        youtubech.run_youtube_harvester()
    except Exception:
        # Narrowed from a bare except: SystemExit/KeyboardInterrupt now
        # propagate instead of being swallowed by the cron boundary.
        msg = u"Highest exception for the youtube cron. Not good."
        logger.exception(msg)
    logger.info("The harvest has end for the Youtube harvesters.")
def handle(self, *args, **options):
    """Run the Facebook harvesters under a single-instance lock."""
    me = singleton.SingleInstance(flavor_id="cronfb")
    try:
        logger.info("Will run the Facebook harvesters.")
        facebookch.run_facebook_harvester()
    except Exception:
        # Narrowed from a bare except: SystemExit/KeyboardInterrupt now
        # propagate instead of being swallowed by the cron boundary.
        msg = u"Highest exception for the facebook cron. Not good."
        logger.exception(msg)
    logger.info("The harvest has end for the Facebook harvesters.")
def run(app):
    """Update password-expiry data for every Plone site in the Zope root."""
    singleton.SingleInstance('pwexpiry')
    user = app.acl_users.getUser('admin')  # noqa
    newSecurityManager(None, user.__of__(app.acl_users))  # noqa
    for oid in app.objectIds():  # noqa
        obj = app[oid]  # noqa
        if IPloneSiteRoot.providedBy(obj):
            try:
                update_password_expiry(obj)
            except Exception:
                # Lazy %-args: logging interpolates only when the record is
                # actually emitted.
                logger.error('Could not update password expiry data for %s',
                             oid, exc_info=True)
def run(app):
    """Clean stale users from every Plone site in the Zope root."""
    singleton.SingleInstance('cleanusers')
    user = app.acl_users.getUser('admin')  # noqa
    newSecurityManager(None, user.__of__(app.acl_users))  # noqa
    for oid in app.objectIds():  # noqa
        obj = app[oid]  # noqa
        if IPloneSiteRoot.providedBy(obj):
            try:
                clean(obj)
            except Exception:
                # Lazy %-args: logging interpolates only when the record is
                # actually emitted.
                logger.error('Could not clean users %s', oid, exc_info=True)
def main(argv):
    """Entry point: schedule TV on/off + source-switch jobs and run forever."""
    try:
        # ensure single instance process
        me = tendo_singleton.SingleInstance()
        # set high priority
        # os.nice(-20)
        # os.setpriority(os.PRIO_PROCESS, 0, -20)
        # import psutil
        # p = psutil.Process()
        # p.ionice(0)
        logging.info("Started")
        loop = asyncio.get_event_loop()
        # https://stackoverflow.com/questions/2720319/python-figure-out-local-timezone
        local_tz_str = datetime.datetime.now(
            datetime.timezone.utc).astimezone().tzinfo.tzname(
                datetime.datetime.now())
        logging.info("TZ: " + local_tz_str)
        #scheduler = BackgroundScheduler(timezone=local_tz_str, standalone=False, job_defaults={'misfire_grace_time': 60 * 60}, )
        scheduler = BackgroundScheduler(
            timezone=None,
            standalone=False,
            job_defaults={'misfire_grace_time': 60 * 60},
        )
        scheduler.add_job(
            set_tv_source, 'cron',
            minute=0)  # Turn the TV on/off, and set the TV source
        scheduler.add_job(
            set_tv_source, 'cron',
            minute=30)  # Turn the TV on/off, and set the TV source
        # Daily reset job
        # scheduler.add_job(daily_check, 'cron', hour='3', minute='0', args=[scheduler])
        # scheduler.add_job(set_daily_jobs, 'cron', hour='0', minute='1', args=[scheduler])
        scheduler.start()
        scheduler.print_jobs()
        set_hebdaily_jobs(scheduler)  # check the times for now
        logging.debug('waiting...')
        # Blocks forever; the cleanup below only runs once the loop stops.
        loop.run_forever()
        loop.close()
        scheduler.shutdown(wait=False)
    except Exception:
        logging.exception('unhandled exception')
    except SystemExit:
        # SystemExit derives from BaseException, so it is not swallowed by
        # the Exception clause above.
        logging.debug('System Exiting')
def handle(self, *args, **options):
    """Archive harvested data into ElasticSearch under a single-instance lock."""
    me = singleton.SingleInstance(flavor_id="cronES")
    try:
        logger.info("Will archive data into ElasticSearch")
        twitterch.esArchive()
        #facebookch.esArchive()
        #youtubech.esArchive()
    except Exception:
        # Narrowed from a bare except: SystemExit/KeyboardInterrupt now
        # propagate instead of being swallowed by the cron boundary.
        msg = u"Highest exception for the ElasticSearch cron. Not good."
        logger.exception(msg)
    logger.info("The archiving into the ElasticSearch has ended.")
def launch():
    """Boot the Qt application: single-instance check, icon, first window."""
    freeze_support()
    check_app_running()
    me = singleton.SingleInstance()
    # AppUserModelID only matters (and exists) on Windows.
    if sys.platform == "win32":
        windows_appusermodelid()
    app = QApplication(sys.argv)
    app.setWindowIcon(QtGui.QIcon(resource_path("icons/windows_icon.ico")))
    QApplication.setQuitOnLastWindowClosed(False)
    # Local references keep the window objects alive until exec_ returns.
    if check_settings() == False or is_user_info_invalid() == True:
        waiting = WaitingWindow()
    else:
        main = MainWindow()
    sys.exit(app.exec_())
def handle(self, *args, **options):
    """Run the Youtube harvester and always e-mail the routine report."""
    me = singleton.SingleInstance(flavor_id="cronyt")
    m = '%s: Will run the Youtube harvester' % now.strftime('%y-%m-%d_%H:%M')
    print(m)
    youtubeLogger.log(m)
    try:
        harvestYoutube()
    except Exception:
        # Narrowed from a bare except: SystemExit/KeyboardInterrupt now
        # propagate instead of being swallowed.
        youtubeLogger.exception('YOUTUBE ROUTINE HAS ENCOUNTERED A TOP-LEVEL ERROR:')
    finally:
        # Report is printed/mailed whether or not the harvest succeeded.
        print(myEmailTitle[0])
        print(myEmailMessage[0])
        youtubeLogger.log("The harvest has ended for the Youtube harvesters", showTime=True)
        if not DEBUG:
            send_routine_email(myEmailTitle[0], myEmailMessage[0])
def handle(self, *args, **options):
    # Run the Twitter harvesters under a single-instance lock.
    # (Python 2 code: note the print statement below.)
    me = singleton.SingleInstance(flavor_id="crontw")
    try:
        logger.info("Will run the Twitter harvesters.")
        twitterch.run_twitter_harvester()
    except:
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt -- consider narrowing to Exception.
        print "Global failure. exception logged in 'twitter.log'"
        msg = u"Highest exception for the twitter cron. Not good."
        logger.exception(msg)
        dLogger.exception('TOP LEVEL ERROR:')
    logger.info("The harvest has end for the Twitter harvesters." + " " * 200)
def handle(self, *args, **options):
    """Run the Twitter harvester and always e-mail the routine report."""
    me = singleton.SingleInstance(flavor_id="crontw")
    m = '%s: Will run the Twitter harvester' % now.strftime('%y-%m-%d %H:%M')
    print(m)
    twitterLogger.log(m)
    try:
        harvestTwitter()
    except Exception:
        # Narrowed from a bare except: SystemExit/KeyboardInterrupt now
        # propagate instead of being swallowed.
        myEmailMessage[0] = 'TWITTER HARVEST ROUTINE HAS ENCOUNTERED A TOP-LEVEL ERROR:'
        twitterLogger.exception(myEmailMessage[0])
    finally:
        # Report is printed/mailed whether or not the harvest succeeded.
        print(myEmailTitle[0])
        print(myEmailMessage[0])
        twitterLogger.log("The harvest has ended for the Twitter harvesters", showTime=True)
        if not DEBUG:
            send_routine_email(myEmailTitle[0], myEmailMessage[0])
def main():
    # Batch job (Python 2): walk the gallery output tree and sync each file's
    # `last_modified` column with the mtime of its original file.
    reload(sys)  # Python 2 idiom required before setdefaultencoding
    sys.setdefaultencoding('utf-8')
    #sys.tracebacklimit=0
    # allow only one instance of a script
    me = singleton.SingleInstance()
    # change the relative directory
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    # get local ini config
    config = configparser.ConfigParser()
    config.read('../resources/config/photo-gallery.conf')
    config.read('../resources/config/default.conf')
    output_dir = config['PHOTO GALLERY']['output_dir']
    input_dir = config['PHOTO GALLERY']['input_dir']
    # connect to mysql (SSCursor streams rows instead of buffering them)
    db = MySQLdb.connect(host=config['DATABASE']['host'],
                         user=config['DATABASE']['user'],
                         passwd=config['DATABASE']['password'],
                         db=config['DATABASE']['dbname'],
                         cursorclass=cursors.SSCursor)
    db.set_character_set('utf8')
    print '<<script start>>'
    # generate thumbnails for files without thumbnails
    for root, dirs, files in os.walk(output_dir):
        for file_id in dirs:
            file_dir = os.path.join(root, file_id)
            # find complete name of original file
            file_original_path = glob.glob(os.path.join(
                file_dir, 'original.*'))[0]
            date = os.path.getmtime(file_original_path)
            try:
                cursor = db.cursor()
                cursor.execute(
                    'UPDATE `file` SET `last_modified` = %s WHERE `file_id` = %s LIMIT 1',
                    (datetime.datetime.fromtimestamp(date).strftime(
                        '%Y-%m-%d %H:%M:%S'), file_id))
                db.commit()
                print 'File#' + file_id + file_original_path + ' date updated to ' + datetime.datetime.fromtimestamp(
                    date).strftime('%Y-%m-%d %H:%M:%S')
            except:
                # NOTE(review): bare except hides the real DB error; consider
                # logging the exception before moving on.
                print 'Failed to update File#' + file_id + file_original_path + ' date to ' + datetime.datetime.fromtimestamp(
                    date).strftime('%Y-%m-%d %H:%M:%S')
    db.close()
    print '<<script end>>'