Example #1
    def __init__(self):
        cfg = Config()
        opencv_home = cfg.get("face_detection", "opencv_home")
        haarcascade = cfg.get("face_detection", "haarcascade")

        self.haarcascade = cv2.CascadeClassifier('{0}/{1}'.format(opencv_home,
                                                                  haarcascade))
Example #2
	def access_token_for_id(cls, id, callback):
		"""Returns the access token for an id, acquiring a new one if necessary."""
		token = Cache.get(cls.auth_cache_key_template % id)
		if token:
			return IOLoop.instance().add_callback(lambda: callback(token))

		# If we don't have an access token cached, see if we have a refresh token
		token = TokenIdMapping.lookup_refresh_token(id)
		if token:
			post_body = urllib.urlencode({
				'client_id': Config.get('oauth', 'client-id'),
				'client_secret': Config.get('oauth', 'client-secret'),
				'refresh_token': token,
				'grant_type': 'refresh_token',
			})
			http_client = AsyncHTTPClient()
			return http_client.fetch(
				'https://accounts.google.com/o/oauth2/token',
				lambda response: cls.on_refresh_complete(response, id, callback),
				method='POST',
				body=post_body,
				request_timeout=20.0,
				connect_timeout=15.0,
			)
		else:
			logging.error("Unable to update access token for %s, no refresh token stored.", id)
			return IOLoop.instance().add_callback(lambda: callback(None))
Example #3
def initialize():
	messages = []
	levels = {'NOTSET': logging.NOTSET, 'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL}
	loglevel = Config.get("logging", "severity")
	logfile = Config.get("logging", "logfile")

	# If the configfile lists a loglevel that is not valid, assume info.
	if(loglevel not in levels):
		# Since the logger is not yet initialized, add the logging-message to the messagelist, so that we can
		#   log it whenever the logger is initialized.
		messages.append(("LogLevel is not correctly set in the config-file. Assuming INFO", logging.ERROR))
		print "A"
		loglevel = "INFO"
	
	rootlogger = logging.getLogger()
	formatter = logging.Formatter('%(asctime)s: %(name)s: %(levelname)s - %(message)s')
	
	fh = logging.FileHandler(logfile)
	fh.setFormatter(formatter)
	rootlogger.addHandler(fh)
	rootlogger.setLevel(levels[loglevel])
	
	messages.append(("Logger initialized", logging.INFO))
		
	# Now that the logger is initialized, log the messages that appeared during the initialization of the module
	logger = logging.getLogger(__name__)
	for m in messages:
		logger.log(m[1], m[0])
Example #4
    def __init__(self, config):
        """
        __init__()
        Purpose:    Constructor for the HootPy class.
        Parameters:    config [type=dictionary]
                        Dictionary containing the configuration parameters for this run.
        """

        self._valid_time = datetime.utcnow()

        for key, value in config.iteritems():
            setattr(self, "_%s" % key, value)

        try:
            meta_filename = config['meta_filename']
        except KeyError:
            meta_filename = "default/meta.hp"

        meta = Config(meta_filename)

        for key, value in meta.iteritems():
            setattr(self, "_%s" % key, value)

        self._var_name_cache = []

        self._sanitizeHootPy()
        self._sanitize()

        return
Example #5
    def __init__(self, message, str = None):       
        cfg = Config('chats', message)

        if (str is None):
            self.str = cfg.get('message')
        else:
            self.str = str
        
        self.str = self.str.replace('\\\n', '').replace('\n','\n\n')
        
        self.duration = cfg.get('duration')
        
        self.font = FontManager.getFont(cfg.get('font'))
        self.font.setPointSize(cfg.get('font_size'))
        self.font_color = QColor.fromRgb(*cfg.get('font_color'))
        
        self.image = QImage(cfg.get('image_path'))
        
        p = cfg.get('image_pos')
        self.image_rect = QRect(0.,0.,self.image.width(),self.image.height())
        self.image_rect.moveCenter(QPoint(p[0],p[1]))
        
        self.text_rect = QRect(*cfg.get('text_rect'))
        
        self.has_cursor = True
        self.blink_elapsed = 0.
        self.blink_time = cfg.get('blink_time')

        self.elapsed = 0.
        self.message_sz = len(self.str)
Example #6
	def get(self):
		"""Initial request handler for receiving auth code."""

		err = self.request.arguments.get('error', [None])[0]
		if err is not None:
			if err == 'access_denied':
				return self.redirect(self.reverse_url('auth_denied'))
			return self.send_error(500)	

		self.http_client = AsyncHTTPClient()

		code = self.request.arguments.get('code', [None])[0]
		if code is not None:
			self.gplus_auth_code = code
			# OAuth step #2: Receive authorization code, POST it
			# back to Google to get an access token and a refresh token.
			post_body = urllib.urlencode({
				'code': code,
				'client_id': Config.get('oauth', 'client-id'),
				'client_secret': Config.get('oauth', 'client-secret'),
				'redirect_uri': 'http://%s/oauth2callback' % self.request.host,
				'grant_type': 'authorization_code',
			})
			return self.http_client.fetch(
				'https://accounts.google.com/o/oauth2/token',
				self.on_token_request_complete,
				method='POST',
				body=post_body,
				request_timeout=20.0,
				connect_timeout=15.0,
			)
	
		# If we got here, we don't recognize why this endpoint was called.
		self.send_error(501) # 501 Not Implemented
Example #7
    def __init__(self, level_number):
        Level.instance = self
        
        self.number = level_number
        
        self.camera = None
        self.controller = None
        
        self.objects = set()
        self.asteroids = set()
        self.shots = set()
        self.planets = set()
        self.portals = set()
        self.particles = set()
        self.missiles = set()
        self.ship = None
        self.models = {}
        self.enemy_shots = set()
        self.enemy_ships = set()
        
        self.has_skybox = False
        
        cfg = Config('levels',str(level_number))
        
        level_name = cfg.get('name')    
        self.load_file(level_name)
        
        skybox = cfg.get('skybox')
        
        if (self.has_skybox):
            self.setup_skybox('resources/'+skybox)
        
        self.first = True
Example #8
    def test_config_get_team_for_story_none(self):
        config = Config()

        class FakeStory:
            team = "no-such-team"

        team = config.get_team_for_story(FakeStory)
        self.assertEqual(team["ga_org_id"], "DEFAULT-ID")
Example #9
    def test_config_get_team_for_story(self):
        config = Config()

        class FakeStory:
            team = "viz"

        team = config.get_team_for_story(FakeStory)
        self.assertEqual(team["ga_org_id"], "visuals-sample-id")
Example #10
 def __init__(self):
     if (Player.instance is None):
         Player.instance = self
     
     cfg = Config('game', 'Player')
     self.max_hp = cfg.get('max_hp')
     self.initial_lifes = cfg.get('initial_lifes')
     
     self.reset()
Example #11
 def __init__(self, parent):
     QGLWidget.__init__(self, parent)
     
     cfg = Config('game','OpenGL')
     
     self.fps = cfg.get('fps')
     self.clearColor = cfg.get('clear_color')
     
     self.adjust_widget()
     self.adjust_timer()
Example #12
 def __init__(self, message, str = None, pos = None):
     cfg = Config('messages', message)
     
     if (pos is None):
         self.pos = QPoint(*cfg.get('position'))
     else:
         self.pos = QPoint(*pos)
     
     self.velocity = cfg.get('velocity') 
     
     FadeMessage.__init__(self, message, str)
Example #13
 def resizeGL(self, width, height):
     QGLWidget.resizeGL(self,width,height)
     
     glViewport(0,0,width,height)
     
     glMatrixMode(GL_PROJECTION)
     glLoadIdentity()
     
     cfg = Config('game','OpenGL')
     fovy = cfg.get('y_field_of_view')
     z_near = cfg.get('z_near')
     z_far = cfg.get('z_far')
     gluPerspective(fovy,float(width)/height,z_near,z_far)
Example #14
    def __init__(self, message, str = None):
        cfg = Config('messages', message)

        if (str is None):
            self.str = cfg.get('message')
        else:
            self.str = str
        
        self.duration = cfg.get('duration')
        self.fade_duration = cfg.get('fade_duration')
        
        self.color = QColor.fromRgb(*cfg.get('color'))
        self.alpha_final = self.color.alpha()
        self.color.setAlpha(0)
        
        self.font = FontManager.getFont(cfg.get('font'))
        self.font.setPointSize(cfg.get('font_size'))
        
        self.font_color = QColor.fromRgb(*cfg.get('font_color'))
        self.font_alpha_final = self.font_color.alpha()
        self.font_color.setAlpha(0)
        
        self.elapsed = 0.0        
        self.state = 0
        self.tick_funcs = [self.tick_fade_in, self.tick_message, self.tick_fade_out]
Example #15
    def __parse_config(self, config_file):
        from util.config import Config

        config = Config(config_file)
        config.set_general_setting()
        image_target = config.get_setting("image", "target")
        if not image_target:
            print(get_msg(Msg.not_any_image_specified_program_exit))
            sys.exit()
        phrase_target = config.get_setting("phrase", "target")
        import glob

        self.__image_setting += glob.glob(image_target)
        self.__phrase_setting += glob.glob(phrase_target) if phrase_target else []
Example #16
 def __init__(self):
     self.__mq_server = None
     self.__data_db = DB(Constants.HIST_DATA_DB_NAME)
     self.__config = Config()
     self.__tick_db = FileDB(self.__config.get_config('persistent', 'hist_tick_dir'))
     self.__trading_strategy = None
     self.__tick_collector = None
Example #17
 def from_config(cls, section, interface):     
     cfg = Config('interface', section)
      
     img_path = cfg.get('image_path')
     img_pos = cfg.get('image_pos')
     img_scale = cfg.get('image_scale')
     
     img = QImage('resources/images/'+ img_path)
     img_w, img_h = img.width(), img.height()
     img_rect = QRect(
         img_pos[0], img_pos[1],
         int(img_w*img_scale), int(img_h*img_scale)
     )
     
     view_rect = QRect(*cfg.get('view_rect'))
     
     return cls(img, img_rect, view_rect, interface)
Example #18
    def __init__(self, args=None):
        QtCore.QObject.__init__(self)

        self.app = QtGui.QApplication(["FireSim"])
        self.args = args
        self.config = Config("data/config.json")

        self._selected_fixture_strand = 0
        self._selected_fixture_address = 0
        self._selected_fixture_pixels = 0

        self.selected_fixture = None

        self.scene = Scene(os.path.join(self.config.get("scene_root"), self.args.scene) + ".json")
        self.scenecontroller = SceneController(app=self, scene=self.scene)

        QtDeclarative.qmlRegisterType(CanvasWidget, "FireSim", 1, 0, "SimCanvas")
        QtDeclarative.qmlRegisterType(FixtureWidget, "FireSim", 1, 0, "Fixture")

        self.view = QtDeclarative.QDeclarativeView()

        self.view.setWindowTitle("FireSim")
        self.view.setResizeMode(QtDeclarative.QDeclarativeView.SizeRootObjectToView)

        self.view.closeEvent = self.on_close

        self.context = self.view.rootContext()
        self.context.setContextProperty("main", self)

        self.fixture_info_list = []
        self.context.setContextProperty("fixtureInfoModel", self.fixture_info_list)

        self.view.setSource(QtCore.QUrl("ui/qml/FireSimGUI.qml"))

        self.root = self.view.rootObject()
        self.item_frame = self.root.findChild(QtDeclarative.QDeclarativeItem)
        self.canvas = self.root.findChild(CanvasWidget)

        self.scenecontroller.set_canvas(self.canvas)

        cw, ch = self.scenecontroller.scene.get("extents")
        self.canvas.setWidth(cw)
        self.canvas.setHeight(ch)

        self.root.backdrop_showhide_callback.connect(self.on_btn_backdrop_showhide)
        self.root.labels_showhide_callback.connect(self.on_btn_labels_showhide)
        self.root.lock_callback.connect(self.on_btn_lock)

        self.netcontroller = NetController(self)

        self.canvas_timer = QtCore.QTimer(self)
        self.canvas_timer.timeout.connect(self.on_canvas_timer)

        self.view.setFixedSize(max(640, cw + 130), max(480, ch))

        log.info("FireSimGUI Ready.")
        self.view.show()
        self.canvas_timer.start(300)
Example #19
def index(request):
    """The default view for the update section."""
    data = {}

    # Create a list over sources.
    data["sources"] = createSourceList()

    # If something is posted:
    if request.POST:
        # Create the form based on the posted data
        data["manualUpdateForm"] = ManualUpdateForm(request.POST, request.FILES)

        # If the form is considered valid:
        if data["manualUpdateForm"].is_valid():
            # Construct some path where we can work.
            workarea = Config.get("storage", "inputFiles")
            create = Config.get("storage", "createIfNotExists")
            filename = os.path.join(workarea, request.FILES["file"].name)

            # Create the working-directories, if needed and wanted.
            if os.path.isdir(workarea) == False and create == "true":
                os.makedirs(workarea)

            # Store the uploaded file.
            upload = open(filename, "wb+")
            for chunk in request.FILES["file"].chunks():
                upload.write(chunk)
            upload.close()

            # Generate a message for the user
            source = Source.objects.get(pk=request.POST["source"])
            if source.locked:
                data["uploadMessage"] = "There is already an update going for this source!"
            else:
                data[
                    "uploadMessage"
                ] = "The ruleset is now uploaded, and the processing of the file is started. This might take a while however, depending on the size of the file."
                # Call the background-update script.
                subprocess.call(["/usr/bin/snowman-manualUpdate", filename, source.name])

    # If nothing is posted, create an empty form
    else:
        data["manualUpdateForm"] = ManualUpdateForm()

    return render(request, "update/index.tpl", data)
Example #20
	def get(self):
		redir_uri = "https://accounts.google.com/o/oauth2/auth?%s" % urllib.urlencode({
			'client_id': Config.get('oauth', 'client-id'),
			'redirect_uri': 'http://%s/oauth2callback' % self.request.host,
			'scope': 'https://www.googleapis.com/auth/plus.me', # G+ API
			'response_type': 'code', # server-side webapp
			'access_type': 'offline', # needed to get refresh tokens
			'approval_prompt': 'force', # needed to get refresh tokens
		})
		self.redirect(redir_uri)
Example #21
 def __init__(self, level_number, level):
     self.level_number = level_number
     
     self.info = {}
     self.fields = set()
     
     self.level = level
     self.player_state = Player.get_instance()
             
     cfg = Config('interface', 'Settings')
     font_name = cfg.get('field_font')
     font_size = cfg.get('field_font_sz')
     self.field_font = FontManager.getFont(font_name)
     self.field_font.setPointSize(font_size)
     self.field_color = QColor.fromRgb(*cfg.get('field_color'))
     
     for f_name in ConfigManager.getOptions('interface', 'Fields'):
         s = ConfigManager.getVal('interface', 'Fields', f_name)
         s = map(str.strip, s.split('||'))
         
         img = QImage('resources/images/'+s[0])
         img_pos = QPoint(*eval(s[1]))
         info_rect = QRect(*eval(s[2]))
         scale = float(s[3])
         
         if (len(s) >= 5):
             font = QFont(self.field_font)
             font.setPointSize(int(s[4]))
         else:
             font = self.field_font
         
         img_w, img_h = img.width(), img.height()
         img_rect = QRect(
             img_pos.x(), img_pos.y(),
             int(img_w*scale), int(img_h*scale)
         )
         
         self.info[f_name] = ''
         
         self.fields.add(Field(f_name, img, img_rect, info_rect, font))
     
     self.radar = Radar.from_config('E-Radar', self)
     self.missile = GuidedMissile.from_config('GuidedMissile', self)
Example #22
	def __init__(self):
		"""Initializes internal data-structure, and makes sure that the folder
		where we are going to store the configuration files actually exists."""

		logger = logging.getLogger(__name__)
		self.configlocation = Config.get("configfiles", "location")
		self.configfiles = []
		
		if(os.path.exists(self.configlocation) == False):
			logger.warning("Location for the configfiles does not exist. Creating the folders.")
			os.makedirs(self.configlocation, 0755)
Example #23
    def __init__(self, record):
        self._name = record['name']
        self._basename = record['basename']
        # font records usually only have a remote or a local version
        # as the version is eventually compared numerically
        # they are initialized with zero-string
        self._local_version = record.get('local_version', '0')
        self._remote_version = record.get('remote_version', '0')

        # existing font files in repo
        self._otf_files = []
        self._svg_files = [] # svg includes woff files

        # actions to be performed
        self._actions = {}

        # determine files and paths
        self.archive = os.path.join(Config.font_repo(), "{}.zip".format(self._basename))
        self.font_dir = os.path.join(Config.font_repo(), self._basename)
        self.otf_dir = os.path.join(self.font_dir, 'otf')
        self.svg_dir = os.path.join(self.font_dir, 'svg')
Example #24
    def __init__(self):
        cfg = Config()
        # set up face detection models
        opencv_home = cfg.get("face_detection", "opencv_home")
        haarcascade = cfg.get("face_detection", "haarcascade")
        cascadePath = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml"
        self.faceCascade = cv2.CascadeClassifier('{0}/{1}'.format(opencv_home, haarcascade))

        self.recognizer = cv2.face.createLBPHFaceRecognizer()
        #self.recognizer = cv2.face.createEigenFaceRecognizer()
        #self.recognizer = cv2.face.createFisherFaceRecognizer()

        # the faces and Raspberry Pi locations we'll use
        self.names = ["james", "juanjo", "sayantan", "vineet"]
        self.rasp_names = ["FrontDoor", "Entrance", "Garage"]
        access = cfg.get("aws", "access_key_id")
        secret = cfg.get("aws", "secret_access_key")

        # connect to dynamo
        self.conn = boto.dynamodb2.connect_to_region('us-west-1', aws_access_key_id=access, aws_secret_access_key=secret)
        self.sc = Table('SMARTCAM', connection=self.conn)
Example #25
	def requestUpdate(self):
		"""This method contacts the sensor, and asks it to do an update of its ruleset."""
		logger = logging.getLogger(__name__)
		port = int(Config.get("sensor", "port"))
		timeout = int(Config.get("sensor", "pingTimeout"))
		sensor = xmlrpclib.Server("https://%s:%s" % (self.ipAddress, port))
		
		try:
			with Timeout(timeout):
				result = sensor.startUpdate(self.name)
		except Timeout.Timeout:
			logger.warning("Ping to sensor timed out")
			return {'status': False, 'message': "Ping to sensor timed out"}
		except socket.gaierror:
			logger.warning("Could not ping sensor. Address is malformed")
			return {'status': False, 'message': "Could not ping sensor. Address is malformed"}
		except socket.error as e:
			logger.warning("Could not ping sensor. %s" % str(e))
			return {'status': False, 'message': "Could not ping sensor. %s" % str(e)}
		
		return result
Example #26
    def _check(self):
        """
        Determine the necessary actions for the font,
        returning a dictionary with boolean values
        """

        # shortcut
        r = self._actions

        # download archive if
        # - font not declared locally
        # - remote is newer
        # - archive is missing locally
        r['download'] = False
        if not Config.local():
            r['download'] = True if (self._local_version == '0' or
                                 self._remote_newer() or
                                 not self._archive_present()) else False

        # extract archive if
        # - it (a new one) was downloaded
        # - the archive is present but not the target font directory
        r['extract'] = True if (r['download'] or
                                (self._archive_present() and not
                                    self._font_dir_present())) else False

        # the font is considered up to date if
        # - it doesn't have to be downloaded or extracted
        # - the font repo matches the links in the installation
        if (r['download'] or r['extract']):
            r['update_links'] = Config.lilypond_font_roots()
        else:
            r['update_links'] = []
            for l in Config.lilypond_font_roots():
                if not self._check_links(l):
                    r['update_links'].append(l)

        self._up_to_date = False if (r['download'] or
            r['extract'] or
            r['update_links']) else True
Example #27
def initialize(logDict = None, toConsole=False):
	messages = []
	levels = {'NOTSET': logging.NOTSET, 'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL}
	loglevel = Config.get("logging", "severity")
	logfile = Config.get("logging", "logfile")

	# If the configfile lists a loglevel that is not valid, assume info.
	if(loglevel not in levels):
		# Since the logger is not yet initialized, add the logging-message to the messagelist, so that we can
		#   log it whenever the logger is initialized.
		messages.append(("LogLevel is not correctly set in the config-file. Assuming INFO", logging.ERROR))
		loglevel = "INFO"
	
	rootlogger = logging.getLogger()
	formatter = logging.Formatter('%(asctime)s: %(name)s %(lineno)s: %(levelname)s - %(message)s')
	
	# Open the logfile, and set its logging-level
	fh = logging.FileHandler(logfile)
	fh.setFormatter(formatter)
	rootlogger.addHandler(fh)
	rootlogger.setLevel(levels[loglevel])
	
	# Set the django logging-level
	djangologlevel = Config.get("logging", "djangoSeverity")
	djangologger = logging.getLogger("django")
	djangologger.setLevel(levels[djangologlevel])
	
	# If we want to log to console, initialize that handler.
	if(toConsole):
		ch = logging.StreamHandler()
		ch.setLevel(logging.INFO)
		ch.setFormatter(formatter)	
		rootlogger.addHandler(ch)
	
		
	# Now that the logger is initialized, log the messages that appeared during the initialization of the module
	messages.append(("Logger initialized", logging.INFO))
	logger = logging.getLogger(__name__)
	for m in messages:
		logger.log(m[1], m[0])
Example #28
	def _initialize():
		"""This method reads the database-configuration from the configuration-files, and
		initializes the connection to the database. It also makes sure that the db-schema
		is created and present."""

		# Read the configuration from file:
		dbType = Config.get("localdb", "type")
		dbName = Config.get("localdb", "name")
		dbHost = Config.get("localdb", "hostname")
		dbUser = Config.get("localdb", "username")
		dbPass = Config.get("localdb", "password")
		
		# Construct the dbPath string, or raise an exception if the dbtype is unknown.
		if(dbType == "sqlite"):
			dbpath = 'sqlite:///' + dbName
		elif(dbType == "mysql"):
			dbpath = dbType + "://" + dbUser + ":" + dbPass + "@" + dbHost + "/" + dbName
		else:
			raise Exception("DatabaseConfiguration is not correct")
		
		# Create a dbengine, and depending on the configfile maybe turn on the debug.
		if(Config.get("localdb", "debug") == "0"):
			Session.engine = create_engine(dbpath)
		else:
			Session.engine = create_engine(dbpath, echo=True)
		
		# Create a session, and bind it to the engine.
		Session.session = sessionmaker(bind=Session.engine)
		
		# Making sure that the dbSchema is created.
		Base.metadata.create_all(Session.engine)
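Once _initialize() has run, Session.session is a sessionmaker bound to the configured engine. A minimal usage sketch follows; it is hedged, and the "Rule" model name is an assumption rather than something shown in the source.

# Hypothetical caller; "Rule" stands in for whichever mapped class the schema defines.
session = Session.session()
try:
    rules = session.query(Rule).all()
    for rule in rules:
        print(rule)
finally:
    session.close()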
Example #29
	def __init__(self, registerInstance, server_address, logRequests=True):
		"""Secure Documenting XML-RPC server.
		It is very similar to DocXMLRPCServer, but it uses HTTPS for transporting XML data.
		"""
		DocXMLRPCServer.__init__(self, server_address, RPCHandler, logRequests)
		self.logRequests = logRequests

		# stuff for doc server
		try: self.set_server_title(registerInstance.title)
		except AttributeError: self.set_server_title('default title')
		try: self.set_server_name(registerInstance.name)
		except AttributeError: self.set_server_name('default name')
		if registerInstance.__doc__: self.set_server_documentation(registerInstance.__doc__)
		else: self.set_server_documentation('default documentation')
		self.register_introspection_functions()

		# init stuff, handle different versions:
		try:
			SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self)
		except TypeError:
			# An exception is raised in Python 2.5 as the prototype of the __init__
			# method has changed and now has 3 arguments (self, allow_none, encoding)
			SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self, False, None)
		SocketServer.BaseServer.__init__(self, server_address, RPCHandler)
		self.register_instance(registerInstance) # for some reason, have to register instance down here!

		# SSL socket stuff
		ctx = SSL.Context(SSL.SSLv23_METHOD)
		keyfile = os.path.join(Config.djangoroot, Config.get("xmlrpc-server", "keyfile"))
		certfile = os.path.join(Config.djangoroot, Config.get("xmlrpc-server", "certfile"))
		ctx.use_privatekey_file(keyfile)
		ctx.use_certificate_file(certfile)
		self.socket = SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type))
		self.server_bind()
		self.server_activate()

		# requests count and condition, to allow for keyboard quit via CTL-C
		self.requests = 0
		self.rCondition = Condition()
Example #30
 def __init__(self, parent):
     if (GLController.instance is None):
         GLController.instance = self
     
     QGLWidget.__init__(self, parent)
     
     self.painter = QPainter()
     
     cfg = Config('game','OpenGL')
     
     self.clearColor = cfg.get('clear_color')
     
     self.fps = cfg.get('fps')
     self.fps_sum = 0.
     self.fps_count = 0
     self.show_fps = 0.
     self.fps_elapsed = 0.
     
     self.adjust_widget()
     self.adjust_timer()
     
     self.to_hook = None
     self.hooks = {}
Example #31
class SkimageSegmenter(object):
    """Abstract class for segmenters implemented in skimage.segmentation."""

    __metaclass__ = ABCMeta

    # FORCE_OPT greatly increases the speed of paint_segment_skimage, but produces a flawed (approximate) painting
    FORCE_OPT = True

    def __init__(self, border_color='Yellow', border_outline='No'):
        """Constructor.

        Parameters
        ----------
        border_color : string
            X11Color name of segment border color.
        border_outline : string
            If 'yes' double the size of segment border.
        """
        self.border_color = Config("Border Color", border_color, 'color')
        self.border_outline = Config("Border Outline", border_outline, str)

        self._segments = None
        self._original_image = None

    def get_list_segments_skimage(self):
        """Return a list with segments after apply segmentation. 
        
        Returns
        -------
        segments : list
            List of segments of segmented image.
        """
        if self._segments is None:
            return []

        # print(np.unique(self._segments))
        return np.unique(self._segments)

    def get_segment_skimage(self,
                            px=0,
                            py=0,
                            idx_segment=None,
                            path_to_mask=None):
        """Return a specified segment using a index or position in image. 
        
        Parameters
        ----------
        px : integer, optional, default = 0
            Segment point inside the image in x-axis.
        py : integer, optional, default = 0
            Segment point inside the image in y-axis.
        idx_segment : integer, optional, default = None
            Index of segment returned by previous call of this method.
            
        Returns
        -------
        segment : opencv 3-channel color image.
            Rectangle encompassing the segment image.
        size_segment : integer
            Number of pixels of segment.
        idx_segment : integer
            Index of segment if found, -1 otherwise.
        run_time : integer
            Running time spent in milliseconds.
        """
        # Check if segmentation was already performed
        if self._segments is None:
            return None, 0, -1, 0

        start_time = TimeUtils.get_time()

        # If idx_segment not passed, get the segment index using the points (px, py)
        if idx_segment is None:
            idx_segment = self._segments[py, px]

        # Create a mask, painting black all pixels outside of segment and white the pixels inside.
        mask_segment = np.zeros(self._original_image.shape[:2], dtype="uint8")
        mask_segment[self._segments == idx_segment] = 255

        # Build a separate 0/1 mask for accumulating into the on-disk mask file,
        # keeping the 255-valued OpenCV mask built above intact.
        minas_mask_segment = np.zeros(self._original_image.shape[:2], dtype="uint8")
        minas_mask_segment[self._segments == idx_segment] = 1
        # minas_idx_segment = idx_segment

        txt = np.loadtxt(path_to_mask)
        txt = np.add(txt, minas_mask_segment)
        np.savetxt(path_to_mask, txt, fmt='%d')

        print("Modified mask: ", path_to_mask)

        size_segment = mask_segment[self._segments == idx_segment].size

        segment = self._original_image.copy()
        segment = cv2.bitwise_and(segment, segment, mask=mask_segment)

        # Get the contours around the segment
        contours, _ = cv2.findContours(mask_segment, cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)[-2:]

        m = -1
        max_contour = None
        for cnt in contours:
            if (len(cnt) > m):
                m = len(cnt)
                max_contour = cnt

        # Get the rectangle that encompasses the contour
        x, y, w, h = cv2.boundingRect(max_contour)
        segment = segment[y:y + h, x:x + w]

        end_time = TimeUtils.get_time()

        # Return the rectangle that encompasses the contour
        return segment, size_segment, idx_segment, (end_time - start_time)

    def paint_segment_skimage(self,
                              image,
                              color,
                              px=0,
                              py=0,
                              idx_segment=[],
                              border=True,
                              clear=False):
        """Paint a list of segments using a index or position in image.
        
        Parameters
        ----------
        image : opencv 3-channel color image.
            Segmented image.
        color : string
            X11Color name.
        px : integer, optional, default = 0
            Segment point inside the image in x-axis.
        py : integer, optional, default = 0
            Segment point inside the image in y-axis.
        idx_segment : list, optional, default = []
            List of segments.
        border : boolean, optional, default = True
            If true paint the border of segments with default color.
        clear : boolean, optional, default = False
            If true clear previous painting in the image.

        Returns
        -------
        new_image : opencv 3-channel color image.
            Painted image.
        run_time : integer
            Running time spent in milliseconds.
        """
        # Check if segmentation was already performed
        if self._segments is None:
            return image, 0

        start_time = TimeUtils.get_time()

        # If idx_segment not passed, get the segment index using the points (px, py)
        if len(idx_segment) == 0:
            idx_segment = [self._segments[py, px]]
        height, width, channels = self._original_image.shape

        # Create a mask, painting black all pixels outside of segments and white the pixels inside
        mask_segment = np.zeros(self._original_image.shape[:2], dtype="uint8")
        for idx in idx_segment:
            mask_segment[self._segments == idx] = 255
        mask_inv = cv2.bitwise_not(mask_segment)

        # Paint all pixels in original image with chosen color
        class_color = np.zeros((height, width, 3), np.uint8)
        class_color[:, :] = X11Colors.get_color(color)
        if SkimageSegmenter.FORCE_OPT == False:
            colored_image = cv2.addWeighted(self._original_image, 0.7,
                                            class_color, 0.3, 0)
        else:
            colored_image = cv2.addWeighted(image, 0.7, class_color, 0.3, 0)
        colored_image = cv2.bitwise_and(colored_image,
                                        colored_image,
                                        mask=mask_segment)

        # Create a new image keeping the painting only in pixels inside of segments
        new_image = image if clear == False else self._original_image
        new_image = cv2.bitwise_and(new_image, new_image, mask=mask_inv)
        mask_segment[:] = 255
        new_image = cv2.bitwise_or(new_image, colored_image, mask=mask_segment)

        # If border is true, paint the segments borders
        if border == True and SkimageSegmenter.FORCE_OPT == False:
            color = X11Colors.get_color_zero_one(
                self.border_color.get_cast_val())
            outline_color = color if self.border_outline.value == 'Yes' else None

            new_image = img_as_ubyte(
                mark_boundaries(img_as_float(new_image),
                                self._segments.astype(np.int8),
                                color=color,
                                outline_color=outline_color))

        end_time = TimeUtils.get_time()

        # Return painted image
        return new_image, (end_time - start_time)

    def run_skimage(self, image, method, **kwargs):
        """Perform the segmentation 
        
        Parameters
        ----------
        image : opencv 3-channel color image.
            Original image.
        method : function
            Method from skimage that performs the image segmentation.
        **kwargs : keyword arguments
            Dict of the keyword args passed to the function.
    
        Returns
        -------
        new_image : opencv 3-channel color image.
            Segmented image.
        run_time : integer
            Running time spent in milliseconds.
        """
        self._original_image = image

        # Run the segmentation using the method passed
        start_time = TimeUtils.get_time()
        self._segments = method(img_as_float(image), **kwargs)
        end_time = TimeUtils.get_time()

        color = X11Colors.get_color_zero_one(self.border_color.get_cast_val())
        outline_color = color if self.border_outline.value == 'Yes' else None

        #  Ignore UserWarning: Possible precision loss when converting from float64 to uint8
        #  because the returned image is used just for visualization
        #  The original image, without loss, is stored in self._original_image
        return img_as_ubyte(
            mark_boundaries(image,
                            self._segments.astype(np.int8),
                            color=color,
                            outline_color=outline_color)), (end_time -
                                                            start_time)

    def reset_skimage(self):
        """Clean all data of segmentation. 
        """
        self._segments = None
        self._original_image = None
Example #32
    def clear_cookie(cls):
        if cls.session is not None:
            cls.session.cookies.clear()

        with open(Config.get(cls.KEY_COOKIE_LOCATION), "w") as f:
            f.write("")
Example #33
"""
sunportal is a web-based visualisation tool that displays data from SMA solar inverters.
It is based on the SBFspot database and shows charts of daily and monthly power
production. It runs on a Raspberry Pi and can handle multiple inverters in one Speedwire
or Bluetooth(R) network.
"""

import flask
from flask import Flask, render_template, request, jsonify
from flask_expects_json import expects_json

from util.config import Config
from util.database import Database
from util.mail import Mail

config = Config()
db = Database(config=config)
mail = Mail(config, db)
app = Flask(__name__)

schema = {
    'type': 'object',
    'properties': {
        'date': {
            'type': 'string',
            "pattern": r'^([12]\d{3}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01]))$'
        }
    },
    'required': ['date']
}
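The schema above restricts the JSON body to a single ISO-formatted date field. As a hedged sketch, a handler could validate incoming requests with the expects_json decorator; the route path and response body below are assumptions, and only app, request, jsonify, expects_json and schema come from the code above.

# Hypothetical route; a real handler would query db for the requested day.
@app.route('/api/day', methods=['POST'])
@expects_json(schema)               # rejects non-conforming bodies with HTTP 400
def day_data():
    date = request.json['date']     # guaranteed present and pattern-checked by the schema
    return jsonify({'date': date})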
Example #34
    if size_med > 0:
        t_order = scipy.signal.medfilt(t_order, 3)
    
    pred_acc = np.copy(pred)
    pred_acc[np.asarray(t_order, dtype=np.bool)] = 1 - pred_acc[np.asarray(t_order, dtype=np.bool)]
    pred_acc = pred_acc * mask
    
    
    
    return pred_acc
    
#%%
if __name__ == "__main__":
    
    
    config = Config('../model/config_03.json')
    fs = config.get("fs")
    
    data_type = "test"
    instr_mix = config.get("instr_mix")
    path_feat = config.get("path_feat")
    path_model = config.get("path_model")
    path_result = config.get("path_result")
    
    path_h5_aggr = os.path.join(path_feat, data_type)
    path_feat = os.path.join(path_feat, data_type, instr_mix)
    path_result = os.path.join(path_result, os.path.basename(path_model))
    
    path_result = path_result + "_adjust_med55"
    
    if not os.path.exists(path_result):
        os.makedirs(path_result)
Example #35
    def __init__(
        self,
        config=None,
        name=None,
        n_epochs=None,
        seed=None,
        base_dir=None,
        globs=None,
        parse_config_sys_argv=True,
        checkpoint_to_cpu=True,
        exp_ID='000',
    ):

        # super(PytorchExperiment, self).__init__()
        # Experiment.__init__(self)
        self.exp_ID = exp_ID

        self._epoch_idx = 0

        self._config_raw = None
        if isinstance(config, str):
            self._config_raw = Config(file_=config,
                                      update_from_argv=parse_config_sys_argv)
        elif isinstance(config, Config):
            self._config_raw = Config(config=config,
                                      update_from_argv=parse_config_sys_argv)
        elif isinstance(config, dict):
            self._config_raw = Config(config=config,
                                      update_from_argv=parse_config_sys_argv)
        else:
            self._config_raw = Config(update_from_argv=parse_config_sys_argv)

        self.n_epochs = n_epochs
        if 'n_epochs' in self._config_raw:
            self.n_epochs = self._config_raw["n_epochs"]
        if self.n_epochs is None:
            self.n_epochs = 0

        self._seed = seed
        if 'seed' in self._config_raw:
            self._seed = self._config_raw.seed
        if self._seed is None:
            random_data = os.urandom(4)
            seed = int.from_bytes(random_data, byteorder="big")
            self._config_raw.seed = seed
            self._seed = seed

        self.exp_name = name
        if 'name' in self._config_raw:
            self.exp_name = self._config_raw["name"]

        if 'base_dir' in self._config_raw:
            base_dir = self._config_raw["base_dir"]

        self.base_dir = os.path.join(
            base_dir,
            exp_ID + '_' + str(config.cross_vali_index) + "_" + name +
            time.strftime("_%y%m%d_%H%M%S", time.localtime(time.time())))
        if not os.path.exists(self.base_dir):
            os.makedirs(self.base_dir)
        self.checkpoint_dir = os.path.join(self.base_dir, 'model')
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)
        self.code_dir = os.path.join(self.base_dir, 'code')
        if not os.path.exists(self.code_dir):
            os.makedirs(self.code_dir)

        self.elog = PytorchExperimentLogger(self.base_dir)

        self._checkpoint_to_cpu = checkpoint_to_cpu
        self.results = dict()

        set_seed(self._seed)

        # self.elog.save_config(self.config, "config_pre")
        #if globs is not None: # comment out by Yukun due to error in new CRC environment
        #    zip_name = os.path.join(self.code_dir, "sources.zip")
        #    SourcePacker.zip_sources(globs, zip_name)

        # Init objects in config
        self.config = Config.init_objects(self._config_raw)

        atexit.register(self.at_exit_func)
Example #36
    if status == 'train':
        emb = args.wordemb.lower()
        print('Word Embedding: ', emb)
        if emb == 'wiki':
            emb_file = 'data/embedding/wiki_100.utf8'
        elif emb == 'zhwiki':
            emb_file = 'data/embedding/zhwiki_100.txt'
        elif emb == 'sogou':
            emb_file = 'data/embedding/sogou_news_100.txt'
        else:
            emb_file = None
        # char_emb_file = args.charemb.lower()
        # print('Char Embedding: ', char_emb_file)

        name = 'BaseLSTM'  # catnlp
        config = Config()
        config.optim = 'Adam'
        config.lr = 0.015
        config.hidden_dim = 200
        config.bid_flag = True
        config.number_normalized = False
        data_initialization(config, train_file, test_file)
        config.gpu = gpu
        config.word_features = name
        print('Word features: ', config.word_features)
        config.generate_instance(train_file, 'train')
        # config.generate_instance(dev_file, 'dev')
        config.generate_instance(test_file, 'test')
        if emb_file:
            print('load word emb file...norm: ', config.norm_word_emb)
            config.build_word_pretain_emb(emb_file)
Example #37
 def parse_info(cls):
     if len(cls.disk_name_map) == 0:
         all_data = Config.get("disk_info")
         for data in all_data:
             cls.disk_name_map[data["name"]] = data
             cls.download_folder_map[data["location"]] = data
Example #38
import torch

from util.config import Config
from util.util import load_consistent_state_dict
from models.vgg import vgg16_bn

from evaluation import metrics
from PIL import Image
import pickle as pkl
import numpy as np
import logging
import time
import sys
import os

# logger.info => print

# python train inpaint.yml
config = Config(sys.argv[1])
logger = logging.getLogger(__name__)
time_stamp = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
log_dir = 'model_logs/test_{}_{}'.format(time_stamp, config.LOG_DIR)
result_dir = 'result_logs/{}'.format(
    config.MODEL_RESTORE[:config.MODEL_RESTORE.find('/')])
#tensorboardlogger = TensorBoardLogger(log_dir)
cuda0 = torch.device('cuda:{}'.format(config.GPU_IDS[0]))
cuda1 = torch.device('cuda:{}'.format(config.GPU_IDS[0]))
cpu0 = torch.device('cpu')
TRAIN_SIZES = ((64, 64), (128, 128), (256, 256))
SIZES_TAGS = ("64x64", "128x128", "256x256")


def logger_init():
    """
Example #39
 def alert(self, data):
     threshold = Config.get("putao_mp_threshold")
     if data <= threshold:
         EmailSender.send("魔力值警告: %s <= %s" % (str(data), threshold), "")  # subject translates to "magic-points warning: <value> <= <threshold>"
Example #40
def get_dependency_map() -> Dict[str, DependencySpec]:
    return {
        'draft':
        DependencySpec(expected_version=DRAFT_MIN_VERSION,
                       version_command=call_draft,
                       version_command_args=[
                           'version', '--tiller-namespace',
                           NAMESPACE_PLACEHOLDER
                       ],
                       version_field='SemVer',
                       match_exact_version=False),
        'kubectl':
        DependencySpec(expected_version=KUBECTL_MIN_VERSION,
                       version_command=execute_system_command,
                       version_command_args=['kubectl', 'version', '--client'],
                       version_field='GitVersion',
                       match_exact_version=False),
        'kubectl server':
        DependencySpec(expected_version=KUBECTL_SERVER_MIN_VERSION,
                       version_command=execute_system_command,
                       version_command_args=['kubectl', 'version', '--short'],
                       version_field='Server Version',
                       match_exact_version=False),
        'helm client':
        DependencySpec(expected_version=HELM_VERSION,
                       version_command=execute_system_command,
                       version_command_args=[
                           os.path.join(Config().config_path, 'helm'),
                           'version', '--client'
                       ],
                       version_field='SemVer',
                       match_exact_version=True),
        'helm server':
        DependencySpec(expected_version=HELM_VERSION,
                       version_command=execute_system_command,
                       version_command_args=[
                           os.path.join(Config().config_path,
                                        'helm'), 'version', '--server',
                           '--debug', '--tiller-connection-timeout',
                           f'{HELM_SERVER_CONNECTION_TIMEOUT}',
                           '--tiller-namespace', NAMESPACE_PLACEHOLDER
                       ],
                       version_field='SemVer',
                       match_exact_version=True),
        'docker client':
        DependencySpec(expected_version=DOCKER_MIN_VERSION,
                       version_command=execute_system_command,
                       version_command_args=[
                           'docker', 'version', '-f', '{{.Client.Version}}'
                       ],
                       version_field=None,
                       match_exact_version=False),
        'docker server':
        DependencySpec(expected_version=DOCKER_MIN_VERSION,
                       version_command=execute_system_command,
                       version_command_args=[
                           'docker', 'version', '-f', '{{.Server.Version}}'
                       ],
                       version_field=None,
                       match_exact_version=False),
    }
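A small, hypothetical driver (not part of the source) could iterate the returned map and report what each dependency requires, assuming DependencySpec exposes its fields as attributes:

# Hypothetical usage sketch; field names are taken from the DependencySpec calls above.
def report_expected_versions() -> None:
    for name, spec in get_dependency_map().items():
        mode = 'exactly' if spec.match_exact_version else 'at least'
        print(f'{name}: requires {mode} {spec.expected_version} '
              f'(checked via {spec.version_command_args})')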
Example #41
def main(_):
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--mode',
                        type=str,
                        required=True,
                        choices=('train', 'interactive', 'test'),
                        help='work mode')
    parser.add_argument('--model_dir',
                        type=str,
                        required=True,
                        help='model directory')

    parser.add_argument(
        '--config',
        type=str,
        help='config file containing parameters to configure the model')

    parser.add_argument('--pretrain_data', type=str, help='pretrain dataset')
    parser.add_argument('--train_data', type=str, help='training dataset')
    parser.add_argument('--dev_data', type=str, help='development dataset')
    parser.add_argument('--test_data', type=str, help='test dataset')

    parser.add_argument('--embed_conf',
                        type=str,
                        default="conf/word_embeddings.yml",
                        help='embedding config file')
    parser.add_argument(
        '--enable_epoch_evals',
        action='store_true',
        help='enable evals after finishing an epoch during training')
    parser.add_argument('--enable_final_eval',
                        action='store_true',
                        help='enable the last eval once training finished')
    parser.add_argument(
        '--disable_encoder_var_sharing',
        action='store_true',
        help='disable encoders sharing variables to support testing old models'
    )

    parser.add_argument('--num_gpus',
                        type=int,
                        default=4,
                        help='number of GPUs to use')
    parser.add_argument('--n_responses',
                        type=int,
                        default=1,
                        help='number of generated responses')
    parser.add_argument('--beam_width',
                        type=int,
                        help='beam width to override the value in config file')
    parser.add_argument(
        '--length_penalty_weight',
        type=float,
        help='length penalty to override the value in config file')
    parser.add_argument(
        '--sampling_temperature',
        type=float,
        help='sampling temperature to override the value in config file')
    parser.add_argument(
        '--lda_model_dir',
        type=str,
        help=
        'required only for testing with topical models (THRED and TA-Seq2Seq)')

    args = vars(parser.parse_args())
    config = Config(**args)

    model = model_factory.create_model(config)

    if config.mode == 'train':
        model.train()
    elif config.mode == 'interactive':
        model.interactive()
    elif config.mode == 'test':
        model.test()
Example #42
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu May 10 19:39:24 2018

@author: bochen
"""
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))


from util.config import Config
from util.logger import Logger
from trainer import Trainer
#%%
if __name__ == '__main__':
    config = Config("../model/config_001.json")
    config_idx = config.get("config_idx")
    filename_log = "../model/model_" + config_idx + "/log.txt"
    logger = Logger(filename_log)
    config.set_logger(logger)

    t = Trainer(config, logger)
    t.run()
Example #43
class Felzenszwalb(Segmenter, SkimageSegmenter):
    """Run Felzenszwalb's method segmentation."""
    def __init__(self,
                 scale=100.0,
                 sigma=1.0,
                 min_size=20,
                 border_color='Yellow',
                 border_outline='No'):
        """Constructor.

        Parameters
        ----------
        scale : integer, float, default = 100.0
            Free parameter. Higher means larger clusters.
        sigma : float, optional, default = 1.0
            Width of Gaussian kernel used in preprocessing.
        min_size : integer, optional, default = 20
            Minimum component size. Enforced using postprocessing.
        border_color : string
            X11Color name of segment border color.
        border_outline : string
            If 'yes' double the size of segment border.
        """
        super(self.__class__, self).__init__(border_color, border_outline)

        self.scale = Config("Scale", scale, float)
        self.sigma = Config("Sigma", sigma, float)
        self.min_size = Config("Min Size", min_size, int)

    def get_config(self):
        """Return configuration of segmenter. 
        
        Returns
        -------
        config : OrderedDict
            Current configs of segmenter.
        """
        felzenszwalb_config = OrderedDict()

        felzenszwalb_config["scale"] = self.scale
        felzenszwalb_config["sigma"] = self.sigma
        felzenszwalb_config["min_size"] = self.min_size
        felzenszwalb_config["border_color"] = self.border_color
        felzenszwalb_config["border_outline"] = self.border_outline

        return felzenszwalb_config

    def set_config(self, configs):
        """Update configuration of segmenter. 
        
        Parameters
        ----------
        configs : OrderedDict
            New configs of segmenter.
        """
        self.scale = Config.nvl_config(configs["scale"], self.scale)
        self.sigma = Config.nvl_config(configs["sigma"], self.sigma)
        self.min_size = Config.nvl_config(configs["min_size"], self.min_size)
        self.border_color = Config.nvl_config(configs["border_color"],
                                              self.border_color)
        self.border_outline = Config.nvl_config(configs["border_outline"],
                                                self.border_outline)

        self.border_outline.value = self.border_outline.value if self.border_outline.value == 'Yes' else 'No'

    def get_summary_config(self):
        """Return fomatted summary of configuration. 
        
        Returns
        -------
        summary : string
            Formatted string with summary of configuration.
        """
        felzenszwalb_config = OrderedDict()

        felzenszwalb_config[self.scale.label] = self.scale.value
        felzenszwalb_config[self.sigma.label] = self.sigma.value
        felzenszwalb_config[self.min_size.label] = self.min_size.value
        felzenszwalb_config[self.border_color.label] = self.border_color.value
        felzenszwalb_config[
            self.border_outline.label] = self.border_outline.value

        summary = ''
        for config in felzenszwalb_config:
            summary += "%s: %s\n" % (config, str(felzenszwalb_config[config]))

        return summary

    def get_list_segments(self):
        """Return a list with segments after apply segmentation. 
        
        Returns
        -------
        segments : list
            List of segments of segmented image.
        """
        return self.get_list_segments_skimage()

    def get_segment(self, px=0, py=0, idx_segment=None, path_to_mask=None):
        """Return a specified segment using a index or position in image. 
        
        Parameters
        ----------
        px : integer, optional, default = 0
            Segment point inside the image in x-axis.
        py : integer, optional, default = 0
            Segment point inside the image in y-axis.
        idx_segment : integer, optional, default = None
            Index of segment returned by previous call of this method.
            
        Returns
        -------
        segment : opencv 3-channel color image.
            Rectangle encompassing the segment image.
        size_segment : integer
            Number of pixels of segment.
        idx_segment : integer
            Index of segment if found, -1 otherwise.
        run_time : integer
            Running time spent in milliseconds.
        """
        return self.get_segment_skimage(px, py, idx_segment, path_to_mask)

    def paint_segment(self,
                      image,
                      color,
                      px=0,
                      py=0,
                      idx_segment=[],
                      border=True,
                      clear=False):
        """Paint a list of segments using a index or position in image.
        
        Parameters
        ----------
        image : opencv 3-channel color image.
            Segmented image.
        color : string
            X11Color name.
        px : integer, optional, default = 0
            Segment point inside the image in x-axis.
        py : integer, optional, default = 0
            Segment point inside the image in y-axis.
        idx_segment : list, optional, default = []
            List of segments.
        border : boolean, optional, default = True
            If true paint the border of segments with default color.
        clear : boolean, optional, default = False
            If true clear previous painting in the image.

        Returns
        -------
        new_image : opencv 3-channel color image.
            Painted image.
        run_time : integer
            Running time spent in milliseconds.
        """
        return self.paint_segment_skimage(image, color, px, py, idx_segment,
                                          border, clear)

    def run(self, image):
        """Perform the segmentation 
        
        Parameters
        ----------
        image : opencv 3-channel color image.
            Original image.
            
        Returns
        -------
        new_image : opencv 3-channel color image.
            Segmented image.
        run_time : integer
            Running time spent in milliseconds.
        """
        args = {
            "scale": self.scale.get_cast_val(),
            "sigma": self.sigma.get_cast_val(),
            "min_size": self.min_size.get_cast_val()
        }

        return self.run_skimage(image, felzenszwalb, **args)

    def reset(self):
        """Clean all data of segmentation. 
        """
        return self.reset_skimage()
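A hedged usage sketch of the class above; the cv2 import, the image path, and the color choice are assumptions, not part of the source.

# Hypothetical driver for the Felzenszwalb segmenter defined above.
import cv2

image = cv2.imread('sample.png')                    # assumed input image
segmenter = Felzenszwalb(scale=100.0, sigma=1.0, min_size=20)
segmented, run_ms = segmenter.run(image)            # boundaries painted, time in ms
for idx in segmenter.get_list_segments():           # indices of the detected segments
    painted, _ = segmenter.paint_segment(segmented, 'Red', idx_segment=[idx])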
Example #44
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q6b4ktkfvx6*v1p2fzfdxu2r=ud!y*=&7pr*$7h+=pq5o@gjh$'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

TEMPLATE_DEBUG = False
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'web/templates')]

ALLOWED_HOSTS = []
for key, name in Config.items("hostnames"):
    ALLOWED_HOSTS.append(name)
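
# Sketch (assumption; the section and key names are illustrative): Config.items("hostnames")
# presumably reads an ini section along the lines of
#
#   [hostnames]
#   primary = example.com
#   secondary = www.example.com
#
# so every value listed there ends up in ALLOWED_HOSTS.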

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'core',
    'update',
    'tuning',
    'web',
Example #45
0
                                       'ro',
                                       markersize=4)
        disapproval_predicted, = plt.plot(axis_vals,
                                          predict_disapproval,
                                          'bo',
                                          markersize=4)
        plt.legend([disapproval_actual, disapproval_predicted],
                   ["Actual", "Predicted"],
                   loc=2,
                   borderaxespad=0.)
        plt.ylabel('Disapproval percentage')
        axes = plt.gca()
        axes.set_ylim([20, 70])

        plt.show()
        config_params = [
            "CORPUS_NAME", "POLL_DELAY", "DAY_RANGE",
            "SENTIMENT_ANALYSIS_METHOD", "TOPIC_EXTRACTION_METHOD",
            "NUM_TOPICS", "REGRESSION_MODEL", "NUM_LAYERS", "YEARS"
        ]
        plt.savefig(
            os.path.join(Config.PLOT_DIR,
                         (Config.dump_config(config_params) + ".png")))
        pickle.dump(
            k_fold_scores,
            open(
                os.path.join(
                    Config.PLOT_DIR,
                    Config.dump_config(config_params) +
                    "_k_fold_scores_negmse.txt"), "wb"))
Example #46
0
                    help='Use the specified configuration file instead ' +
                    'of the default config.ini')
parser.add_argument('-d',
                    '--debug',
                    help='Enables debugging logs.',
                    action='store_true')
parser.add_argument('-l',
                    '--legacy',
                    help='Enables sed usage.',
                    action='store_true')
args = parser.parse_args()

# load the configuration file passed via --config, falling back to the default config.ini
if args:
    if args.config:
        config = Config(args.config)
    else:
        config = Config('config.ini')
    if args.debug:
        Logger.log_info("Enabled debugging.")
        Logger.enable_debugging(Logger)
    if args.legacy:
        Logger.log_info("Enabled sed usage.")
        Adb.enable_legacy(Adb)

script = ALAuto(config)
script.run_update_check()

Adb.service = config.network['service']
Adb.device = '-d' if (Adb.service == 'PHONE') else '-e'
adb = Adb()
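
# Usage sketch (assumption; the entry-point filename is illustrative): the
# argument parser above is typically driven from the command line, e.g.
#
#   python ALAuto.py --config my_config.ini --debug
#   python ALAuto.py --legacy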
Example #47
0
#!/usr/bin/env python3
from pprint import pprint
from util.config import Config
from protoapi.moduleapi2 import ModuleApi2

#indb1 = 'ng4'; inrange1 = [8, 3997625, 8606957]
#indb2 = 'ng4'; inrange2 = [9, 3997624, 8606957]
db1 = Config.get_database('ng4')
db2 = Config.get_database('ng4')

# get net instance
mod = ModuleApi2()

# setup database connection
mod.connect("", db1, 5, db2, 6)

mod.consume(1069, 8606957, 1069, 8606957)

#mod.connect("", db1, 8, db2, 9)
#mod.consume(3997625, 8606957,
#            3997624, 8606957)
#
#mod.consume(3997625, 4100000,
#            3997624, 4100000)

#mod.consume(4100001, 4500000,
#            4100001, 4500000)

mod.run_evaluation(1)
mod.run_evaluation(2)
Example #48
0
def upload_experiment_to_git_repo_manager(username: str, experiment_name: str,
                                          experiments_workdir: str,
                                          run_name: str):
    git_repo_dir = f'.nauta-git-{username}-{compute_hash_of_k8s_env_address()}'
    git_work_dir = os.path.join(experiments_workdir, run_name)

    try:
        create_gitignore_file_for_experiments(git_work_dir)
        private_key_path = get_git_private_key_path(
            username=username, config_dir=Config().config_path)
        git_env = {
            'GIT_SSH_COMMAND':
            f'ssh -o StrictHostKeyChecking=no -i "{private_key_path}"',
            'GIT_DIR': os.path.join(experiments_workdir, git_repo_dir),
            'GIT_WORK_TREE': git_work_dir,
            'GIT_TERMINAL_PROMPT': '0',
            'SSH_AUTH_SOCK':
            '',  # Unset SSH_AUTH_SOCK to prevent issues when multiple users share the same nctl
        }
        env = {
            **os.environ,
            **git_env
        }  # Add git_env defined above to currently set environment variables
        if 'LD_LIBRARY_PATH' in env:
            # do not copy LD_LIBRARY_PATH to git exec env - it points to libraries packed by PyInstaller
            # and they can be incompatible with system's git (e.g. libssl)
            del env['LD_LIBRARY_PATH']
        logger.debug(f'Git client env: {env}')
        git = ExternalCliClient(executable='git',
                                env=env,
                                cwd=experiments_workdir,
                                timeout=60)
        # ls-remote command must be created manually due to hyphen
        git.ls_remote = git._make_command(name='ls-remote')  # type: ignore
        with TcpK8sProxy(NAUTAAppNames.GIT_REPO_MANAGER_SSH) as proxy:
            if not os.path.isdir(f'{experiments_workdir}/{git_repo_dir}'):
                git.clone(
                    f'ssh://git@localhost:{proxy.tunnel_port}/{username}/experiments.git',
                    git_repo_dir,
                    bare=True)
            git.remote(
                'set-url', 'origin',
                f'ssh://git@localhost:{proxy.tunnel_port}/{username}/experiments.git'
            )
            _initialize_git_client_config(git, username=username)
            git.add('.')
            git.commit(message=f'experiment: {experiment_name}',
                       allow_empty=True)
            remote_branches, _, _ = git.ls_remote()
            local_branches, _, _ = git.branch()
            if 'master' in local_branches:
                git.checkout('master')
            else:
                git.checkout('-b', 'master')
            if 'master' in remote_branches:
                try:
                    git.pull('--rebase', '--strategy=recursive',
                             '--strategy-option=theirs')
                except Exception:
                    git.rebase('--abort')
                    raise
            git.push('--set-upstream', 'origin', 'master')
            git.tag(experiment_name)
            git.push('--tags')
    except Exception:
        logger.exception(
            f'Failed to upload experiment {experiment_name} to git repo manager.'
        )
        try:
            git_env = {
                'GIT_DIR': os.path.join(experiments_workdir, git_repo_dir),
                'GIT_WORK_TREE': git_work_dir,
                'GIT_TERMINAL_PROMPT': '0',
                'SSH_AUTH_SOCK': '',
            }
            env = {
                **os.environ,
                **git_env
            }  # Add git_env defined above to currently set environment variables
            git = ExternalCliClient(executable='git',
                                    env=env,
                                    cwd=experiments_workdir,
                                    timeout=60)
            git.reset('--hard', 'master', _cwd=experiments_workdir)
        except Exception:
            logger.exception(
                f'Failed to rollback {experiment_name} experiment upload to git repo manager.'
            )
        raise
Example #49
0
 def save_cookie(cls):
     if cls.session is not None:
         with open(Config.get(cls.KEY_COOKIE_LOCATION), "w") as f:
             f.write(json.dumps(requests.utils.dict_from_cookiejar(cls.session.cookies)))
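
 # Counterpart sketch (assumption, not part of the original example): restore the
 # cookie jar written by save_cookie, assuming cls.session is a requests.Session
 # and the same Config key points at the saved JSON file.
 def load_cookie(cls):
     if cls.session is not None:
         with open(Config.get(cls.KEY_COOKIE_LOCATION), "r") as f:
             requests.utils.add_dict_to_cookiejar(cls.session.cookies, json.loads(f.read()))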
Example #50
0
 def setUpClass(cls):
     cls.Script = baiduScript(cls.driver)
     cls.config = Config()
Example #51
0
	def synchronizeRules(self):
		"""Collects lists of rules this sensor should have, compares it with what currently
		lies in the local cache, and request missing/changed rules from the central server."""
		logger = logging.getLogger(__name__)
		maxRuleRequests = int(Config.get("sync", "maxRulesInRequest"))
	
		s = Session.session()
		
		logger.info("Starting to synchronize the Rules")
		logger.debug("Collecting the SID/rev pairs from the server")
		
		# Collect sid/rev pairs from the central server.
		response = self.server.getRuleRevisions(self.token)
		if(response['status'] == False):
			logger.error("Could not get rulerevisions from the server: %s" % response['message'])
			raise SnowmanServer.ConnectionError("Could not retrieve RuleRevisions from the server.")
		
		rulerevisions = response['revisions']
		
		# Collect the rules in the local cache.
		localRules = s.query(Rule).all()
		for r in localRules:
			# If the current rule is in the rulerevisions-list
			if str(r.SID) in rulerevisions and int(r.rev) == int(rulerevisions[str(r.SID)]):
				rulerevisions.pop(str(r.SID))
				logger.debug("Rule %d is already up to date" % r.SID)
			else:
				logger.debug("Rule %d is deleted, as it is going to be updated or removed." % r.SID)
				s.delete(r)
		s.commit()
		
		logger.debug("Starting to download %d rules from the server" % len(rulerevisions))
		
		# Grab Ruleclasses, rulesets and rulereferencetypes from the local cache, to
		#   have them at hand when they need to be referenced.
		ruleClasses = {}
		for rc in s.query(RuleClass).all():
			ruleClasses[rc.classtype] = rc
		ruleSets = {}
		for rs in s.query(RuleSet).all():
			ruleSets[rs.name] = rs
		reftype = {}
		for ref in s.query(RuleReferenceType).all():
			reftype[ref.name] = ref
	
		# Start collecting the full rules from the central server:
		rulerevisions = list(rulerevisions)
		while len(rulerevisions) > 0:
			request = rulerevisions[:maxRuleRequests]
			logger.debug("Requesting %d out of %d rules" % (len(request), len(rulerevisions)))
			
			# Request a chunk of the rules
			response = self.server.getRules(self.token, request)
			if(response['status'] == False):	
				logger.error("Could not get rulers from the server: %s" % response['message'])
				raise SnowmanServer.ConnectionError("Could not retrieve Rule from the server.")
			else:
				rules = response['rules']
			
			# Insert the received rules, and all their references.
			for r in rules:
				rule = Rule(sid=rules[r]['SID'], rev=rules[r]['rev'], msg=rules[r]['msg'], raw=rules[r]['raw'])	
				rule.ruleset = ruleSets[rules[r]['ruleset']]
				rule.ruleclass = ruleClasses[rules[r]['ruleclass']]
				s.add(rule)
				logger.debug("Got a new rule from the server: " + str(rule))

				for ref in rules[r]['references']:
					rref = RuleReference(reference=ref[1])
					rref.referenceType = reftype[ref[0]]
					rref.rule = rule
					s.add(rref)
				
				rulerevisions.remove(r)
			s.commit()
		
		logger.info("Finished synchronizing the rules from the server")
		s.close()
Example #52
0
def mongo_conn():
    c = Config()
    mongo = MongoClient(c.get('mongo', 'host'), int(c.get('mongo', 'port')))
    return mongo.zhihu
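
# Usage sketch (assumption; the collection name is illustrative): mongo_conn()
# returns the "zhihu" database handle, so collections hang off it as attributes:
#
#   db = mongo_conn()
#   doc = db.questions.find_one()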
Example #53
0
import sys
import subprocess
import os
import ast
from pymongo import MongoClient, GEO2D
from util import log
from util.config import Config
from report.google_polyline_encoder import decode
import datetime as dt
import analyze.export_data_to_csv as export
import report.data_viz as data_viz

logger = log.getLogger(__name__)

# MongoDB Client & DB
cfg = Config()
client = MongoClient(cfg.get("mongo", "uri"))
db = client[cfg.get("mongo", "db_strava")]
segments_collection = db[cfg.get("mongo", "coll_segment")]
leaderboard_collection = db[cfg.get("mongo", "coll_leaderboards")]
zip_collection = db[cfg.get("mongo", "coll_zip")]
wban_collection = db[cfg.get("mongo", "coll_wban")]
weather_collection = db[cfg.get("mongo", "coll_weather")]


def cls():
    print "\n" * 100


def get_user_input():
    response = raw_input("Select reporting option:\n\n \
Example #54
0
    def _on_api_response(self, response):
        if response is None:
            logging.error("API request for %s failed." % self.gplus_user_id)
            self.write(
                "Unable to fetch content for this Google+ ID; it may not be authenticated. See http://%s for more information."
                % self.request.host)
            self.set_status(401)
            return self.finish()
        if response.error:
            if response.code == 403:
                logging.error("API Request 403: %r" %
                              (json.loads(response.body)))
                self.set_status(503)
                self.write(
                    "Unable to fulfill request at this time - Google+ API rate limit exceeded."
                )
                return self.finish()
            else:
                logging.error("AsyncHTTPRequest error: %r, %r" %
                              (response.error, response))
                return self.send_error(500)
        else:
            data = json.loads(response.body)

            headers = {'Content-Type': 'application/atom+xml'}
            params = {
                'userid':
                self.gplus_page_id or self.gplus_user_id,
                'baseurl':
                'http://%s' % self.request.host,
                'requesturi':
                'http://%s%s' %
                (self.request.host, self.request.uri.split('?', 1)[0]),
            }

            if 'items' not in data or not data['items']:
                params['lastupdate'] = dateutils.to_atom_format(
                    datetime.datetime.today())
                return self._respond(headers,
                                     empty_feed_template.format(**params))

            posts = data['items']

            lastupdate = max(
                dateutils.from_iso_format(p['updated']) for p in posts)
            params['author'] = xhtml_escape(posts[0]['actor']['displayName'])
            params['lastupdate'] = dateutils.to_atom_format(lastupdate)

            headers['Last-Modified'] = dateutils.to_http_format(lastupdate)

            params['entrycontent'] = u''.join(
                entry_template.format(**get_post_params(p)) for p in posts)

            body = feed_template.format(**params)

            Cache.set(self.cache_key, {
                'headers': headers,
                'body': body
            },
                      time=Config.getint('cache', 'stream-expire'))
            return self._respond(headers, body)
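
    # Sketch (assumption, not the project's actual handler code): a later request
    # could serve the entry written with Cache.set above without hitting the API
    # again, e.g.:
    #
    #   cached = Cache.get(self.cache_key)
    #   if cached:
    #       return self._respond(cached['headers'], cached['body'])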
Example #55
0
"""
@author: bochen
"""
import sys
import numpy as np
import librosa
import h5py
import matplotlib.pyplot as plt
import glob
import os

import util
import cPickle as pickle
from util.config import Config

config = Config('../config.json')

instr_1 = "vn"
instr_2 = "cl"
data_type = "test"
instr_mix = instr_1 + "-" + instr_2
path_h5 = os.path.join("../data/feat/", data_type, instr_mix)
path_mix = os.path.join("data/segments/mix/", data_type, instr_mix)
path_phase = os.path.join("data/segments/phase/", data_type, instr_mix)

filenames = glob.glob(path_h5 + "/*.h5")

#%%
i = 0

filename = filenames[i]
Example #56
0
 def __init__(self):
     self.browser = Config().get_config()
Example #57
0
def main(_):

    if not FLAGS.config_file:
        raise ValueError("Must set --config_file to set model's hyperparams")

    config = Config(FLAGS.config_file)

    data = dataloader.Dataloader(config.data_file)
    dataset = data.load_data()

    config.num_steps = data.sentence_max_len
    config.vocab_size = data.vocab_size
    config.label_size = data.label_size
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    kfold = config.kfold
    total_data = len(dataset)
    for i in range(kfold):
        test_data = dataset[int(total_data / kfold * i):int(total_data /
                                                            kfold * (i + 1))]
        train_data = dataset[0:int(total_data / kfold * i)]
        train_data.extend(dataset[int(total_data / kfold *
                                      (i + 1)):total_data])
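        # Worked example of the slicing above (illustrative numbers): with
        # total_data = 100 and kfold = 5, iteration i = 1 uses dataset[20:40] as
        # test_data and dataset[0:20] + dataset[40:100] as train_data.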

        tf.reset_default_graph()
        with tf.name_scope("Train"):
            train_input = ModelInput(raw_data=train_data,
                                     batch_size=config.batch_size)
            with tf.variable_scope("Model",
                                   reuse=None,
                                   initializer=initializer):
                train_model = Model(is_training=True,
                                    config=config,
                                    input_=train_input)

        with tf.name_scope("Test"):
            test_input = ModelInput(raw_data=test_data,
                                    batch_size=config.batch_size)
            with tf.variable_scope("Model",
                                   reuse=True,
                                   initializer=initializer):
                test_model = Model(is_training=False,
                                   config=config,
                                   input_=test_input)

        with tf.name_scope("Test_train"):
            test_train_input = ModelInput(raw_data=train_data,
                                          batch_size=config.batch_size)
            with tf.variable_scope("Model",
                                   reuse=True,
                                   initializer=initializer):
                test_train_model = Model(is_training=False,
                                         config=config,
                                         input_=test_train_input)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            train(sess, train_model, test_model, config)
            test(sess, test_model, config)
            test(sess, test_train_model, config)
            print("""]}""")
Example #58
0
def config(state: State, cpu: str, memory: str):

    if not cpu or not memory:
        handle_error(logger, Texts.MISSING_ARGUMENTS, Texts.MISSING_ARGUMENTS)
        sys.exit(1)

    if not validate_cpu_settings(cpu):
        handle_error(logger, Texts.CPU_WRONG_FORMAT, Texts.CPU_WRONG_FORMAT)
        sys.exit(1)

    if not validate_memory_settings(memory):
        handle_error(logger, Texts.MEMORY_WRONG_FORMAT,
                     Texts.MEMORY_WRONG_FORMAT)
        sys.exit(1)

    configuration = NAUTAConfigMap()

    if configuration.minimal_node_memory_amount and \
       convert_k8s_memory_resource(configuration.minimal_node_memory_amount) > convert_k8s_memory_resource(memory):
        error_message = Texts.MEMORY_SETTINGS_TOO_LOW.format(
            memory_value=configuration.minimal_node_memory_amount)
        handle_error(logger, error_message, error_message)
        sys.exit(1)

    if configuration.minimal_node_cpu_number and \
       convert_k8s_cpu_resource(configuration.minimal_node_cpu_number) > convert_k8s_cpu_resource(cpu):
        error_message = Texts.CPU_SETTINGS_TOO_LOW.format(
            cpu_value=configuration.minimal_node_cpu_number)
        handle_error(logger, error_message, error_message)
        sys.exit(1)

    config_file_location = os.path.join(Config().config_path,
                                        NODE_CONFIG_FILENAME)

    if not os.path.isfile(config_file_location):
        handle_error(logger, Texts.MISSING_CONFIG_FILE,
                     Texts.MISSING_CONFIG_FILE)
        sys.exit(1)

    with open(config_file_location, 'r+', encoding='utf-8') as config_file, \
            spinner(text=Texts.CONFIG_UPDATE):
        config_file_content = yaml.safe_load(config_file)
        cpu_number = str(config_file_content.get(CPU_NUMBER_FIELDNAME))
        memory_amount = str(config_file_content.get(MEMORY_AMOUNT_FIELDNAME))
        cpu_system_required_min = str(
            config_file_content.get(CPU_SYSTEM_REQUIRED_MIN_FIELDNAME))
        cpu_system_required_percent = str(
            config_file_content.get(CPU_SYSTEM_REQUIRED_PERCENT_FIELDNAME))
        memory_system_required_min = str(
            config_file_content.get(MEMORY_SYSTEM_REQUIRED_MIN_FIELDNAME))
        memory_system_required_percent = str(
            config_file_content.get(MEMORY_SYSTEM_REQUIRED_PERCENT_FIELDNAME))

        if not cpu_number or cpu_number == "None" or not memory_amount or memory_amount == "None":
            handle_error(logger, Texts.CONFIG_FILE_INCORRECT,
                         Texts.CONFIG_FILE_INCORRECT)
            sys.exit(1)

        try:
            override_values_in_packs(
                new_cpu_number=cpu,
                new_memory_amount=memory,
                current_cpu_number=cpu_number,
                current_mem_amount=memory_amount,
                cpu_system_required_min=cpu_system_required_min,
                cpu_system_required_percent=cpu_system_required_percent,
                mem_system_required_min=memory_system_required_min,
                mem_system_required_percent=memory_system_required_percent)
        except Exception:
            logger.exception(Texts.ERROR_DURING_UPDATE)
            handle_error(logger, Texts.ERROR_DURING_UPDATE,
                         Texts.ERROR_DURING_UPDATE)
            sys.exit(1)

        config_file.seek(0)
        config_file.truncate()
        config_file_content[CPU_NUMBER_FIELDNAME] = cpu
        config_file_content[MEMORY_AMOUNT_FIELDNAME] = memory
        yaml.dump(config_file_content,
                  config_file,
                  default_flow_style=False,
                  explicit_start=True)

    click.echo(Texts.SUCCESS_MESSAGE)
Example #59
0
# import spark specific libraries
from pyspark.sql import SQLContext, Row
from pyspark import SparkContext
from pyspark import SparkConf

# import specific libraries
from snakebite.client import Client
from datetime import datetime
from datetime import timedelta
from util.config import Config
import swiftclient as swift

# ==============================================================================
# configuration
# ==============================================================================
cfg = Config()
run_mode = cfg.get("run", "analyze_trends_run_mode")
out_mode = cfg.get("run", "analyze_trends_out_mode")

if (run_mode == "hdfs"):
    source_dir = cfg.get("hdfs", "hdfs_dir_pagecounts")
    hdfs_url = cfg.get("hdfs", "hdfs_url")
    source_files = cfg.get("hdfs", "hdfs_file_pagecounts")

elif (run_mode == "swift"):
    source_dir = cfg.get("swift", "swift_container_pagecounts")
    source_files = cfg.get("swift", "swift_file_pagecounts")

    swift_authurl = cfg.get("swift", "st_auth")
    swift_user = cfg.get("swift", "st_user")
    swift_key = cfg.get("swift", "st_key")
Example #60
0
class InteractiveClient(object):
    def __init__(self, authType, cloudType):
        self.__cloudType = cloudType
        self.__managementResource = cloudType.endpoints.management
        self.__graphResource = cloudType.endpoints.active_directory_graph_resource_id
        self.__tenantIdList = []
        self.__tenantObject = None
        self.__subscriptionClient = None
        self.__subscriptionList = []
        self.__selectedSubscriptionList = []
        self.__registerProviders = None
        self.__allSubscriptions = False
        self.__authType = authType
        self.__config = Config(None, str(uuid.uuid4()), [], True, None,
                               IDENTIFIER_URL, self.__cloudType.name, [],
                               False)
        self.__credentials = None

    def __getInput(self, line):
        name = raw_input(line + ": ")
        type(name)
        if len(name.strip()) == 0:
            return self.__getInput(line)
        elif name.strip() == "exit":
            exit(0)
        else:
            return name

    def __getPasswordInput(self, line):
        name = getpass.getpass(prompt=line + ": ")
        type(name)
        if len(name.strip()) == 0:
            return self.__getPasswordInput(line)
        elif name.strip() == "exit":
            exit(0)
        else:
            return name

    def __getNumberInput(self, line):
        number = self.__getInput(line)
        try:
            return int(number)
        except:
            return self.__getNumberInput(line)

    def run(self):
        self.getUserNamePasswordCredentials()
        # self.getClientSecret()
        self.selectTenants()
        self.getUpdateApp()
        self.selectSubscriptions()
        if len(self.__selectedSubscriptionList) > 0:
            self.shouldRegisterProviders()
        self.review()
        cont = self.getYesNoPrompt("Do You Want to continue? (YES/NO)")
        if not cont:
            print "Aborting.."
            exit(1)
        self.runApp()

    def getUserNamePasswordCredentials(self):
        credentials = {"type": self.__authType}
        credentials[KEY_USERNAME] = self.__getInput("Enter UserName")
        if self.__authType == CREDENTIALS_TYPE_USER_PASS:
            credentials[KEY_PASSWORD] = self.__getPasswordInput(
                "Enter Password")
        self.__credentials = credentials
        self.__config.setCredentials(credentials)

    def getUpdateApp(self):
        credentialsProvider = CredentialsProviderFactory.getCredentialsProvider(
            self.__config)
        appUtil = AppUtil(credentialsProvider)
        appId = appUtil.getAppId()

        if appId != None:
            print "\n\nYou already have a Lacework App Setup"
            self.__config.setIsUpdateApp(
                self.getYesNoPrompt(
                    "Do You Want to update an existing app? (YES/NO)"))
            if not self.__config.isUpdateApp():
                print "Please delete the existing App to create a new one. Aborting for now..."
                exit(1)

    def getClientSecret(self):
        name = raw_input(
            "Get Client Secret (Optional). Not used for existing Apps: ")
        type(name)
        if len(name.strip()) == 0:
            self.__clientSecret = str(uuid.uuid4())
        elif name.strip() == "exit":
            exit(0)
        elif len(name.strip()) < 10:
            self.getClientSecret()
        else:
            self.__clientSecret = name.strip()

    def review(self):
        printBold(
            "\n\n------------------------------------------------------------------------------------------------------------------"
        )
        printBold("\n\nOverview")
        self.printTenants([self.__tenantObject])
        printBold("\n\nSelected Subscriptions: ")
        self.printSubscriptions(self.__selectedSubscriptionList)
        printBold("\n\nUpdate Permission for existing App: ")
        print str(self.__config.isUpdateApp())
        if len(self.__selectedSubscriptionList) > 0:
            printBold("\n\nProviders Required")
            self.printProviders()
        printBold(
            "\n\n------------------------------------------------------------------------------------------------------------------"
        )

    def shouldRegisterProviders(self):
        print "\n\nWe need some providers to be registered in the selected subscriptions in order to add compliance evaluations for them: \n"
        self.printProviders()
        self.__registerProviders = self.getYesNoPrompt(
            "Do you Want to register the providers? (YES/NO)")

    def printProviders(self):
        print "\nList Of Providers Required\n"
        prettyTable = PrettyTable(["No.", "Namespace"])
        i = 1
        for provider in PROVIDER_REGISTRATION_LIST:
            prettyTable.add_row([str(i), provider])
            i += 1
        print prettyTable
        if self.__registerProviders != None:
            printBold("\n\nRegister Providers : ")
            print str(self.__registerProviders)

    def getYesNoPrompt(self, line):
        input = self.__getInput(line)
        if input.lower() == "yes":
            return True
        elif input.lower() == "no":
            return False
        else:
            return self.getYesNoPrompt(line)

    def selectSubscriptions(self):
        self.fetchSubscriptions()
        self.printSubscriptions(self.__subscriptionList)
        self.__selectSubscriptions()

    def fetchSubscriptions(self):
        self.__subscriptionClient = SubscriptionClient(
            credentials=CredentialsProviderFactory.getCredentialsForResource(
                self.__config, self.__managementResource))
        for subscription in self.__subscriptionClient.subscriptions.list():
            if str(subscription.state) == 'SubscriptionState.enabled':
                self.__subscriptionList.append(subscription)

    def printSubscriptions(self, subscriptionList):
        prettyTable = PrettyTable(["No.", "Subscription Id", "Display Name"])
        i = 1
        print "\nList Of Available Subscriptions\n"
        for subscription in subscriptionList:
            prettyTable.add_row([
                str(i), subscription.subscription_id, subscription.display_name
            ])
            i += 1
        print prettyTable

    def __selectSubscriptions(self):
        if len(self.__subscriptionList) == 0:
            print "\n No Subscriptions found in tenant: " + self.__config.getTenantId(
            )
            return
        line = "\n Please provide comma separated Subscription No. (eg. 1,3,4) or 'ALL' for all subscriptions"
        subString = self.__getInput(line)
        if subString == "ALL":
            self.__allSubscriptions = True
            self.__selectedSubscriptionList = self.__subscriptionList
            self.__config.setAllSubscriptions(True)
        else:
            self.__config.setAllSubscriptions(False)
            try:
                subsIndexList = [x.strip() for x in subString.split(',')]
                for index in subsIndexList:
                    indexNum = int(index)
                    if indexNum < 1 or indexNum > len(self.__subscriptionList):
                        self.__selectSubscriptions()
                        return
                for index in subsIndexList:
                    indexNum = int(index)
                    if indexNum >= 1 and indexNum <= len(
                            self.__subscriptionList):
                        self.__selectedSubscriptionList.append(
                            self.__subscriptionList[indexNum - 1])
                        self.__config.getSubscriptionList().append(
                            self.__subscriptionList[indexNum -
                                                    1].subscription_id)
            except Exception as e:
                self.__selectSubscriptions()

    def selectTenants(self):
        self.fetchTenants()
        self.printTenants(self.__tenantIdList)
        self.__selectTenant()

    def printTenants(self, tenantIdList):
        prettyTable = PrettyTable(["No.", "Tenant Id", "DisplayName"])
        i = 1
        print "\nTenants\n"
        for tenant in tenantIdList:
            prettyTable.add_row([
                str(i), tenant["tenant"],
                tenant["details"]["value"][0]["displayName"]
            ])
            i += 1
        print prettyTable

    def __selectTenant(self):
        tenantNo = self.__getNumberInput("\nSelect Tenant No. from " + str(1) +
                                         " - " + str(len(self.__tenantIdList)))
        try:
            if tenantNo >= 1 and tenantNo <= len(self.__tenantIdList):
                tenantNo = tenantNo - 1
                self.__tenantObject = self.__tenantIdList[tenantNo]
                self.__config.setTenantId(self.__tenantObject["tenant"])
                print "\n Selected tenant: " + self.__config.getTenantId()
                return
            else:
                self.__selectTenant()
        except:
            self.__selectTenant()

    def fetchTenants(self):
        config = Config(self.__credentials, None, [], True, None, "",
                        self.__cloudType.name, [], True)
        self.__subscriptionClient = SubscriptionClient(
            CredentialsProviderFactory.getCredentialsForResource(
                config, self.__managementResource))
        for tenant in self.__subscriptionClient.tenants.list():
            config = Config(self.__credentials, None, [], True,
                            str(tenant.tenant_id), "", self.__cloudType.name,
                            [], True)

            tenantId = config.getTenantId()
            credentials = CredentialsProviderFactory.getCredentialsForResource(
                config, self.__graphResource)

            tenantUtil = TenantUtil(credentials, tenantId)

            tenantProperties = tenantUtil.getAdProperties()
            if tenantProperties:
                self.__tenantIdList.append({
                    "tenant": tenantId,
                    "details": tenantProperties
                })
            else:
                logging.warn(
                    "Could not get Tenant description for tenant Id: " +
                    tenantId)
        if len(self.__tenantIdList) == 0:
            print "No Tenants Found"
            exit(1)

    def runApp(self):
        config = self.__config
        appManager = AppManager(config, self.__registerProviders)
        appManager.run()
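
# Usage sketch (assumption; the cloud-type argument is illustrative): the
# surrounding tool presumably instantiates the client with an auth-type constant
# and a cloud definition, then drives the whole interactive flow:
#
#   client = InteractiveClient(CREDENTIALS_TYPE_USER_PASS, cloudType)
#   client.run()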