Example #1
    def _verify_new_library_and_get_items(self, create_if_not_found=False):
        # Check if the new library exists in Plex
        try:
            new_library = self.plex.server.library.section(
                self.recipe['new_library']['name'])
            logs.warning(u"Library already exists in Plex. Scanning the library...")

            new_library.update()
        except plexapi.exceptions.NotFound:
            if create_if_not_found:
                self.plex.create_new_library(
                    self.recipe['new_library']['name'],
                    self.recipe['new_library']['folder'],
                    self.library_type)
                new_library = self.plex.server.library.section(
                    self.recipe['new_library']['name'])
            else:
                raise Exception("Library '{library}' does not exist".format(
                    library=self.recipe['new_library']['name']))

        # Wait for metadata to finish downloading before continuing
        logs.info(u"Waiting for metadata to finish downloading...")
        new_library = self.plex.server.library.section(
            self.recipe['new_library']['name'])
        while new_library.refreshing:
            time.sleep(5)
            new_library = self.plex.server.library.section(
                self.recipe['new_library']['name'])

        # Retrieve a list of items from the new library
        logs.info(u"Retrieving a list of items from the '{library}' library in "
                  u"Plex...".format(library=self.recipe['new_library']['name']))
        return new_library, new_library.all()
Example #2
File: Twitter.py Project: Stamped/Stamped
    def getFriendData(self, user_token, user_secret, offset=0, limit=30):
        logs.info('### user_token %s   user_secret: %s' % (user_token, user_secret))
        if limit > 100:
            raise StampedInputError("Limit must be <= 100")
        ids = self._getUserIds(user_token, user_secret, 'friends')
        if offset >= len(ids):
            return []

        url = '1/users/lookup.json'
        friends = []

        idset = ','.join(ids[offset:offset+limit])
        results = self.__get(url, user_token, user_secret, user_id=idset)
        for result in results:
            try:
                friends.append(
                    {
                        'user_id'   : result['id'],
                        'name'      : result['name'],
                        'screen_name' : result['screen_name'],
                        'image_url' : result['profile_image_url'],
                    }
                )
            except TypeError as e:
                logs.warning("Unable to get twitter friends! Error: %s" % e)
                logs.info("Results: %s" % results)
                raise
        return friends
Example #3
    def add_movies(self, url, movie_list=None, movie_ids=None, max_age=0):
        if not movie_list:
            movie_list = []
        if not movie_ids:
            movie_ids = []
        max_date = add_years(max_age * -1)
        logs.info(u"Retrieving the trakt list: {}".format(url))
        data = {}
        if max_age != 0:
            data['extended'] = 'full'
        movie_data = self._handle_request('get', url, data=data)
        for m in movie_data:
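            # Entries that are not wrapped under a 'movie' key are normalized so the loop can always read m['movie']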
            if 'movie' not in m:
                m['movie'] = m
            # Skip already added movies
            if m['movie']['ids']['imdb'] in movie_ids:
                continue
            if not m['movie']['year']:  # TODO: Handle this better?
                continue
            # Skip old movies
            if max_age != 0 \
                    and (max_date > datetime.datetime.strptime(
                        m['movie']['released'], '%Y-%m-%d')):
                continue
            movie_list.append({
                'id': m['movie']['ids']['imdb'],
                'tmdb_id': str(m['movie']['ids'].get('tmdb', '')),
                'title': m['movie']['title'],
                'year': m['movie']['year'],
            })
            movie_ids.append(m['movie']['ids']['imdb'])
            if m['movie']['ids'].get('tmdb'):
                movie_ids.append('tmdb' + str(m['movie']['ids']['tmdb']))

        return movie_list, movie_ids
Example #4
File: xencluster.py Project: nagius/cxm
    def check_cfg(self):
        """Perform a check on configuration files.

		Return False if a file is missing somewhere.
		"""
        log.info("Checking synchronization of configuration files...")
        safe = True

        # Get a dict with the config files of each node
        nodes_cfg = dict()
        for node in self.get_nodes():
            nodes_cfg[node.get_hostname()] = node.get_possible_vm_names()

        log.debug("nodes_cfg=", nodes_cfg)

        # Compare file lists for each node
        missing = dict()
        for node in nodes_cfg.keys():
            for cfg in nodes_cfg.values():
                missing.setdefault(node, []).extend(list(Set(cfg) - Set(nodes_cfg[node])))

        # Show missing files without duplicates
        for node in missing.keys():
            if missing[node]:
                log.info(
                    " ** WARNING : Missing configuration files on %s : %s" % (node, ", ".join(list(Set(missing[node]))))
                )
                safe = False

        return safe
Example #5
File: master.py Project: sorinros/cxm
    def countVotes(self):
        if self.role != MasterService.RL_VOTING:
            log.warn("Tally triggered but it's not election time !")
            return

        if type(self.ballotBox) != dict or len(self.ballotBox) == 0:
            log.emerg(
                "No vote received ! There is a critical network failure.")
            self.panic(True)  # noCheck=True because role is not consistent
            return

        # Select election winner
        self.currentElection = None
        self.lastTallyDate = int(time.time())
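        # The ballot with the highest key wins; its value names the new master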
        self.master = self.ballotBox[max(self.ballotBox.keys())]
        log.info("New master is %s." % (self.master))
        self._startSlave()

        if self.master == DNSCache.getInstance().name:
            log.info("I'm the new master.")
            self.role = MasterService.RL_ACTIVE
            self._startMaster()
        else:
            self.role = MasterService.RL_PASSIVE

        if self.panicRequested:
            log.warn("Engaging panic mode requested during election stage.")
            self.panicRequested = False
            self.panic()
Example #6
File: xencluster.py Project: nagius/cxm
    def check_bridges(self):
        """Perform a check on briges configurations.

		Return False if a bridge is missing somewhere.
		"""
        log.info("Checking bridges configurations...")
        safe = True

        # Get a dict with the bridges of each node
        nodes_bridges = dict()
        for node in self.get_nodes():
            nodes_bridges[node.get_hostname()] = node.get_bridges()

        log.debug("nodes_bridges=", nodes_bridges)

        # Compare bridge lists for each node
        missing = dict()
        for node in nodes_bridges.keys():
            for bridges in nodes_bridges.values():
                missing.setdefault(node, []).extend(list(Set(bridges) - Set(nodes_bridges[node])))

        # Show missing bridges without duplicates
        for node in missing.keys():
            if missing[node]:
                log.info(" ** WARNING : Missing bridges on %s : %s" % (node, ", ".join(list(Set(missing[node])))))
                safe = False

        return safe
Example #7
def _getImageFromS3(bucket, name):
    num_retries = 0
    max_retries = 5

    while True:
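        # Retry transient S3 failures, giving up after max_retries attempts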
        try:
            key = Key(bucket, name)
            data = key.get_contents_as_string()
            key.close()
            return data

        except Exception as e:
            logs.warning("S3 Exception: %s" % e)
            num_retries += 1
            if num_retries > max_retries:
                msg = "Unable to connect to S3 after %d retries (%s)" % (max_retries, self.__class__.__name__)
                logs.warning(msg)
                raise Exception(msg)

            logs.info("Retrying (%s)" % (num_retries))
            time.sleep(0.5)

        finally:
            try:
                if not key.closed:
                    key.close()
            except Exception:
                logs.warning("Error closing key")
Example #8
def enterWorkLoop(functions):
    from MongoStampedAPI import globalMongoStampedAPI

    api = globalMongoStampedAPI()
    logs.info("starting worker for %s" % functions.keys())
    worker = StampedWorker(getHosts())

    def wrapper(worker, job):
        try:
            k = job.task
            logs.begin(saveLog=api._logsDB.saveLog, saveStat=api._statsDB.addStat, nodeName=api.node_name)
            logs.async_request(k)
            v = functions[k]
            data = pickle.loads(job.data)
            logs.info("%s: %s: %s" % (k, v, data))
            v(k, data)
        except Exception as e:
            logs.error(str(e))
        finally:
            try:
                logs.save()
            except Exception:
                print "Unable to save logs"
                import traceback

                traceback.print_exc()
                logs.warning(traceback.format_exc())
        return ""

    for k, v in functions.items():
        worker.register_task(k, wrapper)
    worker.work(poll_timeout=1)
Example #9
File: Facebook.py Project: Stamped/Stamped
    def postToOpenGraph(self, fb_user_id, action, access_token, object_type, object_url, message=None, imageUrl=None):
        logs.info('### access_token: %s  object_type: %s  object_url: %s' % (access_token, object_type, object_url))

        http = httplib2.Http()
        response, content = http.request(object_url, 'GET')
        soup = BeautifulSoup(content)
        logs.info('### meta tags:\n%s' % soup.findAll('meta', property=True))

        args = {}
        if action == 'like':
            path = "%s/og.likes" % fb_user_id
            args['object'] = object_url
        elif action == 'follow':
            path = "me/og.follows"
            args['profile'] = object_url
        else:
            args[object_type] = object_url
            path = "me/stampedapp:%s" % action
        if message is not None:
            args['message'] = message
        if imageUrl is not None:
            args['image[0][url]'] = imageUrl
            args['image[0][user_generated]'] = "true"
        return self._post(
            access_token,
            path,
            priority='low',
            **args
        )
Example #10
File: master.py Project: nagius/cxm
	def countVotes(self):
		if self.role != MasterService.RL_VOTING:
			log.warn("Tally triggered but it's not election time !")
			return

		if type(self.ballotBox) != dict or len(self.ballotBox) == 0:
			log.emerg("No vote received ! There is a critical network failure.")
			self.panic(True) # noCheck=True because role is not consistent
			return

		# Select election winner
		self.currentElection=None
		self.lastTallyDate=int(time.time())
		self.master=self.ballotBox[max(self.ballotBox.keys())]
		log.info("New master is %s." % (self.master))
		self._startSlave()

		if self.master == DNSCache.getInstance().name:
			log.info("I'm the new master.")
			self.role=MasterService.RL_ACTIVE
			self._startMaster()
		else:
			self.role=MasterService.RL_PASSIVE
		
		if self.panicRequested:
			log.warn("Engaging panic mode requested during election stage.")
			self.panicRequested=False
			self.panic()
Example #11
File: Facebook.py Project: Stamped/Stamped
    def getFriendData(self, access_token, offset=0, limit=30):
        path = 'me/friends'
        logs.info('#### offset: %s  limit: %s' % (offset, limit))

        #May want to order by name using FQL:
        #http://developers.facebook.com/tools/explorer?fql=SELECT%20uid%2C%20name%20FROM%20user%20WHERE%20uid%20IN%20(SELECT%20uid2%20FROM%20friend%20WHERE%20uid1%20%3D%20me())%20ORDER%20BY%20last_name

        friends = []
        while True:
            print path
            result = self._get(access_token, path, limit=limit, offset=offset, fields='id,name,picture')
            access_token = None
            logs.info('### result: %s' % result)
            for d in result['data']:
                friends.append(
                    {
                        'user_id' : d['id'],
                        'name' : d['name'],
                        'image_url' : d['picture'],
                    }
                )
#            friends.extend([ d for d in result['data']] )
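            # Follow Facebook's paging 'next' link, but only while its offset keeps matching the number of friends collected so far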
            if 'paging' in result and 'next' in result['paging']:
                path = result['paging']['next']
                url = urlparse.urlparse(result['paging']['next'])
                params = dict([part.split('=') for part in url[4].split('&')])
                if 'offset' in params and int(params['offset']) == len(friends):
                    continue
            break
        return friends
Example #12
File: node.py Project: nagius/cxm
	def __init__(self,hostname):
		"""Instanciate a Node object.

		This constructor opens SSH and XenAPI connections to the node.
		If the node is not online, this will fail with an uncaught exception from paramiko or XenAPI.
		"""
		log.info("Connecting to", hostname, "...")
		self.hostname=hostname

		# Open SSH channel (localhost uses popen2)
		if not self.is_local_node():
			self.ssh = paramiko.SSHClient()
			self.ssh.load_system_host_keys()
			#self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
			self.ssh.connect(hostname,22,'root', timeout=2)

		# Open Xen-API Session 
		if self.is_local_node():
			# Use unix socket on localhost
			self.server = XenAPI.Session("httpu:///var/run/xend/xen-api.sock")
			log.debug("[API]","Using unix socket.")
		else:
			self.server = XenAPI.Session("http://"+hostname+":9363")
			log.debug("[API]","Using tcp socket.")
		self.server.login_with_password("root", "")

		# Prepare connection with legacy API
		self.__legacy_server=None

		# Prepare metrics
		self.__metrics=None

		# Prepare cache
		self._cache=datacache.DataCache()
		self._last_refresh=0
Example #13
File: node.py Project: nagius/cxm
	def check_missing_lvs(self):
		"""
		Perform a check on logical volumes used by VMs.
		Return False if some are missing.
		"""
		log.info("Checking for missing LV...")
		safe=True
		
		# Get all LVs used by VMs
		used_lvs = list()
		for vm in self.get_possible_vm_names():
			used_lvs.extend(VM(vm).get_lvs())

		# Get all existent LVs
		existent_lvs = list()
		for line in self.run("lvs -o vg_name,name --noheading").readlines():
			(vg, lv)=line.strip().split()
			existent_lvs.append("/dev/"+vg+"/"+lv)
		
		# Compute missing LVs 
		missing_lvs = list(Set(used_lvs) - Set(existent_lvs))
		if len(missing_lvs):
			log.info(" ** WARNING : Found missing LV :\n\t", "\n\t".join(missing_lvs))
			safe=False

		return safe
Example #14
def upload(file_name):
    """ Input a file name. Upload the file to webserver from the ./data
        directory.
    """
    file_path = os.path.join(file_operations.FILE_FOLDER, file_name)
    user_id, url = file_operations.fetch_static_data()

    #print 'User_ID: {}, URL: {}'.format(user_id, url)

    data = {
        'file_name': file_name,
        'user_id': user_id,
        'operation': 'upload_file'
    }
    files = {'file_data': open(file_path, 'rb')}
    try:
        reply = requests.post(url, data, json=None, files=files)

        if reply.text == 'upload successful':
            logs.info('Uploaded file: {}'.format(file_name))
            return True
        return False
    except requests.exceptions.ConnectionError as e:
        error = 'Error: Cannot upload file: ' + file_name + ' ' + str(e)
        logs.info(error)
        return False
Example #15
def getFacebook(accessToken, path, params=None):
    if params is None:
        params = {}
    num_retries = 0
    max_retries = 5
    params['access_token'] = accessToken

    while True:
        try:
            baseurl = 'https://graph.facebook.com'
            encoded_params  = urllib.urlencode(params)
            url     = "%s%s?%s" % (baseurl, path, encoded_params)
            result  = json.load(urllib2.urlopen(url))
            
            if 'error' in result:
                if 'type' in result['error'] and result['error']['type'] == 'OAuthException':
                    # OAuth exception
                    raise Exception('Facebook OAuth error: %s' % result['error'])
                raise Exception('Facebook API error: %s' % result['error'])
            
            return result
            
        except urllib2.HTTPError as e:
            logs.warning('Facebook API Error: %s' % e)
            num_retries += 1
            if num_retries > max_retries:
                if e.code == 400:
                    raise StampedInputError('Facebook API 400 Error')
                raise StampedUnavailableError('Facebook API Error')
                
            logs.info("Retrying (%s)" % (num_retries))
            time.sleep(0.5)

        except Exception as e:
            raise Exception('Error connecting to Facebook: %s' % e)
Example #16
    def _get_trakt_lists(self):
        item_list = []  # TODO Replace with dict, scrap item_ids?
        item_ids = []

        for url in self.recipe['source_list_urls']:
            max_age = (self.recipe['new_playlist'].get('max_age', 0) if
                       self.use_playlists else self.recipe['new_library'].get(
                           'max_age', 0))
            if 'api.trakt.tv' in url:
                (item_list,
                 item_ids) = self.trakt.add_items(self.library_type, url,
                                                  item_list, item_ids, max_age
                                                  or 0)
            elif 'imdb.com/chart' in url:
                (item_list,
                 item_ids) = self.imdb.add_items(self.library_type, url,
                                                 item_list, item_ids, max_age
                                                 or 0)
            else:
                raise Exception(
                    "Unsupported source list: {url}".format(url=url))

        if self.recipe['weighted_sorting']['enabled']:
            if self.config['tmdb']['api_key']:
                logs.info(u"Getting data from TMDb to add weighted sorting...")
                item_list = self.weighted_sorting(item_list)
            else:
                logs.warning(u"Warning: TMDd API key is required "
                             u"for weighted sorting")
        return item_list, item_ids
Example #17
File: heartbeats.py Project: sorinros/cxm
	def stopService(self):
		if self.running:
			Service.stopService(self)
			log.info("Stopping master heartbeat service...")
			return self._hb.stop()
		else:
			return defer.succeed(None)
Example #18
File: svnwatcher.py Project: sorinros/cxm
        def checkLock(result):
            # If result is true, no commit running (lock successfully grabbed)
            if result:
                # Get a local copy for thread's work
                # Use .extend instead of = due to scope restriction (vars are in the parent function)
                added.extend(self.added)
                deleted.extend(self.deleted)
                updated.extend(self.updated)
                self.added = list()
                self.deleted = list()
                self.updated = list()

                log.info("Committing for " +
                         ", ".join(set(added + deleted + updated)))
                self.commitRunning = True

                d = threads.deferToThread(commit)
                d.addCallback(lambda _: self.doUpdate())
                d.addCallbacks(commitEnded, commitFailed)
                d.addCallback(releaseLock)
                return d
            else:
                log.info("Commit already running: rescheduling.")
                self.rescheduleCommit()
                return defer.succeed(None)
Example #19
File: node.py Project: sorinros/cxm
    def __init__(self, hostname):
        """Instanciate a Node object.

		This constructor opens SSH and XenAPI connections to the node.
		If the node is not online, this will fail with an uncaught exception from paramiko or XenAPI.
		"""
        log.info("Connecting to", hostname, "...")
        self.hostname = hostname

        # Open SSH channel (localhost uses popen2)
        if not self.is_local_node():
            self.ssh = paramiko.SSHClient()
            self.ssh.load_system_host_keys()
            #self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            self.ssh.connect(hostname, 22, 'root', timeout=2)

        # Open Xen-API Session
        if self.is_local_node():
            # Use unix socket on localhost
            self.server = XenAPI.Session("httpu:///var/run/xend/xen-api.sock")
            log.debug("[API]", "Using unix socket.")
        else:
            self.server = XenAPI.Session("http://" + hostname + ":9363")
            log.debug("[API]", "Using tcp socket.")
        self.server.login_with_password("root", "")

        # Prepare connection with legacy API
        self.__legacy_server = None

        # Prepare metrics
        self.__metrics = None

        # Prepare cache
        self._cache = datacache.DataCache()
        self._last_refresh = 0
Example #20
    def login():
        input_info = input("请输入用户名/密码:")  # "Please enter username/password:"
        info = input_info.split("/")
        if len(info) != 2:
            logs.error("您的输入格式错误,请按照:用户名/密码")
            return
        if info[0] not in page.users or info[1] != page.users[info[0]]:
            logs.error("您输入的用户名或密码错误!")
            return

        if info[0] in page.admins:
            page.model_level01[1] = "删除用户"  # "Delete user"
            page.model_level01[3] = "查询所有用户"  # "List all users"
            page.model_level01[5] = "导出用户信息csv格式"  # "Export user info as CSV"
        else:
            if 3 in page.model_level01.keys():
                page.model_level01.pop(1)
                page.model_level01.pop(3)
                page.model_level01.pop(5)

        logs.info(info[0] + "登录成功")  # "<username> logged in successfully"
        while True:
            in2 = page.page.home01(page.model_level01)
            op = page.page.print_model(in2, page.model_level01, info[0])
            if op == "exit":
                return
Example #21
File: node.py Project: sorinros/cxm
    def check_missing_lvs(self):
        """
		Perform a check on logical volumes used by VMs.
		Return False if some are missing.
		"""
        log.info("Checking for missing LV...")
        safe = True

        # Get all LVs used by VMs
        used_lvs = list()
        for vm in self.get_possible_vm_names():
            used_lvs.extend(VM(vm).get_lvs())

        # Get all existent LVs
        existent_lvs = list()
        for line in self.run("lvs -o vg_name,name --noheading").readlines():
            (vg, lv) = line.strip().split()
            existent_lvs.append("/dev/" + vg + "/" + lv)

        # Compute missing LVs
        missing_lvs = list(Set(used_lvs) - Set(existent_lvs))
        if len(missing_lvs):
            log.info(" ** WARNING : Found missing LV :\n\t",
                     "\n\t".join(missing_lvs))
            safe = False

        return safe
Example #22
File: rpc.py Project: nagius/cxm
	def startService(self):
		Service.startService(self)

		log.info("Starting RPC service...")
		self.cleanSocket(None)
		self._localPort=reactor.listenUNIX(core.cfg['UNIX_PORT'], pb.PBServerFactory(LocalRPC(self._master)))
		self._remotePort=reactor.listenTCP(core.cfg['TCP_PORT'], pb.PBServerFactory(RemoteRPC(self._master)))
Example #23
    def addDataToS3(self, name, data, contentType):
        num_retries = 0
        max_retries = 5

        while True:
            try:
                conn = S3Connection(keys.aws.AWS_ACCESS_KEY_ID, keys.aws.AWS_SECRET_KEY)
                bucket = conn.lookup(self.bucket_name)
                key = Key(bucket, name)
                key.set_metadata('Content-Type', contentType)
                # for some reason, if we use set_contents_from_file here, an empty file is created
                key.set_contents_from_string(data.getvalue(), policy='public-read')
                #key.set_contents_from_file(data, policy='public-read')
                key.close()
                
                return "%s/%s" % (self.base_url, name)

            except Exception as e:
                logs.warning('S3 Exception: %s' % e)
                num_retries += 1
                if num_retries > max_retries:
                    msg = "Unable to connect to S3 after %d retries (%s)" % \
                        (max_retries, self.__class__.__name__)
                    logs.warning(msg)
                    raise Exception(msg)
                
                logs.info("Retrying (%s)" % (num_retries))
                time.sleep(0.5)

            finally:
                try:
                    if not key.closed:
                        key.close()
                except Exception:
                    logs.warning("Error closing key")
Example #24
    def _copyInS3(self, oldKey, newKey):
        num_retries = 0
        max_retries = 5
        
        while True:
            try:
                conn = S3Connection(keys.aws.AWS_ACCESS_KEY_ID, keys.aws.AWS_SECRET_KEY)
                bucket = conn.lookup(self.bucket_name)

                if not self.bucket.get_key(oldKey):
                    return True
                
                bucket.copy_key(newKey, self.bucket_name, oldKey, preserve_acl=True)
                return True

            except Exception as e:
                logs.warning('S3 Exception: %s' % e)
                num_retries += 1
                if num_retries > max_retries:
                    msg = "Unable to connect to S3 after %d retries (%s)" % \
                        (max_retries, self.__class__.__name__)
                    logs.warning(msg)
                    raise Exception(msg)
                
                logs.info("Retrying (%s)" % (num_retries))
                time.sleep(0.5)
Example #25
    def save(self, to_save, manipulate=True, safe=False, **kwargs):
        if self._debug:
            print("Mongo 'save' - manipulate: %s safe: %s kwargs: %s" % (manipulate, safe, kwargs))

        num_retries = 0
        max_retries = 5

        storeLog = kwargs.pop('log', True)
        
        while True:
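            # Retry when pymongo raises AutoReconnect (e.g. during a replica-set failover); give up after max_retries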
            try:
                ret = self._collection.save(to_save, manipulate, safe, **kwargs)
                return ret
            except AutoReconnect as e:
                num_retries += 1
                if num_retries > max_retries:
                    msg = "Unable to connect after %d retries (%s)" % \
                        (max_retries, self._parent.__class__.__name__)
                    if storeLog:
                        logs.warning(msg)
                    raise
                if storeLog:
                    logs.info("Retrying delete (%s)" % (self._parent.__class__.__name__))
                time.sleep(0.25)
            except Exception as e:
                import traceback
                logs.warning('Failure updating document:\n%s' % ''.join(traceback.format_exc()))
                raise StampedSaveDocumentError("Unable to update document")
Example #26
File: xencluster.py Project: sorinros/cxm
	def check_cfg(self):
		"""Perform a check on configuration files.

		Return False if a file is missing somewhere.
		"""
		log.info("Checking synchronization of configuration files...")
		safe=True

		# Get a dict with the config files of each node
		nodes_cfg=dict()
		for node in self.get_nodes():
			nodes_cfg[node.get_hostname()]=node.get_possible_vm_names()

		log.debug("nodes_cfg=",nodes_cfg)

		# Compare file lists for each node
		missing=dict()
		for node in nodes_cfg.keys():
			for cfg in nodes_cfg.values():
				missing.setdefault(node,[]).extend(list(Set(cfg) - Set(nodes_cfg[node])))

		# Show missing files without duplicates
		for node in missing.keys():
			if missing[node]:
				log.info(" ** WARNING : Missing configuration files on %s : %s" % (node,", ".join(list(Set(missing[node])))))
				safe=False

		return safe
Example #27
File: Request.py Project: Stamped/Stamped
def service_request(service, method, url, body=None, header=None, query_params=None, priority='low', timeout=DEFAULT_TIMEOUT):
    if timeout is None:
        timeout = DEFAULT_TIMEOUT
    if body is None:
        body = {}
    if header is None:
        header = {}
    if query_params is None:
        query_params = {}

    if query_params != {}:
        encoded_params  = urllib.urlencode(query_params)
        if url.find('?') == -1:
            url += "?%s" % encoded_params
        else:
            url += "&%s" % encoded_params

    logs.info('### called service_request.  service: %s  url: %s   priority: %s  timeout: %s' % (service, url, priority, timeout))

    response, content = rl_state().request(service, method, url, body, header, priority, timeout)

    if response.status > 400:
        logs.warning('service request returned an error response.  status code: %s  content: %s' % (response.status, content))

    return response, content
Example #28
    def ensure_index(self, key_or_list, **kwargs):
        if self._debug:
            print("Mongo 'ensure_index'")

        num_retries = 0
        max_retries = 5
        
        # NOTE (travis): this method should never throw an error locally if connected to 
        # a non-master DB node that can't ensure_index because the conn doesn't have 
        # write permissions
        
        while True:
            try:
                ret = self._collection.ensure_index(key_or_list, **kwargs)
                return ret
            except AutoReconnect as e:
                if not utils.is_ec2():
                    return
                
                num_retries += 1
                
                if num_retries > max_retries:
                    msg = "Unable to ensure_index after %d retries (%s)" % \
                        (max_retries, self._parent.__class__.__name__)
                    logs.warning(msg)
                    
                    raise
                
                logs.info("Retrying ensure_index (%s)" % (self._parent.__class__.__name__))
                time.sleep(0.25)
Example #29
File: xencluster.py Project: sorinros/cxm
	def check_bridges(self):
		"""Perform a check on briges configurations.

		Return False if a bridge is missing somewhere.
		"""
		log.info("Checking bridges configurations...")
		safe=True

		# Get a dict with the bridges of each node
		nodes_bridges=dict()
		for node in self.get_nodes():
			nodes_bridges[node.get_hostname()]=node.get_bridges()

		log.debug("nodes_bridges=",nodes_bridges)

		# Compare bridge lists for each node
		missing=dict()
		for node in nodes_bridges.keys():
			for bridges in nodes_bridges.values():
				missing.setdefault(node,[]).extend(list(Set(bridges) - Set(nodes_bridges[node])))

		# Show missing bridges without duplicates
		for node in missing.keys():
			if missing[node]:
				log.info(" ** WARNING : Missing bridges on %s : %s" % (node,", ".join(list(Set(missing[node])))))
				safe=False

		return safe
Example #30
File: applerss.py Project: Stamped/Stamped
    def _parse_entity(self, entry):
        logs.info(pformat(entry))
        aid = entry["id"]["attributes"]["im:id"]

        # TODO: Why can't we parse the proxies directly from the feed results?
        proxy = self._source.entityProxyFromKey(aid)
        return EntityProxyContainer.EntityProxyContainer().addProxy(proxy).buildEntity()
Example #31
 def __init__(self, query_string, coords=None, kinds=None, types=None, local=False):
     ResolverSearchAll.__init__(self)
     
     if local:
         if kinds is None: kinds = set()
         kinds.add('place')
     else:
         if kinds and 'place' not in kinds:
             # if we're filtering by category / subcategory and the filtered results 
             # couldn't possibly contain a location, then ensure that coords are 
             # disabled
             coords = None
         else:
             # process 'in' or 'near' location hint
             result = libs.worldcities.try_get_region(query_string)
             
             if result is not None:
                 new_query_string, coords, region_name = result
                 if kinds is None: kinds = set()
                 kinds.add('place')
                 
                 logs.info("[search] using region %s at %s (parsed from '%s')" % 
                           (region_name, coords, query_string))
                 query_string = new_query_string
     
     self.__query_string = query_string
     self.__coordinates  = coords
     self.__kinds        = kinds
     self.__types        = types
     self.__local        = local
Example #32
def main():

    browsers_static_info = {
    'windows-7' : {'firefox' : ['AppData','Roaming','Mozilla','Firefox','Profiles'],
                    'chrome' : ['AppData','Local','Google','Chrome','User Data','Default','History']},

    'linux'     : {'firefox' : ['.mozilla','firefox'],
                    'chrome' : ['.config','google-chrome','Default','History']},

    'windows-xp': {'firefox' : ['Application Data','Mozilla','Firefox','Profiles'],
                    'chrome' : ['Local Settings','Application Data','Google','Chrome','User Data','Default','Preferences']}

    }
    file_operations.initial_setup()
    os_name = platform.platform().lower()
    if 'linux' in os_name:
        # this is linux
        browsers_path = expand_links(browsers_static_info['linux'])
    elif 'windows-7' in os_name or 'windows-8' in os_name:
        # this is windows 7 or 8
        browsers_path = expand_links(browsers_static_info['windows-7'])
    elif 'windows-xp' in os_name:
        # this is windows xp
        browsers_path = expand_links(browsers_static_info['windows-xp'])
    else:
        # Unknown platform: log it and stop instead of hitting a NameError below
        logs.info('Unsupported platform: {}'.format(os_name))
        return

    browsers_info = format_browsers_info(browsers_path)
    file_operations.write_browsers_info(browsers_info)
    static_data()
    logs.info('All initial configuration done.')
Example #33
    def genSyncMetaFile(self):
        f = self.getMeta()
        if path.isfile(f):
            return

        dump = self.getDumpedMeta()
        if not path.isfile(dump):
            raise NameError("can't get dump metadata file: " + dump)

        # Parse the binlog name and position out of the dumped metadata
        name = None
        pos = None
        with open(dump) as f:
            lines = f.readlines()
        for line in lines:
            if 'Log: ' in line:
                name = line.strip()[5:]
            elif 'Pos: ' in line:
                pos = line.strip()[5:]
                break
        if name is None or pos is None:
            raise NameError("can't get dump binlog name and position")

        logs.info("binlog-name:%s binlog-pos:%s", name, pos)
        with open(self.getMeta(), 'w+') as f:
            f.write('binlog-name = "' + name + '"\n')
            f.write('binlog-pos = ' + pos + '\n')
Example #34
 def find(self, spec=None, output=None, limit=None, **kwargs):
     if self._debug:
         print("Mongo 'find' - spec: %s output: %s limit: %s kwargs: %s" % (spec, output, limit, kwargs))
     num_retries = 0
     max_retries = 5
     
     while True:
         try:
             ret = self._collection.find(spec, **kwargs)
             
             if limit is not None:
                 ret = ret.limit(limit)
             
             if output is not None:
                 if output == list:
                     ret = list(ret)
             
             return ret
         except AutoReconnect as e:
             num_retries += 1
             if num_retries > max_retries:
                 msg = "Unable to connect after %d retries (%s)" % \
                     (max_retries, self._parent.__class__.__name__)
                 logs.warning(msg)
                 raise
             
             logs.info("Retrying find (%s)" % (self._parent.__class__.__name__))
             time.sleep(0.25)
Example #35
def _addImageToS3(bucket, name, data):
    num_retries = 0
    max_retries = 5

    while True:
        try:
            key = Key(bucket, name)
            key.set_metadata("Content-Type", "image/jpeg")
            key.set_contents_from_string(data.getvalue(), policy="public-read")
            key.close()
            return key

        except Exception as e:
            logs.warning("S3 Exception: %s" % e)
            num_retries += 1
            if num_retries > max_retries:
                msg = "Unable to connect to S3 after %d retries (%s)" % (max_retries, self.__class__.__name__)
                logs.warning(msg)
                raise Exception(msg)

            logs.info("Retrying (%s)" % (num_retries))
            time.sleep(0.5)

        finally:
            try:
                if not key.closed:
                    key.close()
            except Exception:
                logs.warning("Error closing key")
Example #36
File: alerts.py Project: Stamped/Stamped
    def sendEmails(self, noop=False):
        logs.info("Submitting emails to %s users" % len(self._emailQueue))

        # Apply rate limit
        limit = 8

        ses = boto.connect_ses(keys.aws.AWS_ACCESS_KEY_ID, keys.aws.AWS_SECRET_KEY)

        for emailAddress, emailQueue in self._emailQueue.iteritems():
            if IS_PROD or emailAddress in self._adminEmails:
                count = 0
                emailQueue.reverse()
                for email in emailQueue:
                    count += 1
                    if count > limit:
                        logs.debug("Limit exceeded for email '%s'" % emailAddress)
                        break

                    try:
                        logs.debug("Send email: %s" % (email))
                        if not noop:
                            ses.send_email(email.sender, email.title, email.body, emailAddress, format='html')

                    except Exception as e:
                        logs.warning("Email failed: %s" % email)
                        logs.warning(utils.getFormattedException())

        logs.info("Success!")
Example #37
File: views.py Project: Stamped/Stamped
def show(request, **kwargs):
    screenName  = kwargs.pop('screen_name', None)
    stampNum    = kwargs.pop('stamp_num', None)
    stampTitle  = kwargs.pop('stamp_title', None)
    mobile      = kwargs.pop('mobile', False)
    
    try:
        logs.info('%s/%s/%s' % (screenName, stampNum, stampTitle))
        stamp = stampedAPI.getStampFromUser(screenName, stampNum)

        template = 'sdetail.html'
        if mobile:
            logs.info('mobile=True')
            template = 'sdetail-mobile.html'

        encodedStampTitle = encodeStampTitle(stamp.entity.title)
        
        if encodedStampTitle != stampTitle:
            i = encodedStampTitle.find('.')
            if i != -1:
                encodedStampTitle = encodedStampTitle[:i]
            
            if encodedStampTitle != stampTitle:
                raise Exception("Invalid stamp title: '%s' (received) vs '%s' (stored)" % 
                                (stampTitle, encodedStampTitle))

        entity = stampedAPI.getEntity({'entity_id': stamp.entity_id})
        opentable_url = None
        if entity.rid is not None:
            opentable_url = "http://www.opentable.com/single.aspx?rid=%s&ref=9166" % entity.rid
        
        entity = HTTPEntity_stampedtest().importSchema(entity)
        if entity.opentable_url and opentable_url is not None:
            entity.opentable_url = opentable_url
        
        params = HTTPStamp().dataImport(stamp).dataExport()
        params['entity'] = entity.dataExport()
        
        if entity.genre == 'film' and entity.length:
            params['entity']['duration'] = formatDuration(entity.length)
        
        params['image_url_92'] = params['user']['image_url'].replace('.jpg', '-92x92.jpg')
        
        response = render_to_response(template, params)
        response['Expires'] = (datetime.utcnow() + timedelta(minutes=10)).ctime()
        response['Cache-Control'] = 'max-age=600'
        
        return response
    
    except Exception as e:
        logs.warning("Error: %s" % e)
        try:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            f = traceback.format_exception(exc_type, exc_value, exc_traceback)
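            # string.joinfields(f, '') is the legacy Python 2 spelling of ''.join(f)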
            f = string.joinfields(f, '')
            logs.warning(f)
        except:
            pass
        raise Http404
Example #38
File: Netflix.py Project: Stamped/Stamped
    def __http(self, verb, service, user_id=None, token=None, priority='low', timeout=None, **parameters):
        """
        Makes a request to the Netflix API
        """
        self.__checkBlacklistExpiration()

        #if a user is specified, and she is in the blacklist, return None
        if user_id is not None and self.__isUserBlacklisted(user_id):
            return None
        if service.startswith('http'):
            url = service
        else:
            if user_id is None:
                url = "http://%s/%s" % (HOST, service)
            else:
                url = "http://%s/users/%s/%s" % (HOST, user_id, service)
        parameters['output'] = 'json'

        oauthRequest = oauth.OAuthRequest.from_consumer_and_token(self.__consumer,
            http_url=url,
            parameters=parameters,
            token=token,
            http_method=verb)

        oauthRequest.sign_request(self.__signature_method_hmac_sha1, self.__consumer, token)

        headers = {'Content-Type': 'application/x-www-form-urlencoded'} if verb == 'POST' else {}
        params = oauthRequest.parameters
        logs.info(url)

        if verb == 'POST':
            response, content = service_request('netflix', verb, url, body=params, header=headers, priority=priority, timeout=timeout)
        else:
            response, content = service_request('netflix', verb, url, query_params=params, header=headers, priority=priority, timeout=timeout)

        # if the response is a 401 or 403, blacklist the user until the day expires
        if user_id is not None and response.status in (401, 403):
            if self.__addToBlacklistCount(user_id):
                logs.warning('Too many 401/403 responses.  User added to blacklist')

        if response.status < 300:
            return json.loads(content)
        else:
            logs.info('Failed with status code %s' % response['status'])
            try:
                failData = json.loads(content)['status']
                status = failData['status_code']
                subcode = failData.get('sub_code', None)
                message = failData['message']
            except:
                raise StampedThirdPartyError("Error parsing Netflix error response")

            # For the full list of possible status codes, see: http://developer.netflix.com/docs/HTTP_Status_Codes
            if status == 401:
                raise StampedThirdPartyInvalidCredentialsError(message)
            elif status == 412 and subcode == 710:
                return True
            else:
                raise StampedThirdPartyError(message)
Example #39
File: master.py Project: nagius/cxm
	def startService(self):
		Service.startService(self)
		
		# Print welcome message
		log.info("Starting cxmd version", meta.version)

		self._messagePort=reactor.listenUDP(core.cfg['UDP_PORT'], UDPListener(self.dispatchMessage))
		reactor.callLater(2, self.joinCluster)
Example #40
 def _addPNG(self, name, image):
     name    = "%s.png" % name
     out     = StringIO()
     
     image.save(out, 'png')
     
     logs.info('[%s] adding image %s (%dx%d)' % (self, name, image.size[0], image.size[1]))
     return self.addDataToS3(name, out, 'image/png')
Example #41
File: master.py Project: nagius/cxm
	def joinCluster(self):

		def startHeartbeats():
			self._startSlave()
			self.s_rpc.startService()

			if self.role == MasterService.RL_ACTIVE:
				self._startMaster() 

		def joinRefused(reason):
			reason.trap(NodeRefusedError, RPCRefusedError)
			log.err("Join to cluster %s failed: Master %s has refused me: %s" % 
				(core.cfg['CLUSTER_NAME'], self.master, reason.getErrorMessage()))
			self.stopService()

		def joinAccepted(result):
			self.role=MasterService.RL_PASSIVE
			log.info("Join successfull, I'm now part of cluster %s." % (core.cfg['CLUSTER_NAME']))
			startHeartbeats()
			
		def masterConnected(obj):
			d = obj.callRemote("register",DNSCache.getInstance().name)
			d.addCallbacks(joinAccepted,joinRefused)
			d.addErrback(log.err)
			d.addBoth(lambda _: rpcConnector.disconnect())
			return d

		try:
			if self.master is None:
				# New active master
				if DNSCache.getInstance().name not in core.cfg['ALLOWED_NODES']:
					log.warn("I'm not allowed to create a new cluster. Exiting.")
					raise Exception("Cluster creation not allowed")

				if DiskHeartbeat.is_in_use():
					log.err("Heartbeat disk is in use but we are alone !")
					raise Exception("Heartbeat disk already in use")

				log.info("No master found. I'm now the new master of %s." % (core.cfg['CLUSTER_NAME']))
				self.role=MasterService.RL_ACTIVE
				self.master=DNSCache.getInstance().name
				self.status[self.master]={'timestamp': 0, 'offset': 0, 'vms': []}
				self.disk.make_slot(DNSCache.getInstance().name)
				startHeartbeats()

			else:
				# Passive master
				self.role=MasterService.RL_JOINING
				log.info("Trying to join cluster %s..." % (core.cfg['CLUSTER_NAME']))

				factory = pb.PBClientFactory()
				rpcConnector = reactor.connectTCP(self.master, core.cfg['TCP_PORT'], factory)
				d = factory.getRootObject()
				d.addCallback(masterConnected)
				d.addErrback(log.err)
		except Exception, e:
			log.err("Startup failed: %s. Shutting down." % (e))
			self.stopService()
Example #42
File: master.py Project: sorinros/cxm
    def registerNode(self, name):
        def validHostname(result):
            try:
                self.disk.make_slot(name)
            except DiskHeartbeatError, e:
                raise NodeRefusedError("Disk heartbeat failure: %s" % (e))

            self.status[name] = {'timestamp': 0, 'offset': 0, 'vms': []}
            log.info("Node %s has joined the cluster." % (name))
Example #43
File: master.py Project: sorinros/cxm
        def recoverSucceeded(result, name):
            # result is the return code from XenCluster.recover()
            # If True: success, if False: maybe a partition

            if (result):
                log.info("Successfully recovered node %s." % (name))
                self._unregister(name)
            else:
                log.err("Partial failure, cannot recover", name)
Example #44
File: master.py Project: sorinros/cxm
    def startService(self):
        Service.startService(self)

        # Print welcome message
        log.info("Starting cxmd version", meta.version)

        self._messagePort = reactor.listenUDP(
            core.cfg['UDP_PORT'], UDPListener(self.dispatchMessage))
        reactor.callLater(2, self.joinCluster)
Example #45
File: server.py Project: Stamped/Stamped
 def exposed_request(self, service, priority, timeout, verb, url, body = None, headers = None):
     logs.info('Received request.  service: %s  priority: %s  timeout: %s  verb: %s  url: %s  ' %
               (service, priority, timeout, verb, url))
     if body is not None:
         body = pickle.loads(body)
     if headers is not None:
         headers = pickle.loads(headers)
     response, content = self.__rl_service.handleRequest(service, priority, timeout, verb, url, body, headers)
     return pickle.dumps(response), content
Example #46
File: heartbeats.py Project: sorinros/cxm
	def stopService(self):
		if self.running:
			Service.stopService(self)
			log.info("Stopping slave heartbeats...")
			if self._call.running:
				self._call.stop()
			return self._hb.stop()
		else:
			return defer.succeed(None)
Example #47
async def get(request):
    filename = request.match_info['filename']
    if not re.match(r'^[A-Za-z0-9]{64}\.(pdf|png)$', filename):
        logs.info('{} not found'.format(filename))
        raise aiohttp.web.HTTPBadRequest
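    # A request for '<hash>.<ext>' is served from './temp/<hash>/a.<ext>'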
    path = './temp/' + filename.replace('.', '/a.')
    if not os.path.isfile(path):
        raise aiohttp.web.HTTPNotFound
    return aiohttp.web.FileResponse(path)
Example #48
File: rpc.py Project: sorinros/cxm
    def startService(self):
        Service.startService(self)

        log.info("Starting RPC service...")
        self.cleanSocket(None)
        self._localPort = reactor.listenUNIX(
            core.cfg['UNIX_PORT'], pb.PBServerFactory(LocalRPC(self._master)))
        self._remotePort = reactor.listenTCP(
            core.cfg['TCP_PORT'], pb.PBServerFactory(RemoteRPC(self._master)))
Example #49
    def add_shows(self, url, show_list=None, show_ids=None, max_age=0):
        if not show_list:
            show_list = []
        if not show_ids:
            show_ids = []
        curyear = datetime.datetime.now().year
        logs.info(u"Retrieving the IMDb list: {}".format(url))
        data = {}
        if max_age != 0:
            data['extended'] = 'full'
        (imdb_ids, imdb_titles, imdb_years) = self._handle_request(url)
        for i, imdb_id in enumerate(imdb_ids):
            # Skip already added shows
            if imdb_id in show_ids:
                continue

            # Look up the show on TVDb/TMDb when those clients are configured
            tvdb_data = None
            tmdb_data = None
            if self.tvdb:
                tvdb_data = self.tvdb.get_tvdb_from_imdb(imdb_id)

            if self.tmdb:
                tmdb_data = self.tmdb.get_tmdb_from_imdb(imdb_id, 'tv')

            if tvdb_data and tvdb_data['firstAired'] != "":
                year = datetime.datetime.strptime(tvdb_data['firstAired'],
                                                  '%Y-%m-%d').year
            elif tmdb_data and tmdb_data['first_air_date'] != "":
                year = datetime.datetime.strptime(tmdb_data['first_air_date'],
                                                  '%Y-%m-%d').year
            elif imdb_years[i]:
                year = int(str(imdb_years[i]).strip("()"))
            else:
                year = datetime.date.today().year

            # Skip old shows
            if max_age != 0 \
                    and (curyear - (max_age - 1)) > year:
                continue

            if tvdb_data:
                title = tvdb_data['seriesName']
            else:
                title = tmdb_data['name'] if tmdb_data else imdb_titles[i]

            show_list.append({
                'id': imdb_id,
                'tvdb_id': tvdb_data['id'] if tvdb_data else None,
                'tmdb_id': tmdb_data['id'] if tmdb_data else None,
                'title': title,
                'year': year,
            })
            show_ids.append(imdb_id)
            if tmdb_data and tmdb_data['id']:
                show_ids.append('tmdb' + str(tmdb_data['id']))
            if tvdb_data and tvdb_data['id']:
                show_ids.append('tvdb' + str(tvdb_data['id']))

        return show_list, show_ids
Example #50
 def _modify_sort_titles_and_cleanup(self, item_list, imdb_map, new_library, sort_only=False):
     if self.recipe['new_library']['sort']:
         logs.info(u"Setting the sort titles for the '{}' library...".format(
             self.recipe['new_library']['name']))
     if self.recipe['new_library']['sort_title']['absolute']:
         for i, m in enumerate(item_list):
             item = imdb_map.pop(m['id'], None)
             if not item:
                 item = imdb_map.pop('tmdb' + str(m.get('tmdb_id', '')),
                                     None)
             if not item:
                 item = imdb_map.pop('tvdb' + str(m.get('tvdb_id', '')),
                                     None)
             if item and self.recipe['new_library']['sort']:
                 self.plex.set_sort_title(
                     new_library.key, item.ratingKey, i + 1, m['title'],
                     self.library_type,
                     self.recipe['new_library']['sort_title']['format'],
                     self.recipe['new_library']['sort_title']['visible']
                 )
     else:
         i = 0
         for m in item_list:
             item = imdb_map.pop(m['id'], None)
             if not item:
                 item = imdb_map.pop('tmdb' + str(m.get('tmdb_id', '')),
                                     None)
             if not item:
                 item = imdb_map.pop('tvdb' + str(m.get('tvdb_id', '')),
                                     None)
             if item and self.recipe['new_library']['sort']:
                 i += 1
                 self.plex.set_sort_title(
                     new_library.key, item.ratingKey, i, m['title'],
                     self.library_type,
                     self.recipe['new_library']['sort_title']['format'],
                     self.recipe['new_library']['sort_title']['visible']
                 )
     if not sort_only and (
             self.recipe['new_library']['remove_from_library'] or
             self.recipe['new_library'].get('remove_old', False)):
         # Remove old items that no longer qualify
         self._remove_old_items_from_library(imdb_map=imdb_map)
     elif sort_only:
         return True
     all_new_items = self._cleanup_new_library(new_library=new_library)
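     # Items left in imdb_map were not matched to the source list; give them sort titles after the matched ones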
     while imdb_map:
         imdb_id, item = imdb_map.popitem()
         i += 1
         logs.info(u"{} {} ({})".format(i, item.title, item.year))
         self.plex.set_sort_title(
             new_library.key, item.ratingKey, i, item.title,
             self.library_type,
             self.recipe['new_library']['sort_title']['format'],
             self.recipe['new_library']['sort_title']['visible'])
     return all_new_items
Example #51
def static_data():
    """ Fetches user ID from the web server. And stores this id and url
        in a file.
    """
    user_id = upload_file.get_usr_id(URL)
    usr_id = int(user_id)

    logs.info('User ID assigned: {}'.format(usr_id))
    # specially written function just to handle this write operation
    file_operations.write_static_data(usr_id, URL)
Example #52
def initial_setup():
    """ This makes folder required to keep all data files    
    """
    try:
        os.mkdir('data')
    except OSError as e:
        # The 'data' folder already exists, so initial setup may have run before;
        # initialize logging and record the error.
        logs.init()
        logs.info('Error:' + str(e))
Example #53
def write_static_data(usr_id, url):
    """ Input User_ID and URL
        Write the User ID and url to static file
    """
    static_data = [usr_id, url]
    data_file_path = os.path.join(FILE_FOLDER, STATIC_FILE_NAME)
    file_name = open(data_file_path, 'w')
    pickle.dump(static_data, file_name)
    file_name.close()
    logs.info('Static File created at {}'.format(data_file_path))
Example #54
File: xencluster.py Project: sorinros/cxm
	def check(self):
		"""Perform a sanity check of the cluster.

		Return a corresponding exit code (0=success, 0!=error)
		"""
		log.info("Checking for duplicate VM...")
		safe=True

		# Get cluster wide VM list
		vm_by_node=dict()
		for node in self.get_nodes():
			vm_by_node[node.get_hostname()]=node.get_vms()
	
		log.debug("vm_by_node=",vm_by_node)
	
		# Invert key/value of the dict
		node_by_vm=dict()
		for node, vms in vm_by_node.items():
			for vm in vms:
				try:
					node_by_vm[vm.name].append(node)
				except KeyError:
					node_by_vm[vm.name]=[node]

		log.debug("node_by_vm=",node_by_vm)

		# Check duplicate VM
		for vm, nodes in node_by_vm.items():
			if len(nodes)>1:
				log.info(" ** WARNING :", vm, "is running on", " and ".join(nodes))
				safe=False

		# Check bridges
		if not self.check_bridges():
			safe=False

		# Check synchronization of configuration files
		if not self.check_cfg():
			safe=False

		# Check existence of used logicals volumes
		if not self.get_local_node().check_missing_lvs():
			safe=False

		# Other checks
		for node in self.get_nodes():
			# Check (non)activation of LVs
			if not node.check_activated_lvs():
				safe=False

			# Check autostart link
			if not node.check_autostart():
				safe=False
				
		return safe
Example #55
File: master.py Project: sorinros/cxm
    def triggerElection(self):
        log.info("Asking a new election for cluster %s." %
                 (core.cfg['CLUSTER_NAME']))

        d = Deferred()
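        # Send a single vote-request message over UDP from an ephemeral port, then close the port once it has been sent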
        port = reactor.listenUDP(
            0, UDPSender(d, lambda: MessageVoteRequest().forge()))
        d.addCallback(lambda result: result.sendMessage())
        d.addCallback(lambda _: port.stopListening())

        return d
Example #56
File: rpc.py Project: sorinros/cxm
    def stopService(self):
        if self.running:
            Service.stopService(self)
            log.info("Stopping RPC service...")

            d1 = defer.maybeDeferred(self._remotePort.stopListening)
            d2 = defer.maybeDeferred(self._localPort.stopListening)
            d2.addBoth(self.cleanSocket)
            return defer.DeferredList([d1, d2])
        else:
            return defer.succeed(None)
Example #57
File: rpc.py Project: sorinros/cxm
    def remote_releaseLock(self, name):
        try:
            self._locks[name].cancel()
        except:
            pass

        try:
            del self._locks[name]
            log.info("Lock %s released." % (name))
        except:
            pass
Example #58
def update_timestamp(browser, new_timestamp):
    """ Updates the timestamp of browser
        takes in browser name and new timestamp
        makes necessary changes to the file
    """
    # read old data from file
    browsers_info = fetch_browsers_info()
    browsers_info[browser][1] = new_timestamp
    write_browsers_info(browsers_info)
    logs.info('Timestamp: {} updated for Browser: {}'.format(
        new_timestamp, browser))