def __init__(self):
    """Initialize the handler: record the class name, discover which HTTP
    verbs this subclass implements, and set up its loggers."""
    self.__name__ = self.__class__.__name__
    self._account = None
    # Advertise only the HTTP methods the subclass actually defines.
    self.permitted_methods = []
    if hasattr(self, 'GET'):
        self.permitted_methods.append('GET')
    if hasattr(self, 'POST'):
        self.permitted_methods.append('POST')
    if hasattr(self, 'PUT'):
        self.permitted_methods.append('PUT')
        # NOTE(review): nesting reconstructed from whitespace-mangled source.
        # Reading: a handler that implements PUT but not POST also accepts
        # POST by aliasing it to the PUT implementation — confirm against
        # the original file.
        if not hasattr(self, 'POST'):
            self.permitted_methods.append('POST')
            self.POST = self.PUT
    if hasattr(self, 'DELETE'):
        self.permitted_methods.append('DELETE')
    self.logger = FakeLogger(get_logger(self.__name__), self)
    self.error_logger = FakeLogger(get_logger('Errors'), self, compname=self.__name__)
    self.executing = True
def __init__(self, pagerduty_email='*****@*****.**'):
    """Base monitor setup: open the per-monitor status shelf, create a
    logger, and seed default alerting thresholds.

    pagerduty_email -- address alert mail is sent to (the default value
    appears redacted in this copy of the source).
    """
    super(Monitor, self).__init__()
    self.name = self.__class__.__name__
    # Persistent per-monitor state, one shelf file per monitor subclass.
    # NOTE(review): shelf keys/values are not visible in this chunk.
    self.statuses = shelve.open('/data/monitor-%s.shelf' % self.name)
    self.logger = flaptor_logging.get_logger(self.name)
    # Failure thresholds and alert severity; consumed outside this chunk —
    # presumably counts of consecutive failures before alerting. Confirm.
    self.failure_threshold = 1
    self.fatal_failure_threshold = 0
    self.severity = 'WARNING'
    self.title_template = '%s::%s: [%s] %s'
    self.pagerduty_email = pagerduty_email
from lib.monitor import Monitor
from nebu.models import Index, IndexPopulation
from lib.indextank.client import ApiClient
from django.utils import simplejson as json
from lib import flaptor_logging

batch_size = 1000
dataset_files_path = './'

logger = flaptor_logging.get_logger('Populator')


class IndexPopulator(Monitor):
    """Monitor that populates newly started indexes with their dataset."""

    def __init__(self):
        super(IndexPopulator, self).__init__()
        self.failure_threshold = 5
        self.fatal_failure_threshold = 20
        # Seconds between monitor passes (presumably — consumed by the
        # Monitor base class, which is outside this chunk).
        self.period = 30

    def iterable(self):
        # Every population task that has not yet finished.
        return IndexPopulation.objects.exclude(status=IndexPopulation.Statuses.finished)

    def monitor(self, population):
        # Talk to the index through the account's private API.
        client = ApiClient(population.index.account.get_private_apiurl())
        index = client.get_index(population.index.name)
        if index.has_started():
            if population.status == IndexPopulation.Statuses.created:
                # NOTE(review): chunk is truncated after this log line; the
                # rest of monitor() is outside the visible source.
                logger.info('Populating index ' + population.index.code + ' with dataset "' + population.dataset.name + '"')
import signal
import systemutils
import flaptor.indextank.rpc.Controller as TController
from flaptor.indextank.rpc.ttypes import WorkerMountStats, WorkerLoadStats, IndexStats
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import sys
import commands
from lib import flaptor_logging
import simplejson as json

logger = flaptor_logging.get_logger('Controller')


def _get_working_directory(index_code, base_port):
    """Return the on-disk working directory for the index engine identified
    by (index_code, base_port)."""
    return "/data/indexes/%s-%d" % (index_code, base_port) # XXX /data?


class Controller:
    """ Controls a Host"""

    def __init__(self):
        pass

    def __create_indexengine_environment(self, working_directory):
        # Copy the engine libraries and launch scripts into its working dir.
        # NOTE(review): 'shutil' is not imported in the visible part of this
        # chunk — confirm the import exists elsewhere in the file.  The class
        # continues past the end of this chunk.
        shutil.copytree('indextank_lib', working_directory + "/lib")
        shutil.copy('startIndex.py', working_directory)
        shutil.copy('log4j.properties', working_directory)
# NOTE(review): this chunk starts mid-statement — the names below are the
# tail of a 'from flaptor.indextank.rpc import ...' line whose beginning
# (including the 'Indexer' name used later) is outside the visible source.
DeployManager, Controller, FrontendManager
from flaptor.indextank.rpc.ttypes import NebuException, IndextankException

''' =========================== THRIFT STUFF =========================== '''

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from lib import flaptor_logging, exceptions
from thrift.transport.TTransport import TTransportException
from socket import socket
from socket import error as SocketError

logger = flaptor_logging.get_logger('RPC')


# Missing a way to close transport
def getThriftControllerClient(host, timeout_ms=None):
    """Return a connected thrift Controller client for *host* (port 19010)."""
    protocol, transport = __getThriftProtocolTransport(host,19010, timeout_ms)
    client = Controller.Client(protocol)
    transport.open()
    return client


# Missing a way to close transport
def getThriftIndexerClient(host, base_port, timeout_ms=None):
    """Return a connected thrift Indexer client; the indexer listens on
    base_port + 1."""
    protocol, transport = __getThriftProtocolTransport(host, base_port + 1, timeout_ms)
    client = Indexer.Client(protocol)
    transport.open()
    return client
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from lib import flaptor_logging, mail
import simplejson as json
import rpc
import sys
import random
from traceback import format_tb
from django.db import transaction
from datetime import datetime, timedelta

logger = flaptor_logging.get_logger('DeployMgr')

# Deploy-manager tuning constants.  NOTE(review): meanings inferred from the
# names only — they are consumed outside this chunk; confirm there.
INITIAL_XMX = 100
INITIAL_XMX_THRESHOLD = 1000
MAXIMUM_PARALLEL_MOVES = 10

timeout_ms = 1000


def logerrors(func):
    """Decorator that logs (with traceback) any exception escaping *func*
    and then re-raises it unchanged."""
    def decorated(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            logger.exception("Failed while executing %s", func.__name__)
            # Bug fix: the original 'raise e' rebuilt the exception's
            # traceback starting at this line; a bare 'raise' re-raises the
            # original exception with its original traceback intact.
            raise
    return decorated
import traceback
from lib import flaptor_logging
from django.http import HttpResponse

logger = flaptor_logging.get_logger('error_logging')


class ViewErrorLoggingMiddleware:
    """Django middleware that logs uncaught view exceptions and converts
    them into a generic JSON error response."""

    def process_view(self, request, view_func, view_args, view_kwargs):
        # Remember the view name so process_exception can report it.
        # NOTE(review): keeping per-request state on the middleware instance
        # is unsafe if one instance serves concurrent requests — confirm the
        # deployment model.
        self.view_name = view_func.__name__

    def process_exception(self, request, exception):
        # Bug fix: the original logged repr(traceback.print_exc()).
        # print_exc() writes to stderr and returns None, so the log message
        # always contained "None"; format_exc() returns the formatted
        # traceback string.  Also guard against process_view never having
        # run (exception raised by earlier middleware).
        logger.error('UNEXPECTED EXCEPTION in view "%s". Exception is: %s',
                     getattr(self, 'view_name', '<unknown>'),
                     traceback.format_exc())
        return HttpResponse(
                '{"status":"ERROR", "message":"Unexpected error."}')
from flaptor.indextank.rpc import Indexer, Searcher, Suggestor, Storage, LogWriter, WorkerManager,\
        DeployManager, Controller, FrontendManager
from flaptor.indextank.rpc.ttypes import NebuException, IndextankException

''' =========================== THRIFT STUFF =========================== '''

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from lib import flaptor_logging, exceptions
from thrift.transport.TTransport import TTransportException
from socket import socket
from socket import error as SocketError

logger = flaptor_logging.get_logger('RPC')


# Missing a way to close transport
def getThriftControllerClient(host, timeout_ms=None):
    """Return a connected thrift Controller client for *host* (port 19010)."""
    protocol, transport = __getThriftProtocolTransport(host, 19010, timeout_ms)
    client = Controller.Client(protocol)
    transport.open()
    return client


# Missing a way to close transport
def getThriftIndexerClient(host, base_port, timeout_ms=None):
    """Return a connected thrift Indexer client; the indexer listens on
    base_port + 1.  NOTE(review): this chunk is truncated after
    transport.open() — the 'return client' line is outside the visible
    source."""
    protocol, transport = __getThriftProtocolTransport(host, base_port + 1, timeout_ms)
    client = Indexer.Client(protocol)
    transport.open()
import subprocess
import flaptor.indextank.rpc.Controller as TController
from flaptor.indextank.rpc.ttypes import WorkerMountStats, WorkerLoadStats, IndexStats
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import sys
import commands
from lib import flaptor_logging
import simplejson as json

logger = flaptor_logging.get_logger('Controller')


def _get_working_directory(index_code,base_port):
    """Return the on-disk working directory for the index engine identified
    by (index_code, base_port)."""
    return "/data/indexes/%s-%d"% ( index_code, base_port) # XXX /data?


class Controller:
    """ Controls a Host"""

    def __init__(self):
        pass

    def __create_indexengine_environment(self,working_directory):
        # Copy the engine libraries and launch scripts into its working dir.
        # NOTE(review): 'shutil' is not imported in the visible part of this
        # chunk — confirm the import exists elsewhere in the file.  The class
        # continues past the end of this chunk.
        shutil.copytree('indextank_lib', working_directory + "/lib")
        shutil.copy('startIndex.py', working_directory)
        shutil.copy('log4j.properties', working_directory)
import traceback
from lib import flaptor_logging
from django.http import HttpResponse

logger = flaptor_logging.get_logger('error_logging')


class ViewErrorLoggingMiddleware:
    """Django middleware that logs uncaught view exceptions and converts
    them into a generic JSON error response."""

    def process_view(self, request, view_func, view_args, view_kwargs):
        # Remember the view name so process_exception can report it.
        # NOTE(review): keeping per-request state on the middleware instance
        # is unsafe if one instance serves concurrent requests — confirm the
        # deployment model.
        self.view_name = view_func.__name__

    def process_exception(self, request, exception):
        # Bug fix: the original logged repr(traceback.print_exc()).
        # print_exc() writes to stderr and returns None, so the log message
        # always contained "None"; format_exc() returns the formatted
        # traceback string.  Also guard against process_view never having
        # run (exception raised by earlier middleware).
        logger.error('UNEXPECTED EXCEPTION in view "%s". Exception is: %s',
                     getattr(self, 'view_name', '<unknown>'),
                     traceback.format_exc())
        return HttpResponse('{"status":"ERROR", "message":"Unexpected error."}')
from flaptor.indextank.rpc import WorkerManager as TWorkerManager
from nebu.models import Worker
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import boto, time
import socket
from lib import flaptor_logging, mail
import rpc

# EC2 machine image used when launching new workers (presumably — consumed
# outside this chunk).
IMAGE_ID = 'ami-c6fa07af'

logger = flaptor_logging.get_logger('WorkerMgr')


def logerrors(func):
    """Decorator: log (with traceback) any exception escaping *func*, then
    re-raise it."""
    def decorated(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception, e:
            logger.exception("Failed while executing %s", func.__name__)
            raise e
    return decorated


class WorkerManager:
    # NOTE(review): chunk is truncated here — the method decorated by
    # '@logerrors' is outside the visible source.
    @logerrors
from lib.indextank.client import ApiClient, IndexAlreadyExists
from lib.authorizenet import AuthorizeNet, BillingException
from django.db import models
from django.contrib.auth.models import User
from django.utils import simplejson as json
from django.db import IntegrityError
from django.db.models.aggregates import Sum, Count
from lib import encoder, flaptor_logging
from django.conf import settings
from datetime import datetime

logger = flaptor_logging.get_logger('Models')


# idea taken from https://www.grc.com/passwords.htm
def generate_apikey(id):
    """Build an api key for account *id* from a random md5-derived part and
    an encoded unique part.

    NOTE(review): the function is truncated in this chunk (the final
    combination/return is not visible).  'hashlib', 'random' and 'binascii'
    are not imported in the visible part of the chunk — confirm the imports
    exist above.  The secret key is hard-coded in source here; a sibling
    version of this file loads it from settings.APIKEY_KEY instead —
    consider doing the same.
    """
    key = "2A1A8AE7CAEFAC47D6F74920CE4B0CE46430CDA6CF03D254C1C29402D727E570"
    while True:
        hash = hashlib.md5()
        hash.update('%d' % id)
        hash.update(key)
        hash.update('%d' % random.randint(0,1000000))
        # Keep only base64 random parts without '/' (key must be URL-safe).
        random_part = binascii.b2a_base64(hash.digest())[:14]
        if not '/' in random_part:
            break
    unique_part = encoder.to_key(id)
from lib.indextank.client import ApiClient, IndexAlreadyExists
from lib.authorizenet import AuthorizeNet, BillingException
from django.db import models
from django.contrib.auth.models import User
from django.utils import simplejson as json
from django.db import IntegrityError
from django.db.models.aggregates import Sum, Count
from lib import encoder, flaptor_logging
from django.conf import settings
from datetime import datetime

logger = flaptor_logging.get_logger('Models')


# idea taken from https://www.grc.com/passwords.htm
def generate_apikey(id):
    """Build an api key for account *id* from a random md5-derived part and
    an encoded unique part.

    NOTE(review): the function is truncated in this chunk (the final
    combination/return is not visible).  'hashlib', 'random' and 'sys' /
    'binascii' are not imported in the visible part of the chunk — confirm
    the imports exist above.
    """
    key = settings.APIKEY_KEY
    while True:
        hash = hashlib.md5()
        hash.update('%d' % id)
        hash.update(key)
        hash.update('%d' % random.randint(0, sys.maxint))
        # Keep only base64 random parts without '/' (key must be URL-safe).
        random_part = binascii.b2a_base64(hash.digest())[:14]
        if not '/' in random_part:
            break
    unique_part = encoder.to_key(id)
import systemutils
import flaptor.indextank.rpc.Controller as TController
from flaptor.indextank.rpc.ttypes import WorkerMountStats, WorkerLoadStats, IndexStats
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import sys
import commands
from lib import flaptor_logging
import simplejson as json

logger = flaptor_logging.get_logger("Controller")


def _get_working_directory(index_code, base_port):
    """Return the on-disk working directory for the index engine identified
    by (index_code, base_port)."""
    return "/data/indexes/%s-%d" % (index_code, base_port)  # XXX /data?


class Controller:
    """ Controls a Host"""

    def __init__(self):
        pass

    def __create_indexengine_environment(self, working_directory):
        # Copy the engine libraries and launch scripts into its working dir.
        # NOTE(review): 'shutil' is not imported in the visible part of this
        # chunk — confirm the import exists elsewhere.  The method and class
        # continue past the end of this chunk.
        shutil.copytree("indextank_lib", working_directory + "/lib")
        shutil.copy("startIndex.py", working_directory)
from flaptor.indextank.rpc import WorkerManager as TWorkerManager
from nebu.models import Worker
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import boto, time
import socket
from lib import flaptor_logging, mail
import rpc

# EC2 machine image used when launching new workers (presumably — consumed
# outside this chunk).
IMAGE_ID = 'ami-c6fa07af'

logger = flaptor_logging.get_logger('WorkerMgr')


def logerrors(func):
    """Decorator: log (with traceback) any exception escaping *func*, then
    re-raise it."""
    def decorated(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception, e:
            logger.exception("Failed while executing %s", func.__name__)
            raise e
    return decorated


class WorkerManager:
    # NOTE(review): chunk is truncated here — ec2_connection's body is
    # outside the visible source.
    @logerrors
    def ec2_connection(self):
from lib.indextank.client import ApiClient, IndexAlreadyExists
from lib.authorizenet import AuthorizeNet, BillingException
from django.db import models
from django.contrib.auth.models import User
from django.utils import simplejson as json
from django.db import IntegrityError
from django.db.models.aggregates import Sum, Count
from lib import encoder, flaptor_logging
from django.conf import settings
from datetime import datetime

logger = flaptor_logging.get_logger("Models")


# idea taken from https://www.grc.com/passwords.htm
def generate_apikey(id):
    """Build an api key for account *id* from a random md5-derived part and
    an encoded unique part.

    NOTE(review): the function is truncated in this chunk (the final
    combination/return is not visible).  'hashlib', 'random' and 'sys' /
    'binascii' are not imported in the visible part of the chunk — confirm
    the imports exist above.
    """
    key = settings.APIKEY_KEY
    while True:
        hash = hashlib.md5()
        hash.update("%d" % id)
        hash.update(key)
        hash.update("%d" % random.randint(0, sys.maxint))
        # Keep only base64 random parts without '/' (key must be URL-safe).
        random_part = binascii.b2a_base64(hash.digest())[:14]
        if not "/" in random_part:
            break
    unique_part = encoder.to_key(id)
from lib.monitor import Monitor
from nebu.models import Index, IndexPopulation
from lib.indextank.client import ApiClient
from django.utils import simplejson as json
from lib import flaptor_logging

batch_size = 1000
dataset_files_path = './'

logger = flaptor_logging.get_logger('Populator')


class IndexPopulator(Monitor):
    """Monitor that populates newly started indexes with their dataset."""

    def __init__(self):
        super(IndexPopulator, self).__init__()
        self.failure_threshold = 5
        self.fatal_failure_threshold = 20
        # Seconds between monitor passes (presumably — consumed by the
        # Monitor base class, which is outside this chunk).
        self.period = 30

    def iterable(self):
        # Every population task that has not yet finished.
        return IndexPopulation.objects.exclude(
                status=IndexPopulation.Statuses.finished)

    def monitor(self, population):
        # Talk to the index through the account's private API.
        client = ApiClient(population.index.account.get_private_apiurl())
        index = client.get_index(population.index.name)
        if index.has_started():
            if population.status == IndexPopulation.Statuses.created:
                # NOTE(review): chunk is truncated mid-expression here; the
                # rest of the log call and of monitor() is outside the
                # visible source.
                logger.info('Populating index ' + population.index.code +