import logging
from logging.config import dictConfig


def setup_loghandlers(level=None):
    # Set up logging for post_office if not already configured.
    # This variant attaches the handler to the root logger, so every
    # logger in the process inherits it.
    logger = logging.getLogger('post_office')
    if not logger.handlers:
        dictConfig({
            "version": 1,
            "disable_existing_loggers": False,

            "formatters": {
                "post_office": {
                    "format": "[%(levelname)s]%(asctime)s PID %(process)d: %(message)s",
                    "datefmt": "%d-%m-%Y %H:%M:%S",
                },
            },

            "handlers": {
                "post_office": {
                    "level": "DEBUG",
                    "class": "logging.StreamHandler",
                    "formatter": "post_office"
                },
            },

            "root": {
                "handlers": ["post_office"],
                "level": level or "DEBUG"
            }
        })
    return logger
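
A minimal usage sketch of this variant (the level string is an assumption; any standard level name works):

logger = setup_loghandlers('INFO')
logger.info('queueing mail')
# emits something like: [INFO]21-06-2013 10:15:00 PID 1234: queueing mail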
Example #2
import logging
from logging.config import dictConfig


def setup_loghandlers(level=None):
    # Set up logging for post_office if not already configured.
    # Unlike the previous variant, this one scopes the handler to the
    # 'post_office' logger rather than the root logger.
    logger = logging.getLogger('post_office')
    if not logger.handlers:
        dictConfig({
            "version": 1,
            "disable_existing_loggers": False,

            "formatters": {
                "post_office": {
                    "format": "[%(levelname)s]%(asctime)s PID %(process)d: %(message)s",
                    "datefmt": "%Y-%m-%d %H:%M:%S",
                },
            },

            "handlers": {
                "post_office": {
                    "level": "DEBUG",
                    "class": "logging.StreamHandler",
                    "formatter": "post_office"
                },
            },

            "loggers": {
                "post_office": {
                    "handlers": ["post_office"],
                    "level": level or "DEBUG"
                }
            }
        })
    return logger
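
The practical difference between the two variants can be checked directly; a sketch, assuming no other logging has been configured in the process:

setup_loghandlers('INFO')
assert logging.getLogger('post_office').handlers  # handler attached here
assert not logging.getLogger().handlers           # root logger left untouched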
Example #3
    @classmethod
    def get_logger(cls, name=None):
        """
        Sets up a default DEBUG console logger if no other logger
        is configured.

        :rtype: object
        """
        if name is not None:
            cls.logger_name = name

        if cls.logger is None:
            dictConfig({
                "version": 1,
                "disable_existing_loggers": False,
                "formatters": {
                    "verbose": {
                        "format": "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
                    },
                },
                "handlers": {
                    "console": {
                        "level": "DEBUG",
                        "class": "logging.StreamHandler",
                        "formatter": "verbose"
                    },
                },
                "loggers": {
                    cls.logger_name: {
                        "level": "DEBUG",
                        "handlers": ["console"],
                        "propagate": False
                    }
                }
            })
            cls.logger = logging.getLogger(cls.logger_name)

        return cls.logger
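
The classmethod above presumably lives on a class carrying `logger` and `logger_name` attributes; a hedged usage sketch with an assumed host class `Worker`:

log = Worker.get_logger('myapp.worker')  # Worker is an assumed host class
log.debug('worker starting')             # handled by the 'console' handler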
Example #5
    def __init__(self):
        # django.utils.log.dictConfig was a compatibility alias removed in
        # Django 1.9; on newer versions use logging.config.dictConfig.
        from django.utils.log import dictConfig
        dictConfig(LOGGING)
        self.logger = logging.getLogger(__name__)
Example #6
        'k': {
            'handlers': ['console'],
            'propagate': True,
            'level': settings.LOG_LEVEL,
        },
        'k.lib.email': {
            'handlers': ['console'],
            'propagate': True,
            'level': logging.DEBUG,
        },
        'django.request': {
            'handlers': ['console'],
            'propagate': True,
            'level': settings.LOG_LEVEL,
        },
        'raven': {
            'level': logging.ERROR,
            'handlers': ['console', 'mail_admins'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': logging.ERROR,
            'handlers': ['console', 'mail_admins'],
            'propagate': False,
        },
    },
}

dictConfig(config)
logging.captureWarnings(True)
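
The `logging.captureWarnings(True)` call at the end reroutes `warnings.warn` output through the logging system rather than stderr; a tiny standalone check:

import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logging.captureWarnings(True)
warnings.warn('deprecated call')  # arrives on the 'py.warnings' logger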
Example #7
import importlib
import logging
from logging.config import dictConfig

# Set up logging for RQWorker if not already configured.
logger = logging.getLogger('rq.worker')
if not logger.handlers:
    dictConfig({
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "rq_console": {
                "format": "%(asctime)s %(message)s",
                "datefmt": "%H:%M:%S",
            },
        },
        "handlers": {
            "rq_console": {
                "level": "DEBUG",
                #"class": "logging.StreamHandler",
                "class": "rq.utils.ColorizingStreamHandler",
                "formatter": "rq_console",
                "exclude": ["%(asctime)s"],
            },
        },
        "worker": {
            "handlers": ["rq_console"],
            "level": "DEBUG"
        }
    })


# Copied from rq.utils
def import_attribute(name):
    """Return an attribute (e.g. a class or function) from a dotted path."""
    module_name, attribute = name.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, attribute)
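
Usage is the usual dotted-path lookup, for example:

handler_cls = import_attribute('logging.StreamHandler')
assert handler_cls is logging.StreamHandler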
Example #8
'''
Created on Apr 22, 2011

@author: akai
'''
from ..models import Builder, Project, Task
from ..views import project
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test.testcases import TestCase
from django.utils import log
from utils import create_test_user
log.dictConfig(settings.LOGGING)
logger = log.getLogger('custom')


class TestProject(TestCase):
    def setUp(self):
        self.owner = create_test_user('*****@*****.**')
        self.builder1 = create_test_user('*****@*****.**')
        self.builder2 = create_test_user('*****@*****.**')
#    def test_view_project(self):
#        c = Client()
#        c.login(username='******', password='******')
#        project = Project.objects.create(name='test',
#                                          desc='test',
#                                          owner=self.user1)
#        project = Project.objects.get(name='test')
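
A live test in place of the commented-out one might look like the sketch below; the field names come from the commented code, the assertion is an assumption:

    def test_create_project(self):
        logger.info('creating test project')  # goes through settings.LOGGING
        project = Project.objects.create(name='test', desc='test',
                                         owner=self.owner)
        self.assertEqual(Project.objects.get(name='test').pk, project.pk)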
Example #9
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
    },
    'root': {},
}

for key, value in settings.LOGGING.items():
    cfg[key].update(value)

# Set the level and handlers for all loggers.
for logger in list(cfg['loggers'].values()) + [cfg['root']]:
    if 'handlers' not in logger:
        logger['handlers'] = ['syslog' if use_syslog else 'console']
    if 'level' not in logger:
        logger['level'] = settings.LOG_LEVEL
    if logger is not cfg['root'] and 'propagate' not in logger:
        logger['propagate'] = False

dictConfig(cfg)


def noop():
    # FIXME: This is here just to reduce pyflakes issues when this is
    # imported for the side-effects. It's gross stacked on gross.
    pass
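
A self-contained illustration of the merge-and-default pattern above, with stand-in values rather than the project's real settings:

cfg = {'loggers': {'myapp': {'level': 'DEBUG'}}, 'root': {}}
use_syslog, LOG_LEVEL = False, 'INFO'
for logger in list(cfg['loggers'].values()) + [cfg['root']]:
    if 'handlers' not in logger:
        logger['handlers'] = ['syslog' if use_syslog else 'console']
    if 'level' not in logger:
        logger['level'] = LOG_LEVEL
    if logger is not cfg['root'] and 'propagate' not in logger:
        logger['propagate'] = False
assert cfg['loggers']['myapp'] == {'level': 'DEBUG',
                                   'handlers': ['console'],
                                   'propagate': False}
assert cfg['root'] == {'handlers': ['console'], 'level': 'INFO'}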
Example #11
def get_logger():
    log.dictConfig(settings.LOGGING)
    return log.getLogger('custom')
#import astrometry.net.settings as settings
import settings
settings.LOGGING['loggers'][''] = {
    'handlers': ['console'],
    'level': 'INFO',
    'propagate': True,
}
from astrometry.net.models import *
from .log import *

import tempfile

from django.utils.log import dictConfig
from django.db.models import Count, Q
from django.db import DatabaseError

dictConfig(settings.LOGGING)


def is_tarball(fn):
    logmsg('is_tarball: %s' % fn)
    types = filetype_short(fn)
    logmsg('filetypes:', types)
    for t in types:
        if t.startswith('POSIX tar archive'):
            return True
    return False


def get_tarball_files(fn):
    # create temp dir to extract tarfile.
    tempdir = tempfile.mkdtemp()
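
The snippet cuts off here; a plausible continuation, purely as a sketch of how such a helper might finish (not the original code):

import os
import tarfile
import tempfile

def get_tarball_files_sketch(fn):
    """Extract a tarball into a temp dir and return the extracted paths."""
    tempdir = tempfile.mkdtemp()
    with tarfile.open(fn) as tar:
        names = tar.getnames()
        tar.extractall(path=tempdir)  # assumes trusted input
    return tempdir, [os.path.join(tempdir, n) for n in names]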
Example #13
    },
    'handlers': {
        'views': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': LOG_FILE,
            'formatter': 'verbose',
            'maxBytes': 1048576,   # rotate after 1 MiB
            'backupCount': 3,
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['views'],
            'level': 'ERROR',
            'propagate': True,
        },
        'iqfight.iqfight_app.views': {
            'handlers': ['views'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}
import traceback
from django.utils import log
try:
    log.dictConfig(LOGGING)
except Exception:
    print(traceback.format_exc())
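
Once this configuration is loaded, the named logger writes to the rotating file; a short usage sketch:

import logging

views_log = logging.getLogger('iqfight.iqfight_app.views')
views_log.debug('rendered question page')  # lands in LOG_FILE, rotating at 1 MiB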
Example #14
    def tearDown(self):
        super(LoggingTestCaseMixIn, self).tearDown()
        log.dictConfig(settings.LOGGING)
        logging.getLogger("ngeo_browse_server").removeHandler(self.log_handler)
Example #15
    def setUp(self):
        super(LoggingTestCaseMixIn, self).setUp()
        self.log_handler = TestLogHandler()
        log.dictConfig(self.logging_config)
        logging.getLogger("ngeo_browse_server").addHandler(self.log_handler)
Example #17
@author: akai
'''
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from django.utils import log
from django.views.decorators.csrf import csrf_protect
from django.views.generic.list_detail import object_detail
from ideab.ideabuilder.models import Project, Builder, Application

log.dictConfig(settings.LOGGING)
logger = log.getLogger('custom')


class UserForm(forms.Form):
    email = forms.EmailField(max_length=20)
    password = forms.CharField(max_length=20,
                               widget=forms.PasswordInput(render_value=False))

    def clean_email(self):
        try:
            Builder.objects.get(username=self.cleaned_data['email'])
        except Builder.DoesNotExist:
            return self.cleaned_data['email']
        raise forms.ValidationError("Email already in use")
Example #18
class Command(BaseCommand):
    args = '<subcommand> <arg> ...'
    help = 'This command migrates file Replicas for MyTardis Experiments, ' \
        'Datasets and Datafiles from one location to another.  The ' \
        'individual Replicas are migrated atomically, but there are no ' \
        'guarantees of atomicity at the Dataset or Experiment level.  The ' \
        'following subcommands are supported:\n' \
        '    migrate [<target> <id> ...] : migrates <target> replicas \n' \
        '    mirror [<target> <id> ...]  : copies <target> replicas without\n' \
        '                                  deleting the originals\n' \
        '    ensure <N>                  : migrates replicas to ensure N \n' \
        '                                  bytes of free space\n' \
        '    reclaim <N>                 : migrate replicas to reclaim N \n' \
        '                                  bytes of space\n' \
        '    score                       : score and list all datafiles\n' \
        '    destinations                : lists the recognized destinations\n' \
        'where <target> is "datafile", "dataset" or "experiment", and the ' \
        '<id>s are the mytardis numeric ids for the respective objects\n'

    option_list = BaseCommand.option_list + (
        make_option('-a', '--all',
                    action='store_true',
                    dest='all',
                    help='Process all datafiles'),
        make_option('-s', '--source',
                    action='store',
                    dest='source',
                    help='The source for the transfer. ' \
                        'The default source is "local"'),
        make_option('-d', '--dest',
                    action='store',
                    dest='dest',
                    help='The destination for the transfer. ' \
                        'The default destination is %s' % \
                        settings.DEFAULT_MIGRATION_DESTINATION),
        make_option('-n', '--dryRun',
                    action='store_true',
                    dest='dryRun',
                    default=False,
                    help='Dry-run mode just lists the replicas that' \
                        ' would be migrated / restored'),
        make_option('--noRemove',
                    action='store_true',
                    dest='noRemove',
                    default=False,
                    help='No-remove mode migrates without removing' \
                        ' the actual file corresponding to the' \
                        ' source replica')
        )

    conf = dictConfig(LOGGING)

    def handle(self, *args, **options):
        self.verbosity = int(options.get('verbosity', 1))
        self.noRemove = options.get('noRemove', False)
        self.dryRun = options.get('dryRun', False)
        self.dest = self._get_destination(
            options.get('dest', None), settings.DEFAULT_MIGRATION_DESTINATION)
        self.source = self._get_destination(options.get('source', None),
                                            'local')

        all = options.get('all', False)
        if len(args) == 0:
            raise CommandError("Expected a subcommand")
        subcommand = args[0]
        if subcommand == 'destinations':
            self._list_destinations()
            return
        elif subcommand == 'score':
            if self.source:
                self._score_all_datafiles()
            return
        args = args[1:]
        if not self.source or not self.dest:
            return
        if self.verbosity > 2:
            self.stderr.write('Source %s destination %s\n' % \
                                  (self.source.name, self.dest.name))
        sourceAlive = self._ping(self.source, 'Source')
        destAlive = self._ping(self.dest, 'Destination')
        if not self.dryRun and (not sourceAlive or not destAlive):
            return
        self.transfer_count = 0
        self.error_count = 0
        if subcommand == 'reclaim':
            if not self.source.name == 'local':
                raise CommandError("Can only 'reclaim' for source 'local'")
            self._reclaim(args)
        elif subcommand == 'ensure':
            if not self.source.name == 'local':
                raise CommandError("Can only 'ensure' for source 'local'")
            self._ensure(args)
        elif subcommand == 'migrate' or subcommand == 'mirror':
            if all:
                if len(args) != 0:
                    raise CommandError("No target/ids allowed with --all")
            else:
                if len(args) == 0:
                    raise CommandError("Expected a %s target" % subcommand)
                target = args[0]
                args = args[1:]
            if all:
                self._all_datafiles(subcommand)
            elif target == 'datafile' or target == 'datafiles':
                self._datafiles(args, subcommand)
            elif target == 'dataset' or target == 'datasets':
                self._datasets(args, subcommand)
            elif target == 'experiment' or target == 'experiments':
                self._experiments(args, subcommand)
            else:
                raise CommandError("Unknown target: %s" % target)
        else:
            raise CommandError("Unrecognized subcommand: %s" % subcommand)
        self._stats()

    def _stats(self):
        if not self.dryRun and self.verbosity > 0:
            self.stdout.write("Transferred %s datafiles with %s errors\n" %
                              (self.transfer_count, self.error_count))

    def _all_datafiles(self, subcommand):
        # To make things faster, filter out any datafiles that don't have
        # a replica for the 'source' location.
        for row in Replica.objects.filter(location__id=self.source.id). \
                values('datafile__id').distinct().all():
            self._process_datafile(row['datafile__id'], subcommand)

    def _datafiles(self, args, subcommand):
        ids = []
        for id in args:
            try:
                Dataset_File.objects.get(id=id)
                ids.append(id)
            except Dataset_File.DoesNotExist:
                self.stderr.write('Datafile %s does not exist\n' % id)
        self._process_selected_datafiles(args, ids, subcommand, explicit=True)

    def _datasets(self, args, subcommand):
        ids = []
        for id in args:
            ids.extend(self._ids_for_dataset(id))
        self._process_selected_datafiles(args, ids, subcommand)

    def _experiments(self, args, subcommand):
        ids = []
        for id in args:
            ids.extend(self._ids_for_experiment(id))
        self._process_selected_datafiles(args, ids, subcommand)

    def _process_selected_datafiles(self,
                                    args,
                                    ids,
                                    subcommand,
                                    explicit=False):
        if len(args) == 0:
            raise CommandError("Expected one or more ids")
        elif len(ids) == 0:
            raise CommandError("No Datafiles selected")

        for id in ids:
            self._process_datafile(id, subcommand, explicit=explicit)

    def _process_datafile(self, id, subcommand, explicit=False):
        if self.dryRun:
            self.stdout.write( \
                'Would have %s datafile %s\n' % \
                    (self._verb(subcommand).lower(), id))
            return
        try:
            replica = Replica.objects.get(datafile__id=id,
                                          location__id=self.source.id)
            if subcommand == 'migrate':
                ok = migrate_replica(replica,
                                     self.dest,
                                     noRemove=self.noRemove)
            elif subcommand == 'mirror':
                ok = migrate_replica(replica, self.dest, mirror=True)
            if self.verbosity > 1:
                if ok:
                    self.stdout.write('%s datafile %s\n' % \
                                          (self._verb(subcommand), id))
                elif self.verbosity > 2:
                    self.stdout.write('Did not %s datafile %s\n' % \
                                          (subcommand, id))
            if ok:
                self.transfer_count += 1
        except Replica.DoesNotExist:
            if explicit and self.verbosity > 2:
                self.stderr.write('No replica of %s exists at %s\n' % \
                                      (id, self.source.name))
        except MigrationError as e:
            self.stderr.write(
                '%s failed for datafile %s : %s\n' % \
                    (self._noun(subcommand), id, e.args[0]))
            self.error_count += 1

    def _ping(self, location, label):
        if not location.provider.alive():
            self.stderr.write(
                '%s location %s is not responding: giving up\n' % \
                    (label, location.name))
            return False
        else:
            return True

    def _verb(self, subcommand):
        if (subcommand == 'migrate'):
            return 'Migrated'
        elif (subcommand == 'restore'):
            return 'Restored'
        elif (subcommand == 'mirror'):
            return 'Mirrored'

    def _noun(self, subcommand):
        if (subcommand == 'migrate'):
            return 'Migration'
        elif (subcommand == 'restore'):
            return 'Restoration'
        elif (subcommand == 'mirror'):
            return 'Mirroring'

    def _ids_for_dataset(self, id):
        try:
            dataset = Dataset.objects.get(id=id)
            return Dataset_File.objects.filter(dataset=id).\
                values_list('id', flat=True)
        except Dataset.DoesNotExist:
            self.stderr.write('Dataset %s does not exist\n' % id)
            return []

    def _ids_for_experiment(self, id):
        try:
            experiment = Experiment.objects.get(id=id)
            return Dataset_File.objects.\
                filter(dataset__experiments__id=id).\
                values_list('id', flat=True)
        except Experiment.DoesNotExist:
            self.stderr.write('Experiment %s does not exist\n' % id)
            return []

    def _get_destination(self, destName, default):
        if not destName:
            if not default:
                raise CommandError("No default destination configured")
            else:
                destName = default
        try:
            dest = Location.get_location(destName)
            if not dest:
                raise CommandError("Destination %s not known" % destName)
            return dest
        except MigrationError as e:
            raise CommandError("Migration error: %s" % e.args[0])

    def _list_destinations(self):
        for loc in Location.objects.all():
            self.stdout.write('{0:<16} : {1:<8} : {2:<8} : {3:}\n'.format(
                loc.name, loc.type, loc.transfer_provider, loc.url))

    def _score_all_datafiles(self):
        scores = self._do_score_all()
        total = 0
        for entry in scores:
            datafile, replica, score = entry
            try:
                total += int(datafile.size)
            except (TypeError, ValueError):
                pass
            self.stdout.write("datafile %s / %s, size = %s, " \
                              "score = %s, total_size = %d\n" % \
                                  (replica.url, datafile.id,
                                   datafile.size, score, total))

    def _reclaim(self, args):
        required_space = self._parse_amount(args)
        self._do_reclaim(required_space)

    def _ensure(self, args):
        required_space = self._parse_amount(args)
        free_space = get_free_space(settings.FILE_STORE_PATH)
        if free_space < required_space:
            self._do_reclaim(required_space - free_space)

    def _parse_amount(self, args):
        if len(args) < 1:
            raise CommandError("missing <amount> argument")
        elif len(args) > 1:
            raise CommandError("multiple <amount> arguments")
        pat = re.compile(r"^(\d+(?:\.\d+)?)([kmgtKMGT]?)$")
        res = pat.match(args[0])
        if res:
            amount = float(res.group(1))
            scale = res.group(2).lower()
            factor = {
                '': 1,
                'k': 1024,
                'm': 1048576,
                'g': 1073741824,
                't': 1099511627776
            }.get(scale)
            amount = amount * factor
            return int(amount)
        else:
            raise CommandError("<amount> argument (%s) must be a non-negative" \
                               " number followed  by an optional scale" \
                               " factor (K, M, G or T)" % args[0])

    def _do_reclaim(self, required):
        scores = self._do_score_all()
        total = 0
        if self.verbosity > 1:
            self.stdout.write("Attempting to reclaim %s bytes\n" % required)
        for entry in scores:
            if total >= required:
                break
            datafile, replica, _ = entry
            if self.verbosity > 1:
                if self.dryRun:
                    self.stdout.write("Would have migrated %s / %s " \
                                          "saving %s bytes\n" % \
                                          (replica.url, datafile.id,
                                           datafile.size))
                else:
                    self.stdout.write("Migrating %s / %s saving %s bytes\n" % \
                                          (replica.url, datafile.id,
                                           datafile.size))
            if self.dryRun:
                total += int(datafile.size)
            else:
                try:
                    if migrate_replica(replica, self.dest):
                        total += int(datafile.size)
                        self.transfer_count += 1
                except MigrationError as e:
                    self.stderr.write(
                        'Migration failed for datafile %s : %s\n' % \
                            (datafile.id, e.args[0]))
                    self.error_count += 1
        if self.dryRun:
            self.stdout.write("Would have reclaimed %d bytes\n" % total)
        else:
            self.stdout.write("Reclaimed %d bytes\n" % total)

    def _do_score_all(self):
        scorer = MigrationScorer(self.source.id,
                                 settings.MIGRATION_SCORING_PARAMS)
        return scorer.score_all_datafiles()
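
The command can be driven from code with Django's `call_command`; the command name 'migratefiles' is an assumption about where this file is installed under management/commands/:

from django.core.management import call_command

# Dry-run a migration of one dataset's replicas (ids are placeholders).
call_command('migratefiles', 'migrate', 'dataset', '42',
             dest='archive', dryRun=True)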
Example #19
        'raven': {
            'level': logging.ERROR,
            'handlers': ['syslog', 'mail_admins'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': logging.ERROR,
            'handlers': ['syslog', 'mail_admins'],
            'propagate': False,
        },
    },
}

if settings.DEBUG:
    config['formatters']['default']['datefmt'] = '%H:%M:%S'
    config['loggers']['k']['handlers'] = ['console']
    config['loggers']['django.request']['handlers'] = ['console']
    config['root']['handlers'] = ['console']
else:
    from celery import current_app
    from celery.utils.log import LoggingProxy

    task_log = logging.getLogger('k.celery')
    task_proxy = LoggingProxy(task_log)
    current_app.conf.update(
        CELERYD_LOG_FILE=task_proxy,
        CELERYD_LOG_COLOR=False
    )

dictConfig(config)
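
`LoggingProxy` wraps a logger in a file-like object, which is why it can stand in for `CELERYD_LOG_FILE`; a minimal illustration:

import logging
from celery.utils.log import LoggingProxy

task_log = logging.getLogger('k.celery')
proxy = LoggingProxy(task_log)
proxy.write('task output\n')  # emitted as a log record on k.celery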