Example #1
    def handle(self, *args, **options):
        super(Command, self).handle(*args, **options)

        # Django queries django_content_types inside a lingering transaction;
        # because of that, psql can't drop django_content_types and just hangs
        close_connection()

        # Ensure backup dir present
        if not os.path.exists(BACKUP_DIR):
            raise CommandError(
                "Backup dir '{0}' does not exist!".format(BACKUP_DIR))

        backup_name = options['backup_name']
        drop_tables = options['drop_tables']
        show_output = options['show_output']

        # Loop through databases
        for db_name, db_config in settings.DATABASES.items():
            with section("Restoring '{0}'...".format(db_name)):
                # Get backup config for this engine type
                engine = db_config['ENGINE']
                backup_config = BACKUP_CONFIG.get(engine)
                if not backup_config:
                    raise SectionWarning(
                        "Restore for '{0}' engine not implemented".format(
                            engine))

                # Get backup file name
                backup_extension = backup_config['backup_extension']
                if backup_name:
                    backup_file = '{dir}/{db_name}-{backup_name}.{ext}.gz'.format(
                        dir=BACKUP_DIR,
                        db_name=db_name,
                        backup_name=backup_name,
                        ext=backup_extension,
                    )
                else:
                    try:
                        backup_file = get_latest_timestamped_file(
                            backup_extension)
                    except RestoreError as e:
                        raise SectionError(e)

                # Find restore command and get kwargs
                restore_func = backup_config['restore_func']
                restore_kwargs = {
                    'backup_file': backup_file,
                    'db_config': db_config,
                    'drop_tables': drop_tables,
                    'show_output': show_output,
                }

                # Run restore command
                try:
                    restore_func(**restore_kwargs)
                    logger.info(
                        "Restored '{db_name}' from '{backup_file}'".format(
                            db_name=db_name, backup_file=backup_file))
                except (RestoreError, CalledProcessError) as e:
                    raise SectionError(e)
Example #2
    def run(self):
        count = 1
        while True:
            with open("/tmp/queue-daemon.txt", "w") as f:
                print >> f, str(count)
                count += 1

                try:
                    process = subprocess.Popen("qstat -x",
                                               shell=True,
                                               stdout=subprocess.PIPE)
                    out, err = process.communicate()

                    data = objectify.fromstring(out)

                    jms = JMS()
                    for job in data.Job:
                        print >> f, job.Job_Id
                        try:
                            jms.AddUpdateClusterJob(job)
                        except Exception, err:
                            print >> f, ''.join(
                                traceback.format_exception(*sys.exc_info()))

                    # Reset database connection to avoid "MySQL has gone away" error after daemon
                    # has been running for a long time
                    db.close_connection()

                except Exception, err:
                    print >> f, "Error: " + str(err)
Example #3
def worker(bits):
    # We need to reset the connections, otherwise the different processes
    # will try to share the connection, which causes things to blow up.
    from django import db
    from django.db import connections

    for alias, info in connections.databases.items():
        # We need to also tread lightly with SQLite, because blindly wiping
        # out connections (via ``... = {}``) destroys in-memory DBs.
        if 'sqlite3' not in info['ENGINE']:
            try:
                db.close_connection()
                if isinstance(connections._connections, dict):
                    del connections._connections[alias]
                else:
                    delattr(connections._connections, alias)
            except KeyError:
                pass

    if bits[0] == 'do_update':
        func, model, start, end, total, using, start_date, end_date, verbosity = bits
    elif bits[0] == 'do_remove':
        func, model, pks_seen, start, upper_bound, using, verbosity = bits
    else:
        return

    unified_index = haystack_connections[using].get_unified_index()
    index = unified_index.get_index(model)
    backend = haystack_connections[using].get_backend()

    if func == 'do_update':
        qs = index.build_queryset(start_date=start_date, end_date=end_date)
        do_update(backend, index, qs, start, end, total, verbosity=verbosity)
    elif func == 'do_remove':
        do_remove(backend, index, model, pks_seen, start, upper_bound, verbosity=verbosity)
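For context, a worker like this is typically handed off to child processes; a minimal dispatch sketch assuming a multiprocessing.Pool (the pool size, the MyModel class and all argument values are placeholders, not from the source):

    from multiprocessing import Pool

    # One 'do_update' bits tuple per batch:
    # (func, model, start, end, total, using, start_date, end_date, verbosity)
    jobs = [('do_update', MyModel, 0, 1000, 1000, 'default', None, None, 1)]

    pool = Pool(processes=4)
    pool.map(worker, jobs)
    pool.close()
    pool.join()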
Example #4
def sobek(scenario_id, tmp_dir):
    success = common_generation(scenario_id,
                                [SOBEK_PROGRAM_ID, IMPORT_PROGRAM_ID], tmp_dir)
    logger.debug("Finish task.")
    logger.debug("close db connection to avoid an idle process.")
    db.close_connection()
    return success
Example #5
    def handle(self, *args, **options):
        log.notice('Start daemon')

        for sig in (signal.SIGABRT, signal.SIGILL, signal.SIGINT, signal.SIGSEGV, signal.SIGTERM):
            signal.signal(sig, self.terminate)

        self.check_binaries()
        self.workers = list()

        try:
            while True:
                sleep(10)
                close_connection()

                self.workers = [(w, p) for w, p in self.workers if w.is_alive()]

                # If pool is already full, do not try to spawn more.
                if len(self.workers) >= PARSING_WORKERS:
                    log.debug('Pool is full with {} workers'.format(len(self.workers)))
                    continue

                # Get all pending docs
                pendings = list(Task.objects.filter(state='queued').order_by('id'))

                # Spawn worker for every pending doc (but limit pool size)
                while len(self.workers) < PARSING_WORKERS and len(pendings) > 0:
                    log.debug('Spawning worker')
                    pending = pendings.pop(0)
                    process = Process(target=self.process_file, args=(pending.id,))
                    process.start()
                    self.workers.append((process, pending))

        except KeyboardInterrupt:
            self.terminate(None, None)
Example #6
def run_notifier(slave):
    delay = BackoffDelay(1, 60)
    while True:
        try:
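            # Touch a cursor first so Django opens the underlying psycopg2
            # connection (connection.connection below must not be None).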
            cursor = connection.cursor()
            con = connection.connection
            con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
            con.commit()
            cursor = con.cursor()
            cursor.execute('LISTEN satori;')
            cursor.execute('DELETE FROM core_notification;')

            while True:
                if select.select([con], [], [], 5) == ([], [], []):
                    slave.keep_alive()
                else:
                    con.poll()
                    if con.notifies:
                        while con.notifies:
                            con.notifies.pop()
                        handle_notifications(cursor, slave)
        except GeneratorExit:
            return
        except SystemExit:
            break
        except:
            logging.exception('DBEV notifier error')
            db.close_connection()
            delay()
    slave.disconnect()
Example #7
def process_page(user, resp, week=False):
    '''user is a UserProfile object'''
    key = 'toptracks' if week else 'tracks'
    tracks = resp[key].get('track')
    
    if tracks is None:
        print 'No tracks for this user!'
        return False

    if isinstance(tracks, list):
        for track in tracks:
            make_track(user, track, week=week)
    elif isinstance(tracks, dict):
        make_track(user, tracks, week=week)
    else:
        raise Exception('EXCEPTION WTF!? %s' % tracks)
    if not week:
        pagecomplete = int(resp['tracks']['@attr']['page'])

        if not user.track_pages_loaded:
            user.track_pages_loaded = '0'*int(resp['tracks']['@attr']['totalPages'])
    
        else:
            #TODO: replace this bit with transactions
            reset_queries()
            close_connection()
    
        user.track_pages_loaded = (user.track_pages_loaded[:pagecomplete - 1]
                                   + '1' + user.track_pages_loaded[pagecomplete:])
        user.save()
    else:
        pagecomplete = int(resp['toptracks']['@attr']['page'])
        if pagecomplete == int(resp['toptracks']['@attr']['totalPages']):
            user.updating_track_week = False
            user.save()
    return True
Example #8
 def _dbThread(self, queue):
     while 1:
         op = queue.get()
         reset_queries()
         
         
         if op is None:
             close_connection()
             queue.task_done()
             return
         
         func, args, kwargs, d, finished = op
             
         start = datetime.datetime.now()
         try:
             result = d.callback, func(*args, **kwargs)
         except:
             _rollback_on_exception()
             result = d.errback, Failure()
         delta = datetime.datetime.now() - start
         queries = ''
          if delta.total_seconds() > 0.5:
              q = []
              for conn in connections.all():
                  q.extend(conn.queries)
              queries = ': QUERIES: %r' % (q,)
              log.msg('Query took too long %s on thread %s queue %s: func =\n %r queries =\n %s' % (
                  delta, threading.currentThread().getName(), queue.qsize(),
                  repr.repr((func.__module__, func.func_name, args, kwargs)),
                  queries[:1024]))
         finished(*result)
         queue.task_done()
Example #9
 def _get_instances(cluster):
     try:
         instances.extend(cluster.get_user_instances(request.user))
     except (GanetiApiError, Exception):
         bad_clusters.append(cluster)
     finally:
         close_connection()
Example #10
 def _update_ping(self):
     try:
         Judge.objects.filter(name=self.name).update(ping=self.latency, load=self.load)
     except Exception as e:
         # What can I do? I don't want to tie this to MySQL.
         if e.__class__.__name__ == 'OperationalError' and e.__module__ == '_mysql_exceptions' and e.args[0] == 2006:
             db.close_connection()
Example #11
    def wait(self):
        log.info("Waiting for messages..")
        timeout = DISPATCHER_RECONNECT_TIMEOUT
        while True:
            try:
                # Close the Django DB connection before processing
                # every incoming message. This plays nicely with
                # DB connection pooling, if enabled and allows
                # the dispatcher to recover from broken connections
                # gracefully.
                close_connection()
                msg = self.client.basic_wait(timeout=timeout)
                if not msg:
                    log.warning("Idle connection for %d seconds. Will connect"
                                " to a different host. Verify that"
                                " snf-ganeti-eventd is running!!", timeout)
                    self.client.reconnect(timeout=1)
            except select.error as e:
                if e[0] != errno.EINTR:
                    log.exception("Caught unexpected exception: %s", e)
                else:
                    break
            except (SystemExit, KeyboardInterrupt):
                break
            except Exception as e:
                log.exception("Caught unexpected exception: %s", e)

        log.info("Clean up AMQP connection before exit")
        self.client.basic_cancel(timeout=1)
        self.client.close(timeout=1)
Example #12
def process_file_safe(docid, upfile, convert=True):
    close_connection()
    doc = Document.objects.get(pk=docid)
    logger.info('Starting processing of doc %d (from %s) : %s' % 
                (docid, doc.owner.username, doc.name))
    filename = UPLOAD_DIR + '/' + str(docid) + '.pdf'

    # save the original document
    fd = open(filename, 'wb')
    fd.write(upfile.read())
    fd.close()

    # save the page count
    fd = open(filename, 'rb')
    pdf = PdfFileReader(fd)
    doc.set_npages(pdf.numPages)
    transaction.commit()

    # activate the search system
    system("pdftotext " + filename)
    words = open(UPLOAD_DIR + '/' + str(docid) + '.txt', 'r') 
    doc.set_wsize(parse_words(doc, words.read()))
    words.close()
    transaction.commit()

    # iterate page by page, convert each page to png + get its size
    num = 1
    for page in pdf.pages:
        process_page(doc, page, num, convert)
        num += 1

    fd.close()
    logger.info('End of processing of doc %d' % docid)
Example #13
    def handle(self, *args, **options):
        """
        Iterates over all the CRON_CLASSES (or if passed in as a commandline argument)
        and runs them.
        """
        if not options['silent']:
            self.stdout.write("Running Crons\n")
            self.stdout.write("{0}\n".format("=" * 40))

        cron_classes = options['cron_classes']
        if cron_classes:
            cron_class_names = cron_classes
        else:
            cron_class_names = getattr(settings, 'CRON_CLASSES', [])

        try:
            crons_to_run = [get_class(x) for x in cron_class_names]
        except ImportError:
            error = traceback.format_exc()
            self.stdout.write('ERROR: Make sure these are valid cron class names: %s\n\n%s' % (cron_class_names, error))
            return

        for cron_class in crons_to_run:
            run_cron_with_cache_check(
                cron_class,
                force=options['force'],
                silent=options['silent'],
                dry_run=options['dry_run'],
                stdout=self.stdout
            )

        clear_old_log_entries()
        close_connection()
Example #14
    def __init__(self, identity, meta):
        self.identity = identity
        self.meta = json.loads(meta)
        self.src_share = self.meta['share']
        self.dest_pool = self.meta['pool']
        self.incremental = self.meta['incremental']
        self.snap_name = self.meta['snap']
        self.sender_id = self.meta['uuid']
        self.sname = ('%s_%s' % (self.sender_id, self.src_share))
        self.snap_dir = ('%s%s/.snapshots/%s' %
                         (settings.MNT_PT, self.dest_pool, self.sname))

        self.ppid = os.getpid()
        self.kb_received = 0
        self.rid = None
        self.rtid = None
        self.num_retain_snaps = 5
        self.ctx = zmq.Context()
        self.rp = None
        self.raw = None
        self.ack = False
        self.total_bytes_received = 0
        # close all db connections prior to fork.
        for alias, info in db.connections.databases.items():
            db.close_connection()
        super(Receiver, self).__init__()
Example #15
 def callback(result):
     attempt = 0
     while attempt < 2:
         try:
             (verified, exist) = result
             if verified:
                 self.send_verified_notification(
                     exist, conn, exchange,
                     routing_keys=routing_keys)
             break
         except exceptions.ObjectDoesNotExist:
              if attempt < 1:
                  logger.warn("ObjectDoesNotExist in callback, "
                              "attempting to reconnect and try "
                              "again.")
                 close_connection()
                 reset_queries()
             else:
                 logger.error("ObjectDoesNotExist in callback "
                           "again, giving up.")
         except Exception, e:
             msg = "ERROR in Callback %s: %s" % (exchange_name,
                                                 e)
             logger.exception(msg)
             break
         attempt += 1
Example #16
 def callback(result):
     attempt = 0
     retry_limit = self.config.get_exponential_limit()
     while attempt < retry_limit:
         self.stats['timestamp'] = self._utcnow()
         try:
             (verified, exist) = result
             if verified:
                 self.send_verified_notification(
                     exist, conn, exchange,
                     routing_keys=routing_keys)
             break
         except exceptions.ObjectDoesNotExist:
              if attempt < retry_limit - 1:
                  logger.warn("ObjectDoesNotExist in callback, "
                              "attempting to reconnect and try "
                              "again.")
                  close_connection()
                  reset_queries()
              else:
                  logger.error("ObjectDoesNotExist in callback "
                               "again, giving up.")
                  # Avoiding unnecessary sleep()
                  break
          except librabbitmq.ConnectionError as e:
              logger.error("ConnectionError found while trying to connect "
                           "to RabbitMQ. Attempting the {}th time."
                           .format(attempt))
         except Exception, e:
             msg = "ERROR in Callback %s: %s" % (exchange_name,
                                                 e)
             logger.exception(msg)
             break
         attempt += 1
         # Exponentially timed backoff
         time.sleep((2 ** attempt) / 1000.0 + (random.randint(0, 1000) / 1000.0))
Example #17
def create_event(row_data, datemode):
    title, start_date, end_date, slug, topics, importance = row_data
    # we could bulk create the events for efficiency later if necessary

    start_date = datetime(*xldate_as_tuple(start_date, datemode))
    end_date = datetime(*xldate_as_tuple(end_date, datemode))
    ev, new = Event.objects.get_or_create(title=title.strip(),
                                          start_date=start_date,
                                          end_date=end_date,
                                          slug=slug.strip(),
                                          importance=importance)

    if not new:
        ev.start_date = start_date
        ev.end_date = end_date
        ev.title = title.strip()
        ev.importance = importance

    topics = topics.split(',')
    for topic in topics:
        topic = topic.strip()
        t, created = Topic.objects.get_or_create(name=topic)
        t.save()
        ev.topics.add(t)

    ev.save()

    db.close_connection()
Example #19
 def _get_instances(cluster):
     try:
         instances.extend(cluster.get_instances())
     except (GanetiApiError, Exception):
         pass
     finally:
         close_connection()
Example #20
 def start_connection(self, host_widget):
     db.close_connection()
     if not os.path.isdir(self.setup.log_path):
         logger.error(u"Logs Path not found")
         raise Exception(u"Logs Path not found")
     self.validate_user(host_widget.host_connection.host)
     session = self.create_session(host_widget)
     size = self.gui.loop.screen.get_cols_rows()
     self.gui.pause_screen()
     try:
         session.connect(size)
     except socket.timeout as e:
         logger.error(u"Connection Failed <{0}> [{1}]".format(e.message, host_widget.host_connection.host.name))
         raise Exception(u"Connection Failed <{0}>".format(e.message))
     except socket.error as e:
         logger.error(u"Connection Failed <{0}> [{1}]".format(e, host_widget.host_connection.host.name))
         raise Exception(u"Connection Failed <{0}>".format(e))
     except Exception as e:
         logger.error(u"Connection Failed <{0}> [{1}]".format(e, host_widget.host_connection.host.name))
         raise Exception(u"Connection Failed <{0}>".format(e))
     self.iohanlder.set_log_filename(session.session_log.log_file)
     self.iohanlder.capture()
     try:
         session.start_session()
     except paramiko.AuthenticationException as e:
         msg = u"Authentication Failed <{0}>".format(e)
         logger.error(msg)
         raise Exception(msg)
     except Exception as e:
         msg = u"ERROR <{0}>".format(e)
         #logger.error(msg)
         raise Exception(msg)
     finally:
         self.iohanlder.restore()
Example #21
 def _get_instances(cluster):
     try:
         instances.extend(cluster.get_user_instances(request.user))
     except (GanetiApiError, Exception):
         exclude_pks.append(cluster.pk)
     finally:
         close_connection()
Example #23
    def handle(self, *args, **options):
        if options["debug"]:
            import logging
            l = logging.getLogger("django.db.backends")
            l.setLevel(logging.DEBUG)
            l.addHandler(logging.StreamHandler())
        self.verbose = options["verbose"]

        Amavis().load()

        max_messages_age = int(parameters.get_admin("MAX_MESSAGES_AGE",
                                                    app="amavis"))

        flags = ['D']
        if parameters.get_admin("RELEASED_MSGS_CLEANUP",
                                app="amavis") == "yes":
            flags += ['R']

        self.__vprint("Deleting marked messages...")
        ids = Msgrcpt.objects.filter(rs__in=flags).values("mail_id").distinct()
        for msg in Msgs.objects.filter(mail_id__in=ids):
            if not msg.msgrcpt_set.exclude(rs__in=flags).count():
                msg.delete()

        self.__vprint("Deleting messages older than %d days..." % max_messages_age)
        limit = int(time.time()) - (max_messages_age * 24 * 3600)
        Msgs.objects.filter(time_num__lt=limit).delete()

        self.__vprint("Deleting unreferenced e-mail addresses...")
        for maddr in Maddr.objects.all():
            if not maddr.msgs_set.count() and not maddr.msgrcpt_set.count():
                maddr.delete()
        db.close_connection()

        self.__vprint("Done.")
Example #24
def handle_tx_update(event_name, data):
    """Post a transaction status update."""
    txupdate.send(handle_tx_update, event_name=event_name, data=data)

    # Make Django close its own database connection,
    # so that each new event doesn't leave a connection open
    db.close_connection()
Example #25
 def _get_jobs(cluster):
     try:
         jobs.extend(cluster.get_job_list())
     except (GanetiApiError, Exception):
         bad_clusters.append(cluster)
     finally:
         close_connection()
Example #26
    def notify_admins_pending_requests(self):
        self.sender = parameters.get_admin("NOTIFICATIONS_SENDER",
                                           app="amavis")
        self.baseurl = self.options["baseurl"].strip("/")
        self.listingurl = self.baseurl \
            + reverse("modoboa.extensions.amavis.views._listing") \
            + "?viewrequests=1"

        for da in User.objects.filter(groups__name="DomainAdmins"):
            if not da.mailbox_set.count():
                continue
            rcpt = da.mailbox_set.all()[0].full_address
            reqs = get_wrapper().get_domains_pending_requests(
                Domain.objects.get_for_admin(da))
            if reqs.count():
                self.send_pr_notification(rcpt, reqs)

        reqs = Msgrcpt.objects.filter(rs='p')
        if not reqs.count():
            if self.options["verbose"]:
                print "No release request currently pending"
            return
        for su in User.objects.filter(is_superuser=True):
            if not su.mailbox_set.count():
                continue
            rcpt = su.mailbox_set.all()[0].full_address
            self.send_pr_notification(rcpt, reqs)
        db.close_connection()
Example #27
 def session_ended_handler(self, session):
     logger.debug("Stop session handler")
     db.close_connection()
     session.session_log.logout_date = datetime.now(pytz.timezone(self.setup.timezone))
     session.session_log.save_duration()
     session.session_log.save()
     self.remove_from_redis()
Example #28
    def wait(self):
        log.info("Waiting for messages..")
        timeout = 600
        while True:
            try:
                # Close the Django DB connection before processing
                # every incoming message. This plays nicely with
                # DB connection pooling, if enabled and allows
                # the dispatcher to recover from broken connections
                # gracefully.
                close_connection()
                msg = self.client.basic_wait(timeout=timeout)
                if not msg:
                    log.warning(
                        "Idle connection for %d seconds. Will connect"
                        " to a different host. Verify that"
                        " snf-ganeti-eventd is running!!", timeout)
                    self.client.reconnect()
            except SystemExit:
                break
            except Exception as e:
                log.exception("Caught unexpected exception: %s", e)

        self.client.basic_cancel()
        self.client.close()
Example #29
 def terminate(self, signal_code, frame):
     if signal_code is None:
         signal_code = 'KeyboardInterrupt'
     log.notice('Caught signal #{}, exiting.'.format(signal_code))
     close_connection()
     log.info('Shutdown.')
     exit(0)
Example #30
 def wrapper(*args, **kwargs):
     # This ensures the task gets a fresh db connection
     db.close_connection()
     result = func(*args, **kwargs)
     # This ensures no open connections remain after the task finishes executing
     db.close_connection()
     return result
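The wrapper above reads like the body of a decorator; a minimal self-contained sketch of that pattern (the name fresh_db_connection is hypothetical, not from the source):

    import functools
    from django import db

    def fresh_db_connection(func):
        # Hypothetical decorator built from the wrapper above.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # This ensures the task gets a fresh db connection
            db.close_connection()
            result = func(*args, **kwargs)
            # This ensures no open connections remain after the task finishes
            db.close_connection()
            return result
        return wrapper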
Example #31
 def _get_instance_details(instance):
     try:
         instancedetails.extend(generate_json_light(instance, user))
     except (GanetiApiError, Exception):
         pass
     finally:
         close_connection()
Example #32
def worker(bits):
    # We need to reset the connections, otherwise the different processes
    # will try to share the connection, which causes things to blow up.
    from django import db
    from django.db import connections

    for alias, info in connections.databases.items():
        # We need to also tread lightly with SQLite, because blindly wiping
        # out connections (via ``... = {}``) destroys in-memory DBs.
        if 'sqlite3' not in info['ENGINE']:
            try:
                db.close_connection()
                del connections._connections[alias]
            except KeyError:
                pass

    if bits[0] == 'do_update':
        func, model, start, end, total, using, start_date, end_date, verbosity = bits
    elif bits[0] == 'do_remove':
        func, model, pks_seen, start, upper_bound, using, verbosity = bits
    else:
        return

    unified_index = haystack_connections[using].get_unified_index()
    index = unified_index.get_index(model)
    backend = haystack_connections[using].get_backend()

    if func == 'do_update':
        qs = index.build_queryset(start_date=start_date, end_date=end_date)
        do_update(backend, index, qs, start, end, total, verbosity=verbosity)
    elif func == 'do_remove':
        do_remove(backend, index, model, pks_seen, start, upper_bound, verbosity=verbosity)
Example #33
 def _get_cluster_details(cluster):
     try:
         clusterlist.append(clusterdetails_generator(cluster.slug))
      except (GanetiApiError, Exception) as e:
          errors.append(e)
     finally:
         close_connection()
Example #38
def after_all(context):
    # clean up django test DB
    try:
        db.close_connection()
        db.connection.creation.destroy_test_db(old_name, verbosity=0)
    except Exception, e:
        pass
Example #39
 def callback(result):
     attempt = 0
     while attempt < 2:
         self.stats['timestamp'] = self._utcnow()
         try:
             (verified, exist) = result
             if verified:
                 self.send_verified_notification(
                     exist,
                     conn,
                     exchange,
                     routing_keys=routing_keys)
             break
         except exceptions.ObjectDoesNotExist:
             if attempt < 1:
                 logger.warn("ObjectDoesNotExist in callback, "
                             "attempting to reconnect and try "
                             "again.")
                 close_connection()
                 reset_queries()
             else:
                 logger.error("ObjectDoesNotExist in callback "
                              "again, giving up.")
         except Exception, e:
             msg = "ERROR in Callback %s: %s" % (exchange_name,
                                                 e)
             logger.exception(msg)
             break
         attempt += 1
Example #40
    def handle(self, *args, **options):
        """
        Iterates over all the CRON_CLASSES (or if passed in as a commandline argument)
        and runs them.
        """
        if args:
            cron_class_names = args
        else:
            cron_class_names = getattr(settings, 'CRON_CLASSES', [])

        try:
            crons_to_run = [get_class(x) for x in cron_class_names]
        except Exception:
            error = traceback.format_exc()
            self.stdout.write(
                'Make sure these are valid cron class names: %s\n%s' %
                (cron_class_names, error))
            return

        for cron_class in crons_to_run:
            run_cron_with_cache_check(cron_class,
                                      force=options['force'],
                                      silent=options['silent'])

        clear_old_log_entries()
        close_connection()
Example #41
def saveURLList(urls):
    uqList = []
    for url in urls:
        uqList.append(URLQueue(url=url))
    from django import db
    db.close_connection()
    URLQueue.objects.bulk_create(uqList)
Example #42
def sobek(scenario_id, tmp_dir):
    success = common_generation(
        scenario_id, tmp_dir, [SOBEK_PROGRAM_ID, IMPORT_PROGRAM_ID])
    log.debug("Finish task.")
    log.debug("close db connection to avoid an idle process.")
    db.close_connection()
    return success
Example #43
def use_backup(request, org_id):
    try:
        backup = request.POST.get('backup', None)
        if backup:
            # mysql -uroot -pagile -P3308 -h127.0.0.1 -Dmember_v2 < ..\backup\2013-09-20
            cstr = "\"%smysql.exe\" --default-character-set=utf8 -u%s -p%s -P%s -h%s -D%s" % (
                EXE_DIR,
                DATABASES['default']['USER'],
                DATABASES['default']['PASSWORD'],
                DATABASES['default']['PORT'],
                DATABASES['default']['HOST'],
                DATABASES['default']['NAME'])

            if not os.path.exists(os.path.join(EXE_DIR, '../backup')):
                os.mkdir(os.path.join(EXE_DIR, '../backup'))

            db.close_connection()

            fname = datetime.date.today().strftime('%Y-%m-%d')
            cstr = "cmd /k %s < \"%s\"" % (
                cstr, os.path.join(EXE_DIR, '../backup/%s' % backup).replace('\\', '/'))
            print cstr
            f = os.popen(cstr)
            msg = f.read()
            f.close()

        return HttpResponse(backup)
    except Exception:
        traceback.print_exc()
Example #44
    def run(self, *args, **options):
        duration = options.get('duration', 0)
        sleep = options.get('sleep', 5.0)
        queue = options.get('queue', None)
        log_std = options.get('log_std', False)
        is_dev = options.get('dev', False)
        sig_manager = self.sig_manager

        if is_dev:
            # re-raise the last exception if one exists
            autoreload.raise_last_exception()

        if log_std:
            _configure_log_std()

        autodiscover()

        start_time = time.time()

        while (duration <= 0) or (time.time() - start_time) <= duration:
            if sig_manager.kill_now:
                # shutting down gracefully
                break

            if not self._tasks.run_next_task(queue):
                # there were no tasks in the queue, let's recover.
                close_connection()
                logger.debug('waiting for tasks')
                time.sleep(sleep)
            else:
                # there were some tasks to process, let's check if there is more work to do after a little break.
                time.sleep(
                    random.uniform(sig_manager.time_to_wait[0],
                                   sig_manager.time_to_wait[1]))
Example #45
 def __init__(self, uuid, receiver_ip, replica, rt=None):
     self.uuid = uuid
     self.receiver_ip = receiver_ip
     self.receiver_port = replica.data_port
     self.replica = replica
     self.snap_name = '%s_%d_replication' % (replica.share, replica.id)
     self.snap_name += '_1' if (rt is None) else '_%d' % (rt.id + 1)
     self.snap_id = '%s_%s' % (self.uuid, self.snap_name)
     self.rt = rt
     self.rt2 = None
     self.rt2_id = None
     self.rid = replica.id
     self.identity = u'%s-%s' % (self.uuid, self.rid)
     self.sp = None
      # Latest snapshot per Receiver (comes along with receiver-ready)
     self.rlatest_snap = None
     self.ctx = zmq.Context()
     self.msg = ''
     self.update_trail = False
     self.total_bytes_sent = 0
     self.ppid = os.getpid()
     self.max_snap_retain = settings.REPLICATION.get('max_snap_retain')
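      # close all db connections prior to fork.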
     for alias, info in db.connections.databases.items():
         db.close_connection()
     super(Sender, self).__init__()
Example #46
def check_password(environ, username, password):
    """
    Authenticates against Django's auth database

    mod_wsgi docs specify None, True, False as return value depending
    on whether the user exists and authenticates.
    """

    UserModel = auth.get_user_model()
    # db connection state is managed similarly to the wsgi handler
    # as mod_wsgi may call these functions outside of a request/response cycle
    db.reset_queries()

    try:
        try:
            user = UserModel.objects.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            return None
        try:
            if not user.is_active:
                return None
        except AttributeError as e:
            # a custom user may not support is_active
            return None
        return user.check_password(password)
    finally:
        db.close_connection()
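The docstring cites the None/True/False contract from the mod_wsgi docs; a hedged sketch of how such a script is typically wired into Apache (the script path and realm are placeholders, not from the source):

    AuthType Basic
    AuthName "Restricted"
    AuthBasicProvider wsgi
    WSGIAuthUserScript /path/to/django_auth.py
    Require valid-user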
Example #47
    def get(self, request, username, tenant=None, format=None):
        """
        Retrieve user details
        """
        if settings.MULTI_TENANT:
            util.multi_tenant_setup(tenant)
        if request.username and username == "me":
            username = request.username
        # LdapUser.base_dn = util.get_base_db(request)
        try:
            user = LdapUser.objects.get(username=username)
        except Exception:
            return Response(error_dict(msg="Error retrieving user details.",
                                       query_dict=request.GET),
                            status=status.HTTP_404_NOT_FOUND)
        finally:
            db.close_connection()
        serializer = LdapUserSerializer(user)

        # remove password from data:
        serializer.data.pop('password', None)
        # remove unused uid field as well
        serializer.data.pop('uid', None)
        return Response(
            success_dict(result=serializer.data,
                         msg="User details retrieved successfully.",
                         query_dict=request.GET))
Example #49
    def post(self, request, format=None):
        """
        Create an OU.

        ou -- (REQUIRED) The organizational unit to create.
        """

        if settings.READ_ONLY:
            return Response(error_dict(msg="Read-only service."),
                            status=status.HTTP_400_BAD_REQUEST)
        if settings.CHECK_JWT and settings.CHECK_USER_ADMIN_ROLE and not request.service_admin:
            return Response(error_dict(msg="Access denied."),
                            status=status.HTTP_401_UNAUTHORIZED)
        try:
            ou_name = request.POST.get('ou')
            ou.create_ou(ou_name)
            if settings.CREATE_NOTIFICATIONS:
                ou_uuid = settings.TENANT_UUID + "-" + ou_name + "-" + \
                    settings.BEANSTALK_SRV_CODE
                ou_body = {"name": ou_name, "uuid": ou_uuid}
                create_generic_notification(ou_uuid, "CREATED",
                                            request.username, ou_body,
                                            settings.TENANT_ID)

        except Exception as e:
            return Response(
                error_dict(msg="Error trying to create OU: " + str(e)))
        finally:
            db.close_connection()
        return Response(success_dict(msg="OU created successfully."))
Example #50
 def handle(self, *args, **options):
     Stats().load()
     if options["logfile"] is None:
         options["logfile"] = parameters.get_admin("LOGFILE", app="stats")
     p = LogParser(options, parameters.get_admin("RRD_ROOTDIR", app="stats"))
     p.process()
     db.close_connection()
Example #51
 def get(self, request, tenant=None, format=None):
     """
     List all users.
     """
     if settings.MULTI_TENANT:
         util.multi_tenant_setup(tenant)
     print "DN:", LdapUser.base_dn
     filter_dict = util.get_filter(request)
     if filter_dict:
         users = LdapUser.objects.filter(**filter_dict)
     else:
         users = LdapUser.objects.all()
     limit, offset = util.get_page_parms(request)
     if limit > 0:
         users = users[offset:offset + limit]
     serializer = LdapUserSerializer(users, many=True)
     for user in serializer.data:
         # remove password from data:
         user.pop('password', None)
         # remove unused uid field as well:
         user.pop('uid', None)
     db.close_connection()
     return Response(
         success_dict(msg="Users retrieved successfully.",
                      result=serializer.data,
                      query_dict=request.GET))
Example #52
    def handle(self, *args, **options):
        """
        Iterates over all the CRON_CLASSES (or if passed in as a commandline argument)
        and runs them.
        """
        if args:
            cron_class_names = args
        else:
            cron_class_names = getattr(settings, 'CRON_CLASSES', [])

        try:
            crons_to_run = [get_class(x) for x in cron_class_names]
        except Exception:
            error = traceback.format_exc()
            self.stdout.write('Make sure these are valid cron class names: %s\n%s' % (cron_class_names, error))
            raise

        for cron_class in crons_to_run:
            run_cron_with_cache_check(
                cron_class,
                force=options['force'],
                silent=options['silent']
            )

        clear_old_log_entries()
        close_connection()
Example #53
        def poll(self):
            '''Check for new pushes.

            Hack around transactions on InnoDB: make this transaction-aware
            and call transaction.commit() to get a new transaction for our
            queries.
            '''
            try:
                transaction.commit()
                if self.latest is None:
                    try:
                        self.latest = Push.objects.order_by('-pk')[0].id
                    except IndexError:
                        self.latest = 0
                    return
                new_pushes = Push.objects.filter(pk__gt=self.latest).order_by('pk')
                if self.debug:
                    log.msg('mbdb changesource found %d pushes after %d' % (new_pushes.count(), self.latest))
                push = None
                for push in new_pushes:
                    self.submitChangesForPush(push)
                if push is not None:
                    self.latest = push.id
            except MySQLdb.OperationalError:
                from django import db
                db.close_connection()
                log.msg('Django database OperationalError caught')
Example #54
def import_from_file(filehandle, descriptor, targetstatus, copy_status, owner_id=None):
    """
    Import the xml metadata record(s) contained in the opened file identified by filehandle.
    filehandle: an opened file handle to either a single XML file or a zip archive containing
        only XML files.
    descriptor: a descriptor for the file handle, e.g. the file name.
    targetstatus: one of PUBLISHED, INGESTED or INTERNAL. 
        All imported records will be assigned this status.
    owner_id (optional): if present, the given user ID will be added to the list of owners of the
        resource.

    Returns a pair of lists, the first list containing the successfully imported resource objects,
         the second containing pairs of descriptors of the erroneous XML file(s) and error messages.
    """
    imported_resources = []
    erroneous_descriptors = []

    handling_zip_file = is_zipfile(filehandle)
    # Reset file handle for proper reading of the file contents.
    filehandle.seek(0)

    if not handling_zip_file:
        try:
            LOGGER.info('Importing XML file: "{0}"'.format(descriptor))
            xml_string = filehandle.read()
            resource = import_from_string(xml_string, targetstatus, copy_status, owner_id)
            imported_resources.append(resource)
        # pylint: disable-msg=W0703
        except Exception as problem:
            LOGGER.warn('Caught an exception while importing %s:',
                descriptor, exc_info=True)
            if isinstance(problem, db.utils.DatabaseError):
                # reset database connection (required for PostgreSQL)
                db.close_connection()
            erroneous_descriptors.append((descriptor, problem))
    
    else:
        temp_zip = ZipFile(filehandle)
        
        LOGGER.info('Importing ZIP file: "{0}"'.format(descriptor))
        file_count = 0
        for xml_name in temp_zip.namelist():
            try:
                if xml_name.endswith('/') or xml_name.endswith('\\'):
                    continue
                file_count += 1
                LOGGER.info('Importing {0}. extracted XML file: "{1}"'.format(file_count, xml_name))
                xml_string = temp_zip.read(xml_name)
                resource = import_from_string(xml_string, targetstatus, copy_status, owner_id)
                imported_resources.append(resource)
            # pylint: disable-msg=W0703
            except Exception as problem:
                LOGGER.warn('Caught an exception while importing %s from %s:',
                    xml_name, descriptor, exc_info=True)
                if isinstance(problem, db.utils.DatabaseError):
                    # reset database connection (required for PostgreSQL)
                    db.close_connection()
                erroneous_descriptors.append((xml_name, problem))
    return imported_resources, erroneous_descriptors
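A usage sketch based on the docstring (the file name, the PUBLISHED status constant and copy_status=None are assumptions, not from the source):

    with open('resource.xml', 'rb') as filehandle:
        imported, errors = import_from_file(
            filehandle, 'resource.xml', PUBLISHED, copy_status=None)
        for descriptor, problem in errors:
            LOGGER.error('%s failed to import: %s', descriptor, problem)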
Example #55
 def call_test_func():
     try:
         test_func(*args, **kwargs)
     except Exception as e:
         exceptions.append(e)
         raise
     finally:
         db.close_connection()
Example #56
    def run(self):
        # Close database connection so it's not shared with the parent.
        # django will reconnect automatically.
        db.close_connection()

        logger.info("Running Job: '%s'" % self.job)
        job_runner = JobRunner(self.job)
        job_runner.run()
Example #57
def check_with_own_connection(solution, run_all=True):
    # Close the current db connection - will cause Django to create a new
    # connection (not shared with other processes) when one is needed, see
    # https://groups.google.com/forum/#!msg/django-users/eCAIY9DAfG0/6DMyz3YuQDgJ
    db.close_connection()
    solution.check(run_all)

    # Don't leave idle connections behind
    db.close_connection()
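A usage sketch of the intent in the comments above (the Process call site and the solution object are assumptions, not from the source):

    from multiprocessing import Process

    p = Process(target=check_with_own_connection, args=(solution,))
    p.start()
    p.join()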