Example #1
 def insert_data(self, query, args):
     self.logger.debug("Inserting logs to searchd")
     result = None
     for _ in 1, 2, 3:
         try:
             c = self.conn.cursor()
             result = c.execute(query, args)
             self.logger.debug("%s rows inserted" % c.rowcount)
             c.close()
         except ProgrammingError:
             self.logger.exception(
                 "Can't insert values to index: %s" % query)
         except DatabaseError as e:
             self.logger.exception("Sphinx connection error: %s" % e)
             try:
                 close_old_connections()
             except Exception as e:
                 self.logger.exception("Can't reconnect: %s" % e)
                 os.kill(os.getpid(), signal.SIGKILL)
         except Exception:
             self.logger.exception("Unhandled error in insert_data")
         else:
             return result
     self.logger.error("Can't insert data in 3 tries, exit process")
     os.kill(os.getpid(), signal.SIGKILL)
Example #2
    def handle(self, *args, **options):
        """
        Iterates over all the CRON_CLASSES (or if passed in as a commandline argument)
        and runs them.
        """
        cron_classes = options['cron_classes']
        if cron_classes:
            cron_class_names = cron_classes
        else:
            cron_class_names = getattr(settings, 'CRON_CLASSES', [])

        try:
            crons_to_run = [get_class(x) for x in cron_class_names]
        except Exception:
            error = traceback.format_exc()
            self.stdout.write('Make sure these are valid cron class names: %s\n%s' % (cron_class_names, error))
            return

        for cron_class in crons_to_run:
            run_cron_with_cache_check(
                cron_class,
                force=options['force'],
                silent=options['silent']
            )

        clear_old_log_entries()
        close_old_connections()
Example #3
 def _get_instances(cluster):
     try:
         instances.extend(cluster.get_instances())
     except (GanetiApiError, Exception):
         pass
     finally:
         close_old_connections()
Example #4
 def get_user(self, username):
     try:
         u = User.objects.get(username=username)
         return u
     except:
         db.close_old_connections()
         raise
Example #5
    def handle(self, *args, **options):
        try:
            os.chdir(settings.BASE_DIR)
            graceful_startup()
            session = SmSession.objects.get_or_create()[0]
            if session.update_on_startup:
                session.update_available = False
                session.save()
                close_old_connections()

                command = git + ' stash'
                subprocess.call(command, shell=True)  # trying to get rid of settings.sqlite3 change
                command = git + ' reset --hard'
                subprocess.call(command, shell=True)

                command = git + ' pull'
                git_status = subprocess.check_output(command, shell=True)
                # TODO: Make sure the pull actually worked
        except:
            print "Failed to update!"
            try:
                command = git + ' reset'
                subprocess.call(command, shell=True)
                command = git + ' stash apply'
                subprocess.call(command, shell=True)
            except:
                print "Failed to gracefully reset!"
                return False
        finally:
            session = SmSession.objects.get_or_create()[0]
            session.update_on_startup = False
            session.save()
Example #6
    def handle(self, *args, **options):
        close_old_connections()

        db = options.get('database')
        connection = connections[db]
        interactive = options.get('interactive')
        settings = dict(connection.settings_dict)
        name = settings['NAME']
        if interactive and self.should_ask:
            msg = ''.join([self.message % {
                'name': name,
            }, self.ask_message])
            confirm = input(msg)
        else:
            confirm = self.default

        if confirm == 'yes':
            backend = get_backend(settings)
            backend.connect()
            try:
                self.execute_sql(backend, **options)
            except Exception as e:
                six.reraise(CommandError, CommandError(self.error_message % {
                    'name': name,
                    'error': e,
                }), sys.exc_info()[2])
            finally:
                backend.close()

            self.post_execute(**options)
Example #7
    def schedule_changed(self):
        try:
            close_old_connections()

            # If MySQL is running with transaction isolation level
            # REPEATABLE-READ (default), then we won't see changes done by
            # other transactions until the current transaction is
            # committed (Issue #41).
            try:
                transaction.commit()
            except transaction.TransactionManagementError:
                pass  # not in transaction management.

            last, ts = self._last_timestamp, self.Changes.last_change()
        except DatabaseError as exc:
            logger.exception('Database gave error: %r', exc)
            return False
        except InterfaceError:
            warning(
                'DatabaseScheduler: InterfaceError in schedule_changed(), '
                'waiting to retry in next call...'
            )
            return False

        try:
            if ts and ts > (last if last else ts):
                return True
        finally:
            self._last_timestamp = ts
        return False
Example #8
def save_task(task, broker):
    """
    Saves the task package to Django or the cache
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get('save', Conf.SAVE_LIMIT > 0) and task['success']:
        return
    # async next in a chain
    if task.get('chain', None):
        tasks.async_chain(task['chain'], group=task['group'], cached=task['cached'], sync=task['sync'], broker=broker)
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    db.close_old_connections()
    try:
        if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
            Success.objects.last().delete()
        Task.objects.create(id=task['id'],
                            name=task['name'],
                            func=task['func'],
                            hook=task.get('hook'),
                            args=task['args'],
                            kwargs=task['kwargs'],
                            started=task['started'],
                            stopped=task['stopped'],
                            result=task['result'],
                            group=task.get('group'),
                            success=task['success'])
    except Exception as e:
        logger.error(e)
Example #9
def check_simulation_version():
    """Will return None if the database is not usable"""
    close_old_connections()
    version = None
    try:
        executable = adsm_executable_command()[0]
        process = subprocess.Popen(executable + " --version --silent", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            output, error = process.communicate(timeout=200)
            exit_code = process.returncode
        except:
            exit_code = 1
            output = None
        try:
            process.kill()
        except:
            pass

        if exit_code == 0:
            version = output.splitlines()[-1].decode()
    except:
        print("Unable to get Simulation Version!")
        return None
    try:
        SmSession.objects.all().update(simulation_version=version)
    except:
        print("Unable to store Simulation Version!")

    return version
Example #10
def reset_db(name, fail_ok=True):
    """Resets the database to a healthy state by any means necessary.  Tries to delete the file, or flush all tables, then runs the migrations from scratch.
     It now checks if the file exists.  This will throw a PermissionError if the user has the DB open in another program."""
    print("Deleting", db_path(name))
    close_old_connections()
    delete_failed = False
    if os.path.exists(db_path(name)):  # your database is corrupted and must be destroyed
        connections[name].close()
        try:
            # or you could http://stackoverflow.com/a/24501130/2291495
            os.remove(db_path(name))
        except PermissionError as err:  # must still be holding onto the file lock, clear out contents instead
            if not fail_ok:
                raise err
            else:  # I already tried manage.py flush.  It doesn't do enough
                execute = connections[name].cursor().execute
                # raw_sql = "select 'drop table ' || name || ';' from sqlite_master where type = 'table';"
                execute("PRAGMA writable_schema = 1;")
                execute("delete from sqlite_master where type in ('table', 'index', 'trigger');")
                execute("PRAGMA writable_schema = 0;")
                execute("VACUUM;")
                execute("PRAGMA INTEGRITY_CHECK;")
                print("===Dropping all Tables===")
    else:
        print(db_path(name), "does not exist")
    # 'migrate' recreates a blank database file
    call_command('migrate', database=name, interactive=False, fake_initial=True)
    if name == 'default':  # create super user
        create_super_user()
Example #11
 def tearDown(self):
     from django import db
     db.close_old_connections()
     for cmd in ("DROP DATABASE %(dbname)s;", "DROP USER %(username)s;"):
         complete_cmd = 'sudo -u postgres psql -c "%s"' % (cmd % self.data)
         os.system(complete_cmd)
     BaseTest.tearDown(self)
Example #12
def setup_from_none():
    if six.PY3:
        clear_modules()
    from lucterios.framework.settings import fill_appli_settings
    import types
    import gc
    gc.collect()
    lct_modules = []
    glob = LucteriosGlobal()
    _, mod_applis, mod_modules = glob.installed()
    for mod_item in mod_applis:
        lct_modules.append(mod_item[0])
    for mod_item in mod_modules:
        lct_modules.append(mod_item[0])
    try:
        module = types.ModuleType("default_setting")
    except TypeError:
        module = types.ModuleType(six.binary_type("default_setting"))
    setattr(module, '__file__', "")
    setattr(module, 'SECRET_KEY', "default_setting")
    setattr(
        module, 'DATABASES', {'default': {'ENGINE': 'django.db.backends.dummy'}})
    fill_appli_settings("lucterios.standard", lct_modules, module)
    sys.modules["default_setting"] = module
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "default_setting")
    import django
    from django import db
    django.setup()
    db.close_old_connections()
Example #13
def save_task(task):
    """
    Saves the task package to Django or the cache
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get("save", Conf.SAVE_LIMIT > 0) and task["success"]:
        return
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    db.close_old_connections()
    try:
        if task["success"] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
            Success.objects.last().delete()
        Task.objects.create(
            id=task["id"],
            name=task["name"],
            func=task["func"],
            hook=task.get("hook"),
            args=task["args"],
            kwargs=task["kwargs"],
            started=task["started"],
            stopped=task["stopped"],
            result=task["result"],
            group=task.get("group"),
            success=task["success"],
        )
    except Exception as e:
        logger.error(e)
Example #14
def generate_master_hashes():
    """
    Generate missing sha1 hashes
    """

    # mysql does not support remote/streaming cursors
    # to save memory items are loaded from db individually
    values = Media.objects.filter(master_sha1__isnull=True).order_by('pk').values('id').nocache()

    item_ids = [i['id'] for i in values]

    with click.progressbar(item_ids, show_pos=True, width=48,label='Reprocessing {} hashes'.format(len(item_ids))) as bar:

        for item_pk in bar:

            close_old_connections()

            item = Media.objects.get(pk=item_pk)

            if item.master and item.master.path:

                master_sha1 = item.generate_sha1()

                # store the computed master hash
                Media.objects.filter(pk=item.pk).update(master_sha1=master_sha1)
Example #15
 def _get_instance_details(instance):
     try:
         instancedetails.extend(generate_json_light(instance, user))
     except (GanetiApiError, Exception):
         pass
     finally:
         close_old_connections()
Example #16
def worker(bits):
    # We need to reset the connections, otherwise the different processes
    # will try to share the connection, which causes things to blow up.
    from django.db import connections

    for alias, info in connections.databases.items():
        # We need to also tread lightly with SQLite, because blindly wiping
        # out connections (via ``... = {}``) destroys in-memory DBs.
        if 'sqlite3' not in info['ENGINE']:
            try:
                close_old_connections()
                if isinstance(connections.thread_local.connections, dict):
                    del(connections.thread_local.connections[alias])
                else:
                    delattr(connections.thread_local.connections, alias)
            except KeyError:
                pass

    if bits[0] == 'do_update':
        func, model, start, end, total, using, start_date, end_date, verbosity, commit = bits
    elif bits[0] == 'do_remove':
        func, model, pks_seen, start, upper_bound, using, verbosity, commit = bits
    else:
        return

    unified_index = haystack_connections[using].get_unified_index()
    index = unified_index.get_index(model)
    backend = haystack_connections[using].get_backend()

    if func == 'do_update':
        qs = index.build_queryset(start_date=start_date, end_date=end_date)
        do_update(backend, index, qs, start, end, total, verbosity=verbosity, commit=commit)
    else:
        raise NotImplementedError('Unknown function %s' % func)
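A note on the connection-reset loop above: poking at connections.thread_local is private API, and on current Django the same effect (each forked child opening its own fresh connections) is usually obtained by closing everything before the fork. A minimal sketch under that assumption, using a hypothetical run_workers() helper around the worker() defined above:

from multiprocessing import Pool

from django.db import connections

def run_workers(bits_list):  # hypothetical helper, not part of the original code
    # Close every connection in the parent before forking so the
    # children cannot inherit and share the parent's sockets.
    connections.close_all()
    with Pool() as pool:
        pool.map(worker, bits_list)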
Example #17
    def reload_period_from_analytics(cls, period, verbose=False):
        """Replace the stats for the given period from Google Analytics."""
        counts = googleanalytics.pageviews_by_document(
            *period_dates(period), verbose=verbose)
        if counts:
            # Close any existing connections because our load balancer times
            # them out at 5 minutes and the GA calls take forever.
            close_old_connections()

            # Delete and remake the rows:
            # Horribly inefficient until
            # http://code.djangoproject.com/ticket/9519 is fixed.
            # cls.objects.filter(period=period).delete()

            # Instead, we use raw SQL!
            cursor = connection.cursor()
            cursor.execute(
                'DELETE FROM `dashboards_wikidocumentvisits`'
                '    WHERE `period` = %s',
                [period])

            # Now we create them again with fresh data.
            for doc_id, visits in counts.iteritems():
                cls.objects.create(document=Document(pk=doc_id), visits=visits,
                                   period=period)
        else:
            # Don't erase interesting data if there's nothing to replace it:
            log.warning('Google Analytics returned no interesting data,'
                        ' so I kept what I had.')
Example #18
def update_worker(args):
    if len(args) != 10:
        LOG.error('update_worker received incorrect arguments: %r', args)
        raise ValueError('update_worker received incorrect arguments')

    model, start, end, total, using, start_date, end_date, verbosity, commit, max_retries = args

    # FIXME: confirm that this is still relevant with modern versions of Django:
    # We need to reset the connections, otherwise the different processes
    # will try to share the connection, which causes things to blow up.
    from django.db import connections

    for alias, info in connections.databases.items():
        # We need to also tread lightly with SQLite, because blindly wiping
        # out connections (via ``... = {}``) destroys in-memory DBs.
        if 'sqlite3' not in info['ENGINE']:
            try:
                close_old_connections()
                if isinstance(connections.thread_local.connections, dict):
                    del connections.thread_local.connections[alias]
                else:
                    delattr(connections.thread_local.connections, alias)
            except KeyError:
                pass

    # Request that the connection clear out any transient sessions, file handles, etc.
    haystack_connections[using].reset_sessions()

    unified_index = haystack_connections[using].get_unified_index()
    index = unified_index.get_index(model)
    backend = haystack_connections[using].get_backend()

    qs = index.build_queryset(start_date=start_date, end_date=end_date)
    do_update(backend, index, qs, start, end, total, verbosity, commit, max_retries)
    return args
Example #19
def start():
    if os.path.exists(settings.pid_file):
        with open(settings.pid_file) as f:
            try:
                fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                fcntl.flock(f, fcntl.LOCK_UN)
            except IOError:
                sys.stdout.write("zmapd is already started\n")
                return

    run_daemon_process(pid_file=settings.pid_file, start_msg="Start zmapd(%s)\n")
    pid_file = open(settings.pid_file)
    fcntl.flock(pid_file, fcntl.LOCK_SH)
    while True:
        time.sleep(1)
        running_jobs = Job.objects.filter(status=Job.STATUS_RUNNING)
        total_bandwidth = 0
        for job in running_jobs:
            total_bandwidth += job.bandwidth
        if total_bandwidth >= settings.max_bandwidth:
            logger.debug(u"Achieve maximum bandwidth:%sM", settings.max_bandwidth)
            continue
        jobs = [x for x in Job.objects.filter(status=Job.STATUS_PENDING).order_by('-priority')]
        db.close_old_connections()
        for j in jobs:
            p = multiprocessing.Process(target=execute_job, args=(j.id,))
            p.start()
Example #20
def groups_for_user(environ, username):
    """
    Authorizes a user based on groups
    """

    # We cannot load these modules before calling get_wsgi_application()
    # So they must be defined here.
    from ngw.core import perms
    from ngw.core.models import ContactGroup

    UserModel = auth.get_user_model()
    db.reset_queries()

    try:
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            return []
        if not user.is_active:
            return []
        groups = ContactGroup.objects.with_user_perms(
            user.id, wanted_flags=perms.VIEW_FILES, add_column=False)
        return [force_bytes(group.id) for group in groups]
    finally:
        db.close_old_connections()
Example #21
 def __init__(self, uuid, receiver_ip, replica, rt=None):
     self.uuid = uuid
     self.receiver_ip = receiver_ip
     self.receiver_port = replica.data_port
     self.replica = replica
     # TODO: may need to send local shareId so it can be verifed remotely
     self.snap_name = '%s_%d_replication' % (replica.share, replica.id)
     self.snap_name += '_1' if (rt is None) else '_%d' % (rt.id + 1)
     self.snap_id = '%s_%s' % (self.uuid, self.snap_name)
     self.rt = rt
     self.rt2 = None
     self.rt2_id = None
     self.rid = replica.id
     self.identity = u'%s-%s' % (self.uuid, self.rid)
     self.sp = None
     # Latest snapshot per Receiver(comes along with receiver-ready)
     self.rlatest_snap = None
     self.ctx = zmq.Context()
     self.msg = ''
     self.update_trail = False
     self.total_bytes_sent = 0
     self.ppid = os.getpid()
     self.max_snap_retain = settings.REPLICATION.get('max_snap_retain')
     db.close_old_connections()
     super(Sender, self).__init__()
Example #22
def check_password(environ, username, password):
    """
    Authenticates against Django's auth database
    mod_wsgi docs specify None, True, False as return value depending
    on whether the user exists and authenticates.

    Manually control this function instead of django
    in order to parse environ for correct path
    """

    UserModel = auth.get_user_model()
    # db connection state is managed similarly to the wsgi handler
    # as mod_wsgi may call these functions outside of a request/response cycle
    db.reset_queries()

    try:
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            return None
        if not user.is_active:
            return None

        # Method to deny access to specific directories
        # if "/tag0.1/scripts/visualisation" in environ['REQUEST_URI']:
        #    return None

        return user.check_password(password)
    finally:
        db.close_old_connections()
Example #23
def import_naadsm_scenario(request, new_name=None):
    if 'POST' in request.method:
        initialized_form = ImportForm(request.POST, request.FILES)
    else:  # GET page for the first time
        initialized_form = ImportForm()
        close_old_connections()  # close old session
        new_scenario(new_name=new_name)  # add a scenario to session

    context = {'form': initialized_form,
               'title': "Import Legacy NAADSM Scenario in XML format",
               'loading_message': "Please wait as we import your file...",
               'error_title': "Import Error:",
               'base_page': 'ScenarioCreator/crispy-model-form.html',
               }

    if initialized_form.is_valid():
        try:
            run_importer(request, new_name=new_name)
            return loading_screen(request)
        except Exception as e:
            import sys
            info = sys.exc_info()
            # stacktrace = info[3]
            # print(stacktrace)
            # traceback.print_exc()

            print(info)
            print(str(info[2]))
            context['form_errors'] = mark_safe(str(info[1]))
            # go on to serve the form normally with Error messages attached
    else:
        context['form_errors'] = initialized_form.errors
    return render(request, 'ScenarioCreator/MainPanel.html', context)  # render in validation error messages
Example #24
def process_image_list(l):
    # Drop stale or expired connections before forking the worker pool,
    # so the child processes do not inherit unusable database sockets.
    db.close_old_connections()

    pool = Pool()
    pool.starmap(migrate_image_resize, l)
    pool.close()
    pool.join()
Example #25
def django_db_management():
    # type: () -> None
    reset_queries()
    close_old_connections()
    try:
        yield
    finally:
        close_old_connections()
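As written, django_db_management() is a plain generator; it is presumably meant to be wrapped with contextlib.contextmanager so callers can use it as a with block. A minimal sketch of that usage, with a hypothetical do_periodic_work() standing in for real work:

from contextlib import contextmanager

from django.db import close_old_connections, reset_queries

@contextmanager
def django_db_management():
    reset_queries()
    close_old_connections()
    try:
        yield
    finally:
        close_old_connections()

def do_periodic_work():  # hypothetical placeholder
    pass

with django_db_management():
    do_periodic_work()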
Example #26
 def has_user(self, username):
     try:
         u = User.objects.get(username = username)
         db.close_old_connections()
         return True
     except:
         db.close_old_connections()
         return False
Example #27
    def setUp(self):
        self.client.get('/app/OpenScenario/Roundtrip_test.sqlite3/')

        settings = OutputSettings.objects.first()
        settings.stop_criteria = 'stop-days'
        settings.days = 10
        settings.save()
        close_old_connections()
Example #28
File: db.py Project: GLab/ToMaTo
	def call(*args, **kwargs):
		try:
			return fn(*args, **kwargs)
		finally:
			if transaction.is_dirty():
				transaction.commit()
				db.connections.all()[0].close()
			db.close_old_connections()
Example #29
 def call_test_func():
     try:
         test_func(*args, **kwargs)
     except Exception as e:
         exceptions.append(e)
         raise
     finally:
         db.close_old_connections()
Example #30
def worker(task_queue, result_queue, timer, timeout=Conf.TIMEOUT):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """
    name = current_process().name
    logger.info(_('{} ready for work at {}').format(name, current_process().pid))
    task_count = 0
    # Start reading the task queue
    for task in iter(task_queue.get, 'STOP'):
        result = None
        timer.value = -1  # Idle
        task_count += 1
        # Get the function from the task
        logger.info(_('{} processing [{}]').format(name, task['name']))
        f = task['func']
        # if it's not an instance try to get it from the string
        if not callable(task['func']):
            try:
                module, func = f.rsplit('.', 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError) as e:
                result = (e, False)
                if error_reporter:
                    error_reporter.report()
                if rollbar:
                    rollbar.report_exc_info()
        # We're still going
        if not result:
            db.close_old_connections()
            timer_value = task['kwargs'].pop('timeout', timeout or 0)
            # signal execution
            pre_execute.send(sender="django_q", func=f, task=task)
            # execute the payload
            timer.value = timer_value  # Busy
            try:
                res = f(*task['args'], **task['kwargs'])
                result = (res, True)
            except Exception as e:
                result = ('{}'.format(e), False)
                if error_reporter:
                    error_reporter.report()
                if rollbar:
                    rollbar.report_exc_info()
        # Process result
        task['result'] = result[0]
        task['success'] = result[1]
        task['stopped'] = timezone.now()
        result_queue.put(task)
        timer.value = -1  # Idle
        # Recycle
        if task_count == Conf.RECYCLE:
            timer.value = -2  # Recycled
            break
    logger.info(_('{} stopped doing work').format(name))
Example #31
 def wrapper(*args, **kwargs):
     try:
         return f(*args, **kwargs)
     finally:
         close_old_connections()
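The wrapper above is only the inner function of a decorator; a minimal self-contained sketch of the full pattern, using a hypothetical close_connections_after name for the decorator:

from functools import wraps

from django.db import close_old_connections

def close_connections_after(f):  # hypothetical decorator name
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        finally:
            # Discard expired or broken connections once the call is done.
            close_old_connections()
    return wrapper

@close_connections_after
def nightly_cleanup():  # hypothetical example task
    pass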
Example #32
django.setup()

from django.db import close_old_connections

from imdb.utils.logger import get_root_logger
from imdb.db.settings.pool import init_pool
from imdb.session.interface import MySessionInterface

from imdb.rest.ping import Ping
from imdb.rest.login import Login
from imdb.rest.logout import Logout
from imdb.rest.user import User
from imdb.rest.movie import Movie

logger = get_root_logger()
close_old_connections()
init_pool()

app = Flask("imdb")
CORS(app)

app.auth_header_name = 'Authorization'
app.session_interface = MySessionInterface()

app.root_dir = dirname(dirname(abspath(__file__)))
api = Api(app, prefix='/imdb-service/')

logger.info("Setting up Resources")

api.add_resource(Ping, 'ping/')
api.add_resource(Login, 'login/')
Example #33
 def __call__(self, scope):
     self.scope = scope
     close_old_connections()
     self.intercept()
     return self.inner(self.scope)
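The __call__ above follows the Channels 2 style of synchronous middleware; a minimal sketch of a complete middleware class along these lines, with a hypothetical DatabaseCleanupMiddleware name:

from django.db import close_old_connections

class DatabaseCleanupMiddleware:  # hypothetical name
    def __init__(self, inner):
        self.inner = inner

    def __call__(self, scope):
        # Drop stale or broken connections before handing the scope on.
        close_old_connections()
        return self.inner(scope)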
Example #34
 def ThreadMain(self, ProcName):
     logger.info('Starting ' + ProcName)
     while 1:
         try:
             if self.mDieFlag == 1: break  # Request for death
             self.mLock.acquire()
             PauseTime, self.Run = ESSProc.objects.filter(
                 Name=ProcName).values_list('Time', 'Run')[0]
             if self.Run == '0':
                 logger.info('Stopping ' + ProcName)
                 ESSProc.objects.filter(Name=ProcName).update(Status='0',
                                                              Run='0',
                                                              PID=0)
                 self.RunFlag = 0
                 self.mLock.release()
                 if Debug: logger.info('RunFlag: 0')
                 time.sleep(2)
                 continue
             # Process Item
             lock = thread.allocate_lock()
             if Debug: logger.info('Start to list worklist')
             #######################################
             # Start to list robot req
             robotQueue_objs = robotQueue.objects.filter(
                 ReqType__in=[50, 51,
                              52], Status=0)  #Get pending robot requests
             for robotQueue_obj in robotQueue_objs:
                 if ESSProc.objects.get(Name=ProcName).Run == '0':
                     logger.info('Stopping ' + ProcName)
                     ESSProc.objects.filter(Name=ProcName).update(
                         Status='0', Run='0', PID=0)
                     thread.interrupt_main()
                     break
                 t_id = robotQueue_obj.MediumID
                 if robotQueue_obj.ReqType == 50:  #Mount
                     ##########################################
                     # Check if tape is already mounted
                     robotdrives_objs = robotdrives.objects.filter(
                         t_id=t_id, status='Mounted')
                     if robotdrives_objs:
                         robotdrives_obj = robotdrives_objs[0]
                         ##########################################
                         # Tape is mounted, check if locked
                         if len(robotdrives_obj.drive_lock) > 0:
                             ########################################
                             # Tape is locked, check if req work_uuid = lock
                             if robotdrives_obj.drive_lock == robotQueue_obj.ReqUUID:
                                 ########################################
                                 # Tape is already locked with req work_uuid
                                 logger.info(
                                     'Already Mounted: ' + str(t_id) +
                                     ' and locked by req work_uuid: ' +
                                     str(robotQueue_obj.ReqUUID))
                                 robotQueue_obj.delete()
                             else:
                                 ########################################
                                 # Tape is locked with another work_uuid
                                 logger.info(
                                     'Tape: ' + str(t_id) +
                                     ' is busy and locked by: ' +
                                     str(robotdrives_obj.drive_lock) +
                                     ' and not req work_uuid: ' +
                                     str(robotQueue_obj.ReqUUID))
                         else:
                             ########################################
                             # Tape is not locked, lock the drive with req work_uuid
                             robotdrives_obj.drive_lock = robotQueue_obj.ReqUUID
                             robotdrives_obj.save(
                                 update_fields=['drive_lock'])
                             logger.info(
                                 'Tape: ' + str(t_id) +
                                 ' is available set lock to req work_uuid: '
                                 + str(robotQueue_obj.ReqUUID))
                             robotQueue_obj.delete()
                     else:
                         ##########################################
                         # Tape is not mounted, check for available tape drives
                         robotdrives_objs = robotdrives.objects.filter(
                             status='Ready')  #Get available tape drives
                         if robotdrives_objs:
                             robotdrives_obj = robotdrives_objs[0]
                             ########################################
                             # Tapedrives is available try to mount tape
                             robotQueue_obj.Status = 5
                             robotQueue_obj.save(update_fields=['Status'])
                             try:
                                 Robot().Mount(t_id,
                                               robotdrives_obj.drive_id,
                                               robotQueue_obj.ReqUUID)
                             except RobotException as e:
                                 logger.error('Problem to mount tape: ' +
                                              t_id + ' Message: ' + e)
                                 robotQueue_obj.Status = 100
                                 robotQueue_obj.save(
                                     update_fields=['Status'])
                             else:
                                 #if returncode == 0:
                                 robotQueue_obj.delete()
                                 if storageMedium.objects.filter(
                                         storageMediumID=t_id).exists():
                                     ######################################################
                                     # Update StorageMediumTable with num of mounts
                                     storageMedium_obj = storageMedium.objects.get(
                                         storageMediumID=t_id)
                                     timestamp_utc = datetime.datetime.utcnow(
                                     ).replace(microsecond=0,
                                               tzinfo=pytz.utc)
                                     timestamp_dst = timestamp_utc.astimezone(
                                         self.tz)
                                     storageMedium_obj.storageMediumMounts += 1
                                     storageMedium_obj.storageMediumLocationStatus = 50
                                     storageMedium_obj.linkingAgentIdentifierValue = AgentIdentifierValue
                                     storageMedium_obj.LocalDBdatetime = timestamp_utc
                                     storageMedium_obj.save(update_fields=[
                                         'storageMediumMounts',
                                         'storageMediumLocationStatus',
                                         'linkingAgentIdentifierValue',
                                         'LocalDBdatetime'
                                     ])
                                     if ExtDBupdate:
                                         ext_res, ext_errno, ext_why = ESSMSSQL.DB(
                                         ).action(
                                             'storageMedium', 'UPD',
                                             ('storageMediumLocationStatus',
                                              50, 'storageMediumMounts',
                                              storageMedium_obj.
                                              storageMediumMounts,
                                              'linkingAgentIdentifierValue',
                                              AgentIdentifierValue),
                                             ('storageMediumID', t_id))
                                         if ext_errno:
                                             logger.error(
                                                 'Failed to update External DB: '
                                                 + str(t_id) + ' error: ' +
                                                 str(ext_why))
                                         else:
                                             storageMedium_obj.ExtDBdatetime = timestamp_utc
                                             storageMedium_obj.save(
                                                 update_fields=[
                                                     'ExtDBdatetime'
                                                 ])
                             #else:
                             #logger.error('Problem to mount tape: ' + t_id + ' Message: ' + str(mountout))
                             #robotQueue_obj.Status=100
                             #robotQueue_obj.save(update_fields=['Status'])
                 elif robotQueue_obj.ReqType in [
                         51, 52
                 ]:  # Unmount(51), F_Unmount(52)
                     ######################################
                     # Check if tape is mounted
                     robotdrives_objs = robotdrives.objects.filter(
                         t_id=t_id)  #Get tape drive that is mounted
                     if robotdrives_objs:
                         robotdrives_obj = robotdrives_objs[0]
                         ################################################
                         # Tape is mounted, check if tape is locked(busy)
                         if len(robotdrives_obj.drive_lock
                                ) == 0 or robotQueue_obj.ReqType == 52:
                             if len(robotdrives_obj.drive_lock
                                    ) > 0 and robotQueue_obj.ReqType == 52:
                                 logger.info(
                                     'Tape %s is locked with work_uuid %s, try to force unmount',
                                     t_id, robotdrives_obj.drive_lock)
                             ###################################################
                             # Try to unmount
                             robotQueue_obj.Status = 5
                             robotQueue_obj.save(update_fields=['Status'])
                             robotdrives_obj.status = 'Unmounting'
                             robotdrives_obj.save(update_fields=['status'])
                             try:
                                 Robot().Unmount(t_id,
                                                 robotdrives_obj.drive_id)
                             except RobotException as e:
                                 logger.error('Problem to unmount tape: ' +
                                              t_id + ' Message: ' + e)
                                 robotQueue_obj.Status = 100
                                 robotQueue_obj.save(
                                     update_fields=['Status'])
                             else:
                                 #if returncode == 0:
                                 robotQueue_obj.delete()
                             #else:
                             #logger.error('Problem to unmount tape: ' + t_id + ' Message: ' + str(mountout))
                             #robotQueue_obj.Status=100
                             #robotQueue_obj.save(update_fields=['Status'])
                         else:
                             #################################################
                             # Tape is locked, skip to unmount
                             logger.info('Tape ' + t_id +
                                         ' is locked, skip to unmount')
                     else:
                         ################################################
                         # Tape is not mounted, skip to try to unmount
                         logger.info('Tape ' + t_id + ' is not mounted')
                         robotQueue_obj.delete()
             db.close_old_connections()
             self.mLock.release()
             time.sleep(int(PauseTime))
         except:
             logger.error('Unexpected error: %s' % (str(sys.exc_info())))
             raise
     self.mDieFlag = 0
Example #35
    def ThreadMain(self, ProcName):
        logging.info('Starting ' + ProcName)
        TimeZone = timezone.get_default_timezone_name()
        self.tz = pytz.timezone(TimeZone)
        METS_NAMESPACE = SchemaProfile.objects.get(
            entity='mets_namespace').value
        METS_SCHEMALOCATION = SchemaProfile.objects.get(
            entity='mets_schemalocation').value
        METS_PROFILE = SchemaProfile.objects.get(entity='mets_profile').value
        XLINK_NAMESPACE = SchemaProfile.objects.get(
            entity='xlink_namespace').value
        XSI_NAMESPACE = SchemaProfile.objects.get(entity='xsi_namespace').value
        while 1:
            if self.mDieFlag == 1: break  # Request for death
            self.mLock.acquire()
            self.Time, self.Run = ESSDB.DB().action('ESSProc', 'GET',
                                                    ('Time', 'Run'),
                                                    ('Name', ProcName))[0]
            if self.Run == '0':
                logging.info('Stopping ' + ProcName)
                ESSDB.DB().action('ESSProc', 'UPD',
                                  ('Status', '0', 'Run', '0', 'PID', '0'),
                                  ('Name', ProcName))
                self.RunFlag = 0
                self.mLock.release()
                if Debug: logging.info('RunFlag: 0')
                time.sleep(2)
                continue
            # Process Item
            lock = thread.allocate_lock()
            Cmets_obj = Parameter.objects.get(
                entity='content_descriptionfile').value
            self.IngestTable = ESSDB.DB().action('ESSConfig', 'GET',
                                                 ('Value', ),
                                                 ('Name', 'IngestTable'))[0][0]
            if ExtDBupdate:
                self.ext_IngestTable = self.IngestTable
            else:
                self.ext_IngestTable = ''
            self.dbget, errno, why = ESSDB.DB().action(
                self.IngestTable, 'GET4',
                ('ObjectIdentifierValue', 'ObjectUUID', 'PolicyId',
                 'ObjectSize'), ('StatusProcess', 'BETWEEN', 39, 'AND', 40,
                                 'AND', 'StatusActivity', '=', '0'))
            if errno:
                logging.error('Failed to access Local DB, error: ' + str(why))
            for self.obj in self.dbget:
                self.ok = 1
                self.ProcDB = ESSDB.DB().action('ESSProc', 'GET',
                                                ('Run', 'Pause'),
                                                ('Name', ProcName))[0]
                if self.ProcDB[0] == '0':
                    logging.info('Stopping ' + ProcName)
                    ESSDB.DB().action('ESSProc', 'UPD',
                                      ('Status', '0', 'Run', '0', 'PID', '0'),
                                      ('Name', ProcName))
                    thread.interrupt_main()
                    time.sleep(5)
                    break
                elif self.ProcDB[1] == 1:
                    while 1:
                        time.sleep(60)
                        self.ProcDB = ESSDB.DB().action(
                            'ESSProc', 'GET', ('Run', 'Pause'),
                            ('Name', ProcName))[0]
                        if self.ProcDB[1] == 1:
                            logging.info('Process is in pause state')
                        else:
                            break
                self.ObjectIdentifierValue = self.obj[0]
                self.ObjectUUID = self.obj[1]
                self.PolicyId = self.obj[2]
                self.ObjectSize = self.obj[3]
                ArchivePolicy_obj = ArchivePolicy.objects.get(
                    PolicyStat=1, PolicyID=self.PolicyId)
                if self.ok:
                    ###########################################################
                    # set variables
                    self.AIPpath = ArchivePolicy_obj.AIPpath
                    self.metatype = ArchivePolicy_obj.IngestMetadata
                    self.ChecksumAlgorithm = ArchivePolicy_obj.ChecksumAlgorithm
                    self.CA = dict(ChecksumAlgorithm_CHOICES)[
                        self.ChecksumAlgorithm]
                    self.SIPpath = ArchivePolicy_obj.IngestPath
                    self.p_obj = self.ObjectIdentifierValue + '.tar'
                    self.ObjectPath = os.path.join(self.AIPpath, self.p_obj)
                    self.SIProotpath = os.path.join(self.SIPpath,
                                                    self.ObjectIdentifierValue)
                    if self.metatype in [4]:
                        #self.Cmets_obj = '%s/%s_Content_METS.xml' % (self.ObjectIdentifierValue,self.ObjectIdentifierValue)
                        #self.Cmets_objpath = os.path.join(self.SIPpath,self.Cmets_obj)
                        #self.Cmets_obj = Cmets_obj.replace('{uuid}',self.ObjectIdentifierValue)
                        self.Cmets_obj = Cmets_obj.replace(
                            '{objid}', self.ObjectIdentifierValue)
                        self.Cmets_objpath = os.path.join(
                            self.SIProotpath, self.Cmets_obj)
                    elif self.metatype in [1, 2, 3]:
                        self.Cmets_obj = '%s_Content_METS.xml' % (
                            self.ObjectIdentifierValue)
                        self.Cmets_objpath = os.path.join(
                            self.AIPpath, self.Cmets_obj)
                    self.Pmets_obj = '%s_Package_METS.xml' % (
                        self.ObjectIdentifierValue)
                    self.Pmets_objpath = os.path.join(self.AIPpath,
                                                      self.Pmets_obj)
                    self.AIC_UUID = None
                    self.AIC_UUID_rel_ObjectUUIDs = []
                if self.ok:
                    METS_agent_list = []
                    METS_altRecordID_list = []
                    if self.metatype == 1:
                        ############################################
                        # Object have metatype 1 (METS)
                        self.METS_LABEL = 'ESSArch AIP'
                        # Get SIP Content METS information
                        self.METSfilepath = os.path.join(
                            self.SIPpath,
                            self.ObjectIdentifierValue + '/metadata/SIP/' +
                            self.ObjectIdentifierValue + '_Content_METS.xml')
                        res_info, res_files, res_struct, error, why = ESSMD.getMETSFileList(
                            FILENAME=self.METSfilepath)
                        for agent in res_info[2]:
                            if not (agent[0] == 'CREATOR'
                                    and agent[3] == 'SOFTWARE'):
                                METS_agent_list.append(agent)
                        METS_agent_list.append([
                            'CREATOR', 'INDIVIDUAL', '', AgentIdentifierValue,
                            []
                        ])
                        METS_agent_list.append([
                            'CREATOR', 'OTHER', 'SOFTWARE', 'ESSArch',
                            ['VERSION=%s' % ProcVersion]
                        ])
                    elif self.metatype == 2:
                        ############################################
                        # Object have metatype 2 (RES)
                        self.METS_LABEL = 'Imaging AIP RA'
                        METS_agent_list.append([
                            'ARCHIVIST', 'ORGANIZATION', '', 'Riksarkivet', []
                        ])
                        METS_agent_list.append(
                            ['CREATOR', 'ORGANIZATION', '', 'Riksarkivet', []])
                        METS_agent_list.append([
                            'CREATOR', 'INDIVIDUAL', '', AgentIdentifierValue,
                            []
                        ])
                        METS_agent_list.append([
                            'CREATOR', 'OTHER', 'SOFTWARE', 'ESSArch',
                            ['VERSION=%s' % ProcVersion]
                        ])
                    elif self.metatype == 3:
                        ############################################
                        # Object have metatype 3 (ADDML)
                        self.METS_LABEL = 'Born Digital AIP RA'
                        METS_agent_list.append([
                            'ARCHIVIST', 'ORGANIZATION', '', 'Riksarkivet', []
                        ])
                        METS_agent_list.append(
                            ['CREATOR', 'ORGANIZATION', '', 'Riksarkivet', []])
                        METS_agent_list.append([
                            'CREATOR', 'INDIVIDUAL', '', AgentIdentifierValue,
                            []
                        ])
                        METS_agent_list.append([
                            'CREATOR', 'OTHER', 'SOFTWARE', 'ESSArch',
                            ['VERSION=%s' % ProcVersion]
                        ])
                    elif self.metatype in [4]:
                        ############################################
                        # Object have metatype 4 (eARD METS)
                        res_info, res_files, res_struct, error, why = ESSMD.getMETSFileList(
                            FILENAME=self.Cmets_objpath)
                        for agent in res_info[2]:
                            #if not (agent[0] == 'CREATOR' and agent[3] == 'SOFTWARE'):
                            METS_agent_list.append(agent)
                        self.METS_LABEL = res_info[0][0]
                        METS_agent_list.append([
                            'CREATOR', None, 'INDIVIDUAL', None,
                            AgentIdentifierValue, []
                        ])
                        METS_agent_list.append([
                            'CREATOR', None, 'OTHER', 'SOFTWARE', 'ESSArch',
                            ['VERSION=%s' % ProcVersion]
                        ])
                        for altRecordID in res_info[3]:
                            METS_altRecordID_list.append(altRecordID)
                logging.debug('self.obj: ' + str(self.obj))
                if self.ChecksumAlgorithm > 0:  #self.ChecksumAlgorithm 1 = MD5, 2 = SHA-256
                    self.startCalTime = datetime.timedelta(
                        seconds=time.localtime()[5],
                        minutes=time.localtime()[4],
                        hours=time.localtime()[3])
                    errno, why = ESSPGM.DB().SetAIPstatus(
                        self.IngestTable, self.ext_IngestTable,
                        AgentIdentifierValue, self.ObjectUUID, 40, 5)
                    if errno:
                        logging.error('Failed to update DB status for AIP: ' +
                                      str(self.ObjectIdentifierValue) +
                                      ' error: ' + str(why))
                    logging.info('Start create Package METS for: ' +
                                 self.ObjectIdentifierValue)
                    if self.ok:
                        ###########################################################
                        # Create PMETS for AIP package
                        self.M_CHECKSUM, errno, why = ESSPGM.Check().checksum(
                            self.Cmets_objpath, self.CA)
                        if errno:
                            self.event_info = 'Problem to get checksum for METS object for AIP package: ' + str(
                                self.Cmets_objpath)
                            logging.error(self.event_info)
                            ESSPGM.Events().create('1030', '',
                                                   'ESSArch AIPCreator',
                                                   ProcVersion, '1',
                                                   self.event_info, 2,
                                                   self.ObjectIdentifierValue)
                            self.ok = 0
                        self.M_statinfo = os.stat(self.Cmets_objpath)
                        self.M_SIZE = self.M_statinfo.st_size
                        self.M_utc_mtime = datetime.datetime.utcfromtimestamp(
                            self.M_statinfo.st_mtime).replace(tzinfo=pytz.utc)
                        self.M_lociso_mtime = self.M_utc_mtime.astimezone(
                            self.tz).isoformat()
                        self.P_CHECKSUM, errno, why = ESSPGM.Check().checksum(
                            self.ObjectPath, self.CA)
                        if errno:
                            self.event_info = 'Problem to get checksum for AIP package: ' + str(
                                self.ObjectPath)
                            logging.error(self.event_info)
                            ESSPGM.Events().create('1040', '',
                                                   'ESSArch AIPChecksum',
                                                   ProcVersion, '1',
                                                   self.event_info, 2,
                                                   self.ObjectIdentifierValue)
                            self.ok = 0
                        self.P_statinfo = os.stat(self.ObjectPath)
                        self.P_SIZE = self.P_statinfo.st_size
                        self.P_utc_mtime = datetime.datetime.utcfromtimestamp(
                            self.P_statinfo.st_mtime).replace(tzinfo=pytz.utc)
                        self.P_lociso_mtime = self.P_utc_mtime.astimezone(
                            self.tz).isoformat()

                        if self.metatype in [1, 2, 3]:
                            self.PMETSdoc = ESSMD.createPMets(
                                ID=self.ObjectIdentifierValue,
                                LABEL=self.METS_LABEL,
                                AGENT=METS_agent_list,
                                P_SIZE=self.P_SIZE,
                                P_CREATED=self.P_lociso_mtime,
                                P_CHECKSUM=self.P_CHECKSUM,
                                P_CHECKSUMTYPE=self.CA,
                                M_SIZE=self.M_SIZE,
                                M_CREATED=self.M_lociso_mtime,
                                M_CHECKSUM=self.M_CHECKSUM,
                                M_CHECKSUMTYPE=self.CA,
                            )
                            errno, why = ESSMD.writeToFile(
                                self.PMETSdoc, self.Pmets_objpath)
                            if errno:
                                self.event_info = 'Problem to write PMETS to file for AIP package: ' + str(
                                    self.Pmets_objpath)
                                logging.error(self.event_info)
                                ESSPGM.Events().create(
                                    '1040', '', 'ESSArch AIPChecksum',
                                    ProcVersion, '1', self.event_info, 2,
                                    self.ObjectIdentifierValue)
                                time.sleep(2)
                                self.ok = 0
                        elif self.metatype in [4]:
                            ms_files = []
                            ms_files.append([
                                'amdSec', None, 'techMD', 'techMD001', None,
                                None,
                                'ID%s' % str(uuid.uuid1()), 'URL',
                                'file:%s/%s' %
                                (self.ObjectIdentifierValue, self.Cmets_obj),
                                'simple', self.M_CHECKSUM, self.CA,
                                self.M_SIZE, 'text/xml', self.M_lociso_mtime,
                                'OTHER', 'METS', None
                            ])

                            ms_files.append([
                                'fileSec', None, None, None, None, None,
                                'ID%s' % str(uuid.uuid1()), 'URL',
                                'file:%s' % self.p_obj, 'simple',
                                self.P_CHECKSUM, self.CA, self.P_SIZE,
                                'application/x-tar', self.P_lociso_mtime,
                                'tar', 'techMD001', None
                            ])
                            # define namespaces
                            self.namespacedef = 'xmlns:mets="%s"' % METS_NAMESPACE
                            self.namespacedef += ' xmlns:xlink="%s"' % XLINK_NAMESPACE
                            self.namespacedef += ' xmlns:xsi="%s"' % XSI_NAMESPACE
                            self.namespacedef += ' xsi:schemaLocation="%s %s"' % (
                                METS_NAMESPACE, METS_SCHEMALOCATION)

                            errno, info_list = ESSMD.Create_IP_mets(
                                ObjectIdentifierValue=self.
                                ObjectIdentifierValue,
                                METS_ObjectPath=self.Pmets_objpath,
                                agent_list=METS_agent_list,
                                altRecordID_list=METS_altRecordID_list,
                                file_list=ms_files,
                                namespacedef=self.namespacedef,
                                METS_LABEL=self.METS_LABEL,
                                METS_PROFILE=METS_PROFILE,
                                METS_TYPE='AIP',
                                METS_DocumentID=self.Pmets_obj,
                                TimeZone=TimeZone)
                            if errno:
                                logging.error(
                                    'Problem creating Package METS file: %s'
                                    % str(info_list))

                    self.ObjectMessageDigest = self.P_CHECKSUM
                    self.stopCalTime = datetime.timedelta(
                        seconds=time.localtime()[5],
                        minutes=time.localtime()[4],
                        hours=time.localtime()[3])
                    self.CalTime = self.stopCalTime - self.startCalTime
                    self.ObjectSizeMB = self.ObjectSize / 1048576
                    if self.CalTime.seconds < 1:
                        self.CalTime = datetime.timedelta(
                            seconds=1
                        )  # Fix minimum time to 1 second if it is zero.
                    self.CalMBperSEC = int(self.ObjectSizeMB) / int(
                        self.CalTime.seconds)
                    logging.info('Finished calculate checksum: ' +
                                 self.ObjectIdentifierValue + ' , ' +
                                 str(self.CalMBperSEC) + ' MB/Sec and Time: ' +
                                 str(self.CalTime))

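                    # When checksumming succeeded, record the new digest, algorithm and
                    # metadata size in the local ingest table and, if ExtDBupdate is set,
                    # mirror the update to the external MSSQL database.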
                    if self.ok:
                        self.timestamp_utc = datetime.datetime.utcnow(
                        ).replace(microsecond=0, tzinfo=pytz.utc)
                        self.timestamp_dst = self.timestamp_utc.astimezone(
                            self.tz)
                        res, errno, why = ESSDB.DB().action(
                            self.IngestTable, 'UPD',
                            ('ObjectMessageDigestAlgorithm',
                             self.ChecksumAlgorithm, 'ObjectMessageDigest',
                             self.ObjectMessageDigest, 'MetaObjectSize',
                             self.M_SIZE, 'LastEventDate',
                             self.timestamp_utc.replace(tzinfo=None),
                             'linkingAgentIdentifierValue',
                             AgentIdentifierValue, 'LocalDBdatetime',
                             self.timestamp_utc.replace(tzinfo=None)),
                            ('ObjectIdentifierValue',
                             self.ObjectIdentifierValue))
                        if errno:
                            logging.error('Failed to update Local DB: ' +
                                          str(self.ObjectIdentifierValue) +
                                          ' error: ' + str(why))
                        if errno == 0 and ExtDBupdate:
                            ext_res, ext_errno, ext_why = ESSMSSQL.DB().action(
                                self.IngestTable, 'UPD',
                                ('ObjectMessageDigestAlgorithm',
                                 self.ChecksumAlgorithm, 'ObjectMessageDigest',
                                 self.ObjectMessageDigest, 'MetaObjectSize',
                                 self.M_SIZE, 'LastEventDate',
                                 self.timestamp_dst.replace(tzinfo=None),
                                 'linkingAgentIdentifierValue',
                                 AgentIdentifierValue),
                                ('ObjectIdentifierValue',
                                 self.ObjectIdentifierValue))
                            if ext_errno:
                                logging.error(
                                    'Failed to update External DB: ' +
                                    str(self.ObjectIdentifierValue) +
                                    ' error: ' + str(ext_why))
                            else:
                                res, errno, why = ESSDB.DB().action(
                                    self.IngestTable, 'UPD',
                                    ('ExtDBdatetime',
                                     self.timestamp_utc.replace(tzinfo=None)),
                                    ('ObjectIdentifierValue',
                                     self.ObjectIdentifierValue))
                                if errno:
                                    logging.error(
                                        'Failed to update Local DB: ' +
                                        str(self.ObjectIdentifierValue) +
                                        ' error: ' + str(why))

                    if self.ok and self.metatype == 4:
                        ####################################################
                        # Create AIC METS File:
                        aic_obj = ArchiveObject.objects.filter(
                            relaic_set__UUID=self.ObjectUUID)[:1]
                        if aic_obj:
                            self.AIC_UUID = aic_obj.get().ObjectUUID
                            logging.info(
                                'Successfully retrieved AIC_UUID: %s from DB' %
                                self.AIC_UUID)
                        else:
                            logging.warning(
                                'AIC not found for IP object: %s, skipping AIC METS file creation'
                                % self.ObjectUUID)
                    if self.ok and self.AIC_UUID:
                        ip_obj_list = ArchiveObject.objects.filter(
                            Q(StatusProcess=3000)
                            | Q(ObjectUUID=self.ObjectUUID),
                            reluuid_set__AIC_UUID=self.AIC_UUID).order_by(
                                'Generation')
                        if ip_obj_list:
                            logging.info('Start create AIC METS: ' +
                                         self.AIC_UUID)
                            self.AICmets_objpath = os.path.join(
                                self.AIPpath, self.AIC_UUID + '_AIC_METS.xml')
                            ms_files = []
                            for ip_obj in ip_obj_list:
                                logging.info(
                                    'Add IP: %s to AIC METS: %s' %
                                    (ip_obj.ObjectUUID, self.AIC_UUID))
                                ms_files.append([
                                    'fileSec', None, None, None, None, None,
                                    'ID%s' % str(uuid.uuid1()), 'URL',
                                    'file:%s' % ip_obj.ObjectUUID, 'simple',
                                    ip_obj.ObjectMessageDigest,
                                    dict(ChecksumAlgorithm_CHOICES)[
                                        ip_obj.ObjectMessageDigestAlgorithm],
                                    ip_obj.ObjectSize, 'application/x-tar',
                                    ip_obj.CreateDate, 'IP Package', None, None
                                ])

                            # define namespaces
                            self.namespacedef = 'xmlns:mets="%s"' % METS_NAMESPACE
                            self.namespacedef += ' xmlns:xlink="%s"' % XLINK_NAMESPACE
                            self.namespacedef += ' xmlns:xsi="%s"' % XSI_NAMESPACE
                            self.namespacedef += ' xsi:schemaLocation="%s %s"' % (
                                METS_NAMESPACE, METS_SCHEMALOCATION)

                            errno, info_list = ESSMD.Create_IP_mets(
                                ObjectIdentifierValue=self.AIC_UUID,
                                METS_ObjectPath=self.AICmets_objpath,
                                agent_list=[],
                                altRecordID_list=[],
                                file_list=ms_files,
                                namespacedef=self.namespacedef,
                                METS_LABEL='AIC relation to IP',
                                METS_PROFILE=METS_PROFILE,
                                METS_TYPE='AIC',
                                METS_DocumentID=self.AIC_UUID +
                                '_AIC_METS.xml',
                                TimeZone=TimeZone)
                            if errno:
                                logging.error(
                                    'Problem creating AIC METS file: %s'
                                    % str(info_list))
                        else:
                            logging.error(
                                'Failed to get objects related to AIC_UUID: %s from DB'
                                % (self.AIC_UUID))
                            self.ok = 0

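                    # Mark the AIP as checksummed (status 49/0) on success, or as failed
                    # (status 40/100) otherwise, and log a corresponding 1040 event.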
                    if self.ok:
                        errno, why = ESSPGM.DB().SetAIPstatus(
                            self.IngestTable, self.ext_IngestTable,
                            AgentIdentifierValue, self.ObjectUUID, 49, 0)
                        if errno:
                            logging.error(
                                'Failed to update DB status for AIP: ' +
                                str(self.ObjectIdentifierValue) + ' error: ' +
                                str(why))
                        else:
                            self.event_info = 'Successfully created checksum for Object: %s' % self.ObjectIdentifierValue
                            logging.info(self.event_info)
                            ESSPGM.Events().create('1040', '',
                                                   'ESSArch AIPChecksum',
                                                   ProcVersion, '0',
                                                   self.event_info, 2,
                                                   self.ObjectIdentifierValue)
                    else:
                        errno, why = ESSPGM.DB().SetAIPstatus(
                            self.IngestTable, self.ext_IngestTable,
                            AgentIdentifierValue, self.ObjectUUID, 40, 100)
                        if errno:
                            logging.error(
                                'Failed to update DB status for AIP: ' +
                                str(self.ObjectIdentifierValue) + ' error: ' +
                                str(why))
                        else:
                            self.event_info = 'Failed to create checksum for Object: %s' % self.ObjectIdentifierValue
                            logging.error(self.event_info)
                            ESSPGM.Events().create('1040', '',
                                                   'ESSArch AIPChecksum',
                                                   ProcVersion, '1',
                                                   self.event_info, 2,
                                                   self.ObjectIdentifierValue)
                elif self.ChecksumAlgorithm == 0:  # ChecksumAlgorithm 0 = no checksum
                    logging.info('Skip creation of checksum: ' +
                                 self.ObjectIdentifierValue)
                    self.ObjectMessageDigest = ''
                    self.MetaObjectSize = os.stat(self.Cmets_objpath)[6]
                    self.timestamp_utc = datetime.datetime.utcnow().replace(
                        microsecond=0, tzinfo=pytz.utc)
                    self.timestamp_dst = self.timestamp_utc.astimezone(self.tz)
                    res, errno, why = ESSDB.DB().action(
                        self.IngestTable, 'UPD',
                        ('ObjectMessageDigestAlgorithm',
                         self.ChecksumAlgorithm, 'ObjectMessageDigest',
                         self.ObjectMessageDigest, 'StatusProcess', '49',
                         'StatusActivity', '0', 'MetaObjectSize',
                         self.MetaObjectSize, 'LastEventDate',
                         self.timestamp_utc.replace(tzinfo=None),
                         'linkingAgentIdentifierValue', AgentIdentifierValue,
                         'LocalDBdatetime',
                         self.timestamp_utc.replace(tzinfo=None)),
                        ('ObjectIdentifierValue', self.ObjectIdentifierValue))
                    if errno:
                        logging.error('Failed to update Local DB: ' +
                                      str(self.ObjectIdentifierValue) +
                                      ' error: ' + str(why))
                    else:
                        ESSPGM.Events().create('1040', '',
                                               'ESSArch AIPChecksum',
                                               ProcVersion, '0',
                                               'Skip creation of checksum', 2,
                                               self.ObjectIdentifierValue)
                    if errno == 0 and ExtDBupdate:
                        ext_res, ext_errno, ext_why = ESSMSSQL.DB().action(
                            self.IngestTable, 'UPD',
                            ('ObjectMessageDigestAlgorithm',
                             self.ChecksumAlgorithm, 'ObjectMessageDigest',
                             self.ObjectMessageDigest, 'StatusProcess', '49',
                             'StatusActivity', '0', 'MetaObjectSize',
                             self.MetaObjectSize, 'LastEventDate',
                             self.timestamp_dst.replace(tzinfo=None),
                             'linkingAgentIdentifierValue',
                             AgentIdentifierValue),
                            ('ObjectIdentifierValue',
                             self.ObjectIdentifierValue))
                        if ext_errno:
                            logging.error('Failed to update External DB: ' +
                                          str(self.ObjectIdentifierValue) +
                                          ' error: ' + str(ext_why))
                        else:
                            res, errno, why = ESSDB.DB().action(
                                self.IngestTable, 'UPD',
                                ('ExtDBdatetime',
                                 self.timestamp_utc.replace(tzinfo=None)),
                                ('ObjectIdentifierValue',
                                 self.ObjectIdentifierValue))
                            if errno:
                                logging.error('Failed to update Local DB: ' +
                                              str(self.ObjectIdentifierValue) +
                                              ' error: ' + str(why))
            db.close_old_connections()
            self.mLock.release()
            time.sleep(int(self.Time))
        self.mDieFlag = 0
Beispiel #36
0
    def __init__(self, debug=False, verbosity=1):
        """
        Spawn greenlets for handling websockets and PostgreSQL calls.

        Only one pgworker should be started for any process, and ideally only
        one instance per node as well (i.e. use IPC to communicate events
        between processes on the same host).  This class handles the former
        (per process); per-host IPC is an exercise left to you, dear reader -
        pull requests are of course welcome.  ;)

        For the record, there's no technical reason that you can't have multiple
        pgworker instances running on one host (or even one process); it's just
        inefficient to send exactly the same traffic multiple times between your
        web servers and your database host.
        """
        import logging
        from django.apps import apps
        from django.utils.module_loading import import_string
        from dddp.websocket import DDPWebSocketApplication

        self.verbosity = verbosity
        self._stop_event = gevent.event.Event()
        self._stop_event.set()  # start in stopped state.
        self.logger = logging.getLogger('dddp.launcher')

        if DDPLauncher.pgworker is None:
            from django.db import connection, close_old_connections
            from dddp.postgres import PostgresGreenlet
            # shutdown existing connections
            close_old_connections()

            DDPLauncher.pgworker = PostgresGreenlet(connection)

        # use settings.WSGI_APPLICATION or fallback to default Django WSGI app
        from django.conf import settings
        self.wsgi_app = None
        if hasattr(settings, 'WSGI_APPLICATION'):
            self.wsgi_name = settings.WSGI_APPLICATION
            try:
                self.wsgi_app = import_string(self.wsgi_name)
            except ImportError:
                pass
        if self.wsgi_app is None:
            from django.core.wsgi import get_wsgi_application
            self.wsgi_app = get_wsgi_application()
            self.wsgi_name = str(self.wsgi_app.__class__)

        self.api = apps.get_app_config('dddp').api
        self.api.pgworker = DDPLauncher.pgworker
        DDPWebSocketApplication.api = self.api

        # setup PostgresGreenlet to multiplex DB calls
        DDPWebSocketApplication.pgworker = self.pgworker

        self.resource = geventwebsocket.Resource(
            collections.OrderedDict([
                (r'/websocket', DDPWebSocketApplication),
                (r'^/sockjs/\d+/\w+/websocket$', DDPWebSocketApplication),
                (r'^/sockjs/\d+/\w+/xhr$', ddpp_sockjs_xhr),
                (r'^/sockjs/info$', ddpp_sockjs_info),
                (r'^/(?!(websocket|sockjs)/)', self.wsgi_app),
            ]), )

        self.servers = []
        self.threads = []
Beispiel #37
0
 def wrapped_f(*args, **kwargs):
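     # Time the wrapped API call and map each exception type to an appropriate
     # HTTP error response; close_old_connections() in the finally block keeps
     # stale DB connections from leaking between requests.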
     try:
         start = datetime.now()
         output = f(*args, **kwargs)
         logger.info("Time taken for %s is %s", self.api_key,
                     datetime.now() - start)
         return output
     except KeyError as e:
         exception_trace = traceback.format_exc()
         parsed_request = parse_request_for_error(request)
         logger.error("Exception in %s API:\n%s Request: \n%s",
                      self.api_key, exception_trace, parsed_request)
         logger.error("Missing key : " + e.__repr__())
         return error_response(400,
                               "{} is required".format(e.__repr__()))
     except (ValidationError, TypeError) as e:
         exception_trace = traceback.format_exc()
         parsed_request = parse_request_for_error(request)
         logger.error("Exception in %s API:\n%s Request: \n%s",
                      self.api_key, exception_trace, parsed_request)
         logger.error("Missing key : " + e.__repr__())
         return error_response(
             400,
             "Bad Request object. Missing Parameters or wrong endpoint")
     except ValueError as e:
         exception_trace = traceback.format_exc()
         parsed_request = parse_request_for_error(request)
         logger.error("Exception in %s API:\n%s Request: \n%s",
                      self.api_key, exception_trace, parsed_request)
         logger.error("Missing key : " + e.__repr__())
         return error_response(
             400, "Bad Request. Incorrect Parameter types")
     except (ObjectDoesNotExist, MultipleObjectsReturned) as e:
         exception_trace = traceback.format_exc()
         parsed_request = parse_request_for_error(request)
         logger.error("Exception in %s API:\n%s Request: \n%s",
                      self.api_key, exception_trace, parsed_request)
         logger.error("Missing key : " + e.__repr__())
         return error_response(
             422,
             "Unprocessable Entity, no records found or multiple records found"
         )
     except PermissionDenied as e:
         exception_trace = traceback.format_exc()
         parsed_request = parse_request_for_error(request)
         logger.error("Exception in %s API:\n%s Request: \n%s",
                      self.api_key, exception_trace, parsed_request)
         logger.error("Missing key : " + e.__repr__())
         return error_response(403, "Permission Denied")
     except CustomError as error:
         exception_trace = traceback.format_exc()
         parsed_request = parse_request_for_error(request)
         logger.error("Exception in %s API: %s\n%s \nRequest: %s",
                      self.api_key, error.get_message(),
                      exception_trace, parsed_request)
         return error_response(error.get_error_code(),
                               error.get_message())
     except IntegrityError as e:
         exception_trace = traceback.format_exc()
         parsed_request = parse_request_for_error(request)
         logger.error("Exception in %s API:\n%s Request: \n%s",
                      self.api_key, exception_trace, parsed_request)
         db_err_code, _ = e.args
         if db_err_code == 1062:
             return error_response(400, "Duplicate Entry")
         return error_response(500, "Internal Server Error")
     except Exception as e:
         exception_trace = traceback.format_exc()
         parsed_request = parse_request_for_error(request)
         logger.error("Exception in %s API:\n%s Request: \n%s",
                      self.api_key, exception_trace, parsed_request)
         logger.error("Missing key : " + e.__repr__())
         return error_response(500, "Internal Server Error")
     finally:
         close_old_connections()
Beispiel #38
0
def new_game(request):
    user = request.user
    hosted = Game.objects.create(white_player=user)

    close_old_connections()
    return HttpResponseRedirect(reverse('game', args=[hosted.label]))
Beispiel #39
0
def solo_game(request):
    user = request.user
    solo = Game.objects.create(white_player=user, black_player=user)

    close_old_connections()
    return HttpResponseRedirect(reverse('game', args=[solo.label]))
Beispiel #40
0
 def thread_handler(self, loop, *args, **kwargs):
     try:
         return super().thread_handler(loop, *args, **kwargs)
     finally:
         close_old_connections()
Beispiel #41
0
def handle_creation(job):
    global logger
    data = json.loads(job.body)

    try:
        application = InstanceApplication.objects.get(
            id=data["application_id"])
    except ObjectDoesNotExist:
        logger.warn("Unable to find application #%d, burying" %
                    data["application_id"])
        try_log(
            mail_admins, "Burying job #%d" % job.jid,
            "Please inspect job #%d (application %d) manually" %
            (job.jid, data["application_id"]))
        job.bury()
        close_old_connections()
        return
    finally:
        close_old_connections()

    logger.info("Handling %s (job: %d)", application.hostname,
                application.job_id)
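    # Poll the cluster job status every 15 seconds until it reports an end
    # timestamp, then record the outcome and notify the applicant and managers.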
    while True:
        sleep(15)
        logger.info("Checking %s (job: %d)", application.hostname,
                    application.job_id)
        status = application.cluster.get_job_status(application.job_id)
        if status["end_ts"]:
            logger.info("%s (job: %d) done. Status: %s", application.hostname,
                        application.job_id, status["status"])
            if status["status"] == "error":
                application.status = STATUS_FAILED
                application.backend_message = smart_str(status["opresult"])
                application.save()
                logger.warn("%s (job: %d) failed. Notifying admins",
                            application.hostname, application.job_id)
                try_log(
                    mail_admins, "Instance creation failure for %s on %s" %
                    (application.hostname, application.cluster),
                    json.dumps(status, indent=2))
            else:
                application.status = STATUS_SUCCESS
                application.backend_message = None
                application.save()
                logger.info("Mailing %s about %s", application.applicant.email,
                            application.hostname)

                fqdn = Site.objects.get_current().domain
                instance_url = "https://%s%s" % \
                               (fqdn, urlresolvers.reverse("instance-detail",
                                                args=(application.cluster.slug,
                                                      application.hostname)))
                mail_body = render_to_string(
                    "instances/emails/instance_created_mail.txt", {
                        "application": application,
                        "instance_url": instance_url,
                        "BRANDING": settings.BRANDING
                    })
                mail_body_managers = render_to_string(
                    "instances/emails/instance_created_mail.txt", {
                        "application": application,
                        "reviewer": application.reviewer,
                        "instance_url": instance_url,
                        "BRANDING": settings.BRANDING
                    })
                try_log(
                    send_mail, settings.EMAIL_SUBJECT_PREFIX +
                    "Instance %s is ready" % application.hostname, mail_body,
                    settings.SERVER_EMAIL, [application.applicant.email])
                logger.info("Mailing managers about %s" % application.hostname)
                try_log(mail_managers,
                        "Instance %s is ready" % application.hostname,
                        mail_body_managers)
            job.delete()
            close_old_connections()
            break
        job.touch()
Beispiel #42
0
 def run_cronjob_in_thread(self, logs_count):
     call(self.wait_3sec_cron)
     self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
     db.close_old_connections()
Beispiel #43
0
 def inner(*args, **kwargs):
     try:
         return fn(*args, **kwargs)
     finally:
         if not HUEY.always_eager:
             close_old_connections()
Beispiel #44
0
def scheduler(broker=None):
    """
    Creates a task from a schedule at the scheduled time and schedules next run
    """
    if not broker:
        broker = get_broker()
    db.close_old_connections()
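    # Lock all due schedules with select_for_update() inside a transaction so
    # concurrent clusters cannot create duplicate tasks from the same schedule.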
    try:
        with db.transaction.atomic():
            for s in Schedule.objects.select_for_update().exclude(
                    repeats=0).filter(next_run__lt=timezone.now()):
                args = ()
                kwargs = {}
                # get args, kwargs and hook
                if s.kwargs:
                    try:
                        # eval should be safe here because dict()
                        kwargs = eval('dict({})'.format(s.kwargs))
                    except SyntaxError:
                        kwargs = {}
                if s.args:
                    args = ast.literal_eval(s.args)
                    # single value won't eval to tuple, so:
                    if type(args) != tuple:
                        args = (args, )
                q_options = kwargs.get('q_options', {})
                if s.hook:
                    q_options['hook'] = s.hook
                # set up the next run time
                if not s.schedule_type == s.ONCE:
                    next_run = arrow.get(s.next_run)
                    while True:
                        if s.schedule_type == s.MINUTES:
                            next_run = next_run.replace(
                                minutes=+(s.minutes or 1))
                        elif s.schedule_type == s.HOURLY:
                            next_run = next_run.replace(hours=+1)
                        elif s.schedule_type == s.DAILY:
                            next_run = next_run.replace(days=+1)
                        elif s.schedule_type == s.WEEKLY:
                            next_run = next_run.replace(weeks=+1)
                        elif s.schedule_type == s.MONTHLY:
                            next_run = next_run.replace(months=+1)
                        elif s.schedule_type == s.QUARTERLY:
                            next_run = next_run.replace(months=+3)
                        elif s.schedule_type == s.YEARLY:
                            next_run = next_run.replace(years=+1)
                        if Conf.CATCH_UP or next_run > arrow.utcnow():
                            break
                    s.next_run = next_run.datetime
                    s.repeats += -1
                # send it to the cluster
                q_options['broker'] = broker
                q_options['group'] = q_options.get('group', s.name or s.id)
                kwargs['q_options'] = q_options
                s.task = django_q.tasks.async_task(s.func, *args, **kwargs)
                # log it
                if not s.task:
                    logger.error(
                        _('{} failed to create a task from schedule [{}]').
                        format(current_process().name, s.name or s.id))
                else:
                    logger.info(
                        _('{} created a task from schedule [{}]').format(
                            current_process().name, s.name or s.id))
                # default behavior is to delete a ONCE schedule
                if s.schedule_type == s.ONCE:
                    if s.repeats < 0:
                        s.delete()
                        continue
                    # but not if it has a positive repeats
                    s.repeats = 0
                # save the schedule
                s.save()
    except Exception as e:
        logger.error(e)
Beispiel #45
0
 def inner(*args, **kwargs):
     try:
         return fn(*args, **kwargs)
     finally:
         if not HUEY.immediate and not getattr(HUEY, 'testing', False):
             close_old_connections()
Beispiel #46
0
 def foo(*args, **kwargs):
     try:
         return fn(*args, **kwargs)
     finally:
         close_old_connections()
Beispiel #47
0
 def _close_old_connections(self, *args, **kwargs):
     db.close_old_connections()
Beispiel #48
0
def query(request):
    """
    Run a SQL query and return the result set.
    :param request:
    :return:
    """
    instance_name = request.POST.get('instance_name')
    sql_content = request.POST.get('sql_content')
    db_name = request.POST.get('db_name')
    tb_name = request.POST.get('tb_name')
    limit_num = int(request.POST.get('limit_num', 0))
    schema_name = request.POST.get('schema_name', None)
    user = request.user

    result = {'status': 0, 'msg': 'ok', 'data': {}}
    try:
        instance = user_instances(
            request.user).get(instance_name=instance_name)
    except Instance.DoesNotExist:
        result['status'] = 1
        result['msg'] = '你所在组未关联该实例'
        return HttpResponse(json.dumps(result),
                            content_type='application/json')

    # Server-side parameter validation
    if None in [sql_content, db_name, instance_name, limit_num]:
        result['status'] = 1
        result['msg'] = '页面提交参数可能为空'
        return HttpResponse(json.dumps(result),
                            content_type='application/json')

    try:
        config = SysConfig()
        # Pre-query checks: banned-statement detection and statement splitting
        query_engine = get_engine(instance=instance)
        query_check_info = query_engine.query_check(db_name=db_name,
                                                    sql=sql_content)
        if query_check_info.get('bad_query'):
            # The engine flagged this as a bad query
            result['status'] = 1
            result['msg'] = query_check_info.get('msg')
            return HttpResponse(json.dumps(result),
                                content_type='application/json')
        if query_check_info.get(
                'has_star') and config.get('disable_star') is True:
            # The query selects * and the disable_star option is enabled
            result['status'] = 1
            result['msg'] = query_check_info.get('msg')
            return HttpResponse(json.dumps(result),
                                content_type='application/json')
        sql_content = query_check_info['filtered_sql']

        # Check query privileges and obtain the effective limit_num
        priv_check_info = query_priv_check(user, instance, db_name,
                                           sql_content, limit_num)
        if priv_check_info['status'] == 0:
            limit_num = priv_check_info['data']['limit_num']
            priv_check = priv_check_info['data']['priv_check']
        else:
            result['status'] = 1
            result['msg'] = priv_check_info['msg']
            return HttpResponse(json.dumps(result),
                                content_type='application/json')
        # For EXPLAIN statements, set limit_num to 0
        limit_num = 0 if re.match(r"^explain",
                                  sql_content.lower()) else limit_num

        # Add a LIMIT clause to the query SQL or rewrite the statement
        sql_content = query_engine.filter_sql(sql=sql_content,
                                              limit_num=limit_num)

        # Get the query connection first so it can be reused later and the session can be killed
        query_engine.get_connection(db_name=db_name)
        thread_id = query_engine.thread_id
        max_execution_time = int(config.get('max_execution_time', 60))
        # Run the query and add a schedule that kills the statement after timeout=max_execution_time
        if thread_id:
            schedule_name = f'query-{time.time()}'
            run_date = (datetime.datetime.now() +
                        datetime.timedelta(seconds=max_execution_time))
            add_kill_conn_schedule(schedule_name, run_date, instance.id,
                                   thread_id)
        with FuncTimer() as t:
            # Fetch master-slave replication lag
            seconds_behind_master = query_engine.seconds_behind_master
            query_result = query_engine.query(db_name,
                                              sql_content,
                                              limit_num,
                                              schema_name=schema_name,
                                              tb_name=tb_name)
        query_result.query_time = t.cost
        # Remove the kill schedule once the query has returned
        if thread_id:
            del_schedule(schedule_name)

        # Query error
        if query_result.error:
            result['status'] = 1
            result['msg'] = query_result.error
        # Data masking: only mask error-free result sets; the query_check setting decides whether results are returned on masking failure
        elif config.get('data_masking'):
            try:
                with FuncTimer() as t:
                    masking_result = query_engine.query_masking(
                        db_name, sql_content, query_result)
                masking_result.mask_time = t.cost
                # Masking failed
                if masking_result.error:
                    # query_check enabled: return the error and block the result
                    if config.get('query_check'):
                        result['status'] = 1
                        result['msg'] = f'数据脱敏异常:{masking_result.error}'
                    # query_check disabled: ignore the error, return unmasked data, and mark the privilege check as skipped
                    else:
                        logger.warning(
                            f'数据脱敏异常,按照配置放行,查询语句:{sql_content},错误信息:{masking_result.error}'
                        )
                        query_result.error = None
                        result['data'] = query_result.__dict__
                # Masking succeeded
                else:
                    result['data'] = masking_result.__dict__
            except Exception as msg:
                # Unexpected exception: with query_check enabled, return the error and block the result
                if config.get('query_check'):
                    result['status'] = 1
                    result['msg'] = f'数据脱敏异常,请联系管理员,错误信息:{msg}'
                # query_check disabled: ignore the error, return unmasked data, and mark the privilege check as skipped
                else:
                    logger.warning(
                        f'数据脱敏异常,按照配置放行,查询语句:{sql_content},错误信息:{msg}')
                    query_result.error = None
                    result['data'] = query_result.__dict__
        # Statements that need no masking
        else:
            result['data'] = query_result.__dict__

        # Only store logs for successful queries in the database
        if not query_result.error:
            result['data']['seconds_behind_master'] = seconds_behind_master
            if int(limit_num) == 0:
                limit_num = int(query_result.affected_rows)
            else:
                limit_num = min(int(limit_num),
                                int(query_result.affected_rows))
            query_log = QueryLog(username=user.username,
                                 user_display=user.display,
                                 db_name=db_name,
                                 instance_name=instance.instance_name,
                                 sqllog=sql_content,
                                 effect_row=limit_num,
                                 cost_time=query_result.query_time,
                                 priv_check=priv_check,
                                 hit_rule=query_result.mask_rule_hit,
                                 masking=query_result.is_masked)
            # Guard against the connection having timed out during a long query
            if connection.connection and not connection.is_usable():
                close_old_connections()
            query_log.save()
    except Exception as e:
        logger.error(
            f'查询异常报错,查询语句:{sql_content}\n,错误信息:{traceback.format_exc()}')
        result['status'] = 1
        result['msg'] = f'查询异常报错,错误信息:{e}'
        return HttpResponse(json.dumps(result),
                            content_type='application/json')
    # Return the query result
    try:
        return HttpResponse(json.dumps(result,
                                       cls=ExtendJSONEncoderFTime,
                                       bigint_as_string=True),
                            content_type='application/json')
    # This still returns normally, but the output will be garbled
    except UnicodeDecodeError:
        return HttpResponse(json.dumps(result,
                                       default=str,
                                       bigint_as_string=True,
                                       encoding='latin1'),
                            content_type='application/json')
Beispiel #49
0
 def wrapper(*args, **kwargs):
     close_old_connections()
     return func(*args, **kwargs)
Beispiel #50
0
def clear_cluster_users_cache(cluster_slug):
    for user in User.objects.all():
        cache.delete("user:%s:index:instances" % user.username)
    cache.delete("cluster:%s:instances" % cluster_slug)
    close_old_connections()
Beispiel #51
0
def run_bkgd_search():
    """
    Runs background queries.
    """
    execute_filter_queries()
    close_old_connections()
Beispiel #52
0
def import_pgdb_file(tree, user):
    global logs
    global done
    logs = []
    done = False
    root = tree.getroot()
    # all students
    for s in root[0]:
        try:
            if len(Student.objects.filter(student_num=int(s[0].text))) != 0:
                print(f"Student with number {s[0].text} already exists")
                logs.append(f"Student with number {s[0].text} \t ({s[4].text}, {s[3].text}) already exists")
                continue

            s_obj = Student(
                student_num=int(s[0].text),
                cur_grade_num=int(s[1].text),
                homeroom_str=f"{s[2].text}",
                first=s[3].text.strip(),
                last=s[4].text.strip(),
                legal=s[5].text.strip(),
                sex=s[6].text.strip(),
                grad_year=int(s[7].text),
                active=(True if s[8].text == "yes" else False),
            )
            s_obj.save(user)

            for g in s[9]:

                g_obj = s_obj.get_grade(int(g[0].text))
                g_obj.anecdote = g[2].text or ""

                g_obj.set_term1_avg(float(g[3].text), user)
                g_obj.set_term2_avg(float(g[4].text), user)

                for p in g[5]:
                    if (len(PointCodes.objects.filter(catagory=p[0].text).filter(
                            code=int(p[1].text))) == 0):
                        type = PointCodes(catagory=p[0].text, code=int(p[1].text),
                                          description=str(p[0].text) + str(p[1].text))
                        type.save()
                    else:
                        type = PointCodes.objects.filter(catagory=p[0].text).get(code=int(p[1].text))

                    g_obj.add_point(Points(type=type, amount=float(p[2].text)), user)

                g_obj.calc_points_total("SE")
                g_obj.calc_points_total("AT")
                g_obj.calc_points_total("FA")
                g_obj.save()

            logs.append(f"Added student {s[0].text} \t ({s[4].text}, {s[3].text}) successfully")
        except Exception as e:
            student_num = int(s[0].text)
            print(f"Failed to add student {int(s[0].text)}")
            logs.append(f"Failed to add student {s[0].text} \t ({s[4].text}, {s[3].text}) {e}")

            # delete the partially formed student
            if len(Student.objects.filter(student_num=student_num)) != 0:
                Student.objects.get(student_num=student_num).delete()

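    # Update (or create) the P-list cutoff averages for each year in the import file.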
    for plist in root[1]:
        try:
            if PlistCutoff.objects.filter(year=int(plist[0].text)).exists():
                p = PlistCutoff.objects.get(year=int(plist[0].text))
                p.grade_8_T1 = float(plist[1].text)
                p.grade_8_T2 = float(plist[2].text)
                p.grade_9_T1 = float(plist[3].text)
                p.grade_9_T2 = float(plist[4].text)
                p.grade_10_T1 = float(plist[5].text)
                p.grade_10_T2 = float(plist[6].text)
                p.grade_11_T1 = float(plist[7].text)
                p.grade_11_T2 = float(plist[8].text)
                p.grade_12_T1 = float(plist[9].text)
                p.grade_12_T2 = float(plist[10].text)
                p.save(user)
            else:
                PlistCutoff(year=int(plist[0].text),
                            grade_8_T1=float(plist[1].text), grade_8_T2=float(plist[2].text),
                            grade_9_T1=float(plist[3].text), grade_9_T2=float(plist[4].text),
                            grade_10_T1=float(plist[5].text), grade_10_T2=float(plist[6].text),
                            grade_11_T1=float(plist[7].text), grade_11_T2=float(plist[8].text),
                            grade_12_T1=float(plist[9].text), grade_12_T2=float(plist[10].text),
                            ).save(user)
            logs.append(f"Configured Plist Cutoffs for {plist[0].text}-{int(plist[0].text) + 1}")
        except Exception as e:
            logs.append(f"Failed to add Plist Cutoffs for {plist[0].text}-{int(plist[0].text) + 1}: {e}")

    for c in root[2]:
        try:
            if PointCodes.objects.filter(catagory=c[2].text, code=int(c[0].text)).exists():
                obj = PointCodes.objects.get(catagory=c[2].text, code=int(c[0].text))
                obj.description = c[1].text
                obj.save()
            else:
                PointCodes.objects.create(catagory=c[2].text, code=int(c[0].text), description=c[1].text)
            logs.append(
                f"Created code definition for {c[2].text}{c[0].text} with desc. {c[1].text}")
        except Exception as e:
            logs.append(f"Failed to create code definition for {c[2].text}{c[0].text} with desc. {c[1].text}: {e}")
    done = True
    close_old_connections()
Beispiel #53
0
 def IdleUnmountProc(self, q):
     while 1:
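         # Count down an idle timer for each mounted, unlocked tape and queue an
         # unmount request once the timer reaches zero; a pending mount request
         # shortens the idle time when no drive is in the Ready state.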
         try:
             try:
                 if q.get_nowait() == 'STOP':
                     break
             except Empty:
                 pass
             numreadydrives = robotdrives.objects.filter(
                 status='Ready').count()
             robotdrives_objs = robotdrives.objects.filter(status='Mounted')
             if robotdrives_objs:
                 #################################################
                 # Found mounted tape
                 nummountreq = robotQueue.objects.filter(ReqType=50,
                                                         Status=0).count()
                 for robotdrives_obj in robotdrives_objs:
                     if len(robotdrives_obj.drive_lock) == 0:
                         #################################################
                         # Tape is not locked
                         if int(
                                 robotdrives_obj.IdleTime
                         ) > 10 and nummountreq > 0 and numreadydrives == 0:
                             ##################################################################################################
                             # Pending mount request in RobotReqQueue. Set IdleTime to 10 sec.
                             robotdrives_obj.IdleTime = 10
                             robotdrives_obj.save(
                                 update_fields=['IdleTime'])
                             logger.info('Setting IdleTime to ' +
                                         str(robotdrives_obj.IdleTime) +
                                         ' sec, for tape id: ' +
                                         str(robotdrives_obj.t_id))
                         elif int(robotdrives_obj.IdleTime) == 9999:
                             ##################################################################
                             # Tape is new_unlocked(9999). Set IdleTime to 3600 sec.
                             robotdrives_obj.IdleTime = 3600
                             robotdrives_obj.save(
                                 update_fields=['IdleTime'])
                             logger.info('Setting IdleTime to ' +
                                         str(robotdrives_obj.IdleTime) +
                                         ' sec, for tape id: ' +
                                         str(robotdrives_obj.t_id))
                         if int(robotdrives_obj.IdleTime) < 9999:
                             ########################################################################
                             # Tape has already been discovered as unlocked (<9999). Count IdleTime down by 1 sec.
                             robotdrives_obj.IdleTime -= 1
                             robotdrives_obj.save(
                                 update_fields=['IdleTime'])
                             logger.debug(
                                 'Count down -1 sec setting IdleTime to ' +
                                 str(robotdrives_obj.IdleTime) +
                                 ' sec, for tape id: ' +
                                 str(robotdrives_obj.t_id))
                         if int(robotdrives_obj.IdleTime) <= 0:
                             ####################################################
                             # Tape IdleTime is zero(0). Try to unmount the tape
                             logger.info('Start to unmount tape: ' +
                                         str(robotdrives_obj.t_id))
                             robotQueue_obj = robotQueue()
                             robotQueue_obj.ReqType = 51  # Unmount
                             robotQueue_obj.ReqPurpose = 'TLD - IdleUnmountProc'
                             robotQueue_obj.Status = 0  # Pending
                             robotQueue_obj.MediumID = robotdrives_obj.t_id
                             robotQueue_obj.user = '******'
                             robotQueue_obj.save()
                             while 1:
                                 robot_objs = robot.objects.filter(
                                     t_id=robotdrives_obj.t_id,
                                     drive_id='99')
                                 if robot_objs:
                                     logger.info(
                                         'Success to unmount tape: ' +
                                         str(robotdrives_obj.t_id))
                                     break
                                 else:
                                     if not robotQueue.objects.filter(
                                             id=robotQueue_obj.id).exists():
                                         logger.warning(
                                             'Add a second unmount request for: %s'
                                             % robotdrives_obj.t_id)
                                         robotQueue_obj = robotQueue()
                                         robotQueue_obj.ReqType = 51  # Unmount
                                         robotQueue_obj.ReqPurpose = 'TLD - IdleUnmountProc'
                                         robotQueue_obj.Status = 0  # Pending
                                         robotQueue_obj.MediumID = robotdrives_obj.t_id
                                         robotQueue_obj.user = '******'
                                         robotQueue_obj.save()
                                         time.sleep(1)
                                     else:
                                         logger.debug(
                                             'Wait for unmounting of: ' +
                                             str(robotdrives_obj.t_id))
                                         time.sleep(10)
                     else:
                         #################################################
                         # Tape is locked. Set IdleTime to 9999.
                         robotdrives_obj.IdleTime = 9999
                         robotdrives_obj.save(update_fields=['IdleTime'])
                         logger.debug(
                             'Tape is locked setting IdleTime to ' +
                             str(robotdrives_obj.IdleTime) +
                             ' sec, for tape id: ' +
                             str(robotdrives_obj.t_id))
             db.close_old_connections()
             time.sleep(1)
         except:
             logger.error('Unexpected error: %s' % (str(sys.exc_info())))
             raise
     logger.info('Idle Unmount Process stopped')
Beispiel #54
0
def close_connections():
    close_old_connections()
Beispiel #55
0
    def update_backend(self, label, using):
        backend = haystack_connections[using].get_backend()
        unified_index = haystack_connections[using].get_unified_index()

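        # Rebuild the index for every model handled by this backend: batch the
        # queryset, update in-process or via a worker pool, and optionally prune
        # records that no longer exist in the database.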
        for model in haystack_get_models(label):
            try:
                index = unified_index.get_index(model)
            except NotHandled:
                if self.verbosity >= 2:
                    self.stdout.write("Skipping '%s' - no index." % model)
                continue

            if self.workers > 0:
                # workers resetting connections leads to references to models / connections getting
                # stale and having their connection disconnected from under them. Resetting before
                # the loop continues and it accesses the ORM makes it better.
                close_old_connections()

            qs = index.build_queryset(using=using,
                                      start_date=self.start_date,
                                      end_date=self.end_date)

            total = qs.count()

            if self.verbosity >= 1:
                self.stdout.write(
                    u"Indexing %d %s" %
                    (total, force_text(model._meta.verbose_name_plural)))

            batch_size = self.batchsize or backend.batch_size

            if self.workers > 0:
                ghetto_queue = []

            max_pk = None
            for start in range(0, total, batch_size):
                end = min(start + batch_size, total)

                if self.workers == 0:
                    max_pk = do_update(backend,
                                       index,
                                       qs,
                                       start,
                                       end,
                                       total,
                                       verbosity=self.verbosity,
                                       commit=self.commit,
                                       max_retries=self.max_retries,
                                       last_max_pk=max_pk)
                else:
                    ghetto_queue.append(
                        (model, start, end, total, using, self.start_date,
                         self.end_date, self.verbosity, self.commit,
                         self.max_retries))

            if self.workers > 0:
                pool = multiprocessing.Pool(self.workers)

                successful_tasks = pool.map(update_worker, ghetto_queue)

                if len(ghetto_queue) != len(successful_tasks):
                    self.stderr.write(
                        'Queued %d tasks but only %d completed' %
                        (len(ghetto_queue), len(successful_tasks)))
                    for i in ghetto_queue:
                        if i not in successful_tasks:
                            self.stderr.write('Incomplete task: %s' % repr(i))

                pool.close()
                pool.join()

            if self.remove:
                if self.start_date or self.end_date or total <= 0:
                    # They're using a reduced set, which may not incorporate
                    # all pks. Rebuild the list with everything.
                    qs = index.index_queryset().values_list('pk', flat=True)
                    database_pks = set(smart_bytes(pk) for pk in qs)

                    total = len(database_pks)
                else:
                    database_pks = set(
                        smart_bytes(pk)
                        for pk in qs.values_list('pk', flat=True))

                # Since records may still be in the search index but not the local database
                # we'll use that to create batches for processing.
                # See https://github.com/django-haystack/django-haystack/issues/1186
                index_total = SearchQuerySet(
                    using=backend.connection_alias).models(model).count()

                # Retrieve PKs from the index. Note that this cannot be a numeric range query because although
                # pks are normally numeric they can be non-numeric UUIDs or other custom values. To reduce
                # load on the search engine, we only retrieve the pk field, which will be checked against the
                # full list obtained from the database, and the id field, which will be used to delete the
                # record should it be found to be stale.
                index_pks = SearchQuerySet(
                    using=backend.connection_alias).models(model)
                index_pks = index_pks.values_list('pk', 'id')

                # We'll collect all of the record IDs which are no longer present in the database and delete
                # them after walking the entire index. This uses more memory than the incremental approach but
                # avoids needing the pagination logic below to account for both commit modes:
                stale_records = set()

                for start in range(0, index_total, batch_size):
                    upper_bound = start + batch_size

                    # If the database pk is no longer present, queue the index key for removal:
                    for pk, rec_id in index_pks[start:upper_bound]:
                        if smart_bytes(pk) not in database_pks:
                            stale_records.add(rec_id)

                if stale_records:
                    if self.verbosity >= 1:
                        self.stdout.write("  removing %d stale records." %
                                          len(stale_records))

                    for rec_id in stale_records:
                        # Since the PK was not in the database list, we'll delete the record from the search
                        # index:
                        if self.verbosity >= 2:
                            self.stdout.write("  removing %s." % rec_id)

                        backend.remove(rec_id, commit=self.commit)
Beispiel #56
0
def archive(archive_id):
    """
    Run the database archiving job
    :return:
    """
    archive_info = ArchiveConfig.objects.get(id=archive_id)
    s_ins = archive_info.src_instance
    src_db_name = archive_info.src_db_name
    src_table_name = archive_info.src_table_name
    condition = archive_info.condition
    no_delete = archive_info.no_delete
    sleep = archive_info.sleep
    mode = archive_info.mode

    # Get the character set of the table being archived
    s_engine = get_engine(s_ins)
    s_db = s_engine.schema_object.databases[src_db_name]
    s_tb = s_db.tables[src_table_name]
    s_charset = s_tb.options['charset'].value
    if s_charset is None:
        s_charset = s_db.options['charset'].value

    pt_archiver = PtArchiver()
    # Build the pt-archiver arguments
    source = fr"h={s_ins.host},u={s_ins.user},p={s_ins.password}," \
        fr"P={s_ins.port},D={src_db_name},t={src_table_name},A={s_charset}"
    args = {
        "no-version-check": True,
        "source": source,
        "where": condition,
        "progress": 5000,
        "statistics": True,
        "charset": 'utf8',
        "limit": 10000,
        "txn-size": 1000,
        "sleep": sleep
    }

    # Archive into the destination instance
    if mode == 'dest':
        d_ins = archive_info.dest_instance
        dest_db_name = archive_info.dest_db_name
        dest_table_name = archive_info.dest_table_name
        # Character set of the destination table
        d_engine = get_engine(d_ins)
        d_db = d_engine.schema_object.databases[dest_db_name]
        d_tb = d_db.tables[dest_table_name]
        d_charset = d_tb.options['charset'].value
        if d_charset is None:
            d_charset = d_db.options['charset'].value
        # dest
        dest = fr"h={d_ins.host},u={d_ins.user},p={d_ins.password},P={d_ins.port}," \
            fr"D={dest_db_name},t={dest_table_name},A={d_charset}"
        args['dest'] = dest
        if no_delete:
            args['no-delete'] = True
    elif mode == 'file':
        output_directory = os.path.join(settings.BASE_DIR,
                                        'downloads/archiver')
        os.makedirs(output_directory, exist_ok=True)
        args['file'] = f'{output_directory}/{s_ins.instance_name}-{src_db_name}-{src_table_name}.txt'
        if no_delete:
            args['no-delete'] = True
    elif mode == 'purge':
        args['purge'] = True

    # Validate the arguments
    args_check_result = pt_archiver.check_args(args)
    if args_check_result['status'] == 1:
        return JsonResponse(args_check_result)
    # Convert the arguments into a command line
    cmd_args = pt_archiver.generate_args2cmd(args, shell=True)
    # Run the command and capture its output
    select_cnt = 0
    insert_cnt = 0
    delete_cnt = 0
    with FuncTimer() as t:
        p = pt_archiver.execute_cmd(cmd_args, shell=True)
        stdout = ''
        for line in iter(p.stdout.readline, ''):
            if re.match(r'^SELECT\s(\d+)$', line, re.I):
                select_cnt = re.findall(r'^SELECT\s(\d+)$', line)
            elif re.match(r'^INSERT\s(\d+)$', line, re.I):
                insert_cnt = re.findall(r'^INSERT\s(\d+)$', line)
            elif re.match(r'^DELETE\s(\d+)$', line, re.I):
                delete_cnt = re.findall(r'^DELETE\s(\d+)$', line)
            stdout += f'{line}\n'
    statistics = stdout
    # Capture any error output
    stderr = p.stderr.read()
    if stderr:
        statistics = stdout + stderr

    # Evaluate the archive result
    select_cnt = int(select_cnt[0]) if select_cnt else 0
    insert_cnt = int(insert_cnt[0]) if insert_cnt else 0
    delete_cnt = int(delete_cnt[0]) if delete_cnt else 0
    error_info = ''
    success = True
    if stderr:
        error_info = f'Command execution error: {stderr}'
        success = False
    if mode == 'dest':
        # Source rows are deleted; the delete count must match the insert count
        if not no_delete and (insert_cnt != delete_cnt):
            error_info = f"Delete and insert counts do not match: {insert_cnt}!={delete_cnt}"
            success = False
    elif mode == 'file':
        # Source rows are deleted; the select count must match the delete count
        if not no_delete and (select_cnt != delete_cnt):
            error_info = f"Select and delete counts do not match: {select_cnt}!={delete_cnt}"
            success = False
    elif mode == 'purge':
        # Rows are purged directly; the select count must match the delete count
        if select_cnt != delete_cnt:
            error_info = f"Select and delete counts do not match: {select_cnt}!={delete_cnt}"
            success = False

    # Save the execution details back to the database
    if connection.connection and not connection.is_usable():
        close_old_connections()
    # Update the last archive time
    ArchiveConfig(
        id=archive_id,
        last_archive_time=t.end).save(update_fields=['last_archive_time'])
    # Mask the passwords before saving the log entry
    ArchiveLog.objects.create(
        archive=archive_info,
        cmd=cmd_args.replace(s_ins.password, '***').replace(
            d_ins.password, '***') if mode == 'dest' else cmd_args.replace(
                s_ins.password, '***'),
        condition=condition,
        mode=mode,
        no_delete=no_delete,
        sleep=sleep,
        select_cnt=select_cnt,
        insert_cnt=insert_cnt,
        delete_cnt=delete_cnt,
        statistics=statistics,
        success=success,
        error_info=error_info,
        start_time=t.start,
        end_time=t.end)
    if not success:
        raise Exception(f'{error_info}\n{statistics}')
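
The pair of lines checking connection.connection and connection.is_usable() before the final ORM writes is the close_old_connections() pattern this example illustrates: after a long pt-archiver run the MySQL connection may have timed out, so the dead connection is discarded and Django reconnects on the next query. A minimal reusable sketch of that guard (the helper name is my own):

from django.db import close_old_connections, connection

def ensure_usable_connection():
    """Drop the default-alias connection if it has gone away so the next query reconnects."""
    if connection.connection is not None and not connection.is_usable():
        close_old_connections()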
Beispiel #57
0
 def run(self):
     with transaction.atomic():
         Task.objects.next()
         self.locked.set()
         self.exit.wait(timeout=5)
     close_old_connections()
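
Beispiel #57 runs inside a worker thread: it takes a row lock inside transaction.atomic(), signals the main thread via the locked event, waits on the exit event, and finally calls close_old_connections() because every thread owns its own database connection. A small sketch of the same hygiene in a generic thread worker (names are illustrative):

import threading
from django.db import close_old_connections

def worker(do_db_work):
    try:
        do_db_work()                 # any ORM calls made from this thread
    finally:
        close_old_connections()     # drop this thread's obsolete or broken connections before exiting

threading.Thread(target=worker, args=(lambda: None,)).start()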
Beispiel #58
0
def goods_out(uc_shopid, template_shop_ids, batch_id, days):
    """
    Output the product-selection results for the operations staff (angela) to review
    :return:
    """
    # conn_ucenter = connections['ucenter']
    # cursor_ucenter = conn_ucenter.cursor()
    conn_ai = connections['default']
    cursor_ai = conn_ai.cursor()
    # conn_dmstore = connections['dmstore']
    # cursor_dmstore = conn_dmstore.cursor()
    print(
        "时间,门店id,门店名称,一级分类,二级分类,三级分类,四级分类,配送类型,商品编码,商品名称,商品upc,策略标签,商品角色,上品优先级排名,商品实际销售4周预期psd金额,商品实际销售4周预期psd,组内门店4周预期psd金额,组内门店4周预期psd,关联分值"
    )

    select_sql = "select * from goods_goodsselectionhistory where uc_shopid={} and batch_id='{}' and upc is not NULL and goods_role in (1,2,4,0,3)"
    select_sql_02 = "select mch_goods_code from goods_goodsselectionhistory where uc_shopid={} and batch_id='{}' and upc is NULL and goods_role=3"
    select_sql_03 = "SELECT mch_goods_code,relation_score from goods_goodsselectionhistory WHERE uc_shopid={} and batch_id='{}'"
    cursor_ai.execute(select_sql.format(uc_shopid, batch_id))
    all_data = cursor_ai.fetchall()

    # tt = []
    # for i in all_data:
    #     tt.append(i[10])

    cursor_ai.execute(select_sql_02.format(uc_shopid, batch_id))
    all_data_02 = cursor_ai.fetchall()

    cursor_ai.execute(select_sql_03.format(uc_shopid, batch_id))
    relation_score = cursor_ai.fetchall()
    relation_score_dict = {}
    for d in relation_score:
        if d[1]:
            relation_score_dict[d[0]] = d[1]
    conn_ai.close()
    tem = ""
    for data in all_data[:]:
        close_old_connections()
        conn_ucenter = connections['ucenter']
        cursor_ucenter = conn_ucenter.cursor()
        conn_dmstore = connections['dmstore']
        cursor_dmstore = conn_dmstore.cursor()
        # print('==================================================')
        # if not data[19] in [ 1, 3]:
        #     continue

        # print('data',data)
        "时间,门店id,门店名称,一级分类,二级分类,三级分类,配送类型,商品编码,商品名称,商品upc,策略标签,商品角色	,上品优先级排名,商品实际销售4周预期psd,商品实际销售4周预期psd金额,组内门店4周预期psd	组内门店4周预期psd金额	全店4周预期psd	全店4周预期psd金额"
        line_str = ""  # 一条记录
        line_str += str(data[12])  # 时间
        line_str += ","

        line_str += str(data[2])  #门店id
        line_str += ","

        shop_name_sql = "select shop_name from uc_shop a where id={}"
        # close_old_connections()
        cursor_ucenter.execute(shop_name_sql.format(uc_shopid))
        try:
            line_str += str(cursor_ucenter.fetchone()[0])  # shop name
        except:
            line_str += str('None')  # shop name
        tem = line_str
        line_str += ","

        class_type_sql = "select display_first_cat_id,display_second_cat_id,display_third_cat_id,display_fourth_cat_id,delivery_type from uc_merchant_goods where mch_goods_code='{}' and width > 0"
        cursor_ucenter.execute(class_type_sql.format(str(data[10])))
        class_type_data_all = cursor_ucenter.fetchall()
        try:
            class_type_data = class_type_data_all[0]
            # print(i)
            if len(class_type_data_all) > 1:
                for d in class_type_data_all:
                    if d[0] != '0' and d[0] != '':
                        class_type_data = d
        except:
            class_type_data = None

        # class_type_sql = "SELECT DISTINCT first_cate_id,second_cate_id,third_cate_id from goods WHERE neighbor_goods_id ={} AND corp_id=2"
        # cursor_dmstore.execute(class_type_sql.format(data[10]))
        # class_type_data = cursor_dmstore.fetchone()
        # print('分类',data[10],class_type_data)
        try:
            line_str += str(class_type_data[0])  # level-1 category
            line_str += ","
        except:
            line_str += str('None')
            line_str += ","
        try:
            line_str += str(class_type_data[1])  # level-2 category
            line_str += ","
        except:
            line_str += str('None')
            line_str += ","
        try:
            line_str += str(class_type_data[2])  # level-3 category
            line_str += ","
        except:
            line_str += str('None')
            line_str += ","

        try:
            line_str += str(class_type_data[3])  # level-4 category
            line_str += ","
        except:
            line_str += str('None')
            line_str += ","

        delivery_type_sql = "select DISTINCT a.supplier_goods_code,b.delivery_attr from uc_supplier_goods a LEFT JOIN uc_supplier_delivery b on a.delivery_type=b.delivery_code where a.supplier_id = 1 and order_status = 1 AND supplier_goods_code={}"
        cursor_ucenter.execute(delivery_type_sql.format(data[10]))

        delivery_type_dict = {1: 'daily delivery', 2: 'non-daily delivery', '1': 'daily delivery', '2': 'non-daily delivery'}
        try:
            delivery = cursor_ucenter.fetchone()[1]
            # print(data[21])
            # line_str += str(delivery_type_dict[data[21]])  # delivery type
            line_str += str(delivery_type_dict[delivery])  # delivery type
            line_str += ","
        except:
            line_str += str('None')
            line_str += ","

        line_str += str(data[10])  # goods code
        line_str += ","

        line_str += str(data[5])  # goods name
        line_str += ","

        line_str += str(data[4])  # goods upc
        line_str += ","

        # strategy label
        which_strategy_dict = {
            0: 'structural item',
            1: 'best seller',
            2: 'associated item',
            3: 'orderable from product library',
            4: 'product-spectrum selection',
            5: 'decision-tree label selection',
            6: 'manually added temporary item',
            7: 'trending item',
            101: 'retention strategy',
            201: 'previously displayed, not retained',
            301: 'orderable from product library',
            401: 'not orderable'
        }
        # print('data[19]',data[19])
        # if data[19] == 1:
        try:
            if data[15] == 1:
                line_str += str(which_strategy_dict[0])  # strategy label
            elif data[16] == 1:
                line_str += str(which_strategy_dict[1])  # strategy label
            elif data[17] == 1:
                line_str += str(which_strategy_dict[2])  # strategy label
            elif data[19] in [0, 2, 3, 4]:
                line_str += str(which_strategy_dict[data[20]])  # strategy label
            else:
                line_str += str('None')  # strategy label
        except:
            line_str += str('None')  # strategy label
        # else:
        #     line_str += str('None')  # strategy label
        line_str += ","

        # if data[19] == 1:
        #     try:
        #         line_str += str(which_strategy_dict[data[20]])  # strategy label
        #     except:
        #         line_str += str('None')  # strategy label
        # else:
        #     line_str += str('None')  # strategy label
        # line_str += ","

        # Goods role
        goods_role_dict = {0: 'protected item', 1: 'must list', 2: 'must delist', 3: 'optional listing', 4: 'optional delisting'}
        if data[19] in [0, 1, 2, 3, 4]:
            try:
                line_str += str(goods_role_dict[data[19]])  # goods role
            except:
                line_str += str('None')  # goods role
        else:
            line_str += str('None')  # goods role
        line_str += ","

        # Listing priority rank
        if data[19] == 1 or data[19] == 3:
            try:
                line_str += str(data[14])  # listing priority rank
            except:
                line_str += str('None')  # listing priority rank
        else:
            line_str += str('None')  # listing priority rank
        line_str += ","

        # now = datetime.datetime.now()
        # now_date = now.strftime('%Y-%m-%d %H:%M:%S')

        now_date = data[12].strftime('%Y-%m-%d %H:%M:%S')

        week_ago = (
            data[12] -
            datetime.timedelta(days=days)).strftime('%Y-%m-%d %H:%M:%S')
        # expected 4-week PSD (actual sales), expected 4-week PSD amount (actual sales),
        # group shops 4-week expected PSD / PSD amount, whole-store 4-week expected PSD / PSD amount
        psd_sql = "select sum(p.amount),g.first_cate_id,g.second_cate_id,g.third_cate_id,g.price,p.name from dmstore.payment_detail as p left join dmstore.goods as g on p.goods_id=g.id where p.create_time > '{}' and p.create_time < '{}' and p.shop_id ={} and g.neighbor_goods_id={};"
        # close_old_connections()
        cursor_dmstore.execute(
            psd_sql.format(week_ago, now_date, data[1], data[10]))
        # print(psd_sql.format(week_ago,now_date,data[1],data[10]))
        psd_data = cursor_dmstore.fetchone()
        # print('psd_data',psd_data)
        if psd_data[0]:
            line_str += str(psd_data[0] / days)  # PSD amount
            line_str += ","
            try:
                line_str += str(psd_data[0] / (days * psd_data[4]))  # PSD
            except:
                line_str += str(0)  # PSD
            line_str += ","
        else:
            line_str += str(0)  # PSD amount
            line_str += ","
            line_str += str(0)  # PSD
            line_str += ","

        psd_sql_shops = "select sum(p.amount), COUNT(DISTINCT shop_id),g.price,p.name from dmstore.payment_detail as p left join dmstore.goods as g on p.goods_id=g.id where p.create_time > '{}' and p.create_time < '{}' and p.shop_id in ({}) and g.neighbor_goods_id={};"
        # close_old_connections()
        cursor_dmstore.execute(
            psd_sql_shops.format(week_ago, now_date, template_shop_ids,
                                 data[10]))
        psd_data_shops = cursor_dmstore.fetchone()
        # print('psd_data_shops',psd_data_shops)
        if psd_data_shops[0]:
            line_str += str(psd_data_shops[0] /
                            (days * psd_data_shops[1]))  # PSD amount, same group
            line_str += ","
            try:
                line_str += str(
                    psd_data_shops[0] /
                    (days * psd_data_shops[1] * psd_data_shops[2]))  # PSD, same group
            except:
                line_str += str(0)
            line_str += ","
        else:
            line_str += str(0)  # PSD amount, same group
            line_str += ","
            line_str += str(0)  # PSD, same group
            line_str += ","

        try:
            line_str += str(data[22])
        except:
            line_str += str(0)
        print(line_str)
        conn_ucenter.close()
        conn_dmstore.close()

    tem_mch_list = [i[0] for i in all_data_02]
    conn_ucenter = connections['ucenter']
    cursor_ucenter = conn_ucenter.cursor()

    sql2 = "SELECT mch_goods_code,upc ,goods_name,display_first_cat_id,display_second_cat_id,display_third_cat_id,display_fourth_cat_id,delivery_type from uc_merchant_goods where mch_goods_code ='{}'"
    # d = cursor_ucenter.fetchall()
    # print("订货0的mch的len", len(tem_mch_list))
    # print("订货0的len", len(d))

    for t in tem_mch_list[:]:
        try:
            cursor_ucenter.execute(sql2.format(str(t)))
            data = cursor_ucenter.fetchall()
            i = data[0]
            # print(i)
            if len(data) > 1:
                for d in data:
                    if d[3] != '0' and d[3] != '':
                        i = d

            delivery_type_sql = "select DISTINCT a.supplier_goods_code,b.delivery_attr from uc_supplier_goods a LEFT JOIN uc_supplier_delivery b on a.delivery_type=b.delivery_code where a.supplier_id = 1 and order_status = 1 AND supplier_goods_code={}"
            cursor_ucenter.execute(delivery_type_sql.format(t))
            delivery_type_dict = {1: 'daily delivery', 2: 'non-daily delivery', '1': 'daily delivery', '2': 'non-daily delivery'}
            delivery_str = ''
            try:
                delivery = cursor_ucenter.fetchone()[1]
                delivery_str = str(delivery_type_dict[delivery])  # delivery type
            except:
                delivery_str = str('None')
            try:
                relation_score_value = relation_score_dict[i[0]]
            except:
                relation_score_value = 0

            print("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}".format(
                tem, i[3], i[4], i[5], i[6], delivery_str, i[0], i[2], i[1],
                'orderable from product library', 'optional listing', 0, None, None, None, None,
                relation_score_value))
        except:
            continue
    conn_ucenter.close()
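
The loop over all_data above calls close_old_connections() at the top of every iteration and then takes fresh cursors from the 'ucenter' and 'dmstore' aliases, so a long report run does not fail on MySQL connections that have gone away. A condensed sketch of that per-iteration pattern, with the query parameterized instead of built via str.format (the alias and SQL here are placeholders):

from django.db import connections, close_old_connections

def fetch_rows(alias, sql, params=()):
    close_old_connections()                      # discard connections past CONN_MAX_AGE or broken
    with connections[alias].cursor() as cursor:  # fresh cursor on the named database alias
        cursor.execute(sql, params)
        return cursor.fetchall()

# e.g. fetch_rows('ucenter', "select shop_name from uc_shop where id = %s", [uc_shopid])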
Beispiel #59
0
    def handle(self, *args, **options):
        start = datetime.now()  # Measure the time it takes to run the script.
        day = options['date']
        if not day:
            day = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')

        sep = options['separator']

        if options['stats_source'] == 's3':
            filepath = 's3://' + '/'.join([
                settings.AWS_STATS_S3_BUCKET, settings.AWS_STATS_S3_PREFIX,
                'download_counts', day, '000000_0'
            ])

        elif options['stats_source'] == 'file':
            folder = options['folder_name']
            folder = path.join(settings.TMP_PATH, folder, day)
            filepath = path.join(folder, 'download_counts.hive')

        # Make sure we're not trying to update with mismatched data.
        if get_date(filepath, sep) != day:
            raise CommandError('%s file contains data for another day' %
                               filepath)

        # First, make sure we don't have any existing counts for the same day,
        # or we would simply add the same data on top of the existing counts.
        DownloadCount.objects.filter(date=day).delete()

        # Memoize the files to addon relations and the DownloadCounts.
        download_counts = {}

        # Perf: preload all the files and slugs once and for all.
        # This builds two dicts:
        # - One where each key (the file_id we get from the hive query) has
        #   the addon_id as value.
        # - One where each key (the add-on slug) has the addon_id as value.
        files_to_addon = dict(
            File.objects.values_list('id', 'version__addon_id'))
        slugs_to_addon = dict(
            Addon.unfiltered.exclude(status=amo.STATUS_NULL).values_list(
                'slug', 'id'))

        # Only accept valid sources, which are constants. The source must
        # either be exactly one of the "full" valid sources, or prefixed by one
        # of the "prefix" valid sources.
        fulls = amo.DOWNLOAD_SOURCES_FULL
        prefixes = amo.DOWNLOAD_SOURCES_PREFIX

        count_file = get_stats_data(filepath)
        for index, line in enumerate(count_file):
            if index and (index % 1000000) == 0:
                log.info('Processed %s lines' % index)

            splitted = line[:-1].split(sep)

            if len(splitted) != 4:
                log.debug('Badly formatted row: %s' % line)
                continue

            day, counter, id_or_slug, src = splitted
            try:
                # Clean up data.
                id_or_slug = id_or_slug.strip()
                counter = int(counter)
            except ValueError:
                # Ignore completely invalid data.
                continue

            if id_or_slug.strip().isdigit():
                # If it's a digit, then it should be a file id.
                try:
                    id_or_slug = int(id_or_slug)
                except ValueError:
                    continue

                # Does this file exist?
                if id_or_slug in files_to_addon:
                    addon_id = files_to_addon[id_or_slug]
                # Maybe it's an add-on id?
                elif id_or_slug in files_to_addon.values():
                    addon_id = id_or_slug
                else:
                    # It's an integer we don't recognize, ignore the row.
                    continue
            else:
                # It's probably a slug.
                if id_or_slug in slugs_to_addon:
                    addon_id = slugs_to_addon[id_or_slug]
                else:
                    # We've exhausted all possibilities, ignore this row.
                    continue

            if not is_valid_source(src, fulls=fulls, prefixes=prefixes):
                continue

            # Memoize the DownloadCount.
            if addon_id in download_counts:
                dc = download_counts[addon_id]
            else:
                dc = DownloadCount(date=day, addon_id=addon_id, count=0)
                download_counts[addon_id] = dc

            # We can now fill the DownloadCount object.
            dc.count += counter
            dc.sources = update_inc(dc.sources, src, counter)

        # Close all old connections in this thread before we start creating the
        # `DownloadCount` values.
        # https://github.com/mozilla/addons-server/issues/6886
        # If the calculation above takes too long we may run into `wait_timeout`
        # problems, and Django doesn't reconnect properly (potentially because of
        # misconfiguration).
        # Django will reconnect properly once it notices that all connections
        # are closed.
        close_old_connections()

        # Create in bulk: this is much faster.
        DownloadCount.objects.bulk_create(download_counts.values(), 100)

        log.info('Processed a total of %s lines' % (index + 1))
        log.debug('Total processing time: %s' % (datetime.now() - start))

        if options['stats_source'] == 'file':
            # Clean up file.
            log.debug('Deleting {path}'.format(path=filepath))
            unlink(filepath)
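
The comment before close_old_connections() above is the crux of this example: when the CPU-bound parsing phase can outlast MySQL's wait_timeout, dropping the old connection right before the first write lets Django open a fresh one for the bulk_create. A minimal sketch of that close-then-write step (function and argument names are illustrative):

from django.db import close_old_connections

def flush_in_fresh_connection(model, rows, batch_size=100):
    """Bulk-create `rows` after dropping any connection that may have timed out."""
    close_old_connections()                 # Django reconnects lazily on the next query
    model.objects.bulk_create(rows, batch_size)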
Beispiel #60
0
 def _cb(self, status):
     # When using MySQL we need to reconnect, because this is running in a new process.
     db.close_old_connections()
     t = Task.objects.get(pk=self._task_id)
     t.status = status
     t.save()
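
The comment in _cb makes the same point for multiprocessing: a connection inherited from the parent process is not usable in the child, so db.close_old_connections() runs before the first query there. A minimal sketch of that surrounding pattern (the task id and function names are hypothetical):

import multiprocessing
from django import db

def run_in_child(task_id, status):
    db.close_old_connections()   # drop the connection inherited across fork; Django reconnects
    # ... update the Task row here with a fresh connection ...

p = multiprocessing.Process(target=run_in_child, args=(42, "done"))
p.start()
p.join()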